diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/bert_generation/__init__.py b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/bert_generation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..14cf8bb5879320c3838808bea5715ac06b046fd9 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/bert_generation/__init__.py @@ -0,0 +1,71 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available + + +_import_structure = {"configuration_bert_generation": ["BertGenerationConfig"]} + +try: + if not is_sentencepiece_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["tokenization_bert_generation"] = ["BertGenerationTokenizer"] + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_bert_generation"] = [ + "BertGenerationDecoder", + "BertGenerationEncoder", + "BertGenerationPreTrainedModel", + "load_tf_weights_in_bert_generation", + ] + + +if TYPE_CHECKING: + from .configuration_bert_generation import BertGenerationConfig + + try: + if not is_sentencepiece_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .tokenization_bert_generation import BertGenerationTokenizer + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_bert_generation import ( + BertGenerationDecoder, + BertGenerationEncoder, + BertGenerationPreTrainedModel, + load_tf_weights_in_bert_generation, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/__init__.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..570b0e101e9f2669b409b2871f16b40b24aba4fa Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/configuration_bert_generation.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/configuration_bert_generation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..690ed04a6d3b83d13f77fc6bc7770b45ed71ff26 Binary files /dev/null and 
b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/configuration_bert_generation.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/modeling_bert_generation.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/modeling_bert_generation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..455557c5bfa8005db5d7a8dede38ed6b72c73daa Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/modeling_bert_generation.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/tokenization_bert_generation.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/tokenization_bert_generation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e45073fb78f8930144ea2a2e543172ca62c26e9 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/tokenization_bert_generation.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/bert_generation/configuration_bert_generation.py b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/bert_generation/configuration_bert_generation.py new file mode 100644 index 0000000000000000000000000000000000000000..841aec5c0fb7acc3fb651aa213bf4cf2e1a6a581 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/bert_generation/configuration_bert_generation.py @@ -0,0 +1,124 @@ +# coding=utf-8 +# Copyright 2020 The Google AI Language Team Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" BertGeneration model configuration""" + +from ...configuration_utils import PretrainedConfig + + +class BertGenerationConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`BertGenerationPreTrainedModel`]. It is used to + instantiate a BertGeneration model according to the specified arguments, defining the model architecture. + Instantiating a configuration with the defaults will yield a similar configuration to that of the BertGeneration + [google/bert_for_seq_generation_L-24_bbc_encoder](https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder) + architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + vocab_size (`int`, *optional*, defaults to 50358): + Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`BertGeneration`]. 
+ hidden_size (`int`, *optional*, defaults to 1024): + Dimensionality of the encoder layers and the pooler layer. + num_hidden_layers (`int`, *optional*, defaults to 24): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 16): + Number of attention heads for each attention layer in the Transformer encoder. + intermediate_size (`int`, *optional*, defaults to 4096): + Dimensionality of the "intermediate" (often called feed-forward) layer in the Transformer encoder. + hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"silu"` and `"gelu_new"` are supported. + hidden_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout ratio for the attention probabilities. + max_position_embeddings (`int`, *optional*, defaults to 512): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + layer_norm_eps (`float`, *optional*, defaults to 1e-12): + The epsilon used by the layer normalization layers. + pad_token_id (`int`, *optional*, defaults to 0): + Padding token id. + bos_token_id (`int`, *optional*, defaults to 2): + Beginning of stream token id. + eos_token_id (`int`, *optional*, defaults to 1): + End of stream token id. + position_embedding_type (`str`, *optional*, defaults to `"absolute"`): + Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For + positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to + [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155). + For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models + with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658). + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). Only + relevant if `config.is_decoder=True`. 
+ + Examples: + + ```python + >>> from transformers import BertGenerationConfig, BertGenerationEncoder + + >>> # Initializing a BertGeneration config + >>> configuration = BertGenerationConfig() + + >>> # Initializing a model (with random weights) from the config + >>> model = BertGenerationEncoder(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "bert-generation" + + def __init__( + self, + vocab_size=50358, + hidden_size=1024, + num_hidden_layers=24, + num_attention_heads=16, + intermediate_size=4096, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=512, + initializer_range=0.02, + layer_norm_eps=1e-12, + pad_token_id=0, + bos_token_id=2, + eos_token_id=1, + position_embedding_type="absolute", + use_cache=True, + **kwargs, + ): + super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) + + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.hidden_act = hidden_act + self.intermediate_size = intermediate_size + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.initializer_range = initializer_range + self.layer_norm_eps = layer_norm_eps + self.position_embedding_type = position_embedding_type + self.use_cache = use_cache diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/bert_generation/modeling_bert_generation.py b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/bert_generation/modeling_bert_generation.py new file mode 100644 index 0000000000000000000000000000000000000000..b7250f6f7b926fc21102007ce34568d9276615f9 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/bert_generation/modeling_bert_generation.py @@ -0,0 +1,1008 @@ +# coding=utf-8 +# Copyright 2020 The Google AI Language Team Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""PyTorch BERT model specific for generation.""" + +import math +from typing import Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import CrossEntropyLoss + +from ...activations import ACT2FN +from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions +from ...modeling_utils import PreTrainedModel +from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer +from ...utils import ( + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from .configuration_bert_generation import BertGenerationConfig + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "google/bert_for_seq_generation_L-24_bbc_encoder" +_CONFIG_FOR_DOC = "BertGenerationConfig" + + +# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->BertGeneration +class BertGenerationSelfOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->BertGeneration +class BertGenerationSelfAttention(nn.Module): + def __init__(self, config, position_embedding_type=None): + super().__init__() + if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): + raise ValueError( + f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " + f"heads ({config.num_attention_heads})" + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + self.position_embedding_type = position_embedding_type or getattr( + config, "position_embedding_type", "absolute" + ) + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + self.max_position_embeddings = config.max_position_embeddings + self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) + + self.is_decoder = config.is_decoder + + def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + 
output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + mixed_query_layer = self.query(hidden_states) + + # If this is instantiated as a cross-attention module, the keys + # and values come from an encoder; the attention mask needs to be + # such that the encoder's padding tokens are not attended to. + is_cross_attention = encoder_hidden_states is not None + + if is_cross_attention and past_key_value is not None: + # reuse k,v, cross_attentions + key_layer = past_key_value[0] + value_layer = past_key_value[1] + attention_mask = encoder_attention_mask + elif is_cross_attention: + key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) + value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) + attention_mask = encoder_attention_mask + elif past_key_value is not None: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + key_layer = torch.cat([past_key_value[0], key_layer], dim=2) + value_layer = torch.cat([past_key_value[1], value_layer], dim=2) + else: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + + query_layer = self.transpose_for_scores(mixed_query_layer) + + use_cache = past_key_value is not None + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_layer, value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. 
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + query_length, key_length = query_layer.shape[2], key_layer.shape[2] + if use_cache: + position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view( + -1, 1 + ) + else: + position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) + position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) + distance = position_ids_l - position_ids_r + + positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) + positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility + + if self.position_embedding_type == "relative_key": + relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores + elif self.position_embedding_type == "relative_key_query": + relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in BertGenerationModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.functional.softmax(attention_scores, dim=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. 
+ attention_probs = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = torch.matmul(attention_probs, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(new_context_layer_shape) + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + + if self.is_decoder: + outputs = outputs + (past_key_value,) + return outputs + + +# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->BertGeneration +class BertGenerationAttention(nn.Module): + def __init__(self, config, position_embedding_type=None): + super().__init__() + self.self = BertGenerationSelfAttention(config, position_embedding_type=position_embedding_type) + self.output = BertGenerationSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads + ) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len(heads) + self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + self_outputs = self.self( + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + attention_output = self.output(self_outputs[0], hidden_states) + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->BertGeneration +class BertGenerationIntermediate(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->BertGeneration +class BertGenerationOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, 
hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->BertGeneration +class BertGenerationLayer(nn.Module): + def __init__(self, config): + super().__init__() + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = BertGenerationAttention(config) + self.is_decoder = config.is_decoder + self.add_cross_attention = config.add_cross_attention + if self.add_cross_attention: + if not self.is_decoder: + raise ValueError(f"{self} should be used as a decoder model if cross attention is added") + self.crossattention = BertGenerationAttention(config, position_embedding_type="absolute") + self.intermediate = BertGenerationIntermediate(config) + self.output = BertGenerationOutput(config) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + past_key_value=self_attn_past_key_value, + ) + attention_output = self_attention_outputs[0] + + # if decoder, the last output is tuple of self-attn cache + if self.is_decoder: + outputs = self_attention_outputs[1:-1] + present_key_value = self_attention_outputs[-1] + else: + outputs = self_attention_outputs[1:] # add self attentions if we output attention weights + + cross_attn_present_key_value = None + if self.is_decoder and encoder_hidden_states is not None: + if not hasattr(self, "crossattention"): + raise ValueError( + f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" + " by setting `config.add_cross_attention=True`" + ) + + # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple + cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None + cross_attention_outputs = self.crossattention( + attention_output, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + cross_attn_past_key_value, + output_attentions, + ) + attention_output = cross_attention_outputs[0] + outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights + + # add cross-attn cache to positions 3,4 of present_key_value tuple + cross_attn_present_key_value = cross_attention_outputs[-1] + present_key_value = present_key_value + cross_attn_present_key_value + + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output + ) + outputs = (layer_output,) + outputs + + # if decoder, return the attn key/values as the last output + if self.is_decoder: + outputs = outputs + (present_key_value,) + + return outputs + + def feed_forward_chunk(self, attention_output): + 
intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + +# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->BertGeneration +class BertEncoder(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.layer = nn.ModuleList([BertGenerationLayer(config) for _ in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = False, + output_hidden_states: Optional[bool] = False, + return_dict: Optional[bool] = True, + ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." + ) + use_cache = False + + next_decoder_cache = () if use_cache else None + for i, layer_module in enumerate(self.layer): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_head_mask = head_mask[i] if head_mask is not None else None + past_key_value = past_key_values[i] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + layer_module.__call__, + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + + hidden_states = layer_outputs[0] + if use_cache: + next_decoder_cache += (layer_outputs[-1],) + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + if self.config.add_cross_attention: + all_cross_attentions = all_cross_attentions + (layer_outputs[2],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [ + hidden_states, + next_decoder_cache, + all_hidden_states, + all_self_attentions, + all_cross_attentions, + ] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +def load_tf_weights_in_bert_generation( + model, tf_hub_path, model_class, is_encoder_named_decoder=False, is_encoder=False +): + try: + import numpy as np + import tensorflow.compat.v1 as tf + import tensorflow_hub as hub + import tensorflow_text # noqa: F401 + + tf.disable_eager_execution() + except ImportError: + logger.error( + "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see " + "https://www.tensorflow.org/install/ for installation instructions." + ) + raise + tf_model = hub.Module(tf_hub_path) + init = tf.global_variables_initializer() + with tf.Session() as sess: + init.run() + all_variables = tf_model.variable_map + keep_track_variables = all_variables.copy() + for key in list(all_variables.keys()): + if "global" in key: + logger.info(f"Skipping {key}...") + continue + if not is_encoder: + model_pointer = getattr(model, model_class) + else: + model_pointer = model + is_embedding = False + logger.info(f"Trying to match {key}...") + # remove start_string = "module/bert/" + sub_layers = key.split("/")[2:] + if is_encoder_named_decoder and sub_layers[0] == "encoder": + logger.info(f"Skipping encoder layer {key} for decoder") + continue + if is_encoder and sub_layers[0] == "decoder": + logger.info(f"Skipping decoder layer {key} for encoder") + continue + for i, sub_layer in enumerate(sub_layers): + if sub_layer == "embeddings": + is_embedding = True + elif sub_layer == "LayerNorm": + is_embedding = False + if "layer" in sub_layer: + model_pointer = model_pointer.layer[int(sub_layer.split("_")[-1])] + elif sub_layer in ["kernel", "gamma"]: + model_pointer = model_pointer.weight + elif sub_layer == "beta": + model_pointer = model_pointer.bias + elif sub_layer == "encdec": + model_pointer = model_pointer.crossattention.self + elif sub_layer == "encdec_output": + model_pointer = model_pointer.crossattention.output + elif is_encoder_named_decoder and sub_layer == "decoder": + model_pointer = model_pointer.encoder + else: + if sub_layer == "attention" and "encdec" in sub_layers[i + 1]: + continue + try: + model_pointer = getattr(model_pointer, sub_layer) + except AttributeError: + logger.info(f"Skipping to initialize {key} at {sub_layer}...") + raise AttributeError + + array = np.asarray(sess.run(all_variables[key])) + if not is_embedding: + logger.info(f"Transposing numpy weight of shape {array.shape} for {key}") + array = np.transpose(array) + else: + model_pointer = model_pointer.weight + + if model_pointer.shape != array.shape: + raise ValueError(f"Pointer shape {model_pointer.shape} and array shape {array.shape} mismatched") + logger.info(f"Initialize PyTorch weight {key}") + + model_pointer.data = torch.from_numpy(array.astype(np.float32)) + keep_track_variables.pop(key, None) + + logger.info(f"Weights not copied to PyTorch model: {', '.join(keep_track_variables.keys())}") + return model + + +class BertGenerationEmbeddings(nn.Module): + """Construct the embeddings from word and position embeddings.""" + + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.register_buffer( + "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False + ) + + def forward(self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0): + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] + + 
seq_length = input_shape[1] + + if position_ids is None: + position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + position_embeddings = self.position_embeddings(position_ids) + + embeddings = inputs_embeds + position_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +class BertGenerationPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = BertGenerationConfig + base_model_prefix = "bert" + supports_gradient_checkpointing = True + + def _init_weights(self, module): + """Initialize the weights""" + if isinstance(module, nn.Linear): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + +BERT_GENERATION_START_DOCSTRING = r""" + + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`BertGenerationConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +BERT_GENERATION_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `({0})`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and + [`PreTrainedTokenizer.encode`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + position_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. 
+ + inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare BertGeneration model transformer outputting raw hidden-states without any specific head on top.", + BERT_GENERATION_START_DOCSTRING, +) +class BertGenerationEncoder(BertGenerationPreTrainedModel): + """ + + The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of + cross-attention is added between the self-attention layers, following the architecture described in [Attention is + all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, + Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. + + This model should be used when leveraging Bert or Roberta checkpoints for the [`EncoderDecoderModel`] class as + described in [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) + by Sascha Rothe, Shashi Narayan, and Aliaksei Severyn. + + To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set + to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and + `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass. + """ + + def __init__(self, config): + super().__init__(config) + self.config = config + + self.embeddings = BertGenerationEmbeddings(config) + self.encoder = BertEncoder(config) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + @add_start_docstrings_to_model_forward(BERT_GENERATION_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=BaseModelOutputWithPastAndCrossAttentions, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: + r""" + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: `1` for + tokens that are NOT MASKED, `0` for MASKED tokens. + past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). 
+ """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if self.config.is_decoder: + use_cache = use_cache if use_cache is not None else self.config.use_cache + else: + use_cache = False + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) + input_shape = input_ids.size() + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + batch_size, seq_length = input_shape + device = input_ids.device if input_ids is not None else inputs_embeds.device + + # past_key_values_length + past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 + + if attention_mask is None: + attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. + extended_attention_mask = None + if not use_cache: + extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) + + # If a 2D or 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + if self.config.is_decoder and encoder_hidden_states is not None: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() + encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) + if encoder_attention_mask is None: + encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) + encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) + else: + encoder_extended_attention_mask = None + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) + + embedding_output = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + inputs_embeds=inputs_embeds, + past_key_values_length=past_key_values_length, + ) + + encoder_outputs = self.encoder( + embedding_output, + attention_mask=extended_attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_extended_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = encoder_outputs[0] + + if not return_dict: + return (sequence_output,) + encoder_outputs[1:] + + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=sequence_output, + past_key_values=encoder_outputs.past_key_values, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, 
+ cross_attentions=encoder_outputs.cross_attentions, + ) + + +class BertGenerationOnlyLMHead(nn.Module): + def __init__(self, config): + super().__init__() + self.decoder = nn.Linear(config.hidden_size, config.vocab_size) + self.bias = nn.Parameter(torch.zeros(config.vocab_size)) + self.decoder.bias = self.bias + + def forward(self, hidden_states): + logits = self.decoder(hidden_states) + return logits + + def _tie_weights(self): + # To tie those two weights if they get disconnected (on TPU or when the bias is resized) + self.bias = self.decoder.bias + + +@add_start_docstrings( + """BertGeneration Model with a `language modeling` head on top for CLM fine-tuning.""", + BERT_GENERATION_START_DOCSTRING, +) +class BertGenerationDecoder(BertGenerationPreTrainedModel): + _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"] + + def __init__(self, config): + super().__init__(config) + + if not config.is_decoder: + logger.warning("If you want to use `BertGenerationDecoder` as a standalone, add `is_decoder=True.`") + + self.bert = BertGenerationEncoder(config) + self.lm_head = BertGenerationOnlyLMHead(config) + + # Initialize weights and apply final processing + self.post_init() + + def get_output_embeddings(self): + return self.lm_head.decoder + + def set_output_embeddings(self, new_embeddings): + self.lm_head.decoder = new_embeddings + + @add_start_docstrings_to_model_forward(BERT_GENERATION_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: + r""" + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the left-to-right language modeling loss (next word prediction). 
Indices should be in + `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are + ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` + past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, BertGenerationDecoder, BertGenerationConfig + >>> import torch + + >>> tokenizer = AutoTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") + >>> config = BertGenerationConfig.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") + >>> config.is_decoder = True + >>> model = BertGenerationDecoder.from_pretrained( + ... "google/bert_for_seq_generation_L-24_bbc_encoder", config=config + ... ) + + >>> inputs = tokenizer("Hello, my dog is cute", return_token_type_ids=False, return_tensors="pt") + >>> outputs = model(**inputs) + + >>> prediction_logits = outputs.logits + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + if labels is not None: + use_cache = False + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + prediction_scores = self.lm_head(sequence_output) + + lm_loss = None + if labels is not None: + # we are doing next-token prediction; shift prediction scores and input ids by one + shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() + labels = labels[:, 1:].contiguous() + loss_fct = CrossEntropyLoss() + lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) + + if not return_dict: + output = (prediction_scores,) + outputs[1:] + return ((lm_loss,) + output) if lm_loss is not None else output + + return CausalLMOutputWithCrossAttentions( + loss=lm_loss, + logits=prediction_scores, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs): + input_shape = input_ids.shape + # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly + if attention_mask is None: + attention_mask = input_ids.new_ones(input_shape) + + # cut decoder_input_ids if past_key_values is used + if past_key_values is not None: + past_length = past_key_values[0][0].shape[2] + 
+ # Some generation methods already pass only the last input ID + if input_ids.shape[1] > past_length: + remove_prefix_length = past_length + else: + # Default to old behavior: keep only final ID + remove_prefix_length = input_ids.shape[1] - 1 + + input_ids = input_ids[:, remove_prefix_length:] + + return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values} + + def _reorder_cache(self, past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + reordered_past += ( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), + ) + return reordered_past diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/bert_generation/tokenization_bert_generation.py b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/bert_generation/tokenization_bert_generation.py new file mode 100644 index 0000000000000000000000000000000000000000..3b6298fcbd8f6e054f7fac417095b188b070f472 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/bert_generation/tokenization_bert_generation.py @@ -0,0 +1,185 @@ +# coding=utf-8 +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Tokenization class for model BertGeneration.""" + + +import os +from shutil import copyfile +from typing import Any, Dict, List, Optional, Tuple + +import sentencepiece as spm + +from ...tokenization_utils import PreTrainedTokenizer +from ...utils import logging + + +logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"} + +PRETRAINED_VOCAB_FILES_MAP = { + "vocab_file": { + "bert_for_seq_generation": ( + "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model" + ), + } +} + +PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512} + + +class BertGenerationTokenizer(PreTrainedTokenizer): + """ + Construct a BertGeneration tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). + + This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to + this superclass for more information regarding those methods. + + Args: + vocab_file (`str`): + [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that + contains the vocabulary necessary to instantiate a tokenizer. + bos_token (`str`, *optional*, defaults to `"<s>"`): + The begin of sequence token. + eos_token (`str`, *optional*, defaults to `"</s>"`): + The end of sequence token. + unk_token (`str`, *optional*, defaults to `"<unk>"`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + pad_token (`str`, *optional*, defaults to `"<pad>"`): + The token used for padding, for example when batching sequences of different lengths. 
+ sep_token (`str`, *optional*, defaults to `"<::::>"`): + The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for + sequence classification or for a text and a question for question answering. It is also used as the last + token of a sequence built with special tokens. + sp_model_kwargs (`dict`, *optional*): + Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for + SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, + to set: + + - `enable_sampling`: Enable subword regularization. + - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. + + - `nbest_size = {0,1}`: No sampling is performed. + - `nbest_size > 1`: samples from the nbest_size results. + - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) + using forward-filtering-and-backward-sampling algorithm. + + - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for + BPE-dropout. + """ + + vocab_files_names = VOCAB_FILES_NAMES + pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP + max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES + prefix_tokens: List[int] = [] + model_input_names = ["input_ids", "attention_mask"] + + def __init__( + self, + vocab_file, + bos_token="<s>", + eos_token="</s>", + unk_token="<unk>", + pad_token="<pad>", + sep_token="<::::>", + sp_model_kwargs: Optional[Dict[str, Any]] = None, + **kwargs, + ) -> None: + self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs + + self.vocab_file = vocab_file + + self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) + self.sp_model.Load(vocab_file) + + # Add extra_ids to the special token list + super().__init__( + bos_token=bos_token, + eos_token=eos_token, + unk_token=unk_token, + pad_token=pad_token, + sep_token=sep_token, + sp_model_kwargs=self.sp_model_kwargs, + **kwargs, + ) + + @property + def vocab_size(self): + return self.sp_model.get_piece_size() + + def get_vocab(self): + vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} + vocab.update(self.added_tokens_encoder) + return vocab + + def __getstate__(self): + state = self.__dict__.copy() + state["sp_model"] = None + return state + + def __setstate__(self, d): + self.__dict__ = d + + # for backward compatibility + if not hasattr(self, "sp_model_kwargs"): + self.sp_model_kwargs = {} + + self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) + self.sp_model.Load(self.vocab_file) + + def _tokenize(self, text: str) -> List[str]: + """Take as input a string and return a list of strings (tokens) for words/sub-words""" + return self.sp_model.encode(text, out_type=str) + + def _convert_token_to_id(self, token): + """Converts a token (str) in an id using the vocab.""" + return self.sp_model.piece_to_id(token) + + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + token = self.sp_model.IdToPiece(index) + return token + + def convert_tokens_to_string(self, tokens): + """Converts a sequence of tokens (string) in a single string.""" + current_sub_tokens = [] + out_string = "" + for token in tokens: + # make sure that special tokens are not decoded using sentencepiece model + if token in self.all_special_tokens: + out_string += self.sp_model.decode(current_sub_tokens) + token + current_sub_tokens = [] + else: + current_sub_tokens.append(token) + 
out_string += self.sp_model.decode(current_sub_tokens) + return out_string.strip() + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + if not os.path.isdir(save_directory): + logger.error(f"Vocabulary path ({save_directory}) should be a directory") + return + out_vocab_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] + ) + + if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): + copyfile(self.vocab_file, out_vocab_file) + elif not os.path.isfile(self.vocab_file): + with open(out_vocab_file, "wb") as fi: + content_spiece_model = self.sp_model.serialized_model_proto() + fi.write(content_spiece_model) + + return (out_vocab_file,) diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/__init__.py b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5622ab70de642935e75967c9121355cb65bc2c8f --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/__init__.py @@ -0,0 +1,138 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
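For orientation, here is a minimal usage sketch of the SentencePiece-backed `BertGenerationTokenizer` defined above. It is only a sketch: it assumes `sentencepiece` is installed and that a local SentencePiece model exists at the placeholder path `spiece.model`.

```python
# Illustrative sketch only: "spiece.model" is a placeholder path to a local SentencePiece model.
from transformers import BertGenerationTokenizer

tokenizer = BertGenerationTokenizer("spiece.model")  # __init__ loads the file via sp_model.Load
ids = tokenizer("Hello world").input_ids             # _tokenize -> SentencePiece pieces -> ids
print(tokenizer.convert_ids_to_tokens(ids))          # pieces are model dependent, e.g. ['▁Hello', '▁world']
print(tokenizer.decode(ids))                         # convert_tokens_to_string stitches pieces back together
tokenizer.save_vocabulary(".")                       # copies spiece.model, or re-serializes it from memory
```

As `save_vocabulary` above shows, the vocabulary file is either copied verbatim or rebuilt from `sp_model.serialized_model_proto()` when the original file is no longer on disk.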
+from typing import TYPE_CHECKING + +from ...utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_flax_available, + is_tf_available, + is_tokenizers_available, + is_torch_available, +) + + +_import_structure = { + "configuration_blenderbot_small": [ + "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP", + "BlenderbotSmallConfig", + "BlenderbotSmallOnnxConfig", + ], + "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"], +} + +try: + if not is_tokenizers_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"] + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_blenderbot_small"] = [ + "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST", + "BlenderbotSmallForCausalLM", + "BlenderbotSmallForConditionalGeneration", + "BlenderbotSmallModel", + "BlenderbotSmallPreTrainedModel", + ] + +try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_tf_blenderbot_small"] = [ + "TFBlenderbotSmallForConditionalGeneration", + "TFBlenderbotSmallModel", + "TFBlenderbotSmallPreTrainedModel", + ] + +try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_flax_blenderbot_small"] = [ + "FlaxBlenderbotSmallForConditionalGeneration", + "FlaxBlenderbotSmallModel", + "FlaxBlenderbotSmallPreTrainedModel", + ] + +if TYPE_CHECKING: + from .configuration_blenderbot_small import ( + BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP, + BlenderbotSmallConfig, + BlenderbotSmallOnnxConfig, + ) + from .tokenization_blenderbot_small import BlenderbotSmallTokenizer + + try: + if not is_tokenizers_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_blenderbot_small import ( + BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST, + BlenderbotSmallForCausalLM, + BlenderbotSmallForConditionalGeneration, + BlenderbotSmallModel, + BlenderbotSmallPreTrainedModel, + ) + + try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_tf_blenderbot_small import ( + TFBlenderbotSmallForConditionalGeneration, + TFBlenderbotSmallModel, + TFBlenderbotSmallPreTrainedModel, + ) + + try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_flax_blenderbot_small import ( + FlaxBlenderbotSmallForConditionalGeneration, + FlaxBlenderbotSmallModel, + FlaxBlenderbotSmallPreTrainedModel, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/__init__.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..820dd5d93165254d582ea6940916b497140c114b Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/configuration_blenderbot_small.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/configuration_blenderbot_small.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f9bc973700127ad8da7f3758f6c8b496ce635c7 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/configuration_blenderbot_small.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_blenderbot_small.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_blenderbot_small.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1e7a797f66d448336af1c2cd26d2ba6b048e09c Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_blenderbot_small.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_flax_blenderbot_small.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_flax_blenderbot_small.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f70a869896395d6fd403e3c5abaa3d14122d8e3 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_flax_blenderbot_small.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_tf_blenderbot_small.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_tf_blenderbot_small.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe27390be0680c562c04d06aa88a5bea3467347d Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_tf_blenderbot_small.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/tokenization_blenderbot_small.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/tokenization_blenderbot_small.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..37238f40754848a7b7fe904a6d6949c1b20bd2fb Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/tokenization_blenderbot_small.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/tokenization_blenderbot_small_fast.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/tokenization_blenderbot_small_fast.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..d686f38ad5391874216a7ebb7ebcac7eca125168 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/tokenization_blenderbot_small_fast.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/configuration_blenderbot_small.py b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/configuration_blenderbot_small.py new file mode 100644 index 0000000000000000000000000000000000000000..b41330656d39abee211a69ba6c0f94462fa67bbf --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/configuration_blenderbot_small.py @@ -0,0 +1,392 @@ +# coding=utf-8 +# Copyright 2021 The Facebook, Inc. and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" BlenderbotSmall model configuration""" + +from collections import OrderedDict +from typing import Any, Mapping, Optional + +from ... import PreTrainedTokenizer +from ...configuration_utils import PretrainedConfig +from ...file_utils import TensorType, is_torch_available +from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast +from ...onnx.utils import compute_effective_axis_dimension +from ...utils import logging + + +logger = logging.get_logger(__name__) + +BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = { + "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json", + # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small +} + + +class BlenderbotSmallConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`BlenderbotSmallModel`]. It is used to instantiate + a BlenderbotSmall model according to the specified arguments, defining the model architecture. Instantiating a + configuration with the defaults will yield a similar configuration to that of the BlenderbotSmall + [facebook/blenderbot_small-90M](https://huggingface.co/facebook/blenderbot_small-90M) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 50265): + Vocabulary size of the BlenderbotSmall model. Defines the number of different tokens that can be + represented by the `inputs_ids` passed when calling [`BlenderbotSmallModel`] or [`TFBlenderbotSmallModel`]. + d_model (`int`, *optional*, defaults to 512): + Dimensionality of the layers and the pooler layer. + encoder_layers (`int`, *optional*, defaults to 8): + Number of encoder layers. + decoder_layers (`int`, *optional*, defaults to 8): + Number of decoder layers. + encoder_attention_heads (`int`, *optional*, defaults to 16): + Number of attention heads for each attention layer in the Transformer encoder.
+ decoder_attention_heads (`int`, *optional*, defaults to 16): + Number of attention heads for each attention layer in the Transformer decoder. + decoder_ffn_dim (`int`, *optional*, defaults to 2048): + Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. + encoder_ffn_dim (`int`, *optional*, defaults to 2048): + Dimensionality of the "intermediate" (often named feed-forward) layer in encoder. + activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"silu"` and `"gelu_new"` are supported. + dropout (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + activation_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for activations inside the fully connected layer. + max_position_embeddings (`int`, *optional*, defaults to 512): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + init_std (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + encoder_layerdrop (`float`, *optional*, defaults to 0.0): + The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) + for more details. + decoder_layerdrop (`float`, *optional*, defaults to 0.0): + The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) + for more details. + scale_embedding (`bool`, *optional*, defaults to `False`): + Whether to scale embeddings by multiplying them by sqrt(d_model). + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). + forced_eos_token_id (`int`, *optional*, defaults to 2): + The id of the token to force as the last generated token when `max_length` is reached. Usually set to + `eos_token_id`.
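One detail worth noting alongside the arguments documented above: the `attribute_map` declared in the class body a little further down aliases the generic `hidden_size` and `num_attention_heads` names onto the BlenderbotSmall-specific `d_model` and `encoder_attention_heads`, so code written against the generic names keeps working. A small sketch using the documented defaults:

```python
# Sketch of the attribute_map aliasing declared in the class body; values are the documented defaults.
from transformers import BlenderbotSmallConfig

config = BlenderbotSmallConfig()
assert config.hidden_size == config.d_model == 512
assert config.num_attention_heads == config.encoder_attention_heads == 16
```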
+ + Example: + + ```python + >>> from transformers import BlenderbotSmallConfig, BlenderbotSmallModel + + >>> # Initializing a BlenderbotSmall facebook/blenderbot_small-90M style configuration + >>> configuration = BlenderbotSmallConfig() + + >>> # Initializing a model (with random weights) from the facebook/blenderbot_small-90M style configuration + >>> model = BlenderbotSmallModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "blenderbot-small" + keys_to_ignore_at_inference = ["past_key_values"] + attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} + + def __init__( + self, + vocab_size=50265, + max_position_embeddings=512, + encoder_layers=8, + encoder_ffn_dim=2048, + encoder_attention_heads=16, + decoder_layers=8, + decoder_ffn_dim=2048, + decoder_attention_heads=16, + encoder_layerdrop=0.0, + decoder_layerdrop=0.0, + use_cache=True, + is_encoder_decoder=True, + activation_function="gelu", + d_model=512, + dropout=0.1, + attention_dropout=0.0, + activation_dropout=0.0, + init_std=0.02, + decoder_start_token_id=1, + scale_embedding=False, + pad_token_id=0, + bos_token_id=1, + eos_token_id=2, + forced_eos_token_id=2, + **kwargs, + ): + self.vocab_size = vocab_size + self.max_position_embeddings = max_position_embeddings + self.d_model = d_model + self.encoder_ffn_dim = encoder_ffn_dim + self.encoder_layers = encoder_layers + self.encoder_attention_heads = encoder_attention_heads + self.decoder_ffn_dim = decoder_ffn_dim + self.decoder_layers = decoder_layers + self.decoder_attention_heads = decoder_attention_heads + self.dropout = dropout + self.attention_dropout = attention_dropout + self.activation_dropout = activation_dropout + self.activation_function = activation_function + self.init_std = init_std + self.encoder_layerdrop = encoder_layerdrop + self.decoder_layerdrop = decoder_layerdrop + self.use_cache = use_cache + self.num_hidden_layers = encoder_layers + self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True + + super().__init__( + pad_token_id=pad_token_id, + bos_token_id=bos_token_id, + eos_token_id=eos_token_id, + is_encoder_decoder=is_encoder_decoder, + decoder_start_token_id=decoder_start_token_id, + forced_eos_token_id=forced_eos_token_id, + **kwargs, + ) + + +# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig +class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast): + @property + def inputs(self) -> Mapping[str, Mapping[int, str]]: + if self.task in ["default", "seq2seq-lm"]: + common_inputs = OrderedDict( + [ + ("input_ids", {0: "batch", 1: "encoder_sequence"}), + ("attention_mask", {0: "batch", 1: "encoder_sequence"}), + ] + ) + + if self.use_past: + common_inputs["decoder_input_ids"] = {0: "batch"} + common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"} + else: + common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"} + common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"} + + if self.use_past: + self.fill_with_past_key_values_(common_inputs, direction="inputs") + elif self.task == "causal-lm": + # TODO: figure this case out. 
+ common_inputs = OrderedDict( + [ + ("input_ids", {0: "batch", 1: "encoder_sequence"}), + ("attention_mask", {0: "batch", 1: "encoder_sequence"}), + ] + ) + if self.use_past: + num_encoder_layers, _ = self.num_layers + for i in range(num_encoder_layers): + common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"} + common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"} + else: + common_inputs = OrderedDict( + [ + ("input_ids", {0: "batch", 1: "encoder_sequence"}), + ("attention_mask", {0: "batch", 1: "encoder_sequence"}), + ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), + ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), + ] + ) + + return common_inputs + + @property + def outputs(self) -> Mapping[str, Mapping[int, str]]: + if self.task in ["default", "seq2seq-lm"]: + common_outputs = super().outputs + else: + common_outputs = super(OnnxConfigWithPast, self).outputs + if self.use_past: + num_encoder_layers, _ = self.num_layers + for i in range(num_encoder_layers): + common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"} + common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"} + return common_outputs + + def _generate_dummy_inputs_for_default_and_seq2seq_lm( + self, + tokenizer: PreTrainedTokenizer, + batch_size: int = -1, + seq_length: int = -1, + is_pair: bool = False, + framework: Optional[TensorType] = None, + ) -> Mapping[str, Any]: + encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( + tokenizer, batch_size, seq_length, is_pair, framework + ) + + # Generate decoder inputs + decoder_seq_length = seq_length if not self.use_past else 1 + decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( + tokenizer, batch_size, decoder_seq_length, is_pair, framework + ) + decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} + common_inputs = dict(**encoder_inputs, **decoder_inputs) + + if self.use_past: + if not is_torch_available(): + raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.") + else: + import torch + batch, encoder_seq_length = common_inputs["input_ids"].shape + decoder_seq_length = common_inputs["decoder_input_ids"].shape[1] + num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads + encoder_shape = ( + batch, + num_encoder_attention_heads, + encoder_seq_length, + self._config.hidden_size // num_encoder_attention_heads, + ) + decoder_past_length = decoder_seq_length + 3 + decoder_shape = ( + batch, + num_decoder_attention_heads, + decoder_past_length, + self._config.hidden_size // num_decoder_attention_heads, + ) + + common_inputs["decoder_attention_mask"] = torch.cat( + [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1 + ) + + common_inputs["past_key_values"] = [] + # If the number of encoder and decoder layers are present in the model configuration, both are considered + num_encoder_layers, num_decoder_layers = self.num_layers + min_num_layers = min(num_encoder_layers, num_decoder_layers) + max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers + remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" + + for _ in range(min_num_layers): + common_inputs["past_key_values"].append( + ( + torch.zeros(decoder_shape), + torch.zeros(decoder_shape), + 
torch.zeros(encoder_shape), + torch.zeros(encoder_shape), + ) + ) + # TODO: test this. + shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape + for _ in range(min_num_layers, max_num_layers): + common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape))) + return common_inputs + + def _generate_dummy_inputs_for_causal_lm( + self, + tokenizer: PreTrainedTokenizer, + batch_size: int = -1, + seq_length: int = -1, + is_pair: bool = False, + framework: Optional[TensorType] = None, + ) -> Mapping[str, Any]: + common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( + tokenizer, batch_size, seq_length, is_pair, framework + ) + + if self.use_past: + if not is_torch_available(): + raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.") + else: + import torch + batch, seqlen = common_inputs["input_ids"].shape + # Not using the same length for past_key_values + past_key_values_length = seqlen + 2 + num_encoder_layers, _ = self.num_layers + num_encoder_attention_heads, _ = self.num_attention_heads + past_shape = ( + batch, + num_encoder_attention_heads, + past_key_values_length, + self._config.hidden_size // num_encoder_attention_heads, + ) + + mask_dtype = common_inputs["attention_mask"].dtype + common_inputs["attention_mask"] = torch.cat( + [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1 + ) + common_inputs["past_key_values"] = [ + (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers) + ] + return common_inputs + + def _generate_dummy_inputs_for_sequence_classification_and_question_answering( + self, + tokenizer: PreTrainedTokenizer, + batch_size: int = -1, + seq_length: int = -1, + is_pair: bool = False, + framework: Optional[TensorType] = None, + ) -> Mapping[str, Any]: + # Copied from OnnxConfig.generate_dummy_inputs + # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
+ # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX + batch_size = compute_effective_axis_dimension( + batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 + ) + + # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX + token_to_add = tokenizer.num_special_tokens_to_add(is_pair) + seq_length = compute_effective_axis_dimension( + seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add + ) + + # Generate dummy inputs according to compute batch and sequence + dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size + common_inputs = dict(tokenizer(dummy_input, return_tensors=framework)) + return common_inputs + + def generate_dummy_inputs( + self, + tokenizer: PreTrainedTokenizer, + batch_size: int = -1, + seq_length: int = -1, + is_pair: bool = False, + framework: Optional[TensorType] = None, + ) -> Mapping[str, Any]: + if self.task in ["default", "seq2seq-lm"]: + common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm( + tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework + ) + + elif self.task == "causal-lm": + common_inputs = self._generate_dummy_inputs_for_causal_lm( + tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework + ) + else: + common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( + tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework + ) + + return common_inputs + + def _flatten_past_key_values_(self, flattened_output, name, idx, t): + if self.task in ["default", "seq2seq-lm"]: + flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t) + else: + flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_( + flattened_output, name, idx, t + ) diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/modeling_blenderbot_small.py b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/modeling_blenderbot_small.py new file mode 100644 index 0000000000000000000000000000000000000000..f9a9508e5905575dedf832e01b239a9a14ae3cc8 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/modeling_blenderbot_small.py @@ -0,0 +1,1570 @@ +# coding=utf-8 +# Copyright 2021 The Facebook, Inc. and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
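For orientation, when the exporter is asked for dynamic axes (`batch_size=-1`, `seq_length=-1`), `compute_effective_axis_dimension` falls back to small fixed sizes so ONNX tracing cannot specialize on them. The following is a rough, self-contained sketch of how the dummy text above is materialized, assuming the checkpoint referenced in this file; exact tensor shapes depend on the tokenizer.

```python
# Rough sketch of the dummy-input construction used by the ONNX config above.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")

batch_size = 2                                               # fixed fallback used when the batch axis is dynamic
seq_length = 8 - tokenizer.num_special_tokens_to_add(False)  # fixed fallback, leaving room for special tokens
dummy_text = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
dummy_inputs = dict(tokenizer(dummy_text, return_tensors="pt"))
print({name: tensor.shape for name, tensor in dummy_inputs.items()})
```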
+""" PyTorch BlenderbotSmall model.""" + + +import copy +import math +from typing import List, Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import CrossEntropyLoss + +from ...activations import ACT2FN +from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask +from ...modeling_outputs import ( + BaseModelOutput, + BaseModelOutputWithPastAndCrossAttentions, + CausalLMOutputWithCrossAttentions, + Seq2SeqLMOutput, + Seq2SeqModelOutput, +) +from ...modeling_utils import PreTrainedModel +from ...utils import ( + add_end_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from .configuration_blenderbot_small import BlenderbotSmallConfig + + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "BlenderbotSmallConfig" + + +BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "facebook/blenderbot_small-90M", + # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small +] + + +# Copied from transformers.models.bart.modeling_bart.shift_tokens_right +def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): + """ + Shift input ids one token to the right. + """ + shifted_input_ids = input_ids.new_zeros(input_ids.shape) + shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() + shifted_input_ids[:, 0] = decoder_start_token_id + + if pad_token_id is None: + raise ValueError("self.model.config.pad_token_id has to be defined.") + # replace possible -100 values in labels by `pad_token_id` + shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) + + return shifted_input_ids + + +# Copied from transformers.models.blenderbot.modeling_blenderbot.BlenderbotLearnedPositionalEmbedding with Blenderbot->BlenderbotSmall +class BlenderbotSmallLearnedPositionalEmbedding(nn.Embedding): + """ + This module learns positional embeddings up to a fixed maximum size. + """ + + def __init__(self, num_embeddings: int, embedding_dim: int): + super().__init__(num_embeddings, embedding_dim) + + def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0): + """`input_ids_shape` is expected to be [bsz x seqlen].""" + bsz, seq_len = input_ids_shape[:2] + positions = torch.arange( + past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device + ) + return super().forward(positions) + + +# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->BlenderbotSmall +class BlenderbotSmallAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__( + self, + embed_dim: int, + num_heads: int, + dropout: float = 0.0, + is_decoder: bool = False, + bias: bool = True, + is_causal: bool = False, + config: Optional[BlenderbotSmallConfig] = None, + ): + super().__init__() + self.embed_dim = embed_dim + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + self.config = config + + if (self.head_dim * num_heads) != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" + f" and `num_heads`: {num_heads})." 
+ ) + self.scaling = self.head_dim**-0.5 + self.is_decoder = is_decoder + self.is_causal = is_causal + + self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states: torch.Tensor, + key_value_states: Optional[torch.Tensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + attention_mask: Optional[torch.Tensor] = None, + layer_head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + """Input shape: Batch x Time x Channel""" + + # if key_value_states are provided this layer is used as a cross-attention layer + # for the decoder + is_cross_attention = key_value_states is not None + + bsz, tgt_len, _ = hidden_states.size() + + # get query proj + query_states = self.q_proj(hidden_states) * self.scaling + # get key, value proj + # `past_key_value[0].shape[2] == key_value_states.shape[1]` + # is checking that the `sequence_length` of the `past_key_value` is the same as + # the provided `key_value_states` to support prefix tuning + if ( + is_cross_attention + and past_key_value is not None + and past_key_value[0].shape[2] == key_value_states.shape[1] + ): + # reuse k,v, cross_attentions + key_states = past_key_value[0] + value_states = past_key_value[1] + elif is_cross_attention: + # cross_attentions + key_states = self._shape(self.k_proj(key_value_states), -1, bsz) + value_states = self._shape(self.v_proj(key_value_states), -1, bsz) + elif past_key_value is not None: + # reuse k, v, self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + key_states = torch.cat([past_key_value[0], key_states], dim=2) + value_states = torch.cat([past_key_value[1], value_states], dim=2) + else: + # self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. 
Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_states, value_states) + + proj_shape = (bsz * self.num_heads, -1, self.head_dim) + query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) + key_states = key_states.reshape(*proj_shape) + value_states = value_states.reshape(*proj_shape) + + src_len = key_states.size(1) + attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) + + if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): + raise ValueError( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {attn_weights.size()}" + ) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, tgt_len, src_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" + ) + attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + + if layer_head_mask is not None: + if layer_head_mask.size() != (self.num_heads,): + raise ValueError( + f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" + f" {layer_head_mask.size()}" + ) + attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + if output_attentions: + # this operation is a bit awkward, but it's required to + # make sure that attn_weights keeps its gradient. + # In order to do so, attn_weights have to be reshaped + # twice and have to be reused in the following + attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) + else: + attn_weights_reshaped = None + + attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) + + attn_output = torch.bmm(attn_probs, value_states) + + if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) + attn_output = attn_output.transpose(1, 2) + + # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be + # partitioned across GPUs when using tensor-parallelism. 
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) + + attn_output = self.out_proj(attn_output) + + return attn_output, attn_weights_reshaped, past_key_value + + +# Copied from transformers.models.bart.modeling_bart.BartEncoderLayer with Bart->BlenderbotSmall, BART->BLENDERBOT_SMALL +class BlenderbotSmallEncoderLayer(nn.Module): + def __init__(self, config: BlenderbotSmallConfig): + super().__init__() + self.embed_dim = config.d_model + + self.self_attn = BLENDERBOT_SMALL_ATTENTION_CLASSES[config._attn_implementation]( + embed_dim=self.embed_dim, + num_heads=config.encoder_attention_heads, + dropout=config.attention_dropout, + config=config, + ) + self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) + self.dropout = config.dropout + self.activation_fn = ACT2FN[config.activation_function] + self.activation_dropout = config.activation_dropout + self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) + self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) + self.final_layer_norm = nn.LayerNorm(self.embed_dim) + + def forward( + self, + hidden_states: torch.FloatTensor, + attention_mask: torch.FloatTensor, + layer_head_mask: torch.FloatTensor, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size + `(encoder_attention_heads,)`. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + """ + residual = hidden_states + hidden_states, attn_weights, _ = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + layer_head_mask=layer_head_mask, + output_attentions=output_attentions, + ) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + + residual = hidden_states + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) + hidden_states = self.fc2(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.final_layer_norm(hidden_states) + + if hidden_states.dtype == torch.float16 and ( + torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any() + ): + clamp_value = torch.finfo(hidden_states.dtype).max - 1000 + hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) + + outputs = (hidden_states,) + + if output_attentions: + outputs += (attn_weights,) + + return outputs + + +# TODO: Implement attention with SDPA for TimeSeriesTransformer. 
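A note on the registry defined just below: encoder and decoder layers resolve their attention implementation through `config._attn_implementation`, and only the eager implementation is registered for this model. A minimal sketch of that dispatch, assuming the import path from this file and a working PyTorch install:

```python
# Minimal sketch of the attention-class dispatch used by the layers in this file.
from transformers import BlenderbotSmallConfig
from transformers.models.blenderbot_small.modeling_blenderbot_small import (
    BLENDERBOT_SMALL_ATTENTION_CLASSES,
)

config = BlenderbotSmallConfig()  # _attn_implementation resolves to "eager" by default
attn_cls = BLENDERBOT_SMALL_ATTENTION_CLASSES[config._attn_implementation]
self_attn = attn_cls(
    embed_dim=config.d_model,
    num_heads=config.encoder_attention_heads,
    dropout=config.attention_dropout,
    config=config,
)
print(type(self_attn).__name__)  # BlenderbotSmallAttention
```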
+BLENDERBOT_SMALL_ATTENTION_CLASSES = { + "eager": BlenderbotSmallAttention, +} + + +# Copied from transformers.models.bart.modeling_bart.BartDecoderLayer with Bart->BlenderbotSmall, BART->BLENDERBOT_SMALL +class BlenderbotSmallDecoderLayer(nn.Module): + def __init__(self, config: BlenderbotSmallConfig): + super().__init__() + self.embed_dim = config.d_model + + self.self_attn = BLENDERBOT_SMALL_ATTENTION_CLASSES[config._attn_implementation]( + embed_dim=self.embed_dim, + num_heads=config.decoder_attention_heads, + dropout=config.attention_dropout, + is_decoder=True, + is_causal=True, + config=config, + ) + self.dropout = config.dropout + self.activation_fn = ACT2FN[config.activation_function] + self.activation_dropout = config.activation_dropout + + self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) + self.encoder_attn = BLENDERBOT_SMALL_ATTENTION_CLASSES[config._attn_implementation]( + self.embed_dim, + config.decoder_attention_heads, + dropout=config.attention_dropout, + is_decoder=True, + config=config, + ) + self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) + self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) + self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) + self.final_layer_norm = nn.LayerNorm(self.embed_dim) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + layer_head_mask: Optional[torch.Tensor] = None, + cross_attn_layer_head_mask: Optional[torch.Tensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = True, + ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + encoder_hidden_states (`torch.FloatTensor`): + cross attention input to the layer of shape `(batch, seq_len, embed_dim)` + encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size + `(encoder_attention_heads,)`. + cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of + size `(decoder_attention_heads,)`. + past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. 
+ """ + residual = hidden_states + + # Self Attention + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + # add present self-attn cache to positions 1,2 of present_key_value tuple + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + past_key_value=self_attn_past_key_value, + attention_mask=attention_mask, + layer_head_mask=layer_head_mask, + output_attentions=output_attentions, + ) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + + # Cross-Attention Block + cross_attn_present_key_value = None + cross_attn_weights = None + if encoder_hidden_states is not None: + residual = hidden_states + + # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple + cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None + hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( + hidden_states=hidden_states, + key_value_states=encoder_hidden_states, + attention_mask=encoder_attention_mask, + layer_head_mask=cross_attn_layer_head_mask, + past_key_value=cross_attn_past_key_value, + output_attentions=output_attentions, + ) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.encoder_attn_layer_norm(hidden_states) + + # add cross-attn to positions 3,4 of present_key_value tuple + present_key_value = present_key_value + cross_attn_present_key_value + + # Fully Connected + residual = hidden_states + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) + hidden_states = self.fc2(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.final_layer_norm(hidden_states) + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights, cross_attn_weights) + + if use_cache: + outputs += (present_key_value,) + + return outputs + + +class BlenderbotSmallPreTrainedModel(PreTrainedModel): + config_class = BlenderbotSmallConfig + base_model_prefix = "model" + supports_gradient_checkpointing = True + + def _init_weights(self, module): + std = self.config.init_std + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + @property + def dummy_inputs(self): + pad_token = self.config.pad_token_id + input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device) + dummy_inputs = { + "attention_mask": input_ids.ne(pad_token), + "input_ids": input_ids, + "decoder_input_ids": input_ids, + } + return dummy_inputs + + +BLENDERBOT_SMALL_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) 
+ + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`BlenderbotSmallConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +BLENDERBOT_SMALL_GENERATION_EXAMPLE = r""" + Conversation example: + + ```python + >>> from transformers import AutoTokenizer, BlenderbotSmallForConditionalGeneration + + >>> mname = "facebook/blenderbot_small-90M" + >>> model = BlenderbotSmallForConditionalGeneration.from_pretrained(mname) + >>> tokenizer = AutoTokenizer.from_pretrained(mname) + >>> UTTERANCE = "My friends are cool but they eat too many carbs." + >>> print("Human: ", UTTERANCE) + Human: My friends are cool but they eat too many carbs. + + >>> inputs = tokenizer([UTTERANCE], return_tensors="pt") + >>> reply_ids = model.generate(**inputs) + >>> print("Bot: ", tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0]) + Bot: what kind of carbs do they eat? i don't know much about carbs. + + >>> REPLY = "I'm not sure" + >>> print("Human: ", REPLY) + Human: I'm not sure + + >>> NEXT_UTTERANCE = ( + ... "My friends are cool but they eat too many carbs.__end__ __start__what kind of carbs do they eat? " + ... "i don't know much about carbs__end__ " + ... "__start__ I'm not sure." + ... ) + >>> inputs = tokenizer([NEXT_UTTERANCE], return_tensors="pt") + >>> next_reply_ids = model.generate(**inputs) + >>> print("Bot: ", tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0]) + Bot: they eat a lot of carbs. carbs are high in fat, protein, and fats. + ``` +""" + +BLENDERBOT_SMALL_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): + Indices of decoder input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are decoder input IDs?](../glossary#decoder-input-ids) + + BlenderbotSmall uses the `bos_token_id` as the starting token for `decoder_input_ids` generation. If + `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see + `past_key_values`). + decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): + Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also + be used by default. 
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, + 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): + Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) + `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of + hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. + decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded + representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be + input (see `past_key_values`). This is useful if you want more control over how to convert + `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. + + If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value + of `inputs_embeds`. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. 
+ output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +class BlenderbotSmallEncoder(BlenderbotSmallPreTrainedModel): + """ + Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a + [`BlenderbotSmallEncoderLayer`]. + + Args: + config: BlenderbotSmallConfig + embed_tokens (nn.Embedding): output embedding + """ + + def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[nn.Embedding] = None): + super().__init__(config) + + self.dropout = config.dropout + self.layerdrop = config.encoder_layerdrop + + embed_dim = config.d_model + self.padding_idx = config.pad_token_id + self.max_source_positions = config.max_position_embeddings + self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 + + if embed_tokens is not None: + self.embed_tokens = embed_tokens + else: + self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx) + + self.embed_positions = BlenderbotSmallLearnedPositionalEmbedding( + config.max_position_embeddings, + embed_dim, + ) + self.layers = nn.ModuleList([BlenderbotSmallEncoderLayer(config) for _ in range(config.encoder_layers)]) + self.layernorm_embedding = nn.LayerNorm(embed_dim) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def forward( + self, + input_ids=None, + attention_mask=None, + head_mask=None, + inputs_embeds=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you + provide it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. 
+ return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) + input_shape = input_ids.size() + input_ids = input_ids.view(-1, input_shape[-1]) + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale + + embed_pos = self.embed_positions(input_shape) + + hidden_states = inputs_embeds + embed_pos + hidden_states = self.layernorm_embedding(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + + # expand attention_mask + if attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) + + encoder_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + + # check if head_mask has a correct number of layers specified if desired + if head_mask is not None: + if head_mask.size()[0] != len(self.layers): + raise ValueError( + f"The head_mask should be specified for {len(self.layers)} layers, but it is for" + f" {head_mask.size()[0]}." + ) + for idx, encoder_layer in enumerate(self.layers): + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) + to_drop = False + if self.training: + dropout_probability = torch.rand([]) + if dropout_probability < self.layerdrop: # skip the layer + to_drop = True + + if to_drop: + layer_outputs = (None, None) + else: + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + encoder_layer.__call__, + hidden_states, + attention_mask, + (head_mask[idx] if head_mask is not None else None), + output_attentions, + ) + else: + layer_outputs = encoder_layer( + hidden_states, + attention_mask, + layer_head_mask=(head_mask[idx] if head_mask is not None else None), + output_attentions=output_attentions, + ) + + hidden_states = layer_outputs[0] + + if output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) + return BaseModelOutput( + last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions + ) + + +class BlenderbotSmallDecoder(BlenderbotSmallPreTrainedModel): + """ + Transformer decoder consisting of *config.decoder_layers* layers. 
Each layer is a [`BlenderbotSmallDecoderLayer`] + + Args: + config: BlenderbotSmallConfig + embed_tokens (nn.Embedding): output embedding + """ + + def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[nn.Embedding] = None): + super().__init__(config) + self.dropout = config.dropout + self.layerdrop = config.decoder_layerdrop + self.padding_idx = config.pad_token_id + self.max_target_positions = config.max_position_embeddings + self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 + + if embed_tokens is not None: + self.embed_tokens = embed_tokens + else: + self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx) + + self.embed_positions = BlenderbotSmallLearnedPositionalEmbedding( + config.max_position_embeddings, + config.d_model, + ) + self.layers = nn.ModuleList([BlenderbotSmallDecoderLayer(config) for _ in range(config.decoder_layers)]) + self.layernorm_embedding = nn.LayerNorm(config.d_model) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + def forward( + self, + input_ids=None, + attention_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + head_mask=None, + cross_attn_head_mask=None, + past_key_values=None, + inputs_embeds=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you + provide it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention + of the decoder. + encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): + Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values + selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing + cross-attention on hidden heads. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. 
+ + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of + shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of + shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the + cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those + that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of + all `decoder_input_ids` of shape `(batch_size, sequence_length)`. + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") + elif input_ids is not None: + input_shape = input_ids.size() + input_ids = input_ids.view(-1, input_shape[-1]) + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") + + # past_key_values_length + past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale + + attention_mask = _prepare_4d_causal_attention_mask( + attention_mask, input_shape, inputs_embeds, past_key_values_length + ) + + # expand encoder attention mask + if encoder_hidden_states is not None and encoder_attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + encoder_attention_mask = _prepare_4d_attention_mask( + encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] + ) + + # embed positions + positions = self.embed_positions(input_shape, past_key_values_length) + + # BlenderbotSmall applies layer norm on hidden_states + inputs_embeds = self.layernorm_embedding(inputs_embeds) + hidden_states = inputs_embeds + positions + + 
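+        # Explanatory note (added comment, not part of the upstream file): at this point `hidden_states`
+        # has shape (batch_size, target_sequence_length, d_model). Since `layernorm_embedding` was applied
+        # to `inputs_embeds` alone above, the learned positional embeddings are added on top of the
+        # already-normalized token embeddings, and dropout is then applied to the sum below.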
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." + ) + use_cache = False + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None + next_decoder_cache = () if use_cache else None + + # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired + for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]): + if attn_mask is not None: + if attn_mask.size()[0] != len(self.layers): + raise ValueError( + f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" + f" {head_mask.size()[0]}." + ) + for idx, decoder_layer in enumerate(self.layers): + # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) + if output_hidden_states: + all_hidden_states += (hidden_states,) + if self.training: + dropout_probability = torch.rand([]) + if dropout_probability < self.layerdrop: + continue + + past_key_value = past_key_values[idx] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + decoder_layer.__call__, + hidden_states, + attention_mask, + encoder_hidden_states, + encoder_attention_mask, + head_mask[idx] if head_mask is not None else None, + cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, + None, + output_attentions, + use_cache, + ) + else: + layer_outputs = decoder_layer( + hidden_states, + attention_mask=attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + layer_head_mask=(head_mask[idx] if head_mask is not None else None), + cross_attn_layer_head_mask=( + cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None + ), + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache += (layer_outputs[3 if output_attentions else 1],) + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + if encoder_hidden_states is not None: + all_cross_attentions += (layer_outputs[2],) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = next_decoder_cache if use_cache else None + if not return_dict: + return tuple( + v + for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + cross_attentions=all_cross_attentions, + ) + + +@add_start_docstrings( + "The bare BlenderbotSmall Model outputting raw hidden-states without any specific head on top.", + BLENDERBOT_SMALL_START_DOCSTRING, +) +class BlenderbotSmallModel(BlenderbotSmallPreTrainedModel): + _tied_weights_keys = ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight"] + + def __init__(self, config: BlenderbotSmallConfig): + super().__init__(config) + + padding_idx, vocab_size = config.pad_token_id, 
config.vocab_size + self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx) + + self.encoder = BlenderbotSmallEncoder(config, self.shared) + self.decoder = BlenderbotSmallDecoder(config, self.shared) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.shared + + def set_input_embeddings(self, value): + self.shared = value + self.encoder.embed_tokens = self.shared + self.decoder.embed_tokens = self.shared + + def get_encoder(self): + return self.encoder + + def get_decoder(self): + return self.decoder + + @add_start_docstrings_to_model_forward(BLENDERBOT_SMALL_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.Tensor] = None, + decoder_input_ids: Optional[torch.LongTensor] = None, + decoder_attention_mask: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.Tensor] = None, + decoder_head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + encoder_outputs: Optional[Union[Tuple, BaseModelOutput]] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.Tensor] = None, + decoder_inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]: + r""" + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, BlenderbotSmallModel + + >>> model = BlenderbotSmallModel.from_pretrained("facebook/blenderbot_small-90M") + >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M") + + >>> inputs = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt") + >>> decoder_inputs = tokenizer("Studies show that", return_tensors="pt") # Batch size 1 + >>> outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_inputs.input_ids) + + >>> last_hidden_states = outputs.last_hidden_state + >>> list(last_hidden_states.shape) + [1, 3, 512] + ```""" + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if encoder_outputs is None: + encoder_outputs = self.encoder( + input_ids=input_ids, + attention_mask=attention_mask, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True + elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): + encoder_outputs = BaseModelOutput( + last_hidden_state=encoder_outputs[0], + hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, + attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, + ) + + # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) + decoder_outputs = self.decoder( + input_ids=decoder_input_ids, + 
attention_mask=decoder_attention_mask, + encoder_hidden_states=encoder_outputs[0], + encoder_attention_mask=attention_mask, + head_mask=decoder_head_mask, + cross_attn_head_mask=cross_attn_head_mask, + past_key_values=past_key_values, + inputs_embeds=decoder_inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + if not return_dict: + return decoder_outputs + encoder_outputs + + return Seq2SeqModelOutput( + last_hidden_state=decoder_outputs.last_hidden_state, + past_key_values=decoder_outputs.past_key_values, + decoder_hidden_states=decoder_outputs.hidden_states, + decoder_attentions=decoder_outputs.attentions, + cross_attentions=decoder_outputs.cross_attentions, + encoder_last_hidden_state=encoder_outputs.last_hidden_state, + encoder_hidden_states=encoder_outputs.hidden_states, + encoder_attentions=encoder_outputs.attentions, + ) + + +@add_start_docstrings( + "The BlenderbotSmall Model with a language modeling head. Can be used for summarization.", + BLENDERBOT_SMALL_START_DOCSTRING, +) +class BlenderbotSmallForConditionalGeneration(BlenderbotSmallPreTrainedModel): + base_model_prefix = "model" + _keys_to_ignore_on_load_missing = ["final_logits_bias"] + _tied_weights_keys = ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight", "lm_head.weight"] + + def __init__(self, config: BlenderbotSmallConfig): + super().__init__(config) + self.model = BlenderbotSmallModel(config) + self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) + self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_encoder(self): + return self.model.get_encoder() + + def get_decoder(self): + return self.model.get_decoder() + + def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None) -> nn.Embedding: + new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of) + self._resize_final_logits_bias(new_embeddings.weight.shape[0]) + return new_embeddings + + def _resize_final_logits_bias(self, new_num_tokens: int) -> None: + old_num_tokens = self.final_logits_bias.shape[-1] + if new_num_tokens <= old_num_tokens: + new_bias = self.final_logits_bias[:, :new_num_tokens] + else: + extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device) + new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1) + self.register_buffer("final_logits_bias", new_bias) + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + @add_start_docstrings_to_model_forward(BLENDERBOT_SMALL_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) + @add_end_docstrings(BLENDERBOT_SMALL_GENERATION_EXAMPLE) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.Tensor] = None, + decoder_input_ids: Optional[torch.LongTensor] = None, + decoder_attention_mask: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.Tensor] = None, + decoder_head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + encoder_outputs: Optional[Union[Tuple, BaseModelOutput]] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.Tensor] = 
None, + decoder_inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + + Returns: + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if labels is not None: + if use_cache: + logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") + use_cache = False + if decoder_input_ids is None and decoder_inputs_embeds is None: + decoder_input_ids = shift_tokens_right( + labels, self.config.pad_token_id, self.config.decoder_start_token_id + ) + + outputs = self.model( + input_ids, + attention_mask=attention_mask, + decoder_input_ids=decoder_input_ids, + encoder_outputs=encoder_outputs, + decoder_attention_mask=decoder_attention_mask, + head_mask=head_mask, + decoder_head_mask=decoder_head_mask, + cross_attn_head_mask=cross_attn_head_mask, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + decoder_inputs_embeds=decoder_inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias + + masked_lm_loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) + + if not return_dict: + output = (lm_logits,) + outputs[1:] + return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output + + return Seq2SeqLMOutput( + loss=masked_lm_loss, + logits=lm_logits, + past_key_values=outputs.past_key_values, + decoder_hidden_states=outputs.decoder_hidden_states, + decoder_attentions=outputs.decoder_attentions, + cross_attentions=outputs.cross_attentions, + encoder_last_hidden_state=outputs.encoder_last_hidden_state, + encoder_hidden_states=outputs.encoder_hidden_states, + encoder_attentions=outputs.encoder_attentions, + ) + + def prepare_inputs_for_generation( + self, + decoder_input_ids, + past_key_values=None, + attention_mask=None, + head_mask=None, + decoder_head_mask=None, + cross_attn_head_mask=None, + use_cache=None, + encoder_outputs=None, + **kwargs, + ): + # cut decoder_input_ids if past is used + if past_key_values is not None: + past_length = past_key_values[0][0].shape[2] + + # Some generation methods already pass only the last input ID + if decoder_input_ids.shape[1] > past_length: + remove_prefix_length = past_length + else: + # Default to old behavior: keep only final ID + remove_prefix_length = decoder_input_ids.shape[1] - 1 + + decoder_input_ids = decoder_input_ids[:, remove_prefix_length:] + + return { + "input_ids": None, # encoder_outputs is defined. 
input_ids not needed + "encoder_outputs": encoder_outputs, + "past_key_values": past_key_values, + "decoder_input_ids": decoder_input_ids, + "attention_mask": attention_mask, + "head_mask": head_mask, + "decoder_head_mask": decoder_head_mask, + "cross_attn_head_mask": cross_attn_head_mask, + "use_cache": use_cache, # change this to avoid caching (presumably for debugging) + } + + @staticmethod + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + # cached cross_attention states don't have to be reordered -> they are always the same + reordered_past += ( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2]) + + layer_past[2:], + ) + return reordered_past + + +# Copied from transformers.models.bart.modeling_bart.BartDecoderWrapper with Bart->BlenderbotSmall +class BlenderbotSmallDecoderWrapper(BlenderbotSmallPreTrainedModel): + """ + This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is + used in combination with the [`EncoderDecoderModel`] framework. + """ + + def __init__(self, config): + super().__init__(config) + self.decoder = BlenderbotSmallDecoder(config) + + def forward(self, *args, **kwargs): + return self.decoder(*args, **kwargs) + + +# Copied from transformers.models.bart.modeling_bart.BartForCausalLM with Bart->BlenderbotSmall, facebook/bart-base->facebook/blenderbot_small-90M +class BlenderbotSmallForCausalLM(BlenderbotSmallPreTrainedModel): + _tied_weights_keys = ["lm_head.weight"] + + def __init__(self, config): + config = copy.deepcopy(config) + config.is_decoder = True + config.is_encoder_decoder = False + super().__init__(config) + self.model = BlenderbotSmallDecoderWrapper(config) + + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.decoder.embed_tokens + + def set_input_embeddings(self, value): + self.model.decoder.embed_tokens = value + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def set_decoder(self, decoder): + self.model.decoder = decoder + + def get_decoder(self): + return self.model.decoder + + @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: + r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you + provide it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. 
+
+                [What are input IDs?](../glossary#input-ids)
+            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+                - 1 for tokens that are **not masked**,
+                - 0 for tokens that are **masked**.
+
+                [What are attention masks?](../glossary#attention-mask)
+            encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+                if the model is configured as a decoder.
+            encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+                Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
+                in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+                - 1 for tokens that are **not masked**,
+                - 0 for tokens that are **masked**.
+            head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+                - 1 indicates the head is **not masked**,
+                - 0 indicates the head is **masked**.
+
+            cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+                Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+                - 1 indicates the head is **not masked**,
+                - 0 indicates the head is **masked**.
+
+            past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+                Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of
+                shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional
+                tensors are only required when the model is used as a decoder in a Sequence to Sequence model.
+
+                Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
+                cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+                all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+            use_cache (`bool`, *optional*):
+                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+                (see `past_key_values`).
+            output_attentions (`bool`, *optional*):
+                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+                returned tensors for more detail.
+            output_hidden_states (`bool`, *optional*):
+                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+                for more detail.
+ return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, BlenderbotSmallForCausalLM + + >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M") + >>> model = BlenderbotSmallForCausalLM.from_pretrained("facebook/blenderbot_small-90M", add_cross_attention=False) + >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + >>> outputs = model(**inputs) + + >>> logits = outputs.logits + >>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size] + >>> list(logits.shape) == expected_shape + True + ```""" + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs = self.model.decoder( + input_ids=input_ids, + attention_mask=attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + head_mask=head_mask, + cross_attn_head_mask=cross_attn_head_mask, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + logits = self.lm_head(outputs[0]) + + loss = None + if labels is not None: + labels = labels.to(logits.device) + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) + + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + return CausalLMOutputWithCrossAttentions( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + def prepare_inputs_for_generation( + self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs + ): + # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly + if attention_mask is None: + attention_mask = input_ids.new_ones(input_ids.shape) + + if past_key_values: + past_length = past_key_values[0][0].shape[2] + + # Some generation methods already pass only the last input ID + if input_ids.shape[1] > past_length: + remove_prefix_length = past_length + else: + # Default to old behavior: keep only final ID + remove_prefix_length = input_ids.shape[1] - 1 + + input_ids = input_ids[:, remove_prefix_length:] + # first step, decoder_cached_states are empty + return { + "input_ids": input_ids, # encoder_outputs is defined. 
input_ids not needed + "attention_mask": attention_mask, + "past_key_values": past_key_values, + "use_cache": use_cache, + } + + @staticmethod + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + reordered_past += ( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), + ) + return reordered_past diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/modeling_flax_blenderbot_small.py b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/modeling_flax_blenderbot_small.py new file mode 100644 index 0000000000000000000000000000000000000000..b5272fb3bca9e2eef8e307f738f6d456cb5e8218 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/modeling_flax_blenderbot_small.py @@ -0,0 +1,1522 @@ +# coding=utf-8 +# Copyright 2021 The Facebook, Inc. and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Flax BlenderbotSmall model.""" + + +import math +import random +from functools import partial +from typing import Callable, Optional, Tuple + +import flax.linen as nn +import jax +import jax.numpy as jnp +from flax.core.frozen_dict import FrozenDict, freeze, unfreeze +from flax.linen import combine_masks, make_causal_mask +from flax.linen.attention import dot_product_attention_weights +from flax.traverse_util import flatten_dict, unflatten_dict +from jax import lax +from jax.random import PRNGKey + +from ...modeling_flax_outputs import ( + FlaxBaseModelOutput, + FlaxBaseModelOutputWithPastAndCrossAttentions, + FlaxCausalLMOutputWithCrossAttentions, + FlaxSeq2SeqLMOutput, + FlaxSeq2SeqModelOutput, +) +from ...modeling_flax_utils import ( + ACT2FN, + FlaxPreTrainedModel, + append_call_sample_docstring, + append_replace_return_docstrings, + overwrite_call_docstring, +) +from ...utils import add_start_docstrings, logging, replace_return_docstrings +from .configuration_blenderbot_small import BlenderbotSmallConfig + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "facebook/blenderbot_small-90M" +_CONFIG_FOR_DOC = "BlenderbotSmallConfig" + +BLENDERBOT_SMALL_START_DOCSTRING = r""" + This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a Flax Linen + [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a + regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior. 
+ + Finally, this model supports inherent JAX features such as: + + - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) + - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) + - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) + - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) + + Parameters: + config ([`BlenderbotSmallConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. + dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): + The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and + `jax.numpy.bfloat16` (on TPUs). + + This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If + specified all the computation will be performed with the given `dtype`. + + **Note that this only specifies the dtype of the computation and does not influence the dtype of model + parameters.** + + If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and + [`~FlaxPreTrainedModel.to_bf16`]. +""" + +BLENDERBOT_SMALL_INPUTS_DOCSTRING = r""" + Args: + input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): + Indices of decoder input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are decoder input IDs?](../glossary#decoder-input-ids) + + For translation and summarization training, `decoder_input_ids` should be provided. If no + `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right + for denoising pre-training following the paper. + decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): + Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also + be used by default. + + If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the + paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. + position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. 
+ decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the + range `[0, config.max_position_embeddings - 1]`. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +BLENDERBOT_SMALL_ENCODE_INPUTS_DOCSTRING = r""" + Args: + input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + +BLENDERBOT_SMALL_DECODE_INPUTS_DOCSTRING = r""" + Args: + decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`): + Indices of decoder input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are decoder input IDs?](../glossary#decoder-input-ids) + + For translation and summarization training, `decoder_input_ids` should be provided. If no + `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right + for denoising pre-training following the paper. + encoder_outputs (`tuple(tuple(jnp.ndarray)`): + Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) + `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of + hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. + encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. 
+ + [What are attention masks?](../glossary#attention-mask) + decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): + Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also + be used by default. + + If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the + paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. + decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the + range `[0, config.max_position_embeddings - 1]`. + past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`): + Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast + auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +# Copied from transformers.models.bart.modeling_flax_bart.shift_tokens_right +def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray: + """ + Shift input ids one token to the right. + """ + shifted_input_ids = jnp.zeros_like(input_ids) + shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1]) + shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id) + + shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids) + return shifted_input_ids + + +# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention with Bart->BlenderbotSmall +class FlaxBlenderbotSmallAttention(nn.Module): + config: BlenderbotSmallConfig + embed_dim: int + num_heads: int + dropout: float = 0.0 + causal: bool = False + bias: bool = True + dtype: jnp.dtype = jnp.float32 # the dtype of the computation + + def setup(self) -> None: + self.head_dim = self.embed_dim // self.num_heads + if self.head_dim * self.num_heads != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" + f" and `num_heads`: {self.num_heads})." 
+            )
+
+        dense = partial(
+            nn.Dense,
+            self.embed_dim,
+            use_bias=self.bias,
+            dtype=self.dtype,
+            kernel_init=jax.nn.initializers.normal(self.config.init_std),
+        )
+
+        self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
+        self.out_proj = dense()
+
+        self.dropout_layer = nn.Dropout(rate=self.dropout)
+
+        if self.causal:
+            self.causal_mask = make_causal_mask(
+                jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
+            )
+
+    def _split_heads(self, hidden_states):
+        return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))
+
+    def _merge_heads(self, hidden_states):
+        return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
+
+    @nn.compact
+    def _concatenate_to_cache(self, key, value, query, attention_mask):
+        """
+        This function takes projected key, value states from a single input token and concatenates the states to cached
+        states from previous steps. This function is slightly adapted from the official Flax repository:
+        https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
+        """
+        # detect if we're initializing by absence of existing cache data.
+        is_initialized = self.has_variable("cache", "cached_key")
+        cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
+        cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
+        cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
+
+        if is_initialized:
+            *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
+            # update key, value caches with our new 1d spatial slices
+            cur_index = cache_index.value
+            indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
+            key = lax.dynamic_update_slice(cached_key.value, key, indices)
+            value = lax.dynamic_update_slice(cached_value.value, value, indices)
+            cached_key.value = key
+            cached_value.value = value
+            num_updated_cache_vectors = query.shape[1]
+            cache_index.value = cache_index.value + num_updated_cache_vectors
+            # causal mask for cached decoder self-attention: our single query position should only attend
+            # to those key positions that have already been generated and cached, not the remaining zero elements.
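+            # Illustrative note (added comment): with max_length = 6, cur_index = 2 and a single new token
+            # (num_updated_cache_vectors = 1), `jnp.arange(max_length) < cur_index + num_updated_cache_vectors`
+            # evaluates to [True, True, True, False, False, False], i.e. the query may only attend to cache
+            # positions 0-2.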
+ pad_mask = jnp.broadcast_to( + jnp.arange(max_length) < cur_index + num_updated_cache_vectors, + tuple(batch_dims) + (1, num_updated_cache_vectors, max_length), + ) + attention_mask = combine_masks(pad_mask, attention_mask) + return key, value, attention_mask + + def __call__( + self, + hidden_states: jnp.ndarray, + key_value_states: Optional[jnp.ndarray] = None, + attention_mask: Optional[jnp.ndarray] = None, + init_cache: bool = False, + deterministic: bool = True, + ) -> Tuple[jnp.ndarray]: + """Input shape: Batch x Time x Channel""" + + # if key_value_states are provided this layer is used as a cross-attention layer + # for the decoder + is_cross_attention = key_value_states is not None + batch_size = hidden_states.shape[0] + + # get query proj + query_states = self.q_proj(hidden_states) + # get key, value proj + if is_cross_attention: + # cross_attentions + key_states = self.k_proj(key_value_states) + value_states = self.v_proj(key_value_states) + else: + # self_attention + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + query_states = self._split_heads(query_states) + key_states = self._split_heads(key_states) + value_states = self._split_heads(value_states) + + # handle cache prepare causal attention mask + if self.causal: + query_length, key_length = query_states.shape[1], key_states.shape[1] + if self.has_variable("cache", "cached_key"): + mask_shift = self.variables["cache"]["cache_index"] + max_decoder_length = self.variables["cache"]["cached_key"].shape[1] + causal_mask = lax.dynamic_slice( + self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length) + ) + else: + causal_mask = self.causal_mask[:, :, :query_length, :key_length] + causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:]) + + # combine masks if needed + if attention_mask is not None and self.causal: + attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) + attention_mask = combine_masks(attention_mask, causal_mask) + elif self.causal: + attention_mask = causal_mask + elif attention_mask is not None: + attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) + + # During fast autoregressive decoding, we feed one position at a time, + # and cache the keys and values step by step. + if self.causal and (self.has_variable("cache", "cached_key") or init_cache): + key_states, value_states, attention_mask = self._concatenate_to_cache( + key_states, value_states, query_states, attention_mask + ) + + # Convert the boolean attention mask to an attention bias. 
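+        # Explanatory note (added comment): `lax.select` maps unmasked positions (mask > 0) to a bias of 0.0
+        # and masked positions to `jnp.finfo(self.dtype).min`, so masked logits become very large negative
+        # numbers and receive ~zero probability after the softmax inside `dot_product_attention_weights`.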
+ if attention_mask is not None: + # attention mask in the form of attention bias + attention_bias = lax.select( + attention_mask > 0, + jnp.full(attention_mask.shape, 0.0).astype(self.dtype), + jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype), + ) + else: + attention_bias = None + + dropout_rng = None + if not deterministic and self.dropout > 0.0: + dropout_rng = self.make_rng("dropout") + + attn_weights = dot_product_attention_weights( + query_states, + key_states, + bias=attention_bias, + dropout_rng=dropout_rng, + dropout_rate=self.dropout, + broadcast_dropout=True, + deterministic=deterministic, + dtype=self.dtype, + precision=None, + ) + + attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states) + attn_output = self._merge_heads(attn_output) + attn_output = self.out_proj(attn_output) + + return attn_output, attn_weights + + +# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartEncoderLayer with Bart->BlenderbotSmall +class FlaxBlenderbotSmallEncoderLayer(nn.Module): + config: BlenderbotSmallConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self) -> None: + self.embed_dim = self.config.d_model + self.self_attn = FlaxBlenderbotSmallAttention( + config=self.config, + embed_dim=self.embed_dim, + num_heads=self.config.encoder_attention_heads, + dropout=self.config.attention_dropout, + dtype=self.dtype, + ) + self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) + self.dropout_layer = nn.Dropout(rate=self.config.dropout) + self.activation_fn = ACT2FN[self.config.activation_function] + self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout) + self.fc1 = nn.Dense( + self.config.encoder_ffn_dim, + dtype=self.dtype, + kernel_init=jax.nn.initializers.normal(self.config.init_std), + ) + self.fc2 = nn.Dense( + self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std) + ) + self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) + + def __call__( + self, + hidden_states: jnp.ndarray, + attention_mask: jnp.ndarray, + output_attentions: bool = True, + deterministic: bool = True, + ) -> Tuple[jnp.ndarray]: + residual = hidden_states + hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask) + + hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) + hidden_states = residual + hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + + residual = hidden_states + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic) + hidden_states = self.fc2(hidden_states) + hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) + hidden_states = residual + hidden_states + hidden_states = self.final_layer_norm(hidden_states) + + outputs = (hidden_states,) + + if output_attentions: + outputs += (attn_weights,) + + return outputs + + +# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartEncoderLayerCollection with Bart->BlenderbotSmall +class FlaxBlenderbotSmallEncoderLayerCollection(nn.Module): + config: BlenderbotSmallConfig + dtype: jnp.dtype = jnp.float32 # the dtype of the computation + + def setup(self): + self.layers = [ + FlaxBlenderbotSmallEncoderLayer(self.config, name=str(i), dtype=self.dtype) + for i in range(self.config.encoder_layers) + ] + self.layerdrop = self.config.encoder_layerdrop + + def __call__( + self, + 
hidden_states, + attention_mask, + deterministic: bool = True, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ): + all_attentions = () if output_attentions else None + all_hidden_states = () if output_hidden_states else None + + for encoder_layer in self.layers: + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) + dropout_probability = random.uniform(0, 1) + if not deterministic and (dropout_probability < self.layerdrop): # skip the layer + layer_outputs = (None, None) + else: + layer_outputs = encoder_layer( + hidden_states, + attention_mask, + output_attentions, + deterministic, + ) + hidden_states = layer_outputs[0] + if output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + + if output_hidden_states: + all_hidden_states += (hidden_states,) + + outputs = (hidden_states, all_hidden_states, all_attentions) + + if not return_dict: + return tuple(v for v in outputs if v is not None) + + return FlaxBaseModelOutput( + last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions + ) + + +# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderLayer with Bart->BlenderbotSmall +class FlaxBlenderbotSmallDecoderLayer(nn.Module): + config: BlenderbotSmallConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self) -> None: + self.embed_dim = self.config.d_model + self.self_attn = FlaxBlenderbotSmallAttention( + config=self.config, + embed_dim=self.embed_dim, + num_heads=self.config.decoder_attention_heads, + dropout=self.config.attention_dropout, + causal=True, + dtype=self.dtype, + ) + self.dropout_layer = nn.Dropout(rate=self.config.dropout) + self.activation_fn = ACT2FN[self.config.activation_function] + self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout) + + self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) + self.encoder_attn = FlaxBlenderbotSmallAttention( + config=self.config, + embed_dim=self.embed_dim, + num_heads=self.config.decoder_attention_heads, + dropout=self.config.attention_dropout, + dtype=self.dtype, + ) + self.encoder_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) + self.fc1 = nn.Dense( + self.config.decoder_ffn_dim, + dtype=self.dtype, + kernel_init=jax.nn.initializers.normal(self.config.init_std), + ) + self.fc2 = nn.Dense( + self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std) + ) + self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) + + def __call__( + self, + hidden_states: jnp.ndarray, + attention_mask: jnp.ndarray, + encoder_hidden_states: Optional[jnp.ndarray] = None, + encoder_attention_mask: Optional[jnp.ndarray] = None, + init_cache: bool = False, + output_attentions: bool = True, + deterministic: bool = True, + ) -> Tuple[jnp.ndarray]: + residual = hidden_states + + # Self Attention + hidden_states, self_attn_weights = self.self_attn( + hidden_states=hidden_states, attention_mask=attention_mask, init_cache=init_cache + ) + hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) + hidden_states = residual + hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + + # Cross-Attention Block + cross_attn_weights = None + if encoder_hidden_states is not None: + residual = hidden_states + + hidden_states, cross_attn_weights = self.encoder_attn( + hidden_states=hidden_states, + 
key_value_states=encoder_hidden_states, + attention_mask=encoder_attention_mask, + ) + hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) + hidden_states = residual + hidden_states + hidden_states = self.encoder_attn_layer_norm(hidden_states) + + # Fully Connected + residual = hidden_states + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic) + hidden_states = self.fc2(hidden_states) + hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) + hidden_states = residual + hidden_states + hidden_states = self.final_layer_norm(hidden_states) + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights, cross_attn_weights) + + return outputs + + +# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderLayerCollection with Bart->BlenderbotSmall +class FlaxBlenderbotSmallDecoderLayerCollection(nn.Module): + config: BlenderbotSmallConfig + dtype: jnp.dtype = jnp.float32 # the dtype of the computation + + def setup(self): + self.layers = [ + FlaxBlenderbotSmallDecoderLayer(self.config, name=str(i), dtype=self.dtype) + for i in range(self.config.decoder_layers) + ] + self.layerdrop = self.config.decoder_layerdrop + + def __call__( + self, + hidden_states, + attention_mask, + encoder_hidden_states: Optional[jnp.ndarray] = None, + encoder_attention_mask: Optional[jnp.ndarray] = None, + deterministic: bool = True, + init_cache: bool = False, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ): + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None + + for decoder_layer in self.layers: + if output_hidden_states: + all_hidden_states += (hidden_states,) + # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) + dropout_probability = random.uniform(0, 1) + if not deterministic and (dropout_probability < self.layerdrop): + layer_outputs = (None, None, None) + else: + layer_outputs = decoder_layer( + hidden_states, + attention_mask=attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + init_cache=init_cache, + output_attentions=output_attentions, + deterministic=deterministic, + ) + + hidden_states = layer_outputs[0] + if output_attentions: + all_self_attns += (layer_outputs[1],) + + if encoder_hidden_states is not None: + all_cross_attentions += (layer_outputs[2],) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + outputs = [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions] + + if not return_dict: + return tuple(v for v in outputs if v is not None) + + return FlaxBaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + hidden_states=all_hidden_states, + attentions=all_self_attns, + cross_attentions=all_cross_attentions, + ) + + +class FlaxBlenderbotSmallEncoder(nn.Module): + config: BlenderbotSmallConfig + embed_tokens: nn.Embed + dtype: jnp.dtype = jnp.float32 # the dtype of the computation + + def setup(self): + self.dropout_layer = nn.Dropout(rate=self.config.dropout) + + embed_dim = self.config.d_model + self.padding_idx = self.config.pad_token_id + self.max_source_positions = 
self.config.max_position_embeddings + self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0 + + self.embed_positions = nn.Embed( + self.config.max_position_embeddings, + embed_dim, + embedding_init=jax.nn.initializers.normal(self.config.init_std), + ) + self.layers = FlaxBlenderbotSmallEncoderLayerCollection(self.config, self.dtype) + self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) + + def __call__( + self, + input_ids, + attention_mask, + position_ids, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + deterministic: bool = True, + ): + input_shape = input_ids.shape + input_ids = input_ids.reshape(-1, input_shape[-1]) + + inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale + + embed_pos = self.embed_positions(position_ids) + + hidden_states = inputs_embeds + embed_pos + hidden_states = self.layernorm_embedding(hidden_states) + hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) + + outputs = self.layers( + hidden_states, + attention_mask, + deterministic=deterministic, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + if not return_dict: + return outputs + + return FlaxBaseModelOutput( + last_hidden_state=outputs.last_hidden_state, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +class FlaxBlenderbotSmallDecoder(nn.Module): + config: BlenderbotSmallConfig + embed_tokens: nn.Embed + dtype: jnp.dtype = jnp.float32 # the dtype of the computation + + def setup(self): + self.dropout_layer = nn.Dropout(rate=self.config.dropout) + + embed_dim = self.config.d_model + self.padding_idx = self.config.pad_token_id + self.max_target_positions = self.config.max_position_embeddings + self.embed_scale = math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0 + + self.embed_positions = nn.Embed( + self.config.max_position_embeddings, + embed_dim, + embedding_init=jax.nn.initializers.normal(self.config.init_std), + ) + + self.layers = FlaxBlenderbotSmallDecoderLayerCollection(self.config, self.dtype) + self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) + + def __call__( + self, + input_ids, + attention_mask, + position_ids, + encoder_hidden_states: Optional[jnp.ndarray] = None, + encoder_attention_mask: Optional[jnp.ndarray] = None, + init_cache: bool = False, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + deterministic: bool = True, + ): + input_shape = input_ids.shape + input_ids = input_ids.reshape(-1, input_shape[-1]) + + inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale + + # embed positions + positions = self.embed_positions(position_ids) + + # BlenderbotSmall applies layer norm on inputs_embeds in decoder + inputs_embeds = self.layernorm_embedding(inputs_embeds) + hidden_states = inputs_embeds + positions + + hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) + + outputs = self.layers( + hidden_states, + attention_mask, + encoder_hidden_states, + encoder_attention_mask, + deterministic=deterministic, + init_cache=init_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + if not return_dict: + return outputs + + return FlaxBaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=outputs.last_hidden_state, + hidden_states=outputs.hidden_states, + 
attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + +# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartModule with Bart->BlenderbotSmall +class FlaxBlenderbotSmallModule(nn.Module): + config: BlenderbotSmallConfig + dtype: jnp.dtype = jnp.float32 # the dtype of the computation + + def setup(self): + self.shared = nn.Embed( + self.config.vocab_size, + self.config.d_model, + embedding_init=jax.nn.initializers.normal(self.config.init_std), + dtype=self.dtype, + ) + + self.encoder = FlaxBlenderbotSmallEncoder(self.config, dtype=self.dtype, embed_tokens=self.shared) + self.decoder = FlaxBlenderbotSmallDecoder(self.config, dtype=self.dtype, embed_tokens=self.shared) + + def _get_encoder_module(self): + return self.encoder + + def _get_decoder_module(self): + return self.decoder + + def __call__( + self, + input_ids, + attention_mask, + decoder_input_ids, + decoder_attention_mask, + position_ids, + decoder_position_ids, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + deterministic: bool = True, + ): + encoder_outputs = self.encoder( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + deterministic=deterministic, + ) + + decoder_outputs = self.decoder( + input_ids=decoder_input_ids, + attention_mask=decoder_attention_mask, + position_ids=decoder_position_ids, + encoder_hidden_states=encoder_outputs[0], + encoder_attention_mask=attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + deterministic=deterministic, + ) + + if not return_dict: + return decoder_outputs + encoder_outputs + + return FlaxSeq2SeqModelOutput( + last_hidden_state=decoder_outputs.last_hidden_state, + decoder_hidden_states=decoder_outputs.hidden_states, + decoder_attentions=decoder_outputs.attentions, + cross_attentions=decoder_outputs.cross_attentions, + encoder_last_hidden_state=encoder_outputs.last_hidden_state, + encoder_hidden_states=encoder_outputs.hidden_states, + encoder_attentions=encoder_outputs.attentions, + ) + + +class FlaxBlenderbotSmallPreTrainedModel(FlaxPreTrainedModel): + config_class = BlenderbotSmallConfig + base_model_prefix: str = "model" + module_class: nn.Module = None + + def __init__( + self, + config: BlenderbotSmallConfig, + input_shape: Tuple[int] = (1, 1), + seed: int = 0, + dtype: jnp.dtype = jnp.float32, + _do_init: bool = True, + **kwargs, + ): + module = self.module_class(config=config, dtype=dtype, **kwargs) + super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) + + def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: + # init input tensors + input_ids = jnp.zeros(input_shape, dtype="i4") + # make sure initialization pass will work for FlaxBlenderbotSmallForSequenceClassificationModule + input_ids = input_ids.at[(..., -1)].set(self.config.eos_token_id) + attention_mask = jnp.ones_like(input_ids) + decoder_input_ids = input_ids + decoder_attention_mask = jnp.ones_like(input_ids) + + batch_size, sequence_length = input_ids.shape + position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) + decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) + + params_rng, dropout_rng = 
jax.random.split(rng) + rngs = {"params": params_rng, "dropout": dropout_rng} + + random_params = self.module.init( + rngs, + input_ids, + attention_mask, + decoder_input_ids, + decoder_attention_mask, + position_ids, + decoder_position_ids, + )["params"] + + if params is not None: + random_params = flatten_dict(unfreeze(random_params)) + params = flatten_dict(unfreeze(params)) + for missing_key in self._missing_keys: + params[missing_key] = random_params[missing_key] + self._missing_keys = set() + return freeze(unflatten_dict(params)) + else: + return random_params + + def init_cache(self, batch_size, max_length, encoder_outputs): + r""" + Args: + batch_size (`int`): + batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. + max_length (`int`): + maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized + cache. + encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray)]`): + `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: + `attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) + is a sequence of hidden-states at the output of the last layer of the encoder. Used in the + cross-attention of the decoder. + """ + # init input variables to retrieve cache + decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4") + decoder_attention_mask = jnp.ones_like(decoder_input_ids) + decoder_position_ids = jnp.broadcast_to( + jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape + ) + + def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): + decoder_module = module._get_decoder_module() + return decoder_module( + decoder_input_ids, + decoder_attention_mask, + decoder_position_ids, + **kwargs, + ) + + init_variables = self.module.init( + jax.random.PRNGKey(0), + decoder_input_ids=decoder_input_ids, + decoder_attention_mask=decoder_attention_mask, + decoder_position_ids=decoder_position_ids, + encoder_hidden_states=encoder_outputs[0], + init_cache=True, + method=_decoder_forward, # we only need to call the decoder to init the cache + ) + return unfreeze(init_variables["cache"]) + + @add_start_docstrings(BLENDERBOT_SMALL_ENCODE_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=BlenderbotSmallConfig) + def encode( + self, + input_ids: jnp.ndarray, + attention_mask: Optional[jnp.ndarray] = None, + position_ids: Optional[jnp.ndarray] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + train: bool = False, + params: dict = None, + dropout_rng: PRNGKey = None, + ): + r""" + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, FlaxBlenderbotSmallForConditionalGeneration + + >>> model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M") + >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M") + + >>> text = "My friends are cool but they eat too many carbs." 
+ >>> inputs = tokenizer(text, max_length=1024, return_tensors="np") + >>> encoder_outputs = model.encode(**inputs) + ```""" + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.return_dict + + if attention_mask is None: + attention_mask = jnp.ones_like(input_ids) + if position_ids is None: + batch_size, sequence_length = input_ids.shape + position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) + + # Handle any PRNG if needed + rngs = {} + if dropout_rng is not None: + rngs["dropout"] = dropout_rng + + def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs): + encode_module = module._get_encoder_module() + return encode_module(input_ids, attention_mask, position_ids, **kwargs) + + return self.module.apply( + {"params": params or self.params}, + input_ids=jnp.array(input_ids, dtype="i4"), + attention_mask=jnp.array(attention_mask, dtype="i4"), + position_ids=jnp.array(position_ids, dtype="i4"), + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + deterministic=not train, + rngs=rngs, + method=_encoder_forward, + ) + + @add_start_docstrings(BLENDERBOT_SMALL_DECODE_INPUTS_DOCSTRING) + @replace_return_docstrings( + output_type=FlaxBaseModelOutputWithPastAndCrossAttentions, config_class=BlenderbotSmallConfig + ) + def decode( + self, + decoder_input_ids, + encoder_outputs, + encoder_attention_mask: Optional[jnp.ndarray] = None, + decoder_attention_mask: Optional[jnp.ndarray] = None, + decoder_position_ids: Optional[jnp.ndarray] = None, + past_key_values: dict = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + train: bool = False, + params: dict = None, + dropout_rng: PRNGKey = None, + ): + r""" + Returns: + + Example: + + ```python + >>> import jax.numpy as jnp + >>> from transformers import AutoTokenizer, FlaxBlenderbotSmallForConditionalGeneration + + >>> model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M") + >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M") + + >>> text = "My friends are cool but they eat too many carbs." 
+ >>> inputs = tokenizer(text, max_length=1024, return_tensors="np") + >>> encoder_outputs = model.encode(**inputs) + + >>> decoder_start_token_id = model.config.decoder_start_token_id + >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id + + >>> outputs = model.decode(decoder_input_ids, encoder_outputs) + >>> last_decoder_hidden_states = outputs.last_hidden_state + ```""" + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.return_dict + + encoder_hidden_states = encoder_outputs[0] + if encoder_attention_mask is None: + batch_size, sequence_length = encoder_hidden_states.shape[:2] + encoder_attention_mask = jnp.ones((batch_size, sequence_length)) + + batch_size, sequence_length = decoder_input_ids.shape + if decoder_attention_mask is None: + decoder_attention_mask = jnp.ones((batch_size, sequence_length)) + + if decoder_position_ids is None: + if past_key_values is not None: + raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.") + + decoder_position_ids = jnp.broadcast_to( + jnp.arange(sequence_length)[None, :], (batch_size, sequence_length) + ) + + # Handle any PRNG if needed + rngs = {} + if dropout_rng is not None: + rngs["dropout"] = dropout_rng + + inputs = {"params": params or self.params} + + # if past_key_values are passed then cache is already initialized a private flag init_cache has to be + # passed down to ensure cache is used. It has to be made sure that cache is marked as mutable so that + # it can be changed by FlaxBlenderbotSmallAttention module + if past_key_values: + inputs["cache"] = past_key_values + mutable = ["cache"] + else: + mutable = False + + def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): + decoder_module = module._get_decoder_module() + return decoder_module( + decoder_input_ids, + decoder_attention_mask, + decoder_position_ids, + **kwargs, + ) + + outputs = self.module.apply( + inputs, + decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), + decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), + decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"), + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"), + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + deterministic=not train, + rngs=rngs, + mutable=mutable, + method=_decoder_forward, + ) + + # add updated cache to model output + if past_key_values is not None and return_dict: + outputs, past = outputs + outputs["past_key_values"] = unfreeze(past["cache"]) + return outputs + elif past_key_values is not None and not return_dict: + outputs, past = outputs + outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:] + + return outputs + + def __call__( + self, + input_ids: jnp.ndarray, + attention_mask: Optional[jnp.ndarray] = None, + decoder_input_ids: Optional[jnp.ndarray] = None, + decoder_attention_mask: Optional[jnp.ndarray] = None, + position_ids: Optional[jnp.ndarray] = None, + decoder_position_ids: Optional[jnp.ndarray] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: 
Optional[bool] = None, + train: bool = False, + params: dict = None, + dropout_rng: PRNGKey = None, + ): + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.return_dict + + # prepare encoder inputs + if attention_mask is None: + attention_mask = jnp.ones_like(input_ids) + if position_ids is None: + batch_size, sequence_length = input_ids.shape + position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) + + # prepare decoder inputs + if decoder_input_ids is None: + decoder_input_ids = shift_tokens_right( + input_ids, self.config.pad_token_id, decoder_start_token_id=self.config.decoder_start_token_id + ) + if decoder_attention_mask is None: + decoder_attention_mask = jnp.ones_like(decoder_input_ids) + if decoder_position_ids is None: + batch_size, sequence_length = decoder_input_ids.shape + decoder_position_ids = jnp.broadcast_to( + jnp.arange(sequence_length)[None, :], (batch_size, sequence_length) + ) + + # Handle any PRNG if needed + rngs = {"dropout": dropout_rng} if dropout_rng is not None else {} + + return self.module.apply( + {"params": params or self.params}, + input_ids=jnp.array(input_ids, dtype="i4"), + attention_mask=jnp.array(attention_mask, dtype="i4"), + position_ids=jnp.array(position_ids, dtype="i4"), + decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), + decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), + decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"), + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + deterministic=not train, + rngs=rngs, + ) + + +@add_start_docstrings( + "The bare BlenderbotSmall Model transformer outputting raw hidden-states without any specific head on top.", + BLENDERBOT_SMALL_START_DOCSTRING, +) +class FlaxBlenderbotSmallModel(FlaxBlenderbotSmallPreTrainedModel): + config: BlenderbotSmallConfig + dtype: jnp.dtype = jnp.float32 # the dtype of the computation + module_class = FlaxBlenderbotSmallModule + + +append_call_sample_docstring(FlaxBlenderbotSmallModel, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqModelOutput, _CONFIG_FOR_DOC) + + +# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartForConditionalGenerationModule with Bart->BlenderbotSmall +class FlaxBlenderbotSmallForConditionalGenerationModule(nn.Module): + config: BlenderbotSmallConfig + dtype: jnp.dtype = jnp.float32 + bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros + + def setup(self): + self.model = FlaxBlenderbotSmallModule(config=self.config, dtype=self.dtype) + self.lm_head = nn.Dense( + self.model.shared.num_embeddings, + use_bias=False, + dtype=self.dtype, + kernel_init=jax.nn.initializers.normal(self.config.init_std), + ) + self.final_logits_bias = self.param("final_logits_bias", self.bias_init, (1, self.model.shared.num_embeddings)) + + def _get_encoder_module(self): + return self.model.encoder + + def _get_decoder_module(self): + return self.model.decoder + + def __call__( + self, + input_ids, + attention_mask, + decoder_input_ids, + decoder_attention_mask, + position_ids, + decoder_position_ids, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + deterministic: bool = True, + ): + outputs = self.model( + 
input_ids=input_ids, + attention_mask=attention_mask, + decoder_input_ids=decoder_input_ids, + decoder_attention_mask=decoder_attention_mask, + position_ids=position_ids, + decoder_position_ids=decoder_position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + deterministic=deterministic, + ) + + hidden_states = outputs[0] + + if self.config.tie_word_embeddings: + shared_embedding = self.model.variables["params"]["shared"]["embedding"] + lm_logits = self.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states) + else: + lm_logits = self.lm_head(hidden_states) + + lm_logits += jax.lax.stop_gradient(self.final_logits_bias.astype(self.dtype)) + + if not return_dict: + output = (lm_logits,) + outputs[1:] + return output + + return FlaxSeq2SeqLMOutput( + logits=lm_logits, + decoder_hidden_states=outputs.decoder_hidden_states, + decoder_attentions=outputs.decoder_attentions, + cross_attentions=outputs.cross_attentions, + encoder_last_hidden_state=outputs.encoder_last_hidden_state, + encoder_hidden_states=outputs.encoder_hidden_states, + encoder_attentions=outputs.encoder_attentions, + ) + + +@add_start_docstrings( + "The BLENDERBOT_SMALL Model with a language modeling head. Can be used for summarization.", + BLENDERBOT_SMALL_START_DOCSTRING, +) +class FlaxBlenderbotSmallForConditionalGeneration(FlaxBlenderbotSmallPreTrainedModel): + module_class = FlaxBlenderbotSmallForConditionalGenerationModule + dtype: jnp.dtype = jnp.float32 + + @add_start_docstrings(BLENDERBOT_SMALL_DECODE_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=BlenderbotSmallConfig) + def decode( + self, + decoder_input_ids, + encoder_outputs, + encoder_attention_mask: Optional[jnp.ndarray] = None, + decoder_attention_mask: Optional[jnp.ndarray] = None, + decoder_position_ids: Optional[jnp.ndarray] = None, + past_key_values: dict = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + deterministic: bool = True, + params: dict = None, + dropout_rng: PRNGKey = None, + ): + r""" + Returns: + + Example: + + ```python + >>> import jax.numpy as jnp + >>> from transformers import AutoTokenizer, FlaxBlenderbotSmallForConditionalGeneration + + >>> model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M") + >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M") + + >>> text = "My friends are cool but they eat too many carbs." 
+ >>> inputs = tokenizer(text, max_length=1024, return_tensors="np") + >>> encoder_outputs = model.encode(**inputs) + + >>> decoder_start_token_id = model.config.decoder_start_token_id + >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id + + >>> outputs = model.decode(decoder_input_ids, encoder_outputs) + >>> logits = outputs.logits + ```""" + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.return_dict + + encoder_hidden_states = encoder_outputs[0] + if encoder_attention_mask is None: + batch_size, sequence_length = encoder_hidden_states.shape[:2] + encoder_attention_mask = jnp.ones((batch_size, sequence_length)) + + batch_size, sequence_length = decoder_input_ids.shape + if decoder_attention_mask is None: + decoder_attention_mask = jnp.ones((batch_size, sequence_length)) + + if decoder_position_ids is None: + if past_key_values is not None: + raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.") + + decoder_position_ids = jnp.broadcast_to( + jnp.arange(sequence_length)[None, :], (batch_size, sequence_length) + ) + + # Handle any PRNG if needed + rngs = {} + if dropout_rng is not None: + rngs["dropout"] = dropout_rng + + inputs = {"params": params or self.params} + + # if past_key_values are passed then cache is already initialized a private flag init_cache has to be + # passed down to ensure cache is used. It has to be made sure that cache is marked as mutable so that + # it can be changed by FlaxBlenderbotSmallAttention module + if past_key_values: + inputs["cache"] = past_key_values + mutable = ["cache"] + else: + mutable = False + + def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): + decoder_module = module._get_decoder_module() + outputs = decoder_module( + decoder_input_ids, + decoder_attention_mask, + decoder_position_ids, + **kwargs, + ) + hidden_states = outputs[0] + + if self.config.tie_word_embeddings: + shared_embedding = module.model.variables["params"]["shared"]["embedding"] + lm_logits = module.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states) + else: + lm_logits = module.lm_head(hidden_states) + + lm_logits += module.final_logits_bias.astype(self.dtype) + return lm_logits, outputs + + outputs = self.module.apply( + inputs, + decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), + decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), + decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"), + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"), + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + deterministic=deterministic, + rngs=rngs, + mutable=mutable, + method=_decoder_forward, + ) + + if past_key_values is None: + lm_logits, decoder_outputs = outputs + else: + (lm_logits, decoder_outputs), past = outputs + + if return_dict: + outputs = FlaxCausalLMOutputWithCrossAttentions( + logits=lm_logits, + hidden_states=decoder_outputs.hidden_states, + attentions=decoder_outputs.attentions, + cross_attentions=decoder_outputs.cross_attentions, + ) + else: + outputs = (lm_logits,) + decoder_outputs[1:] + + # 
add updated cache to model output + if past_key_values is not None and return_dict: + outputs["past_key_values"] = unfreeze(past["cache"]) + return outputs + elif past_key_values is not None and not return_dict: + outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:] + + return outputs + + def prepare_inputs_for_generation( + self, + decoder_input_ids, + max_length, + attention_mask: Optional[jax.Array] = None, + decoder_attention_mask: Optional[jax.Array] = None, + encoder_outputs=None, + **kwargs, + ): + # initializing the cache + batch_size, seq_length = decoder_input_ids.shape + + past_key_values = self.init_cache(batch_size, max_length, encoder_outputs) + # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length. + # But since the decoder uses a causal mask, those positions are masked anyways. + # Thus we can create a single static attention_mask here, which is more efficient for compilation + extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4") + if decoder_attention_mask is not None: + position_ids = decoder_attention_mask.cumsum(axis=-1) - 1 + extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0)) + else: + position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length)) + + return { + "past_key_values": past_key_values, + "encoder_outputs": encoder_outputs, + "encoder_attention_mask": attention_mask, + "decoder_attention_mask": extended_attention_mask, + "decoder_position_ids": position_ids, + } + + def update_inputs_for_generation(self, model_outputs, model_kwargs): + model_kwargs["past_key_values"] = model_outputs.past_key_values + model_kwargs["decoder_position_ids"] = model_kwargs["decoder_position_ids"][:, -1:] + 1 + return model_kwargs + + +FLAX_BLENDERBOT_SMALL_CONDITIONAL_GENERATION_DOCSTRING = """ + Returns: + + Summarization example: + + ```py + >>> from transformers import AutoTokenizer, FlaxBlenderbotSmallForConditionalGeneration + + >>> model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M") + >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M") + + >>> ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs." + >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="np") + + >>> # Generate Summary + >>> summary_ids = model.generate(inputs["input_ids"]).sequences + >>> print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)) + ``` + + Mask filling example: + + ```py + >>> from transformers import AutoTokenizer, FlaxBlenderbotSmallForConditionalGeneration + + >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M") + >>> TXT = "My friends are <mask> but they eat too many carbs."
+ + >>> model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M") + >>> input_ids = tokenizer([TXT], return_tensors="np")["input_ids"] + >>> logits = model(input_ids).logits + + >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item() + >>> probs = jax.nn.softmax(logits[0, masked_index], axis=0) + >>> values, predictions = jax.lax.top_k(probs) + + >>> tokenizer.decode(predictions).split() + ``` +""" + +overwrite_call_docstring( + FlaxBlenderbotSmallForConditionalGeneration, + BLENDERBOT_SMALL_INPUTS_DOCSTRING + FLAX_BLENDERBOT_SMALL_CONDITIONAL_GENERATION_DOCSTRING, +) +append_replace_return_docstrings( + FlaxBlenderbotSmallForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC +) diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py new file mode 100644 index 0000000000000000000000000000000000000000..93a480b1ea2715d6a666c0ad78239d11dbccdf49 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py @@ -0,0 +1,1529 @@ +# coding=utf-8 +# Copyright 2021 The Facebook, Inc and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
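# ---------------------------------------------------------------------------------------------------------------
# Editor's note (illustrative sketch, not part of the upstream diff): the Flax BlenderbotSmall model defined above
# exposes `encode`, `init_cache` and `decode`, and its docstring examples show only a single decode step. The
# helper below sketches how those pieces fit together in a minimal greedy decoding loop driven by
# `past_key_values`; in practice `model.generate()` is the supported path, and the helper name
# `_greedy_decode_sketch` is made up for this example. It assumes only the public API shown above and the
# "facebook/blenderbot_small-90M" checkpoint already used in the surrounding docstrings.
def _greedy_decode_sketch(text: str, max_new_tokens: int = 20) -> str:
    import jax.numpy as jnp
    from transformers import AutoTokenizer, FlaxBlenderbotSmallForConditionalGeneration

    model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")
    tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")

    inputs = tokenizer([text], return_tensors="np")
    # Run the encoder once; its output is reused by every decoder step via cross-attention.
    encoder_outputs = model.encode(**inputs)

    # Pre-allocate the decoder cache, then feed one token at a time starting from `decoder_start_token_id`.
    past_key_values = model.init_cache(1, max_new_tokens + 1, encoder_outputs)
    decoder_input_ids = jnp.full((1, 1), model.config.decoder_start_token_id, dtype="i4")
    # With `past_key_values`, `decode` requires explicit position ids; they advance by one per generated token,
    # mirroring `update_inputs_for_generation` above.
    position_ids = jnp.zeros((1, 1), dtype="i4")

    generated = []
    for _ in range(max_new_tokens):
        outputs = model.decode(
            decoder_input_ids,
            encoder_outputs,
            encoder_attention_mask=inputs["attention_mask"],
            past_key_values=past_key_values,
            decoder_position_ids=position_ids,
        )
        next_token = int(jnp.argmax(outputs.logits[:, -1, :], axis=-1)[0])
        if next_token == model.config.eos_token_id:
            break
        generated.append(next_token)
        # Reuse the updated cache and feed only the newly generated token at the next step.
        past_key_values = outputs.past_key_values
        decoder_input_ids = jnp.full((1, 1), next_token, dtype="i4")
        position_ids = position_ids + 1

    return tokenizer.decode(generated, skip_special_tokens=True)
# ---------------------------------------------------------------------------------------------------------------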
+""" TF 2.0 BlenderbotSmall model.""" + + +from __future__ import annotations + +import random +from typing import List, Optional, Tuple, Union + +import numpy as np +import tensorflow as tf + +from ...activations_tf import get_tf_activation +from ...modeling_tf_outputs import ( + TFBaseModelOutput, + TFBaseModelOutputWithPastAndCrossAttentions, + TFSeq2SeqLMOutput, + TFSeq2SeqModelOutput, +) + +# Public API +from ...modeling_tf_utils import ( + TFCausalLanguageModelingLoss, + TFPreTrainedModel, + keras_serializable, + unpack_inputs, +) +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax +from ...utils import ( + add_code_sample_docstrings, + add_end_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from .configuration_blenderbot_small import BlenderbotSmallConfig + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "facebook/blenderbot_small-90M" +_CONFIG_FOR_DOC = "BlenderbotSmallConfig" + + +LARGE_NEGATIVE = -1e8 + + +# Copied from transformers.models.bart.modeling_tf_bart.shift_tokens_right +def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int): + pad_token_id = tf.cast(pad_token_id, input_ids.dtype) + decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype) + start_tokens = tf.fill( + (shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype) + ) + shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1) + # replace possible -100 values in labels by `pad_token_id` + shifted_input_ids = tf.where( + shifted_input_ids == -100, + tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)), + shifted_input_ids, + ) + + # "Verify that `labels` has only positive values and -100" + assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype)) + + # Make sure the assertion op is called by wrapping the result in an identity no-op + with tf.control_dependencies([assert_gte0]): + shifted_input_ids = tf.identity(shifted_input_ids) + + return shifted_input_ids + + +# Copied from transformers.models.bart.modeling_tf_bart._make_causal_mask +def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0): + """ + Make causal mask used for bi-directional self-attention. + """ + bsz = input_ids_shape[0] + tgt_len = input_ids_shape[1] + mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE + mask_cond = tf.range(shape_list(mask)[-1]) + + mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask) + + if past_key_values_length > 0: + mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1) + + return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1)) + + +# Copied from transformers.models.bart.modeling_tf_bart._expand_mask +def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None): + """ + Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
+ """ + src_len = shape_list(mask)[1] + tgt_len = tgt_len if tgt_len is not None else src_len + one_cst = tf.constant(1.0) + mask = tf.cast(mask, dtype=one_cst.dtype) + expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1)) + + return (one_cst - expanded_mask) * LARGE_NEGATIVE + + +# Copied from transformers.models.blenderbot.modeling_tf_blenderbot.TFBlenderbotLearnedPositionalEmbedding with Blenderbot->BlenderbotSmall +class TFBlenderbotSmallLearnedPositionalEmbedding(tf.keras.layers.Embedding): + """ + This module learns positional embeddings up to a fixed maximum size. + """ + + def __init__(self, num_embeddings: int, embedding_dim: int, **kwargs): + super().__init__(num_embeddings, embedding_dim, **kwargs) + + def call( + self, input_shape: tf.TensorShape, past_key_values_length: int = 0, position_ids: tf.Tensor | None = None + ): + """Input is expected to be of size [bsz x seqlen].""" + if position_ids is None: + seq_len = input_shape[1] + position_ids = tf.range(seq_len, delta=1, name="range") + position_ids += past_key_values_length + + return super().call(tf.cast(position_ids, dtype=tf.int32)) + + +# Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with Bart->BlenderbotSmall +class TFBlenderbotSmallAttention(tf.keras.layers.Layer): + """Multi-headed attention from "Attention Is All You Need""" + + def __init__( + self, + embed_dim: int, + num_heads: int, + dropout: float = 0.0, + is_decoder: bool = False, + bias: bool = True, + **kwargs, + ): + super().__init__(**kwargs) + self.embed_dim = embed_dim + + self.num_heads = num_heads + self.dropout = tf.keras.layers.Dropout(dropout) + self.head_dim = embed_dim // num_heads + if (self.head_dim * num_heads) != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" + f" and `num_heads`: {num_heads})." 
+ ) + self.scaling = self.head_dim**-0.5 + self.is_decoder = is_decoder + + self.k_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj") + self.q_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj") + self.v_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj") + self.out_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj") + + def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int): + return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3)) + + def call( + self, + hidden_states: tf.Tensor, + key_value_states: tf.Tensor | None = None, + past_key_value: Tuple[Tuple[tf.Tensor]] | None = None, + attention_mask: tf.Tensor | None = None, + layer_head_mask: tf.Tensor | None = None, + training: Optional[bool] = False, + ) -> Tuple[tf.Tensor, tf.Tensor | None]: + """Input shape: Batch x Time x Channel""" + + # if key_value_states are provided this layer is used as a cross-attention layer + # for the decoder + is_cross_attention = key_value_states is not None + bsz, tgt_len, embed_dim = shape_list(hidden_states) + + # get query proj + query_states = self.q_proj(hidden_states) * self.scaling + # get key, value proj + if is_cross_attention and past_key_value is not None: + # reuse k,v, cross_attentions + key_states = past_key_value[0] + value_states = past_key_value[1] + elif is_cross_attention: + # cross_attentions + key_states = self._shape(self.k_proj(key_value_states), -1, bsz) + value_states = self._shape(self.v_proj(key_value_states), -1, bsz) + elif past_key_value is not None: + # reuse k, v, self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + key_states = tf.concat([past_key_value[0], key_states], axis=2) + value_states = tf.concat([past_key_value[1], value_states], axis=2) + else: + # self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + + if self.is_decoder: + # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of + # all previous decoder key/value_states. 
Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_states, value_states) + + proj_shape = (bsz * self.num_heads, -1, self.head_dim) + query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape) + key_states = tf.reshape(key_states, proj_shape) + value_states = tf.reshape(value_states, proj_shape) + + src_len = shape_list(key_states)[1] + attn_weights = tf.matmul(query_states, key_states, transpose_b=True) + + tf.debugging.assert_equal( + shape_list(attn_weights), + [bsz * self.num_heads, tgt_len, src_len], + message=( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {shape_list(attn_weights)}" + ), + ) + + if attention_mask is not None: + tf.debugging.assert_equal( + shape_list(attention_mask), + [bsz, 1, tgt_len, src_len], + message=( + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" + f" {shape_list(attention_mask)}" + ), + ) + + attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype) + attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask + attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) + + attn_weights = stable_softmax(attn_weights, axis=-1) + + if layer_head_mask is not None: + tf.debugging.assert_equal( + shape_list(layer_head_mask), + [self.num_heads], + message=( + f"Head mask for a single layer should be of size {(self.num_heads)}, but is" + f" {shape_list(layer_head_mask)}" + ), + ) + + attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( + attn_weights, (bsz, self.num_heads, tgt_len, src_len) + ) + attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) + + attn_probs = self.dropout(attn_weights, training=training) + attn_output = tf.matmul(attn_probs, value_states) + + tf.debugging.assert_equal( + shape_list(attn_output), + [bsz * self.num_heads, tgt_len, self.head_dim], + message=( + f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" + f" {shape_list(attn_output)}" + ), + ) + + attn_output = tf.transpose( + tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3) + ) + attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim)) + + attn_output = self.out_proj(attn_output) + attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + + return attn_output, attn_weights, past_key_value + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "k_proj", None) is not None: + with tf.name_scope(self.k_proj.name): + self.k_proj.build([None, None, self.embed_dim]) + if getattr(self, "q_proj", None) is not None: + with tf.name_scope(self.q_proj.name): + self.q_proj.build([None, None, self.embed_dim]) + if getattr(self, "v_proj", None) is not None: + with tf.name_scope(self.v_proj.name): + self.v_proj.build([None, None, self.embed_dim]) + if getattr(self, "out_proj", None) is not None: + with tf.name_scope(self.out_proj.name): + self.out_proj.build([None, None, self.embed_dim]) + + +# Copied from transformers.models.bart.modeling_tf_bart.TFBartEncoderLayer with Bart->BlenderbotSmall +class TFBlenderbotSmallEncoderLayer(tf.keras.layers.Layer): + def __init__(self, config: BlenderbotSmallConfig, **kwargs): + 
super().__init__(**kwargs) + self.embed_dim = config.d_model + self.self_attn = TFBlenderbotSmallAttention( + self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name="self_attn" + ) + self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm") + self.dropout = tf.keras.layers.Dropout(config.dropout) + self.activation_fn = get_tf_activation(config.activation_function) + self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout) + self.fc1 = tf.keras.layers.Dense(config.encoder_ffn_dim, name="fc1") + self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2") + self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") + self.config = config + + def call( + self, + hidden_states: tf.Tensor, + attention_mask: np.ndarray | tf.Tensor | None, + layer_head_mask: tf.Tensor | None, + training: Optional[bool] = False, + ) -> tf.Tensor: + """ + Args: + hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`tf.Tensor`): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size + `(encoder_attention_heads,)` + """ + residual = hidden_states + hidden_states, self_attn_weights, _ = self.self_attn( + hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask + ) + + tf.debugging.assert_equal( + shape_list(hidden_states), + shape_list(residual), + message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}", + ) + + hidden_states = self.dropout(hidden_states, training=training) + hidden_states = residual + hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + + residual = hidden_states + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = self.activation_dropout(hidden_states, training=training) + hidden_states = self.fc2(hidden_states) + hidden_states = self.dropout(hidden_states, training=training) + hidden_states = residual + hidden_states + hidden_states = self.final_layer_norm(hidden_states) + + return hidden_states, self_attn_weights + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "self_attn", None) is not None: + with tf.name_scope(self.self_attn.name): + self.self_attn.build(None) + if getattr(self, "self_attn_layer_norm", None) is not None: + with tf.name_scope(self.self_attn_layer_norm.name): + self.self_attn_layer_norm.build([None, None, self.embed_dim]) + if getattr(self, "fc1", None) is not None: + with tf.name_scope(self.fc1.name): + self.fc1.build([None, None, self.embed_dim]) + if getattr(self, "fc2", None) is not None: + with tf.name_scope(self.fc2.name): + self.fc2.build([None, None, self.config.encoder_ffn_dim]) + if getattr(self, "final_layer_norm", None) is not None: + with tf.name_scope(self.final_layer_norm.name): + self.final_layer_norm.build([None, None, self.embed_dim]) + + +# Copied from transformers.models.bart.modeling_tf_bart.TFBartDecoderLayer with Bart->BlenderbotSmall +class TFBlenderbotSmallDecoderLayer(tf.keras.layers.Layer): + def __init__(self, config: BlenderbotSmallConfig, **kwargs): + super().__init__(**kwargs) + self.embed_dim = config.d_model + self.self_attn = TFBlenderbotSmallAttention( + embed_dim=self.embed_dim, + num_heads=config.decoder_attention_heads, + 
dropout=config.attention_dropout, + name="self_attn", + is_decoder=True, + ) + self.dropout = tf.keras.layers.Dropout(config.dropout) + self.activation_fn = get_tf_activation(config.activation_function) + self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout) + + self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm") + self.encoder_attn = TFBlenderbotSmallAttention( + self.embed_dim, + config.decoder_attention_heads, + dropout=config.attention_dropout, + name="encoder_attn", + is_decoder=True, + ) + self.encoder_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm") + self.fc1 = tf.keras.layers.Dense(config.decoder_ffn_dim, name="fc1") + self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2") + self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") + self.config = config + + def call( + self, + hidden_states: tf.Tensor, + attention_mask: np.ndarray | tf.Tensor | None = None, + encoder_hidden_states: np.ndarray | tf.Tensor | None = None, + encoder_attention_mask: np.ndarray | tf.Tensor | None = None, + layer_head_mask: tf.Tensor | None = None, + cross_attn_layer_head_mask: tf.Tensor | None = None, + past_key_value: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, + training: Optional[bool] = False, + ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]: + """ + Args: + hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`tf.Tensor`): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + encoder_hidden_states (`tf.Tensor`): + cross attention input to the layer of shape `(batch, seq_len, embed_dim)` + encoder_attention_mask (`tf.Tensor`): encoder attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size + `(decoder_attention_heads,)` + cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module. 
+ `(decoder_attention_heads,)` + past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states + """ + residual = hidden_states + + # Self Attention + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + # add present self-attn cache to positions 1,2 of present_key_value tuple + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + past_key_value=self_attn_past_key_value, + attention_mask=attention_mask, + layer_head_mask=layer_head_mask, + ) + hidden_states = self.dropout(hidden_states, training=training) + hidden_states = residual + hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + + # Cross-Attention Block + cross_attn_present_key_value = None + cross_attn_weights = None + if encoder_hidden_states is not None: + residual = hidden_states + + # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple + cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None + hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( + hidden_states=hidden_states, + key_value_states=encoder_hidden_states, + attention_mask=encoder_attention_mask, + layer_head_mask=cross_attn_layer_head_mask, + past_key_value=cross_attn_past_key_value, + ) + hidden_states = self.dropout(hidden_states, training=training) + hidden_states = residual + hidden_states + hidden_states = self.encoder_attn_layer_norm(hidden_states) + + # add cross-attn to positions 3,4 of present_key_value tuple + present_key_value = present_key_value + cross_attn_present_key_value + + # Fully Connected + residual = hidden_states + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = self.activation_dropout(hidden_states, training=training) + hidden_states = self.fc2(hidden_states) + hidden_states = self.dropout(hidden_states, training=training) + hidden_states = residual + hidden_states + hidden_states = self.final_layer_norm(hidden_states) + + return ( + hidden_states, + self_attn_weights, + cross_attn_weights, + present_key_value, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "self_attn", None) is not None: + with tf.name_scope(self.self_attn.name): + self.self_attn.build(None) + if getattr(self, "self_attn_layer_norm", None) is not None: + with tf.name_scope(self.self_attn_layer_norm.name): + self.self_attn_layer_norm.build([None, None, self.embed_dim]) + if getattr(self, "encoder_attn", None) is not None: + with tf.name_scope(self.encoder_attn.name): + self.encoder_attn.build(None) + if getattr(self, "encoder_attn_layer_norm", None) is not None: + with tf.name_scope(self.encoder_attn_layer_norm.name): + self.encoder_attn_layer_norm.build([None, None, self.embed_dim]) + if getattr(self, "fc1", None) is not None: + with tf.name_scope(self.fc1.name): + self.fc1.build([None, None, self.embed_dim]) + if getattr(self, "fc2", None) is not None: + with tf.name_scope(self.fc2.name): + self.fc2.build([None, None, self.config.decoder_ffn_dim]) + if getattr(self, "final_layer_norm", None) is not None: + with tf.name_scope(self.final_layer_norm.name): + self.final_layer_norm.build([None, None, self.embed_dim]) + + +class TFBlenderbotSmallPreTrainedModel(TFPreTrainedModel): + config_class = BlenderbotSmallConfig + base_model_prefix = "model" + + +BLENDERBOT_SMALL_START_DOCSTRING = 
r""" + This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it + as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and + behavior. + + + + TensorFlow models and layers in `transformers` accept two formats as input: + + - having all inputs as keyword arguments (like PyTorch models), or + - having all inputs as a list, tuple or dict in the first positional argument. + + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: + + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` + - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: + `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` + - a dictionary with one or several input Tensors associated to the input names given in the docstring: + `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + + + + Args: + config ([`BlenderbotSmallConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. +""" + +BLENDERBOT_SMALL_GENERATION_EXAMPLE = r""" + Conversation example:: + + ```py + >>> from transformers import AutoTokenizer, TFBlenderbotSmallForConditionalGeneration + + >>> mname = "facebook/blenderbot_small-90M" + >>> model = BlenderbotSmallForConditionalGeneration.from_pretrained(mname) + >>> tokenizer = AutoTokenizer.from_pretrained(mname) + + >>> UTTERANCE = "My friends are cool but they eat too many carbs." + >>> print("Human: ", UTTERANCE) + >>> inputs = tokenizer([UTTERANCE], return_tensors="tf") + + >>> reply_ids = model.generate(**inputs) + >>> print("Bot: ", tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0]) + what kind of carbs do they eat? i don't know much about carbs. + + >>> REPLY = "I'm not sure" + >>> print("Human: ", REPLY) + >>> NEXT_UTTERANCE = ( + ... "My friends are cool but they eat too many carbs. " + ... "what kind of carbs do they eat? i don't know much about carbs. " + ... "I'm not sure." + ... 
) + + >>> inputs = tokenizer([NEXT_UTTERANCE], return_tensors="tf") + >>> inputs.pop("token_type_ids") + >>> next_reply_ids = model.generate(**inputs) + >>> print("Bot: ", tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0]) + ``` +""" + +BLENDERBOT_SMALL_INPUTS_DOCSTRING = r""" + Args: + input_ids (`tf.Tensor` of shape `({0})`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`tf.Tensor` of shape `({0})`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): + Indices of decoder input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are decoder input IDs?](../glossary#decoder-input-ids) + + BlenderbotSmall uses the `bos_token_id` as the starting token for `decoder_input_ids` generation. If + `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see + `past_key_values`). + decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): + will be made by default and ignore pad tokens. It is not recommended to set this for most use cases. + decoder_position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the + range `[0, config.max_position_embeddings - 1]`. + head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + decoder_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + encoder_outputs (`tf.FloatTensor`, *optional*): + hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. + of shape `(batch_size, sequence_length, hidden_size)` is a sequence of + past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`) + contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. 
+ use_cache (`bool`, *optional*, defaults to `True`): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). Set to `False` during training, `True` during generation + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the + config will be used instead. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. This argument can be used only in eager mode, in graph mode the value in the config will be + used instead. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in + eager mode, in graph mode the value will always be set to True. + training (`bool`, *optional*, defaults to `False`): + Whether or not to use the model in training mode (some modules like dropout modules have different + behaviors between training and evaluation). +""" + + +@keras_serializable +class TFBlenderbotSmallEncoder(tf.keras.layers.Layer): + config_class = BlenderbotSmallConfig + """ + Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a + [`TFBlenderbotSmallEncoderLayer`]. + + Args: + config: BlenderbotSmallConfig + """ + + def __init__( + self, config: BlenderbotSmallConfig, embed_tokens: Optional[tf.keras.layers.Embedding] = None, **kwargs + ): + super().__init__(**kwargs) + self.config = config + self.dropout = tf.keras.layers.Dropout(config.dropout) + self.layerdrop = config.encoder_layerdrop + self.padding_idx = config.pad_token_id + self.max_source_positions = config.max_position_embeddings + self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0 + + self.embed_tokens = embed_tokens + self.embed_positions = TFBlenderbotSmallLearnedPositionalEmbedding( + config.max_position_embeddings, + config.d_model, + name="embed_positions", + ) + self.layers = [TFBlenderbotSmallEncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)] + self.layernorm_embedding = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding") + self.embed_dim = config.d_model + + def get_embed_tokens(self): + return self.embed_tokens + + def set_embed_tokens(self, embed_tokens): + self.embed_tokens = embed_tokens + + @unpack_inputs + def call( + self, + input_ids=None, + inputs_embeds=None, + attention_mask=None, + head_mask=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + training=False, + ): + """ + Args: + input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you + provide it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. 
+ + [What are attention masks?](../glossary#attention-mask) + head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, `optional): + Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value + in the config will be used instead. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. This argument can be used only in eager mode, in graph mode the value in the config + will be used instead. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used + in eager mode, in graph mode the value will always be set to True. + training (`bool`, *optional*, defaults to `False`): + Whether or not to use the model in training mode (some modules like dropout modules have different + behaviors between training and evaluation). + """ + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = shape_list(input_ids) + elif inputs_embeds is not None: + input_shape = shape_list(inputs_embeds)[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + if inputs_embeds is None: + check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) + inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale + + embed_pos = self.embed_positions(input_shape) + hidden_states = inputs_embeds + embed_pos + hidden_states = self.layernorm_embedding(hidden_states) + hidden_states = self.dropout(hidden_states, training=training) + + # check attention mask and invert + if attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + attention_mask = _expand_mask(attention_mask) + else: + attention_mask = None + + encoder_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + + # check if head_mask has a correct number of layers specified if desired + if head_mask is not None: + tf.debugging.assert_equal( + shape_list(head_mask)[0], + len(self.layers), + message=( + f"The head_mask should be specified for {len(self.layers)} layers, but it is for" + f" {shape_list(head_mask)[0]}." 
+ ), + ) + + # encoder layers + for idx, encoder_layer in enumerate(self.layers): + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) + dropout_probability = random.uniform(0, 1) + if training and (dropout_probability < self.layerdrop): # skip the layer + continue + + hidden_states, attn = encoder_layer( + hidden_states, + attention_mask, + head_mask[idx] if head_mask is not None else None, + ) + + if output_attentions: + all_attentions += (attn,) + + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) + return TFBaseModelOutput( + last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "embed_positions", None) is not None: + with tf.name_scope(self.embed_positions.name): + self.embed_positions.build(None) + if getattr(self, "layernorm_embedding", None) is not None: + with tf.name_scope(self.layernorm_embedding.name): + self.layernorm_embedding.build([None, None, self.embed_dim]) + if getattr(self, "layers", None) is not None: + for layer in self.layers: + with tf.name_scope(layer.name): + layer.build(None) + + +@keras_serializable +class TFBlenderbotSmallDecoder(tf.keras.layers.Layer): + config_class = BlenderbotSmallConfig + """ + Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TFBlenderbotSmallDecoderLayer`] + + Args: + config: BlenderbotSmallConfig + embed_tokens: output embedding + """ + + def __init__( + self, config: BlenderbotSmallConfig, embed_tokens: Optional[tf.keras.layers.Embedding] = None, **kwargs + ): + super().__init__(**kwargs) + self.config = config + self.padding_idx = config.pad_token_id + self.embed_tokens = embed_tokens + self.layerdrop = config.decoder_layerdrop + self.embed_positions = TFBlenderbotSmallLearnedPositionalEmbedding( + config.max_position_embeddings, + config.d_model, + name="embed_positions", + ) + self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0 + self.layers = [TFBlenderbotSmallDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)] + self.layernorm_embedding = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding") + + self.dropout = tf.keras.layers.Dropout(config.dropout) + + def get_embed_tokens(self): + return self.embed_tokens + + def set_embed_tokens(self, embed_tokens): + self.embed_tokens = embed_tokens + + @unpack_inputs + def call( + self, + input_ids=None, + inputs_embeds=None, + attention_mask=None, + position_ids=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + head_mask=None, + cross_attn_head_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + training=False, + ): + r""" + Args: + input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you + provide it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. 
+ + [What are input IDs?](../glossary#input-ids) + attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the + range `[0, config.max_position_embeddings - 1]`. + encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention + of the decoder. + encoder_attention_mask (`tf.Tensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): + Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values + selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up + decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those + that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of + all `decoder_input_ids` of shape `(batch_size, sequence_length)`. + inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value + in the config will be used instead. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. This argument can be used only in eager mode, in graph mode the value in the config + will be used instead. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used + in eager mode, in graph mode the value will always be set to True. 
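To make the `past_key_values` contract described above concrete, here is a hedged sketch of two forward passes through `TFBlenderbotSmallModel`: the first pass caches the decoder key/value states, the second feeds only the newest decoder token together with that cache. The checkpoint name and token ids are placeholders, not part of the library source:

```py
import tensorflow as tf
from transformers import AutoTokenizer, TFBlenderbotSmallModel

tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
model = TFBlenderbotSmallModel.from_pretrained("facebook/blenderbot_small-90M")

enc = tokenizer(["My friends are cool but they eat too many carbs."], return_tensors="tf")
start = model.config.decoder_start_token_id
prefix = tf.constant([[start, 5, 6]])  # placeholder decoder token ids

# First pass: run the full decoder prefix and cache the key/value states.
first = model(
    input_ids=enc["input_ids"],
    attention_mask=enc["attention_mask"],
    decoder_input_ids=prefix,
    use_cache=True,
)

# Second pass: feed only the newest token plus the cached states,
# instead of re-feeding the whole prefix.
second = model(
    input_ids=enc["input_ids"],
    attention_mask=enc["attention_mask"],
    decoder_input_ids=tf.constant([[7]]),
    past_key_values=first.past_key_values,
    use_cache=True,
)

print(second.last_hidden_state.shape)  # (1, 1, d_model): hidden state for the new token only
```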
+ training (`bool`, *optional*, defaults to `False`): + Whether or not to use the model in training mode (some modules like dropout modules have different + behaviors between training and evaluation). + """ + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") + elif input_ids is not None: + input_shape = shape_list(input_ids) + elif inputs_embeds is not None: + input_shape = shape_list(inputs_embeds)[:-1] + else: + raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") + + past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0 + + if inputs_embeds is None: + check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) + inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale + + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + if input_shape[-1] > 1: + combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length) + else: + combined_attention_mask = _expand_mask( + tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1] + ) + + if attention_mask is not None: + combined_attention_mask = combined_attention_mask + _expand_mask(attention_mask, tgt_len=input_shape[-1]) + + if encoder_hidden_states is not None and encoder_attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + encoder_attention_mask = _expand_mask(encoder_attention_mask, tgt_len=input_shape[-1]) + + # embed positions + if position_ids is None: + positions = self.embed_positions(input_shape, past_key_values_length) + else: + positions = self.embed_positions(input_shape, position_ids=position_ids) + + hidden_states = self.layernorm_embedding(inputs_embeds) + positions + hidden_states = self.dropout(hidden_states, training=training) + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + all_cross_attns = () if (output_attentions and encoder_hidden_states is not None) else None + present_key_values = () if use_cache else None + + # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired + for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]: + if attn_mask is not None: + tf.debugging.assert_equal( + shape_list(attn_mask)[0], + len(self.layers), + message=( + f"The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for" + f" {shape_list(attn_mask)[0]}." 
+ ), + ) + + for idx, decoder_layer in enumerate(self.layers): + # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) + if output_hidden_states: + all_hidden_states += (hidden_states,) + dropout_probability = random.uniform(0, 1) + + if training and (dropout_probability < self.layerdrop): + continue + + past_key_value = past_key_values[idx] if past_key_values is not None else None + + hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer( + hidden_states, + attention_mask=combined_attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + layer_head_mask=head_mask[idx] if head_mask is not None else None, + cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, + past_key_value=past_key_value, + ) + + if use_cache: + present_key_values += (present_key_value,) + + if output_attentions: + all_self_attns += (layer_self_attn,) + + if encoder_hidden_states is not None: + all_cross_attns += (layer_cross_attn,) + + if output_hidden_states: + all_hidden_states += (hidden_states,) + + if not return_dict: + return hidden_states, present_key_values, all_hidden_states, all_self_attns, all_cross_attns + else: + return TFBaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=present_key_values, + hidden_states=all_hidden_states, + attentions=all_self_attns, + cross_attentions=all_cross_attns, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "embed_positions", None) is not None: + with tf.name_scope(self.embed_positions.name): + self.embed_positions.build(None) + if getattr(self, "layernorm_embedding", None) is not None: + with tf.name_scope(self.layernorm_embedding.name): + self.layernorm_embedding.build([None, None, self.config.d_model]) + if getattr(self, "layers", None) is not None: + for layer in self.layers: + with tf.name_scope(layer.name): + layer.build(None) + + +@keras_serializable +class TFBlenderbotSmallMainLayer(tf.keras.layers.Layer): + config_class = BlenderbotSmallConfig + + def __init__(self, config: BlenderbotSmallConfig, **kwargs): + super().__init__(**kwargs) + + self.config = config + self.shared = tf.keras.layers.Embedding( + input_dim=config.vocab_size, + output_dim=config.d_model, + embeddings_initializer=tf.keras.initializers.TruncatedNormal(stddev=self.config.init_std), + name="model.shared", + ) + # Additional attribute to specify the expected name scope of the layer (for loading/storing weights) + self.shared.load_weight_prefix = "model.shared" + + self.encoder = TFBlenderbotSmallEncoder(config, self.shared, name="encoder") + self.decoder = TFBlenderbotSmallDecoder(config, self.shared, name="decoder") + + def get_input_embeddings(self): + return self.shared + + def set_input_embeddings(self, new_embeddings): + self.shared = new_embeddings + self.encoder.embed_tokens = self.shared + self.decoder.embed_tokens = self.shared + + @unpack_inputs + def call( + self, + input_ids=None, + attention_mask=None, + decoder_input_ids=None, + decoder_attention_mask=None, + decoder_position_ids=None, + head_mask=None, + decoder_head_mask=None, + cross_attn_head_mask=None, + encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None, + past_key_values=None, + inputs_embeds=None, + decoder_inputs_embeds=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + training=False, + **kwargs, + ): + 
output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + + if encoder_outputs is None: + encoder_outputs = self.encoder( + input_ids=input_ids, + attention_mask=attention_mask, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + # If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True + elif return_dict and not isinstance(encoder_outputs, TFBaseModelOutput): + encoder_outputs = TFBaseModelOutput( + last_hidden_state=encoder_outputs[0], + hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, + attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, + ) + # If the user passed a TFBaseModelOutput for encoder_outputs, we wrap it in a tuple when return_dict=False + elif not return_dict and not isinstance(encoder_outputs, tuple): + encoder_outputs = encoder_outputs.to_tuple() + + decoder_outputs = self.decoder( + decoder_input_ids, + attention_mask=decoder_attention_mask, + position_ids=decoder_position_ids, + encoder_hidden_states=encoder_outputs[0], + encoder_attention_mask=attention_mask, + head_mask=decoder_head_mask, + cross_attn_head_mask=cross_attn_head_mask, + past_key_values=past_key_values, + inputs_embeds=decoder_inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + if not return_dict: + return decoder_outputs + encoder_outputs + + return TFSeq2SeqModelOutput( + last_hidden_state=decoder_outputs.last_hidden_state, + past_key_values=decoder_outputs.past_key_values, + decoder_hidden_states=decoder_outputs.hidden_states, + decoder_attentions=decoder_outputs.attentions, + cross_attentions=decoder_outputs.cross_attentions, + encoder_last_hidden_state=encoder_outputs.last_hidden_state, + encoder_hidden_states=encoder_outputs.hidden_states, + encoder_attentions=encoder_outputs.attentions, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + # The shared/tied weights expect to be in the model base namespace + # Adding "/" to the end (not the start!) of a tf.name_scope puts it in the root namespace rather than + # the current one. 
+ with tf.name_scope(self.shared.load_weight_prefix + "/" + self.shared.name + "/"): + self.shared.build(None) + if getattr(self, "encoder", None) is not None: + with tf.name_scope(self.encoder.name): + self.encoder.build(None) + if getattr(self, "decoder", None) is not None: + with tf.name_scope(self.decoder.name): + self.decoder.build(None) + + +@add_start_docstrings( + "The bare BLENDERBOT_SMALL Model outputting raw hidden-states without any specific head on top.", + BLENDERBOT_SMALL_START_DOCSTRING, +) +class TFBlenderbotSmallModel(TFBlenderbotSmallPreTrainedModel): + def __init__(self, config: BlenderbotSmallConfig, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + + self.model = TFBlenderbotSmallMainLayer(config, name="model") + + def get_encoder(self): + return self.model.encoder + + def get_decoder(self): + return self.model.decoder + + @unpack_inputs + @add_start_docstrings_to_model_forward(BLENDERBOT_SMALL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TFSeq2SeqModelOutput, + config_class=_CONFIG_FOR_DOC, + ) + def call( + self, + input_ids: tf.Tensor | None = None, + attention_mask: tf.Tensor | None = None, + decoder_input_ids: tf.Tensor | None = None, + decoder_attention_mask: tf.Tensor | None = None, + decoder_position_ids: tf.Tensor | None = None, + head_mask: tf.Tensor | None = None, + decoder_head_mask: tf.Tensor | None = None, + cross_attn_head_mask: tf.Tensor | None = None, + encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None, + past_key_values: List[tf.Tensor] | None = None, + inputs_embeds: tf.Tensor | None = None, + decoder_inputs_embeds: tf.Tensor | None = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: Optional[bool] = False, + **kwargs, + ) -> Union[Tuple[tf.Tensor], TFSeq2SeqModelOutput]: + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + decoder_input_ids=decoder_input_ids, + decoder_attention_mask=decoder_attention_mask, + decoder_position_ids=decoder_position_ids, + head_mask=head_mask, + decoder_head_mask=decoder_head_mask, + cross_attn_head_mask=cross_attn_head_mask, + encoder_outputs=encoder_outputs, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + decoder_inputs_embeds=decoder_inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + return outputs + + # Copied from transformers.models.bart.modeling_tf_bart.TFBartModel.serving_output + def serving_output(self, output): + pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None + dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None + dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None + cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None + enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None + enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None + + return TFSeq2SeqModelOutput( + last_hidden_state=output.last_hidden_state, + past_key_values=pkv, + decoder_hidden_states=dec_hs, + decoder_attentions=dec_attns, + 
cross_attentions=cross_attns, + encoder_last_hidden_state=output.encoder_last_hidden_state, + encoder_hidden_states=enc_hs, + encoder_attentions=enc_attns, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "model", None) is not None: + with tf.name_scope(self.model.name): + self.model.build(None) + + +# Copied from transformers.models.bart.modeling_tf_bart.BiasLayer +class BiasLayer(tf.keras.layers.Layer): + """ + Bias as a layer. It is used for serialization purposes: `tf.keras.Model.save_weights` stores on a per-layer basis, + so all weights have to be registered in a layer. + """ + + def __init__(self, shape, initializer, trainable, name, **kwargs): + super().__init__(name=name, **kwargs) + # Note: the name of this variable will NOT be scoped when serialized, i.e. it will not be in the format of + # "outer_layer/inner_layer/.../name:0". Instead, it will be "name:0". For further details, see: + # https://github.com/huggingface/transformers/pull/18833#issuecomment-1233090214 + self.bias = self.add_weight(name=name, shape=shape, initializer=initializer, trainable=trainable) + + def call(self, x): + return x + self.bias + + +@add_start_docstrings( + "The BLENDERBOT_SMALL Model with a language modeling head. Can be used for summarization.", + BLENDERBOT_SMALL_START_DOCSTRING, +) +class TFBlenderbotSmallForConditionalGeneration(TFBlenderbotSmallPreTrainedModel, TFCausalLanguageModelingLoss): + _keys_to_ignore_on_load_unexpected = [ + r"model.encoder.embed_tokens.weight", + r"model.decoder.embed_tokens.weight", + ] + + def __init__(self, config, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + self.model = TFBlenderbotSmallMainLayer(config, name="model") + self.use_cache = config.use_cache + # final_bias_logits is registered as a buffer in pytorch, so not trainable for the sake of consistency. + self.bias_layer = BiasLayer( + name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False + ) + + def get_decoder(self): + return self.model.decoder + + def get_encoder(self): + return self.model.encoder + + def get_output_embeddings(self): + return self.get_input_embeddings() + + def set_output_embeddings(self, value): + self.set_input_embeddings(value) + + def get_bias(self): + return {"final_logits_bias": self.bias_layer.bias} + + def set_bias(self, value): + # Replaces the existing layers containing bias for correct (de)serialization. 
+ vocab_size = value["final_logits_bias"].shape[-1] + self.bias_layer = BiasLayer( + name="final_logits_bias", shape=[1, vocab_size], initializer="zeros", trainable=False + ) + self.bias_layer.bias.assign(value["final_logits_bias"]) + + @unpack_inputs + @add_start_docstrings_to_model_forward(BLENDERBOT_SMALL_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) + @add_end_docstrings(BLENDERBOT_SMALL_GENERATION_EXAMPLE) + def call( + self, + input_ids: tf.Tensor | None = None, + attention_mask: tf.Tensor | None = None, + decoder_input_ids: tf.Tensor | None = None, + decoder_attention_mask: tf.Tensor | None = None, + decoder_position_ids: tf.Tensor | None = None, + head_mask: tf.Tensor | None = None, + decoder_head_mask: tf.Tensor | None = None, + cross_attn_head_mask: tf.Tensor | None = None, + encoder_outputs: Optional[TFBaseModelOutput] = None, + past_key_values: List[tf.Tensor] | None = None, + inputs_embeds: tf.Tensor | None = None, + decoder_inputs_embeds: tf.Tensor | None = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + labels: tf.Tensor | None = None, + training: Optional[bool] = False, + ) -> Union[Tuple[tf.Tensor], TFSeq2SeqLMOutput]: + r""" + labels (`tf.tensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + + Returns: + + """ + + if labels is not None: + labels = tf.where( + labels == self.config.pad_token_id, + tf.cast(tf.fill(shape_list(labels), -100), labels.dtype), + labels, + ) + use_cache = False + if decoder_input_ids is None and decoder_inputs_embeds is None: + decoder_input_ids = shift_tokens_right( + labels, self.config.pad_token_id, self.config.decoder_start_token_id + ) + + outputs = self.model( + input_ids, + attention_mask=attention_mask, + decoder_input_ids=decoder_input_ids, + decoder_attention_mask=decoder_attention_mask, + decoder_position_ids=decoder_position_ids, + head_mask=head_mask, + decoder_head_mask=decoder_head_mask, + cross_attn_head_mask=cross_attn_head_mask, + encoder_outputs=encoder_outputs, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + decoder_inputs_embeds=decoder_inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True) + lm_logits = self.bias_layer(lm_logits) + masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits) + + if not return_dict: + output = (lm_logits,) + outputs[1:] + return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output + return TFSeq2SeqLMOutput( + loss=masked_lm_loss, + logits=lm_logits, + past_key_values=outputs.past_key_values, # index 1 of d outputs + decoder_hidden_states=outputs.decoder_hidden_states, # index 2 of d outputs + decoder_attentions=outputs.decoder_attentions, # index 3 of d outputs + cross_attentions=outputs.cross_attentions, # index 4 of d outputs + encoder_last_hidden_state=outputs.encoder_last_hidden_state, # index 0 of encoder outputs + 
encoder_hidden_states=outputs.encoder_hidden_states, # 1 of e out + encoder_attentions=outputs.encoder_attentions, # 2 of e out + ) + + # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.serving_output + def serving_output(self, output): + pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None + dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None + dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None + cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None + enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None + enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None + + return TFSeq2SeqLMOutput( + logits=output.logits, + past_key_values=pkv, + decoder_hidden_states=dec_hs, + decoder_attentions=dec_attns, + cross_attentions=cross_attns, + encoder_last_hidden_state=output.encoder_last_hidden_state, + encoder_hidden_states=enc_hs, + encoder_attentions=enc_attns, + ) + + # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.prepare_inputs_for_generation + def prepare_inputs_for_generation( + self, + decoder_input_ids, + past_key_values=None, + attention_mask=None, + decoder_attention_mask=None, + head_mask=None, + decoder_head_mask=None, + cross_attn_head_mask=None, + use_cache=None, + encoder_outputs=None, + **kwargs, + ): + # cut decoder_input_ids if past_key_values is used + if past_key_values is not None: + decoder_input_ids = decoder_input_ids[:, -1:] + + if decoder_attention_mask is not None: # xla + decoder_position_ids = tf.math.cumsum(decoder_attention_mask, axis=-1, exclusive=True)[:, -1:] + elif past_key_values is not None: # no xla + past_key_values + decoder_position_ids = past_key_values[0][0].shape[2] + else: # no xla + no past_key_values + decoder_position_ids = tf.range(decoder_input_ids.shape[1]) + + return { + "input_ids": None, # encoder_outputs is defined. input_ids not needed + "encoder_outputs": encoder_outputs, + "past_key_values": past_key_values, + "decoder_input_ids": decoder_input_ids, + "attention_mask": attention_mask, + "decoder_attention_mask": decoder_attention_mask, + "decoder_position_ids": decoder_position_ids, + "head_mask": head_mask, + "decoder_head_mask": decoder_head_mask, + "cross_attn_head_mask": cross_attn_head_mask, + "use_cache": use_cache, # change this to avoid caching (presumably for debugging) + } + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "model", None) is not None: + with tf.name_scope(self.model.name): + self.model.build(None) + if getattr(self, "bias_layer", None) is not None: + with tf.name_scope(self.bias_layer.name): + self.bias_layer.build(None) diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/tokenization_blenderbot_small.py b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/tokenization_blenderbot_small.py new file mode 100644 index 0000000000000000000000000000000000000000..240495d73894efc2bd3911d28d30199262981a2b --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/tokenization_blenderbot_small.py @@ -0,0 +1,258 @@ +# coding=utf-8 +# Copyright 2021 The Facebook Inc. and The HuggingFace Inc. team. 
All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Tokenization class for BlenderbotSmall.""" + +import json +import os +from typing import Dict, List, Optional, Tuple + +import regex as re + +from ...tokenization_utils import PreTrainedTokenizer +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +VOCAB_FILES_NAMES = { + "vocab_file": "vocab.json", + "merges_file": "merges.txt", + "tokenizer_config_file": "tokenizer_config.json", +} + +PRETRAINED_VOCAB_FILES_MAP = { + "vocab_file": { + "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json" + }, + "merges_file": { + "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt" + }, + "tokenizer_config_file": { + "facebook/blenderbot_small-90M": ( + "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json" + ) + }, +} + +PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512} + + +def get_pairs(word): + """ + Return set of symbol pairs in a word. + + Word is represented as tuple of symbols (symbols being variable-length strings). + """ + pairs = set() + prev_char = word[0] + for char in word[1:]: + pairs.add((prev_char, char)) + prev_char = char + + pairs = set(pairs) + return pairs + + +class BlenderbotSmallTokenizer(PreTrainedTokenizer): + """ + Constructs a Blenderbot-90M tokenizer based on BPE (Byte-Pair-Encoding) + + This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to + the superclass for more information regarding methods. + + Args: + vocab_file (`str`): + File containing the vocabulary. + merges_file (`str`): + Path to the merges file. + bos_token (`str`, *optional*, defaults to `"__start__"`): + The beginning of sentence token. + eos_token (`str`, *optional*, defaults to `"__end__"`): + The end of sentence token. + unk_token (`str`, *optional*, defaults to `"__unk__"`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + pad_token (`str`, *optional*, defaults to `"__null__"`): + The token used for padding, for example when batching sequences of different lengths. 
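As a brief, hedged usage sketch for the tokenizer documented above (the checkpoint name is the one already referenced in this file's vocabulary map; the exact pieces printed will depend on the vocabulary):

```py
from transformers import BlenderbotSmallTokenizer

tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")

# Tokenization lower-cases the text and splits it into BPE pieces; non-final
# pieces of a word carry the "@@" continuation marker.
tokens = tokenizer.tokenize("Carbohydrates are tasty.")
print(tokens)

enc = tokenizer("Carbohydrates are tasty.")
print(enc["input_ids"])
# Roughly round-trips to the lower-cased text (punctuation ends up space-separated,
# per the regex pre-processing in `bpe`).
print(tokenizer.decode(enc["input_ids"]))
```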
+ kwargs (*optional*): + Additional keyword arguments passed along to [`PreTrainedTokenizer`] + """ + + vocab_files_names = VOCAB_FILES_NAMES + pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP + max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES + model_input_names = ["input_ids", "attention_mask"] + + def __init__( + self, + vocab_file, + merges_file, + bos_token="__start__", + eos_token="__end__", + unk_token="__unk__", + pad_token="__null__", + **kwargs, + ): + with open(vocab_file, encoding="utf-8") as vocab_handle: + self.encoder = json.load(vocab_handle) + self.decoder = {v: k for k, v in self.encoder.items()} + with open(merges_file, encoding="utf-8") as merges_handle: + merges = merges_handle.read().split("\n")[1:-1] + merges = [tuple(merge.split()) for merge in merges] + self.bpe_ranks = dict(zip(merges, range(len(merges)))) + self.cache = {} + super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs) + + @property + def vocab_size(self) -> int: + return len(self.encoder) + + def get_vocab(self) -> Dict: + return dict(self.encoder, **self.added_tokens_encoder) + + def bpe(self, token: str) -> str: + if token in self.cache: + return self.cache[token] + token = re.sub("([.,!?()])", r" \1", token) + token = re.sub("(')", r" \1 ", token) + token = re.sub(r"\s{2,}", " ", token) + if "\n" in token: + token = token.replace("\n", " __newln__") + + tokens = token.split(" ") + words = [] + for token in tokens: + if not len(token): + continue + + token = token.lower() + word = tuple(token) + word = tuple(list(word[:-1]) + [word[-1] + ""]) + pairs = get_pairs(word) + + if not pairs: + words.append(token) + continue + + while True: + bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) + if bigram not in self.bpe_ranks: + break + first, second = bigram + new_word = [] + i = 0 + + while i < len(word): + try: + j = word.index(first, i) + new_word.extend(word[i:j]) + i = j + except ValueError: + new_word.extend(word[i:]) + break + + if word[i] == first and i < len(word) - 1 and word[i + 1] == second: + new_word.append(first + second) + i += 2 + else: + new_word.append(word[i]) + i += 1 + new_word = tuple(new_word) + word = new_word + if len(word) == 1: + break + else: + pairs = get_pairs(word) + word = "@@ ".join(word) + word = word[:-4] + + self.cache[token] = word + words.append(word) + return " ".join(words) + + def _tokenize(self, text: str) -> List[str]: + """Split a string into tokens using BPE.""" + split_tokens = [] + + words = re.findall(r"\S+\n?", text) + + for token in words: + split_tokens.extend(list(self.bpe(token).split(" "))) + return split_tokens + + def _convert_token_to_id(self, token: str) -> int: + """Converts a token to an id using the vocab.""" + token = token.lower() + return self.encoder.get(token, self.encoder.get(self.unk_token)) + + def _convert_id_to_token(self, index: int) -> str: + """Converts an index (integer) in a token (str) using the vocab.""" + return self.decoder.get(index, self.unk_token) + + def convert_tokens_to_string(self, tokens: List[str]) -> str: + """Converts a sequence of tokens in a single string.""" + out_string = " ".join(tokens).replace("@@ ", "").strip() + return out_string + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + if not os.path.isdir(save_directory): + logger.error(f"Vocabulary path ({save_directory}) should be a directory") + return + vocab_file = os.path.join( + save_directory, 
(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] + ) + merge_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] + ) + + with open(vocab_file, "w", encoding="utf-8") as f: + f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") + + index = 0 + with open(merge_file, "w", encoding="utf-8") as writer: + writer.write("#version: 0.2\n") + for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): + if index != token_index: + logger.warning( + f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." + " Please check that the tokenizer is not corrupted!" + ) + index = token_index + writer.write(" ".join(bpe_tokens) + "\n") + index += 1 + + return vocab_file, merge_file + + @property + # Copied from transformers.models.blenderbot.tokenization_blenderbot.BlenderbotTokenizer.default_chat_template + def default_chat_template(self): + """ + A very simple chat template that just adds whitespace between messages. + """ + logger.warning_once( + "\nNo chat template is defined for this tokenizer - using the default template " + f"for the {self.__class__.__name__} class. If the default is not appropriate for " + "your model, please set `tokenizer.chat_template` to an appropriate template. " + "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n" + ) + return ( + "{% for message in messages %}" + "{% if message['role'] == 'user' %}{{ ' ' }}{% endif %}" + "{{ message['content'] }}" + "{% if not loop.last %}{{ ' ' }}{% endif %}" + "{% endfor %}" + "{{ eos_token }}" + ) diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/tokenization_blenderbot_small_fast.py b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/tokenization_blenderbot_small_fast.py new file mode 100644 index 0000000000000000000000000000000000000000..4bf0017b5f2a29753bfce8414b813c270e82432a --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/blenderbot_small/tokenization_blenderbot_small_fast.py @@ -0,0 +1,140 @@ +# coding=utf-8 +# Copyright 2021, The Facebook, Inc. and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Fast tokenization class for BlenderbotSmall.""" +from typing import List, Optional + +from tokenizers import ByteLevelBPETokenizer + +from ...tokenization_utils_fast import PreTrainedTokenizerFast +from ...utils import logging +from .tokenization_blenderbot_small import BlenderbotSmallTokenizer + + +logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = { + "vocab_file": "vocab.json", + "merges_file": "merges.txt", + "tokenizer_config_file": "tokenizer_config.json", +} + +PRETRAINED_VOCAB_FILES_MAP = { + "vocab_file": { + "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json" + }, + "merges_file": { + "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt" + }, + "tokenizer_config_file": { + "facebook/blenderbot_small-90M": ( + "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json" + ) + }, +} + +PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { + "facebook/blenderbot_small-90M": 512, +} + + +class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast): + """ + Construct a "fast" BlenderbotSmall tokenizer (backed by HuggingFace's *tokenizers* library). + + Args: + vocab_file (`str`): + Path to the vocabulary file. + """ + + vocab_files_names = VOCAB_FILES_NAMES + pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP + max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES + slow_tokenizer_class = BlenderbotSmallTokenizer + + def __init__( + self, + vocab_file=None, + merges_file=None, + unk_token="<|endoftext|>", + bos_token="<|endoftext|>", + eos_token="<|endoftext|>", + add_prefix_space=False, + trim_offsets=True, + **kwargs, + ): + super().__init__( + ByteLevelBPETokenizer( + vocab=vocab_file, + merges=merges_file, + add_prefix_space=add_prefix_space, + trim_offsets=trim_offsets, + ), + bos_token=bos_token, + eos_token=eos_token, + unk_token=unk_token, + **kwargs, + ) + self.add_prefix_space = add_prefix_space + + def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): + output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id] + if token_ids_1 is None: + return output + + return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id] + + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create a mask from the two sequences passed to be used in a sequence-pair classification task. BlenderbotSmall + does not make use of token type ids, therefore a list of zeros is returned. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of zeros. + """ + sep = [self.sep_token_id] + cls = [self.cls_token_id] + + if token_ids_1 is None: + return len(cls + token_ids_0 + sep) * [0] + return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] + + @property + # Copied from transformers.models.blenderbot.tokenization_blenderbot.BlenderbotTokenizer.default_chat_template + def default_chat_template(self): + """ + A very simple chat template that just adds whitespace between messages. + """ + logger.warning_once( + "\nNo chat template is defined for this tokenizer - using the default template " + f"for the {self.__class__.__name__} class. If the default is not appropriate for " + "your model, please set `tokenizer.chat_template` to an appropriate template. 
" + "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n" + ) + return ( + "{% for message in messages %}" + "{% if message['role'] == 'user' %}{{ ' ' }}{% endif %}" + "{{ message['content'] }}" + "{% if not loop.last %}{{ ' ' }}{% endif %}" + "{% endfor %}" + "{{ eos_token }}" + ) diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/__init__.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..efc89807427382814208ba49d052c5d1be990f63 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/tokenization_byt5.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/tokenization_byt5.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..119842f7bf157c96c6d680860eb40e98bf734240 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/tokenization_byt5.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/byt5/convert_byt5_original_tf_checkpoint_to_pytorch.py b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/byt5/convert_byt5_original_tf_checkpoint_to_pytorch.py new file mode 100644 index 0000000000000000000000000000000000000000..7d9a20f3b0b395ffd31a2e8445d94aedb6036a6e --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/byt5/convert_byt5_original_tf_checkpoint_to_pytorch.py @@ -0,0 +1,60 @@ +# coding=utf-8 +# Copyright 2018 The T5 authors and HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Convert T5 checkpoint.""" + + +import argparse + +from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5 +from transformers.utils import logging + + +logging.set_verbosity_info() + + +def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path): + # Initialise PyTorch model + config = T5Config.from_json_file(config_file) + print(f"Building PyTorch model from configuration: {config}") + model = T5ForConditionalGeneration(config) + + # Load weights from tf checkpoint + load_tf_weights_in_t5(model, config, tf_checkpoint_path) + + # Save pytorch-model + print(f"Save PyTorch model to {pytorch_dump_path}") + model.save_pretrained(pytorch_dump_path) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." 
+ ) + parser.add_argument( + "--config_file", + default=None, + type=str, + required=True, + help=( + "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture." + ), + ) + parser.add_argument( + "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." + ) + args = parser.parse_args() + convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path) diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/__init__.py b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..dbc0a57e8324f3025c96fad65f18fc59de6fa56c --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/__init__.py @@ -0,0 +1,88 @@ +# Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available + + +_import_structure = { + "configuration_chinese_clip": [ + "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", + "ChineseCLIPConfig", + "ChineseCLIPOnnxConfig", + "ChineseCLIPTextConfig", + "ChineseCLIPVisionConfig", + ], + "processing_chinese_clip": ["ChineseCLIPProcessor"], +} + +try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"] + _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"] + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_chinese_clip"] = [ + "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", + "ChineseCLIPModel", + "ChineseCLIPPreTrainedModel", + "ChineseCLIPTextModel", + "ChineseCLIPVisionModel", + ] + +if TYPE_CHECKING: + from .configuration_chinese_clip import ( + CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, + ChineseCLIPConfig, + ChineseCLIPOnnxConfig, + ChineseCLIPTextConfig, + ChineseCLIPVisionConfig, + ) + from .processing_chinese_clip import ChineseCLIPProcessor + + try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_chinese_clip import ( + CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, + ChineseCLIPModel, + ChineseCLIPPreTrainedModel, + ChineseCLIPTextModel, + ChineseCLIPVisionModel, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, 
globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/configuration_chinese_clip.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/configuration_chinese_clip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bdd6bb4642d44f72f8f248ff42d369c5a0ca2070 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/configuration_chinese_clip.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/convert_chinese_clip_original_pytorch_to_hf.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/convert_chinese_clip_original_pytorch_to_hf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d7a4200edbd94dbc5c2edf7cdcbc788ac4fb1b2 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/convert_chinese_clip_original_pytorch_to_hf.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/feature_extraction_chinese_clip.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/feature_extraction_chinese_clip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..827d49545de7af4b36b50a338782249220cbeab7 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/feature_extraction_chinese_clip.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/image_processing_chinese_clip.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/image_processing_chinese_clip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a26b261ed05733e41c56b802195b1625e268775c Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/image_processing_chinese_clip.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/modeling_chinese_clip.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/modeling_chinese_clip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d62a58eaff732430f722a23116530d97bcc80295 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/modeling_chinese_clip.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/processing_chinese_clip.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/processing_chinese_clip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ccfe28abea63d21c803be73dafe68cbbc5fd85f5 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/processing_chinese_clip.cpython-310.pyc differ diff --git 
a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/configuration_chinese_clip.py b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/configuration_chinese_clip.py new file mode 100644 index 0000000000000000000000000000000000000000..399b4e6b0ec1606dde8e9c851250b499a0b74223 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/configuration_chinese_clip.py @@ -0,0 +1,472 @@ +# coding=utf-8 +# Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Chinese-CLIP model configuration""" + +import os +from collections import OrderedDict +from typing import TYPE_CHECKING, Any, Mapping, Optional, Union + + +if TYPE_CHECKING: + from ...processing_utils import ProcessorMixin + from ...utils import TensorType + +from ...configuration_utils import PretrainedConfig +from ...onnx import OnnxConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + +CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = { + "OFA-Sys/chinese-clip-vit-base-patch16": ( + "https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16/resolve/main/config.json" + ), +} + + +class ChineseCLIPTextConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`ChineseCLIPModel`]. It is used to instantiate a + Chinese CLIP model according to the specified arguments, defining the model architecture. Instantiating a + configuration with the defaults will yield a similar configuration to that of the Chinese CLIP + [OFA-Sys/chinese-clip-vit-base-patch16](https: + //huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 30522): + Vocabulary size of the CHINESE_CLIP model. Defines the number of different tokens that can be represented + by the `inputs_ids` passed when calling [`ChineseCLIPModel`]. + hidden_size (`int`, *optional*, defaults to 768): + Dimensionality of the encoder layers and the pooler layer. + num_hidden_layers (`int`, *optional*, defaults to 12): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + intermediate_size (`int`, *optional*, defaults to 3072): + Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. + hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"silu"` and `"gelu_new"` are supported. 
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout ratio for the attention probabilities. + max_position_embeddings (`int`, *optional*, defaults to 512): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + type_vocab_size (`int`, *optional*, defaults to 2): + The vocabulary size of the `token_type_ids` passed when calling [`ChineseCLIPModel`]. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + initializer_factor (`float`, *optional*, defaults to 1.0): + A factor for initializing all weight matrices (should be kept to 1, used internally for initialization + testing). + layer_norm_eps (`float`, *optional*, defaults to 1e-12): + The epsilon used by the layer normalization layers. + pad_token_id (`int`, *optional*, defaults to 0): + Padding token id. + position_embedding_type (`str`, *optional*, defaults to `"absolute"`): + Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For + positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to + [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155). + For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models + with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658). + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). Only + relevant if `config.is_decoder=True`. 
+ + Example: + + ```python + >>> from transformers import ChineseCLIPTextConfig, ChineseCLIPTextModel + + >>> # Initializing a ChineseCLIPTextConfig with OFA-Sys/chinese-clip-vit-base-patch16 style configuration + >>> configuration = ChineseCLIPTextConfig() + + >>> # Initializing a ChineseCLIPTextModel (with random weights) from the OFA-Sys/chinese-clip-vit-base-patch16 style configuration + >>> model = ChineseCLIPTextModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "chinese_clip_text_model" + + def __init__( + self, + vocab_size=30522, + hidden_size=768, + num_hidden_layers=12, + num_attention_heads=12, + intermediate_size=3072, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=512, + type_vocab_size=2, + initializer_range=0.02, + initializer_factor=1.0, + layer_norm_eps=1e-12, + pad_token_id=0, + position_embedding_type="absolute", + use_cache=True, + **kwargs, + ): + super().__init__(pad_token_id=pad_token_id, **kwargs) + + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.hidden_act = hidden_act + self.intermediate_size = intermediate_size + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.type_vocab_size = type_vocab_size + self.initializer_range = initializer_range + self.initializer_factor = initializer_factor + self.layer_norm_eps = layer_norm_eps + self.position_embedding_type = position_embedding_type + self.use_cache = use_cache + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig": + cls._set_token_in_kwargs(kwargs) + + config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) + + # get the vision config dict if we are loading from ChineseCLIPConfig + if config_dict.get("model_type") == "chinese_clip": + config_dict = config_dict["text_config"] + + if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: + logger.warning( + f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " + f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." + ) + + return cls.from_dict(config_dict, **kwargs) + + +class ChineseCLIPVisionConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`ChineseCLIPModel`]. It is used to instantiate an + ChineseCLIP model according to the specified arguments, defining the model architecture. Instantiating a + configuration with the defaults will yield a similar configuration to that of the ChineseCLIP + [OFA-Sys/chinese-clip-vit-base-patch16](https: + //huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + hidden_size (`int`, *optional*, defaults to 768): + Dimensionality of the encoder layers and the pooler layer. + intermediate_size (`int`, *optional*, defaults to 3072): + Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. 
+ projection_dim (`int`, *optional*, defaults to 512): + Dimentionality of text and vision projection layers. + num_hidden_layers (`int`, *optional*, defaults to 12): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + num_channels (`int`, *optional*, defaults to 3): + The number of input channels. + image_size (`int`, *optional*, defaults to 224): + The size (resolution) of each image. + patch_size (`int`, *optional*, defaults to 32): + The size (resolution) of each patch. + hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported. + layer_norm_eps (`float`, *optional*, defaults to 1e-05): + The epsilon used by the layer normalization layers. + attention_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + initializer_factor (`float`, *optional*, defaults to 1.0): + A factor for initializing all weight matrices (should be kept to 1, used internally for initialization + testing). + Example: + ```python + >>> from transformers import ChineseCLIPVisionConfig, ChineseCLIPVisionModel + + >>> # Initializing a ChineseCLIPVisionConfig with OFA-Sys/chinese-clip-vit-base-patch16 style configuration + >>> configuration = ChineseCLIPVisionConfig() + + >>> # Initializing a ChineseCLIPVisionModel (with random weights) from the OFA-Sys/chinese-clip-vit-base-patch16 style configuration + >>> model = ChineseCLIPVisionModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "chinese_clip_vision_model" + + def __init__( + self, + hidden_size=768, + intermediate_size=3072, + projection_dim=512, + num_hidden_layers=12, + num_attention_heads=12, + num_channels=3, + image_size=224, + patch_size=32, + hidden_act="quick_gelu", + layer_norm_eps=1e-5, + attention_dropout=0.0, + initializer_range=0.02, + initializer_factor=1.0, + **kwargs, + ): + super().__init__(**kwargs) + + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.projection_dim = projection_dim + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.num_channels = num_channels + self.patch_size = patch_size + self.image_size = image_size + self.initializer_range = initializer_range + self.initializer_factor = initializer_factor + self.attention_dropout = attention_dropout + self.layer_norm_eps = layer_norm_eps + self.hidden_act = hidden_act + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig": + cls._set_token_in_kwargs(kwargs) + + config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) + + # get the vision config dict if we are loading from ChineseCLIPConfig + if config_dict.get("model_type") == "chinese_clip": + config_dict = config_dict["vision_config"] + + if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: + logger.warning( + f"You are using a model of type {config_dict['model_type']} to instantiate a model 
of type " + f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." + ) + + return cls.from_dict(config_dict, **kwargs) + + +class ChineseCLIPConfig(PretrainedConfig): + r""" + [`ChineseCLIPConfig`] is the configuration class to store the configuration of a [`ChineseCLIPModel`]. It is used + to instantiate Chinese-CLIP model according to the specified arguments, defining the text model and vision model + configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the + Chinese-CLIP [OFA-Sys/chinese-clip-vit-base-patch16](https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16) + architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + text_config (`dict`, *optional*): + Dictionary of configuration options used to initialize [`ChineseCLIPTextConfig`]. + vision_config (`dict`, *optional*): + Dictionary of configuration options used to initialize [`ChineseCLIPVisionConfig`]. + projection_dim (`int`, *optional*, defaults to 512): + Dimentionality of text and vision projection layers. + logit_scale_init_value (`float`, *optional*, defaults to 2.6592): + The inital value of the *logit_scale* paramter. Default is used as per the original ChineseCLIP + implementation. + kwargs (*optional*): + Dictionary of keyword arguments. + + Example: + + ```python + >>> from transformers import ChineseCLIPConfig, ChineseCLIPModel + + >>> # Initializing a ChineseCLIPConfig with OFA-Sys/chinese-clip-vit-base-patch16 style configuration + >>> configuration = ChineseCLIPConfig() + + >>> # Initializing a ChineseCLIPModel (with random weights) from the OFA-Sys/chinese-clip-vit-base-patch16 style configuration + >>> model = ChineseCLIPModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + + >>> # We can also initialize a ChineseCLIPConfig from a ChineseCLIPTextConfig and a ChineseCLIPVisionConfig + + >>> # Initializing a ChineseCLIPTextConfig and ChineseCLIPVisionConfig configuration + >>> config_text = ChineseCLIPTextConfig() + >>> config_vision = ChineseCLIPVisionConfig() + + >>> config = ChineseCLIPConfig.from_text_vision_configs(config_text, config_vision) + ```""" + + model_type = "chinese_clip" + + def __init__( + self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, **kwargs + ): + # If `_config_dict` exist, we use them for the backward compatibility. + # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot + # of confusion!). + text_config_dict = kwargs.pop("text_config_dict", None) + vision_config_dict = kwargs.pop("vision_config_dict", None) + + super().__init__(**kwargs) + + # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in + # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most + # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. + if text_config_dict is not None: + if text_config is None: + text_config = {} + + # This is the complete result when using `text_config_dict`. + _text_config_dict = ChineseCLIPTextConfig(**text_config_dict).to_dict() + + # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different. 
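+            # On a conflict, the `text_config.update(_text_config_dict)` call below makes the value coming from `text_config_dict` win.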
+ for key, value in _text_config_dict.items(): + if key in text_config and value != text_config[key] and key not in ["transformers_version"]: + # If specified in `text_config_dict` + if key in text_config_dict: + message = ( + f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. " + f'The value `text_config_dict["{key}"]` will be used instead.' + ) + # If inferred from default argument values (just to be super careful) + else: + message = ( + f"`text_config_dict` is provided which will be used to initialize `ChineseCLIPTextConfig`. " + f'The value `text_config["{key}"]` will be overriden.' + ) + logger.info(message) + + # Update all values in `text_config` with the ones in `_text_config_dict`. + text_config.update(_text_config_dict) + + if vision_config_dict is not None: + if vision_config is None: + vision_config = {} + + # This is the complete result when using `vision_config_dict`. + _vision_config_dict = ChineseCLIPVisionConfig(**vision_config_dict).to_dict() + # convert keys to string instead of integer + if "id2label" in _vision_config_dict: + _vision_config_dict["id2label"] = { + str(key): value for key, value in _vision_config_dict["id2label"].items() + } + + # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different. + for key, value in _vision_config_dict.items(): + if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]: + # If specified in `vision_config_dict` + if key in vision_config_dict: + message = ( + f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different " + f'values. The value `vision_config_dict["{key}"]` will be used instead.' + ) + # If inferred from default argument values (just to be super careful) + else: + message = ( + f"`vision_config_dict` is provided which will be used to initialize " + f'`ChineseCLIPVisionConfig`. The value `vision_config["{key}"]` will be overriden.' + ) + logger.info(message) + + # Update all values in `vision_config` with the ones in `_vision_config_dict`. + vision_config.update(_vision_config_dict) + + if text_config is None: + text_config = {} + logger.info("`text_config` is `None`. Initializing the `ChineseCLIPTextConfig` with default values.") + + if vision_config is None: + vision_config = {} + logger.info("`vision_config` is `None`. initializing the `ChineseCLIPVisionConfig` with default values.") + + self.text_config = ChineseCLIPTextConfig(**text_config) + self.vision_config = ChineseCLIPVisionConfig(**vision_config) + + self.projection_dim = projection_dim + self.logit_scale_init_value = logit_scale_init_value + self.initializer_factor = 1.0 + self.initializer_range = 0.02 + + @classmethod + def from_text_vision_configs( + cls, text_config: ChineseCLIPTextConfig, vision_config: ChineseCLIPVisionConfig, **kwargs + ): + r""" + Instantiate a [`ChineseCLIPConfig`] (or a derived class) from Chinese-CLIP text model configuration and + Chinese-CLIP vision model configuration. 
Returns: + [`ChineseCLIPConfig`]: An instance of a configuration object + """ + + return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs) + + +class ChineseCLIPOnnxConfig(OnnxConfig): + @property + def inputs(self) -> Mapping[str, Mapping[int, str]]: + return OrderedDict( + [ + ("input_ids", {0: "batch", 1: "sequence"}), + ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), + ("attention_mask", {0: "batch", 1: "sequence"}), + ] + ) + + @property + def outputs(self) -> Mapping[str, Mapping[int, str]]: + return OrderedDict( + [ + ("logits_per_image", {0: "batch"}), + ("logits_per_text", {0: "batch"}), + ("text_embeds", {0: "batch"}), + ("image_embeds", {0: "batch"}), + ] + ) + + @property + def atol_for_validation(self) -> float: + return 1e-4 + + def generate_dummy_inputs( + self, + processor: "ProcessorMixin", + batch_size: int = -1, + seq_length: int = -1, + framework: Optional["TensorType"] = None, + ) -> Mapping[str, Any]: + text_input_dict = super().generate_dummy_inputs( + processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework + ) + image_input_dict = super().generate_dummy_inputs( + processor.image_processor, batch_size=batch_size, framework=framework + ) + return {**text_input_dict, **image_input_dict} + + @property + def default_onnx_opset(self) -> int: + return 14 diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/image_processing_chinese_clip.py b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/image_processing_chinese_clip.py new file mode 100644 index 0000000000000000000000000000000000000000..4f1048a45e6ac68e19fbc69c50e84230510c1fd6 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/image_processing_chinese_clip.py @@ -0,0 +1,312 @@ +# coding=utf-8 +# Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Image processor class for Chinese-CLIP.""" + +from typing import Dict, List, Optional, Union + +import numpy as np + +from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict +from ...image_transforms import ( + convert_to_rgb, + get_resize_output_image_size, + resize, + to_channel_dimension_format, +) +from ...image_utils import ( + OPENAI_CLIP_MEAN, + OPENAI_CLIP_STD, + ChannelDimension, + ImageInput, + PILImageResampling, + infer_channel_dimension_format, + is_scaled_image, + make_list_of_images, + to_numpy_array, + valid_images, +) +from ...utils import TensorType, is_vision_available, logging + + +logger = logging.get_logger(__name__) + + +if is_vision_available(): + import PIL + + +class ChineseCLIPImageProcessor(BaseImageProcessor): + r""" + Constructs a Chinese-CLIP image processor. + + Args: + do_resize (`bool`, *optional*, defaults to `True`): + Whether to resize the image's (height, width) dimensions to the specified `size`. 
Can be overridden by + `do_resize` in the `preprocess` method. + size (`Dict[str, int]`, *optional*, defaults to `{"shortest_edge": 224}`): + Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with + the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess` + method. + resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): + Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method. + do_center_crop (`bool`, *optional*, defaults to `True`): + Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the + `preprocess` method. + crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`): + Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess` + method. + do_rescale (`bool`, *optional*, defaults to `True`): + Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in + the `preprocess` method. + rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): + Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess` + method. + do_normalize (`bool`, *optional*, defaults to `True`): + Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method. + image_mean (`float` or `List[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`): + Mean to use if normalizing the image. This is a float or list of floats the length of the number of + channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. + image_std (`float` or `List[float]`, *optional*, defaults to `OPENAI_CLIP_STD`): + Standard deviation to use if normalizing the image. This is a float or list of floats the length of the + number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. + do_convert_rgb (`bool`, *optional*, defaults to `True`): + Whether to convert the image to RGB. 
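+ 
+     Example (a minimal usage sketch; the random NumPy image is illustrative only, and `size` is passed with
+     "height"/"width" keys because the `resize` method below reads those keys):
+ 
+     ```python
+     >>> import numpy as np
+     >>> from transformers import ChineseCLIPImageProcessor
+ 
+     >>> image_processor = ChineseCLIPImageProcessor(size={"height": 224, "width": 224})
+     >>> image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # any PIL image or NumPy array works
+     >>> inputs = image_processor(images=image, return_tensors="np")
+     >>> inputs["pixel_values"].shape
+     (1, 3, 224, 224)
+     ```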
+ """ + + model_input_names = ["pixel_values"] + + def __init__( + self, + do_resize: bool = True, + size: Dict[str, int] = None, + resample: PILImageResampling = PILImageResampling.BICUBIC, + do_center_crop: bool = True, + crop_size: Dict[str, int] = None, + do_rescale: bool = True, + rescale_factor: Union[int, float] = 1 / 255, + do_normalize: bool = True, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + do_convert_rgb: bool = True, + **kwargs, + ) -> None: + super().__init__(**kwargs) + size = size if size is not None else {"shortest_edge": 224} + size = get_size_dict(size, default_to_square=False) + crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224} + crop_size = get_size_dict(crop_size) + + self.do_resize = do_resize + self.size = size + self.resample = resample + self.do_center_crop = do_center_crop + self.crop_size = crop_size + self.do_rescale = do_rescale + self.rescale_factor = rescale_factor + self.do_normalize = do_normalize + self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN + self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD + self.do_convert_rgb = do_convert_rgb + + def resize( + self, + image: np.ndarray, + size: Dict[str, int], + resample: PILImageResampling = PILImageResampling.BICUBIC, + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> np.ndarray: + """ + Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge + resized to keep the input aspect ratio. + + Args: + image (`np.ndarray`): + Image to resize. + size (`Dict[str, int]`): + Size of the output image. + resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): + Resampling filter to use when resiizing the image. + data_format (`str` or `ChannelDimension`, *optional*): + The channel dimension format of the image. If not provided, it will be the same as the input image. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format of the input image. If not provided, it will be inferred from the input + image. + """ + size = get_size_dict(size, default_to_square=False) + output_size = get_resize_output_image_size( + image, size=(size["height"], size["width"]), default_to_square=False, input_data_format=input_data_format + ) + return resize( + image, + size=output_size, + resample=resample, + data_format=data_format, + input_data_format=input_data_format, + **kwargs, + ) + + def preprocess( + self, + images: ImageInput, + do_resize: bool = None, + size: Dict[str, int] = None, + resample: PILImageResampling = None, + do_center_crop: bool = None, + crop_size: int = None, + do_rescale: bool = None, + rescale_factor: float = None, + do_normalize: bool = None, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + do_convert_rgb: bool = None, + return_tensors: Optional[Union[str, TensorType]] = None, + data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> PIL.Image.Image: + """ + Preprocess an image or batch of images. + + Args: + images (`ImageInput`): + Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. 
If + passing in images with pixel values between 0 and 1, set `do_rescale=False`. + do_resize (`bool`, *optional*, defaults to `self.do_resize`): + Whether to resize the image. + size (`Dict[str, int]`, *optional*, defaults to `self.size`): + Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with + the longest edge resized to keep the input aspect ratio. + resample (`int`, *optional*, defaults to `self.resample`): + Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only + has an effect if `do_resize` is set to `True`. + do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): + Whether to center crop the image. + crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`): + Size of the center crop. Only has an effect if `do_center_crop` is set to `True`. + do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): + Whether to rescale the image. + rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): + Rescale factor to rescale the image by if `do_rescale` is set to `True`. + do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): + Whether to normalize the image. + image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): + Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. + image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): + Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to + `True`. + do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): + Whether to convert the image to RGB. + return_tensors (`str` or `TensorType`, *optional*): + The type of tensors to return. Can be one of: + - Unset: Return a list of `np.ndarray`. + - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. + - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. + - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. + - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. + data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): + The channel dimension format for the output image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - Unset: Use the channel dimension format of the input image. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. 
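+ 
+         A minimal sketch of the `data_format` / `input_data_format` handling described above (the random
+         channels-first array and the explicit "height"/"width" `size` are illustrative choices, not defaults):
+ 
+         ```python
+         >>> import numpy as np
+         >>> from transformers import ChineseCLIPImageProcessor
+         >>> from transformers.image_utils import ChannelDimension
+ 
+         >>> image_processor = ChineseCLIPImageProcessor()
+         >>> chw_image = np.random.randint(0, 256, (3, 480, 640), dtype=np.uint8)  # channels-first input
+         >>> outputs = image_processor.preprocess(
+         ...     images=chw_image,
+         ...     size={"height": 224, "width": 224},
+         ...     data_format=ChannelDimension.LAST,
+         ...     input_data_format=ChannelDimension.FIRST,
+         ...     return_tensors="np",
+         ... )
+         >>> outputs["pixel_values"].shape
+         (1, 224, 224, 3)
+         ```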
+ """ + do_resize = do_resize if do_resize is not None else self.do_resize + size = size if size is not None else self.size + size = get_size_dict(size, default_to_square=False) + resample = resample if resample is not None else self.resample + do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop + crop_size = crop_size if crop_size is not None else self.crop_size + crop_size = get_size_dict(crop_size) + do_rescale = do_rescale if do_rescale is not None else self.do_rescale + rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor + do_normalize = do_normalize if do_normalize is not None else self.do_normalize + image_mean = image_mean if image_mean is not None else self.image_mean + image_std = image_std if image_std is not None else self.image_std + do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb + + images = make_list_of_images(images) + + if not valid_images(images): + raise ValueError( + "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " + "torch.Tensor, tf.Tensor or jax.ndarray." + ) + + if do_resize and size is None: + raise ValueError("Size must be specified if do_resize is True.") + + if do_center_crop and crop_size is None: + raise ValueError("Crop size must be specified if do_center_crop is True.") + + if do_rescale and rescale_factor is None: + raise ValueError("Rescale factor must be specified if do_rescale is True.") + + if do_normalize and (image_mean is None or image_std is None): + raise ValueError("Image mean and std must be specified if do_normalize is True.") + + # PIL RGBA images are converted to RGB + if do_convert_rgb: + images = [convert_to_rgb(image) for image in images] + + # All transformations expect numpy arrays. + images = [to_numpy_array(image) for image in images] + + if is_scaled_image(images[0]) and do_rescale: + logger.warning_once( + "It looks like you are trying to rescale already rescaled images. If the input" + " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." + ) + + if input_data_format is None: + # We assume that all images have the same channel dimension format. 
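+             # infer_channel_dimension_format guesses channels-first vs. channels-last from whichever axis of the first image has 1 or 3 entries.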
+ input_data_format = infer_channel_dimension_format(images[0]) + + if do_resize: + images = [ + self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) + for image in images + ] + + if do_center_crop: + images = [ + self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images + ] + + if do_rescale: + images = [ + self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) + for image in images + ] + + if do_normalize: + images = [ + self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) + for image in images + ] + + images = [ + to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images + ] + + data = {"pixel_values": images} + return BatchFeature(data=data, tensor_type=return_tensors) diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/modeling_chinese_clip.py b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/modeling_chinese_clip.py new file mode 100644 index 0000000000000000000000000000000000000000..a16fb081b1935769dee60909d3d9314693fcb207 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/modeling_chinese_clip.py @@ -0,0 +1,1564 @@ +# coding=utf-8 +# Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" PyTorch Chinese-CLIP model.""" + + +import math +from dataclasses import dataclass +from typing import Any, List, Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn + +from ...activations import ACT2FN +from ...modeling_outputs import ( + BaseModelOutput, + BaseModelOutputWithPastAndCrossAttentions, + BaseModelOutputWithPooling, + BaseModelOutputWithPoolingAndCrossAttentions, +) +from ...modeling_utils import PreTrainedModel +from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer +from ...utils import ( + ModelOutput, + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from .configuration_chinese_clip import ChineseCLIPConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "OFA-Sys/chinese-clip-vit-base-patch16" +_CONFIG_FOR_DOC = "ChineseCLIPConfig" + +CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "OFA-Sys/chinese-clip-vit-base-patch16", + # See all Chinese-CLIP models at https://huggingface.co/models?filter=chinese_clip +] + + +# https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html +# Copied from transformers.models.clip.modeling_clip.contrastive_loss +def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: + return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device)) + + +def chinese_clip_loss(similarity: torch.Tensor) -> torch.Tensor: + caption_loss = contrastive_loss(similarity) + image_loss = contrastive_loss(similarity.t()) + return (caption_loss + image_loss) / 2.0 + + +@dataclass +class ChineseCLIPOutput(ModelOutput): + """ + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): + Contrastive loss for image-text similarity. + logits_per_image:(`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`): + The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text + similarity scores. + logits_per_text:(`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`): + The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image + similarity scores. + text_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`): + The text embeddings obtained by applying the projection layer to the pooled output of + [`ChineseCLIPTextModel`]. + image_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`): + The image embeddings obtained by applying the projection layer to the pooled output of + [`ChineseCLIPVisionModel`]. + text_model_output(`BaseModelOutputWithPoolingAndCrossAttentions`): + The output of the [`ChineseCLIPTextModel`]. + vision_model_output(`BaseModelOutputWithPoolingAndCrossAttentions`): + The output of the [`ChineseCLIPVisionModel`]. 
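+ 
+         Example (a minimal sketch with a randomly initialized model, so no checkpoint download is needed; the
+         dummy tensors are illustrative and the resulting scores are meaningless without pretrained weights):
+ 
+         ```python
+         >>> import torch
+         >>> from transformers import ChineseCLIPConfig, ChineseCLIPModel
+ 
+         >>> model = ChineseCLIPModel(ChineseCLIPConfig())
+         >>> pixel_values = torch.randn(1, 3, 224, 224)  # one dummy image
+         >>> input_ids = torch.randint(0, model.config.text_config.vocab_size, (2, 8))  # two dummy texts
+         >>> outputs = model(input_ids=input_ids, pixel_values=pixel_values)
+         >>> outputs.logits_per_image.shape  # (image_batch_size, text_batch_size)
+         torch.Size([1, 2])
+         >>> probs = outputs.logits_per_image.softmax(dim=1)  # image-to-text matching probabilities
+         ```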
+ """ + + loss: Optional[torch.FloatTensor] = None + logits_per_image: torch.FloatTensor = None + logits_per_text: torch.FloatTensor = None + text_embeds: torch.FloatTensor = None + image_embeds: torch.FloatTensor = None + text_model_output: BaseModelOutputWithPoolingAndCrossAttentions = None + vision_model_output: BaseModelOutputWithPoolingAndCrossAttentions = None + + def to_tuple(self) -> Tuple[Any]: + return tuple( + self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple() + for k in self.keys() + ) + + +# Copied from transformers.models.bert.modeling_bert.BertEmbeddings with Bert->ChineseCLIPText +class ChineseCLIPTextEmbeddings(nn.Module): + """Construct the embeddings from word, position and token_type embeddings.""" + + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) + self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") + self.register_buffer( + "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False + ) + self.register_buffer( + "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False + ) + + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + past_key_values_length: int = 0, + ) -> torch.Tensor: + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] + + seq_length = input_shape[1] + + if position_ids is None: + position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] + + # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs + # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves + # issue #5664 + if token_type_ids is None: + if hasattr(self, "token_type_ids"): + buffered_token_type_ids = self.token_type_ids[:, :seq_length] + buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) + token_type_ids = buffered_token_type_ids_expanded + else: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + token_type_embeddings = self.token_type_embeddings(token_type_ids) + + embeddings = inputs_embeds + token_type_embeddings + if self.position_embedding_type == "absolute": + position_embeddings = self.position_embeddings(position_ids) + embeddings += position_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +# Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings with 
CLIP->ChineseCLIP +class ChineseCLIPVisionEmbeddings(nn.Module): + def __init__(self, config: ChineseCLIPVisionConfig): + super().__init__() + self.config = config + self.embed_dim = config.hidden_size + self.image_size = config.image_size + self.patch_size = config.patch_size + + self.class_embedding = nn.Parameter(torch.randn(self.embed_dim)) + + self.patch_embedding = nn.Conv2d( + in_channels=config.num_channels, + out_channels=self.embed_dim, + kernel_size=self.patch_size, + stride=self.patch_size, + bias=False, + ) + + self.num_patches = (self.image_size // self.patch_size) ** 2 + self.num_positions = self.num_patches + 1 + self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) + self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False) + + def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: + batch_size = pixel_values.shape[0] + target_dtype = self.patch_embedding.weight.dtype + patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid] + patch_embeds = patch_embeds.flatten(2).transpose(1, 2) + + class_embeds = self.class_embedding.expand(batch_size, 1, -1) + embeddings = torch.cat([class_embeds, patch_embeds], dim=1) + embeddings = embeddings + self.position_embedding(self.position_ids) + return embeddings + + +# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->ChineseCLIPText +class ChineseCLIPTextSelfAttention(nn.Module): + def __init__(self, config, position_embedding_type=None): + super().__init__() + if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): + raise ValueError( + f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " + f"heads ({config.num_attention_heads})" + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + self.position_embedding_type = position_embedding_type or getattr( + config, "position_embedding_type", "absolute" + ) + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + self.max_position_embeddings = config.max_position_embeddings + self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) + + self.is_decoder = config.is_decoder + + def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + mixed_query_layer = self.query(hidden_states) + + # If this is instantiated as a cross-attention module, the keys + # and values come from an 
encoder; the attention mask needs to be + # such that the encoder's padding tokens are not attended to. + is_cross_attention = encoder_hidden_states is not None + + if is_cross_attention and past_key_value is not None: + # reuse k,v, cross_attentions + key_layer = past_key_value[0] + value_layer = past_key_value[1] + attention_mask = encoder_attention_mask + elif is_cross_attention: + key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) + value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) + attention_mask = encoder_attention_mask + elif past_key_value is not None: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + key_layer = torch.cat([past_key_value[0], key_layer], dim=2) + value_layer = torch.cat([past_key_value[1], value_layer], dim=2) + else: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + + query_layer = self.transpose_for_scores(mixed_query_layer) + + use_cache = past_key_value is not None + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_layer, value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. 
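+         # query/key/value are shaped (batch_size, num_attention_heads, seq_len, attention_head_size) after
+         # transpose_for_scores, so this matmul yields scores of shape (batch_size, num_attention_heads, query_len, key_len).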
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + query_length, key_length = query_layer.shape[2], key_layer.shape[2] + if use_cache: + position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view( + -1, 1 + ) + else: + position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) + position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) + distance = position_ids_l - position_ids_r + + positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) + positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility + + if self.position_embedding_type == "relative_key": + relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores + elif self.position_embedding_type == "relative_key_query": + relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in ChineseCLIPTextModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.functional.softmax(attention_scores, dim=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. 
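+         # nn.Dropout zeroes whole attention weights and rescales the survivors by 1/(1 - p) at training time,
+         # so rows of `attention_probs` need not sum to exactly 1 during training.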
+ attention_probs = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = torch.matmul(attention_probs, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(new_context_layer_shape) + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + + if self.is_decoder: + outputs = outputs + (past_key_value,) + return outputs + + +# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->ChineseCLIPText +class ChineseCLIPTextSelfOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->ChineseCLIPText +class ChineseCLIPTextAttention(nn.Module): + def __init__(self, config, position_embedding_type=None): + super().__init__() + self.self = ChineseCLIPTextSelfAttention(config, position_embedding_type=position_embedding_type) + self.output = ChineseCLIPTextSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads + ) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len(heads) + self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + self_outputs = self.self( + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + attention_output = self.output(self_outputs[0], hidden_states) + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +class ChineseCLIPVisionAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__(self, config): + super().__init__() + self.config = config + self.embed_dim = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.embed_dim // self.num_heads + if self.head_dim * 
self.num_heads != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" + f" {self.num_heads})." + ) + self.scale = self.head_dim**-0.5 + self.dropout = config.attention_dropout + + self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) + self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) + self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) + self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states: torch.Tensor, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + """Input shape: Batch x Time x Channel""" + + bsz, tgt_len, embed_dim = hidden_states.size() + + # get query proj + query_states = self.q_proj(hidden_states) * self.scale + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + + proj_shape = (bsz * self.num_heads, -1, self.head_dim) + query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) + key_states = key_states.view(*proj_shape) + value_states = value_states.view(*proj_shape) + + src_len = key_states.size(1) + attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) + + if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): + raise ValueError( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {attn_weights.size()}" + ) + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + + if output_attentions: + # this operation is a bit akward, but it's required to + # make sure that attn_weights keeps its gradient. 
+ # In order to do so, attn_weights have to reshaped + # twice and have to be reused in the following + attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) + else: + attn_weights_reshaped = None + + attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) + + attn_output = torch.bmm(attn_probs, value_states) + + if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) + attn_output = attn_output.transpose(1, 2) + attn_output = attn_output.reshape(bsz, tgt_len, embed_dim) + + attn_output = self.out_proj(attn_output) + + return attn_output, attn_weights_reshaped + + +# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->ChineseCLIPText +class ChineseCLIPTextIntermediate(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->ChineseCLIPText +class ChineseCLIPTextOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +# Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->ChineseCLIPVision +class ChineseCLIPVisionMLP(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.activation_fn = ACT2FN[config.hidden_act] + self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) + self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.fc1(hidden_states) + hidden_states = self.activation_fn(hidden_states) + hidden_states = self.fc2(hidden_states) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->ChineseCLIPText +class ChineseCLIPTextLayer(nn.Module): + def __init__(self, config): + super().__init__() + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = ChineseCLIPTextAttention(config) + self.is_decoder = config.is_decoder + self.add_cross_attention = config.add_cross_attention + if self.add_cross_attention: + if not self.is_decoder: + raise ValueError(f"{self} should be used as a decoder model if cross attention is added") + self.crossattention = ChineseCLIPTextAttention(config, position_embedding_type="absolute") + self.intermediate = 
ChineseCLIPTextIntermediate(config) + self.output = ChineseCLIPTextOutput(config) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + past_key_value=self_attn_past_key_value, + ) + attention_output = self_attention_outputs[0] + + # if decoder, the last output is tuple of self-attn cache + if self.is_decoder: + outputs = self_attention_outputs[1:-1] + present_key_value = self_attention_outputs[-1] + else: + outputs = self_attention_outputs[1:] # add self attentions if we output attention weights + + cross_attn_present_key_value = None + if self.is_decoder and encoder_hidden_states is not None: + if not hasattr(self, "crossattention"): + raise ValueError( + f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" + " by setting `config.add_cross_attention=True`" + ) + + # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple + cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None + cross_attention_outputs = self.crossattention( + attention_output, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + cross_attn_past_key_value, + output_attentions, + ) + attention_output = cross_attention_outputs[0] + outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights + + # add cross-attn cache to positions 3,4 of present_key_value tuple + cross_attn_present_key_value = cross_attention_outputs[-1] + present_key_value = present_key_value + cross_attn_present_key_value + + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output + ) + outputs = (layer_output,) + outputs + + # if decoder, return the attn key/values as the last output + if self.is_decoder: + outputs = outputs + (present_key_value,) + + return outputs + + def feed_forward_chunk(self, attention_output): + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + +class ChineseCLIPVisionLayer(nn.Module): + def __init__(self, config: ChineseCLIPConfig): + super().__init__() + self.embed_dim = config.hidden_size + self.self_attn = ChineseCLIPVisionAttention(config) + self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) + self.mlp = ChineseCLIPVisionMLP(config) + self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.FloatTensor]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under + returned tensors for more detail. + """ + residual = hidden_states + + hidden_states = self.layer_norm1(hidden_states) + hidden_states, attn_weights = self.self_attn( + hidden_states=hidden_states, + output_attentions=output_attentions, + ) + hidden_states = residual + hidden_states + + residual = hidden_states + hidden_states = self.layer_norm2(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (attn_weights,) + + return outputs + + +# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->ChineseCLIPText +class ChineseCLIPTextPooler(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. + first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +class ChineseCLIPPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = ChineseCLIPConfig + base_model_prefix = "chinese_clip" + supports_gradient_checkpointing = True + + def _init_weights(self, module): + """Initialize the weights""" + factor = self.config.initializer_factor + if isinstance(module, ChineseCLIPVisionEmbeddings): + factor = self.config.initializer_factor + nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor) + nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor) + nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor) + elif isinstance(module, ChineseCLIPTextEmbeddings): + nn.init.normal_(module.word_embeddings.weight, mean=0.0, std=self.config.initializer_range) + nn.init.normal_(module.position_embeddings.weight, mean=0.0, std=self.config.initializer_range) + nn.init.normal_(module.token_type_embeddings.weight, mean=0.0, std=self.config.initializer_range) + for embedding in [module.word_embeddings, module.position_embeddings, module.token_type_embeddings]: + if embedding.padding_idx is not None: + embedding.weight.data[embedding.padding_idx].zero_() + elif isinstance(module, ChineseCLIPVisionAttention): + factor = self.config.initializer_factor + in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor + out_proj_std = (module.embed_dim**-0.5) * factor + nn.init.normal_(module.q_proj.weight, std=in_proj_std) + nn.init.normal_(module.k_proj.weight, std=in_proj_std) + nn.init.normal_(module.v_proj.weight, std=in_proj_std) + nn.init.normal_(module.out_proj.weight, std=out_proj_std) + elif isinstance(module, ChineseCLIPVisionMLP): + factor = self.config.initializer_factor + in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor + fc_std = (2 * module.config.hidden_size) ** -0.5 * factor + nn.init.normal_(module.fc1.weight, std=fc_std) + nn.init.normal_(module.fc2.weight, std=in_proj_std) + elif isinstance(module, ChineseCLIPModel): + nn.init.normal_( + module.text_projection.weight, + std=module.text_embed_dim**-0.5 * self.config.initializer_factor, + ) + 
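+ # The visual projection below follows the same CLIP-style initialization as the text projection above:
+ # a zero-mean normal whose std scales with the tower width, i.e. vision_embed_dim**-0.5 * initializer_factor.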
nn.init.normal_( + module.visual_projection.weight, + std=module.vision_embed_dim**-0.5 * self.config.initializer_factor, + ) + + if isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + + +CHINESE_CLIP_START_DOCSTRING = r""" + This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it + as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and + behavior. + + Parameters: + config ([`ChineseCLIPConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +CHINESE_CLIP_TEXT_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, + 1]`: + + - 0 corresponds to a *sentence A* token, + - 1 corresponds to a *sentence B* token. + + [What are token type IDs?](../glossary#token-type-ids) + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
+""" + +CHINESE_CLIP_VISION_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using + [`AutoImageProcessor`]. See [`ChineseCLIPImageProcessor.__call__`] for details. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + +CHINESE_CLIP_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, + 1]`: + + - 0 corresponds to a *sentence A* token, + - 1 corresponds to a *sentence B* token. + + [What are token type IDs?](../glossary#token-type-ids) + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using + [`AutoImageProcessor`]. See [`ChineseCLIPImageProcessor.__call__`] for details. + return_loss (`bool`, *optional*): + Whether or not to return the contrastive loss. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
+""" + + +# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->ChineseCLIPText +class ChineseCLIPTextEncoder(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.layer = nn.ModuleList([ChineseCLIPTextLayer(config) for _ in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = False, + output_hidden_states: Optional[bool] = False, + return_dict: Optional[bool] = True, + ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." + ) + use_cache = False + + next_decoder_cache = () if use_cache else None + for i, layer_module in enumerate(self.layer): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_head_mask = head_mask[i] if head_mask is not None else None + past_key_value = past_key_values[i] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + layer_module.__call__, + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + + hidden_states = layer_outputs[0] + if use_cache: + next_decoder_cache += (layer_outputs[-1],) + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + if self.config.add_cross_attention: + all_cross_attentions = all_cross_attentions + (layer_outputs[2],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [ + hidden_states, + next_decoder_cache, + all_hidden_states, + all_self_attentions, + all_cross_attentions, + ] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +class ChineseCLIPVisionEncoder(nn.Module): + """ + Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a + [`ChineseCLIPVisionEncoderLayer`]. 
+ + Args: + config: ChineseCLIPConfig + """ + + def __init__(self, config: ChineseCLIPConfig): + super().__init__() + self.config = config + self.layers = nn.ModuleList([ChineseCLIPVisionLayer(config) for _ in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + inputs_embeds, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutput]: + r""" + Args: + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + encoder_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + + hidden_states = inputs_embeds + for idx, encoder_layer in enumerate(self.layers): + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + encoder_layer.__call__, + hidden_states, + output_attentions, + ) + else: + layer_outputs = encoder_layer( + hidden_states, + output_attentions=output_attentions, + ) + + hidden_states = layer_outputs[0] + + if output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) + return BaseModelOutput( + last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions + ) + + +class ChineseCLIPVisionTransformer(nn.Module): + def __init__(self, config: ChineseCLIPVisionConfig): + super().__init__() + self.config = config + embed_dim = config.hidden_size + + self.embeddings = ChineseCLIPVisionEmbeddings(config) + self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) + self.encoder = ChineseCLIPVisionEncoder(config) + self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) + + @add_start_docstrings_to_model_forward(CHINESE_CLIP_VISION_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=ChineseCLIPVisionConfig) + def forward( + self, + pixel_values: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPooling]: + r""" + Returns: + """ + 
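+ # Flow of the code below: the image is embedded into patch + class tokens, normalized by the pre-LayerNorm,
+ # and passed through the vision encoder; the pooled output is the post-LayerNorm of the first (class token)
+ # position of the last hidden state.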
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if pixel_values is None: + raise ValueError("You have to specify pixel_values") + + hidden_states = self.embeddings(pixel_values) + hidden_states = self.pre_layrnorm(hidden_states) + + encoder_outputs = self.encoder( + inputs_embeds=hidden_states, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + last_hidden_state = encoder_outputs[0] + pooled_output = last_hidden_state[:, 0, :] + pooled_output = self.post_layernorm(pooled_output) + + if not return_dict: + return (last_hidden_state, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPooling( + last_hidden_state=last_hidden_state, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + ) + + +@add_start_docstrings( + "The text model from CHINESE_CLIP without any head or projection on top.", + CHINESE_CLIP_START_DOCSTRING, +) +class ChineseCLIPTextModel(ChineseCLIPPreTrainedModel): + """ + + The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of + cross-attention is added between the self-attention layers, following the architecture described in [Attention is + all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, + Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. + + To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set + to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both `is_decoder` argument and + `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass. + """ + + config_class = ChineseCLIPTextConfig + + def __init__(self, config, add_pooling_layer=True): + super().__init__(config) + self.config = config + + self.embeddings = ChineseCLIPTextEmbeddings(config) + self.encoder = ChineseCLIPTextEncoder(config) + + self.pooler = ChineseCLIPTextPooler(config) if add_pooling_layer else None + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + @add_start_docstrings_to_model_forward(CHINESE_CLIP_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=BaseModelOutputWithPoolingAndCrossAttentions, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: + r""" + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). 
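+
+ Example (a minimal usage sketch; assumes the `OFA-Sys/chinese-clip-vit-base-patch16` checkpoint used throughout this file):
+
+ ```python
+ >>> from transformers import AutoTokenizer, ChineseCLIPTextModel
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
+ >>> model = ChineseCLIPTextModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
+
+ >>> inputs = tokenizer("皮卡丘", return_tensors="pt")
+ >>> outputs = model(**inputs)
+ >>> last_hidden_state = outputs.last_hidden_state  # shape (batch_size, sequence_length, hidden_size)
+ ```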
+ """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if self.config.is_decoder: + use_cache = use_cache if use_cache is not None else self.config.use_cache + else: + use_cache = False + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) + input_shape = input_ids.size() + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + batch_size, seq_length = input_shape + device = input_ids.device if input_ids is not None else inputs_embeds.device + + # past_key_values_length + past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 + + if attention_mask is None: + attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) + + if token_type_ids is None: + if hasattr(self.embeddings, "token_type_ids"): + buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] + buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) + token_type_ids = buffered_token_type_ids_expanded + else: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. 
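+ # get_extended_attention_mask broadcasts the 2D (or 3D) mask to [batch_size, 1, (1 or seq_length), seq_length]
+ # and converts it to an additive mask: 0.0 where attention is allowed and a large negative value where it is
+ # masked, so it can simply be added to the raw attention scores.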
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) + + # If a 2D or 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + if self.config.is_decoder and encoder_hidden_states is not None: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() + encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) + if encoder_attention_mask is None: + encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) + encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) + else: + encoder_extended_attention_mask = None + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) + + embedding_output = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + token_type_ids=token_type_ids, + inputs_embeds=inputs_embeds, + past_key_values_length=past_key_values_length, + ) + encoder_outputs = self.encoder( + embedding_output, + attention_mask=extended_attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_extended_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = encoder_outputs[0] + pooled_output = self.pooler(sequence_output) if self.pooler is not None else None + + if not return_dict: + return (sequence_output, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + past_key_values=encoder_outputs.past_key_values, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + cross_attentions=encoder_outputs.cross_attentions, + ) + + +@add_start_docstrings( + """The vision model from CHINESE_CLIP without any head or projection on top.""", + CHINESE_CLIP_START_DOCSTRING, +) +class ChineseCLIPVisionModel(ChineseCLIPPreTrainedModel): + config_class = ChineseCLIPVisionConfig + main_input_name = "pixel_values" + + def __init__(self, config: ChineseCLIPVisionConfig): + super().__init__(config) + self.vision_model = ChineseCLIPVisionTransformer(config) + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self) -> nn.Module: + return self.vision_model.embeddings.patch_embedding + + @add_start_docstrings_to_model_forward(CHINESE_CLIP_VISION_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=ChineseCLIPVisionConfig) + def forward( + self, + pixel_values: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPooling]: + r""" + Returns: + + Examples: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import CLIPProcessor, ChineseCLIPVisionModel + + >>> model = ChineseCLIPVisionModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") + 
>>> processor = CLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") + + >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> inputs = processor(images=image, return_tensors="pt") + + >>> outputs = model(**inputs) + >>> last_hidden_state = outputs.last_hidden_state + >>> pooled_output = outputs.pooler_output # pooled CLS states + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + return self.vision_model( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + +@add_start_docstrings(CHINESE_CLIP_START_DOCSTRING) +class ChineseCLIPModel(ChineseCLIPPreTrainedModel): + config_class = ChineseCLIPConfig + + def __init__(self, config: ChineseCLIPConfig): + super().__init__(config) + + if not isinstance(config.text_config, ChineseCLIPTextConfig): + raise ValueError( + "config.text_config is expected to be of type ChineseCLIPTextConfig but is of type" + f" {type(config.text_config)}." + ) + + if not isinstance(config.vision_config, ChineseCLIPVisionConfig): + raise ValueError( + "config.vision_config is expected to be of type ChineseCLIPVisionConfig but is of type" + f" {type(config.vision_config)}." + ) + + text_config = config.text_config + vision_config = config.vision_config + + self.projection_dim = config.projection_dim + self.text_embed_dim = text_config.hidden_size + self.vision_embed_dim = vision_config.hidden_size + + self.text_model = ChineseCLIPTextModel(text_config, add_pooling_layer=False) + self.vision_model = ChineseCLIPVisionTransformer(vision_config) + + self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False) + self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False) + self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value)) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(CHINESE_CLIP_TEXT_INPUTS_DOCSTRING) + def get_text_features( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> torch.FloatTensor: + r""" + Returns: + text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by + applying the projection layer to the final [CLS] hidden state of Text-Transformer. + + Examples: + + ```python + >>> from transformers import AutoTokenizer, ChineseCLIPModel + + >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") + >>> tokenizer = AutoTokenizer.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") + + >>> inputs = tokenizer(["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"], padding=True, return_tensors="pt") + >>> text_features = model.get_text_features(**inputs) + >>> text_features = text_features / text_features.norm(p=2, dim=-1, keepdim=True) + ```""" + # Use CHINESE_CLIP model's config for some fields (if specified) instead of those of vision & text components. 
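+ # Because the text tower is created with add_pooling_layer=False, the pooled text feature taken below is the
+ # last hidden state of the first ([CLS]) token, which is then mapped by text_projection into the joint space.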
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + text_outputs = self.text_model( + input_ids=input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = text_outputs[0][:, 0, :] + text_features = self.text_projection(pooled_output) + + return text_features + + @add_start_docstrings_to_model_forward(CHINESE_CLIP_VISION_INPUTS_DOCSTRING) + def get_image_features( + self, + pixel_values: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> torch.FloatTensor: + r""" + Returns: + image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by + applying the projection layer to the final [CLS] hidden state of Vision-Transformer. + + Examples: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import AutoProcessor, ChineseCLIPModel + + >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") + >>> processor = AutoProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") + + >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> inputs = processor(images=image, return_tensors="pt") + + >>> image_features = model.get_image_features(**inputs) + >>> image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True) + ```""" + # Use CHINESE_CLIP model's config for some fields (if specified) instead of those of vision & text components. 
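+ # vision_outputs[1] is the pooled vision output (the post-LayerNorm class-token embedding); visual_projection
+ # maps it into the joint image-text embedding space.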
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + vision_outputs = self.vision_model( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = vision_outputs[1] # pooled_output + image_features = self.visual_projection(pooled_output) + + return image_features + + @add_start_docstrings_to_model_forward(CHINESE_CLIP_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=ChineseCLIPOutput, config_class=ChineseCLIPConfig) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + pixel_values: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + return_loss: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, ChineseCLIPOutput]: + r""" + Returns: + + Examples: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import AutoProcessor, ChineseCLIPModel + + >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") + >>> processor = AutoProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") + + >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> inputs = processor(text=["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"], images=image, return_tensors="pt", padding=True) + + >>> outputs = model(**inputs) + >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score + >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities + ```""" + # Use CHINESE_CLIP model's config for some fields (if specified) instead of those of vision & text components. 
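+ # Summary of the computation below: encode image and text, project both into the shared space, L2-normalize,
+ # then compute scaled cosine similarities: logits_per_text = logit_scale.exp() * text_embeds @ image_embeds.T,
+ # and logits_per_image is its transpose.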
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + vision_outputs = self.vision_model( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + text_outputs = self.text_model( + input_ids=input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + image_embeds = vision_outputs[1] + image_embeds = self.visual_projection(image_embeds) + + text_embeds = text_outputs[0][:, 0, :] + text_embeds = self.text_projection(text_embeds) + + # normalized features + image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True) + text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True) + + # cosine similarity as logits + logit_scale = self.logit_scale.exp() + logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale + logits_per_image = logits_per_text.t() + + loss = None + if return_loss: + loss = chinese_clip_loss(logits_per_text) + + if not return_dict: + # fix the None pooled_output of text_outputs to conform with dict_output + pooled_output = text_outputs[1] + if pooled_output is None: + text_outputs = (text_outputs[0],) + text_outputs[2:] + output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) + return ((loss,) + output) if loss is not None else output + + return ChineseCLIPOutput( + loss=loss, + logits_per_image=logits_per_image, + logits_per_text=logits_per_text, + text_embeds=text_embeds, + image_embeds=image_embeds, + text_model_output=text_outputs, + vision_model_output=vision_outputs, + ) diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/processing_chinese_clip.py b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/processing_chinese_clip.py new file mode 100644 index 0000000000000000000000000000000000000000..832f44102abf32e7a5cb0b7f04cda0faea80ded0 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/processing_chinese_clip.py @@ -0,0 +1,142 @@ +# coding=utf-8 +# Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Image/Text processor class for Chinese-CLIP +""" + +import warnings + +from ...processing_utils import ProcessorMixin +from ...tokenization_utils_base import BatchEncoding + + +class ChineseCLIPProcessor(ProcessorMixin): + r""" + Constructs a Chinese-CLIP processor which wraps a Chinese-CLIP image processor and a Chinese-CLIP tokenizer into a + single processor. 
+ + [`ChineseCLIPProcessor`] offers all the functionalities of [`ChineseCLIPImageProcessor`] and [`BertTokenizerFast`]. + See the [`~ChineseCLIPProcessor.__call__`] and [`~ChineseCLIPProcessor.decode`] for more information. + + Args: + image_processor ([`ChineseCLIPImageProcessor`], *optional*): + The image processor is a required input. + tokenizer ([`BertTokenizerFast`], *optional*): + The tokenizer is a required input. + """ + + attributes = ["image_processor", "tokenizer"] + image_processor_class = "ChineseCLIPImageProcessor" + tokenizer_class = ("BertTokenizer", "BertTokenizerFast") + + def __init__(self, image_processor=None, tokenizer=None, **kwargs): + feature_extractor = None + if "feature_extractor" in kwargs: + warnings.warn( + "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" + " instead.", + FutureWarning, + ) + feature_extractor = kwargs.pop("feature_extractor") + + image_processor = image_processor if image_processor is not None else feature_extractor + if image_processor is None: + raise ValueError("You need to specify an `image_processor`.") + if tokenizer is None: + raise ValueError("You need to specify a `tokenizer`.") + + super().__init__(image_processor, tokenizer) + self.current_processor = self.image_processor + + def __call__(self, text=None, images=None, return_tensors=None, **kwargs): + """ + Main method to prepare for the model one or several sequence(s) and image(s). This method forwards the `text` + and `kwargs` arguments to BertTokenizerFast's [`~BertTokenizerFast.__call__`] if `text` is not `None` to encode + the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to + ChineseCLIPImageProcessor's [`~ChineseCLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring + of the above two methods for more information. + + Args: + text (`str`, `List[str]`, `List[List[str]]`): + The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings + (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set + `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). + images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`): + The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch + tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a + number of channels, H and W are image height and width. + + return_tensors (`str` or [`~utils.TensorType`], *optional*): + If set, will return tensors of a particular framework. Acceptable values are: + + - `'tf'`: Return TensorFlow `tf.constant` objects. + - `'pt'`: Return PyTorch `torch.Tensor` objects. + - `'np'`: Return NumPy `np.ndarray` objects. + - `'jax'`: Return JAX `jnp.ndarray` objects. + + Returns: + [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: + + - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. + - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when + `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not + `None`). + - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
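+
+ Example (an illustrative sketch; assumes the `OFA-Sys/chinese-clip-vit-base-patch16` checkpoint and the sample image URL from the Chinese-CLIP model docstrings):
+
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import ChineseCLIPProcessor
+
+ >>> processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
+
+ >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> inputs = processor(text=["杰尼龟", "皮卡丘"], images=image, return_tensors="pt", padding=True)
+ >>> pixel_values, input_ids = inputs["pixel_values"], inputs["input_ids"]
+ ```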
+ """ + + if text is None and images is None: + raise ValueError("You have to specify either text or images. Both cannot be none.") + + if text is not None: + encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs) + + if images is not None: + image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs) + + if text is not None and images is not None: + encoding["pixel_values"] = image_features.pixel_values + return encoding + elif text is not None: + return encoding + else: + return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors) + + def batch_decode(self, *args, **kwargs): + """ + This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please + refer to the docstring of this method for more information. + """ + return self.tokenizer.batch_decode(*args, **kwargs) + + def decode(self, *args, **kwargs): + """ + This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to + the docstring of this method for more information. + """ + return self.tokenizer.decode(*args, **kwargs) + + @property + def model_input_names(self): + tokenizer_input_names = self.tokenizer.model_input_names + image_processor_input_names = self.image_processor.model_input_names + return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) + + @property + def feature_extractor_class(self): + warnings.warn( + "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", + FutureWarning, + ) + return self.image_processor_class diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/configuration_efficientformer.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/configuration_efficientformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0eeed73256d8ee7077c90fa821c01fb1effb20c6 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/configuration_efficientformer.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/modeling_efficientformer.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/modeling_efficientformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..88a5088b3a0e5e03692afaca7988c250e6ff67fe Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/modeling_efficientformer.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/efficientformer/configuration_efficientformer.py b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/efficientformer/configuration_efficientformer.py new file mode 100644 index 0000000000000000000000000000000000000000..fecb90a886e8eb9ef06c15748034825a20a1b0bf --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/efficientformer/configuration_efficientformer.py @@ -0,0 +1,173 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" EfficientFormer model configuration""" + +from typing import List + +from ...configuration_utils import PretrainedConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + +EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = { + "snap-research/efficientformer-l1-300": ( + "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json" + ), +} + + +class EfficientFormerConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of an [`EfficientFormerModel`]. It is used to + instantiate an EfficientFormer model according to the specified arguments, defining the model architecture. + Instantiating a configuration with the defaults will yield a similar configuration to that of the EfficientFormer + [snap-research/efficientformer-l1](https://huggingface.co/snap-research/efficientformer-l1) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + depths (`List(int)`, *optional*, defaults to `[3, 2, 6, 4]`) + Depth of each stage. + hidden_sizes (`List(int)`, *optional*, defaults to `[48, 96, 224, 448]`) + Dimensionality of each stage. + downsamples (`List(bool)`, *optional*, defaults to `[True, True, True, True]`) + Whether or not to downsample inputs between two stages. + dim (`int`, *optional*, defaults to 448): + Number of channels in Meta3D layers + key_dim (`int`, *optional*, defaults to 32): + The size of the key in meta3D block. + attention_ratio (`int`, *optional*, defaults to 4): + Ratio of the dimension of the query and value to the dimension of the key in MSHA block + resolution (`int`, *optional*, defaults to 7) + Size of each patch + num_hidden_layers (`int`, *optional*, defaults to 5): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 8): + Number of attention heads for each attention layer in the 3D MetaBlock. + mlp_expansion_ratio (`int`, *optional*, defaults to 4): + Ratio of size of the hidden dimensionality of an MLP to the dimensionality of its input. + hidden_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings and encoder. + patch_size (`int`, *optional*, defaults to 16): + The size (resolution) of each patch. + num_channels (`int`, *optional*, defaults to 3): + The number of input channels. + pool_size (`int`, *optional*, defaults to 3): + Kernel size of pooling layers. + downsample_patch_size (`int`, *optional*, defaults to 3): + The size of patches in downsampling layers. + downsample_stride (`int`, *optional*, defaults to 2): + The stride of convolution kernels in downsampling layers. + downsample_pad (`int`, *optional*, defaults to 1): + Padding in downsampling layers. + drop_path_rate (`int`, *optional*, defaults to 0): + Rate at which to increase dropout probability in DropPath. + num_meta3d_blocks (`int`, *optional*, defaults to 1): + The number of 3D MetaBlocks in the last stage. 
+ distillation (`bool`, *optional*, defaults to `True`): + Whether to add a distillation head. + use_layer_scale (`bool`, *optional*, defaults to `True`): + Whether to scale outputs from token mixers. + layer_scale_init_value (`float`, *optional*, defaults to 1e-5): + Factor by which outputs from token mixers are scaled. + hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"selu"` and `"gelu_new"` are supported. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + layer_norm_eps (`float`, *optional*, defaults to 1e-12): + The epsilon used by the layer normalization layers. + image_size (`int`, *optional*, defaults to `224`): + The size (resolution) of each image. + + Example: + + ```python + >>> from transformers import EfficientFormerConfig, EfficientFormerModel + + >>> # Initializing a EfficientFormer efficientformer-l1 style configuration + >>> configuration = EfficientFormerConfig() + + >>> # Initializing a EfficientFormerModel (with random weights) from the efficientformer-l3 style configuration + >>> model = EfficientFormerModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "efficientformer" + + def __init__( + self, + depths: List[int] = [3, 2, 6, 4], + hidden_sizes: List[int] = [48, 96, 224, 448], + downsamples: List[bool] = [True, True, True, True], + dim: int = 448, + key_dim: int = 32, + attention_ratio: int = 4, + resolution: int = 7, + num_hidden_layers: int = 5, + num_attention_heads: int = 8, + mlp_expansion_ratio: int = 4, + hidden_dropout_prob: float = 0.0, + patch_size: int = 16, + num_channels: int = 3, + pool_size: int = 3, + downsample_patch_size: int = 3, + downsample_stride: int = 2, + downsample_pad: int = 1, + drop_path_rate: float = 0.0, + num_meta3d_blocks: int = 1, + distillation: bool = True, + use_layer_scale: bool = True, + layer_scale_init_value: float = 1e-5, + hidden_act: str = "gelu", + initializer_range: float = 0.02, + layer_norm_eps: float = 1e-12, + image_size: int = 224, + batch_norm_eps: float = 1e-05, + **kwargs, + ) -> None: + super().__init__(**kwargs) + + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.hidden_sizes = hidden_sizes + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.initializer_range = initializer_range + self.layer_norm_eps = layer_norm_eps + self.patch_size = patch_size + self.num_channels = num_channels + self.depths = depths + self.mlp_expansion_ratio = mlp_expansion_ratio + self.downsamples = downsamples + self.dim = dim + self.key_dim = key_dim + self.attention_ratio = attention_ratio + self.resolution = resolution + self.pool_size = pool_size + self.downsample_patch_size = downsample_patch_size + self.downsample_stride = downsample_stride + self.downsample_pad = downsample_pad + self.drop_path_rate = drop_path_rate + self.num_meta3d_blocks = num_meta3d_blocks + self.distillation = distillation + self.use_layer_scale = use_layer_scale + self.layer_scale_init_value = layer_scale_init_value + self.image_size = image_size + self.batch_norm_eps = batch_norm_eps diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/graphormer/__init__.py 
b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/graphormer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4263525682147f42553effe2c7b287ec91c6613d --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/graphormer/__init__.py @@ -0,0 +1,57 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available + + +_import_structure = { + "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"], +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_graphormer"] = [ + "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST", + "GraphormerForGraphClassification", + "GraphormerModel", + "GraphormerPreTrainedModel", + ] + + +if TYPE_CHECKING: + from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_graphormer import ( + GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST, + GraphormerForGraphClassification, + GraphormerModel, + GraphormerPreTrainedModel, + ) + + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/graphormer/__pycache__/__init__.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/graphormer/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3aac16b99fb7ff4a4fdb7a93fac297719a1639df Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/graphormer/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/graphormer/__pycache__/collating_graphormer.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/graphormer/__pycache__/collating_graphormer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d3095553162edd7f0f33e98619534939bf79d561 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/graphormer/__pycache__/collating_graphormer.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/graphormer/__pycache__/configuration_graphormer.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/graphormer/__pycache__/configuration_graphormer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..59f0c88cd0c627b57b969230f67e46d6af7a6f4d Binary files /dev/null 
and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/graphormer/__pycache__/configuration_graphormer.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/graphormer/algos_graphormer.pyx b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/graphormer/algos_graphormer.pyx new file mode 100644 index 0000000000000000000000000000000000000000..a0fafbdee53b55efb9596036817b03be0d006992 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/graphormer/algos_graphormer.pyx @@ -0,0 +1,107 @@ +# Copyright (c) Microsoft Corporation and HuggingFace +# Licensed under the MIT License. + +import cython + +cimport numpy +from cython.parallel cimport parallel, prange + +import numpy as np + + +# Reduce this number if matrices are too big for large graphs +UNREACHABLE_NODE_DISTANCE = 510 + +def floyd_warshall(adjacency_matrix): + """ + Applies the Floyd-Warshall algorithm to the adjacency matrix, to compute the + shortest paths distance between all nodes, up to UNREACHABLE_NODE_DISTANCE. + """ + (nrows, ncols) = adjacency_matrix.shape + assert nrows == ncols + cdef unsigned int n = nrows + + adj_mat_copy = adjacency_matrix.astype(np.int32, order='C', casting='safe', copy=True) + assert adj_mat_copy.flags['C_CONTIGUOUS'] + cdef numpy.ndarray[numpy.int32_t, ndim=2, mode='c'] M = adj_mat_copy + cdef numpy.ndarray[numpy.int32_t, ndim=2, mode='c'] path = -1 * np.ones([n, n], dtype=np.int32) + + cdef unsigned int i, j, k + cdef numpy.int32_t M_ij, M_ik, cost_ikkj + cdef numpy.int32_t* M_ptr = &M[0,0] + cdef numpy.int32_t* M_i_ptr + cdef numpy.int32_t* M_k_ptr + + # set unreachable nodes distance to UNREACHABLE_NODE_DISTANCE + for i in range(n): + for j in range(n): + if i == j: + M[i][j] = 0 + elif M[i][j] == 0: + M[i][j] = UNREACHABLE_NODE_DISTANCE + + # floyed algo + for k in range(n): + M_k_ptr = M_ptr + n*k + for i in range(n): + M_i_ptr = M_ptr + n*i + M_ik = M_i_ptr[k] + for j in range(n): + cost_ikkj = M_ik + M_k_ptr[j] + M_ij = M_i_ptr[j] + if M_ij > cost_ikkj: + M_i_ptr[j] = cost_ikkj + path[i][j] = k + + # set unreachable path to UNREACHABLE_NODE_DISTANCE + for i in range(n): + for j in range(n): + if M[i][j] >= UNREACHABLE_NODE_DISTANCE: + path[i][j] = UNREACHABLE_NODE_DISTANCE + M[i][j] = UNREACHABLE_NODE_DISTANCE + + return M, path + + +def get_all_edges(path, i, j): + """ + Recursive function to compute all possible paths between two nodes from the graph adjacency matrix. + """ + cdef int k = path[i][j] + if k == -1: + return [] + else: + return get_all_edges(path, i, k) + [k] + get_all_edges(path, k, j) + + +def gen_edge_input(max_dist, path, edge_feat): + """ + Generates the full edge feature and adjacency matrix. 
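(Editor's note: the `floyd_warshall` and `get_all_edges` kernels above are compiled with Cython for speed, but their semantics are easy to check in plain NumPy. The sketch below is an illustrative re-implementation under the same conventions — a zero entry means "no edge", distances are capped at `UNREACHABLE_NODE_DISTANCE = 510`, and `path[i][j]` stores one intermediate node of a shortest i→j path. It is not part of the module; it only shows what the compiled code computes.)

```python
import numpy as np

UNREACHABLE = 510  # mirrors UNREACHABLE_NODE_DISTANCE above


def floyd_warshall_py(adjacency_matrix):
    """Pure-Python equivalent of the Cython kernel: all-pairs shortest paths plus a predecessor table."""
    n = adjacency_matrix.shape[0]
    dist = np.where(adjacency_matrix == 0, UNREACHABLE, adjacency_matrix).astype(np.int32)
    np.fill_diagonal(dist, 0)
    path = -1 * np.ones((n, n), dtype=np.int32)  # -1 means "direct edge, no intermediate node"

    for k in range(n):
        for i in range(n):
            for j in range(n):
                if dist[i, k] + dist[k, j] < dist[i, j]:
                    dist[i, j] = dist[i, k] + dist[k, j]
                    path[i, j] = k

    # clamp unreachable pairs, exactly like the compiled version
    unreachable = dist >= UNREACHABLE
    dist[unreachable] = UNREACHABLE
    path[unreachable] = UNREACHABLE
    return dist, path


def get_all_edges_py(path, i, j):
    """Recursively expand path[i][j] into the list of intermediate nodes between i and j."""
    k = int(path[i][j])
    return [] if k == -1 else get_all_edges_py(path, i, k) + [k] + get_all_edges_py(path, k, j)


# a 3-node chain 0 - 1 - 2 (toy adjacency matrix)
adj = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]])
dist, path = floyd_warshall_py(adj)
print(dist[0, 2])                    # 2: node 0 reaches node 2 in two hops
print(get_all_edges_py(path, 0, 2))  # [1]: the intermediate node on that shortest path
```

`gen_edge_input` (continued below) then walks these reconstructed paths to gather, for every node pair, the edge features encountered along the shortest path.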
+ Shape: num_nodes * num_nodes * max_distance_between_nodes * num_edge_features + Dim 1 is the input node, dim 2 the output node of the edge, dim 3 the depth of the edge, dim 4 the feature + """ + (nrows, ncols) = path.shape + assert nrows == ncols + cdef unsigned int n = nrows + cdef unsigned int max_dist_copy = max_dist + + path_copy = path.astype(long, order='C', casting='safe', copy=True) + edge_feat_copy = edge_feat.astype(long, order='C', casting='safe', copy=True) + assert path_copy.flags['C_CONTIGUOUS'] + assert edge_feat_copy.flags['C_CONTIGUOUS'] + + cdef numpy.ndarray[numpy.int32_t, ndim=4, mode='c'] edge_fea_all = -1 * np.ones([n, n, max_dist_copy, edge_feat.shape[-1]], dtype=np.int32) + cdef unsigned int i, j, k, num_path, cur + + for i in range(n): + for j in range(n): + if i == j: + continue + if path_copy[i][j] == UNREACHABLE_NODE_DISTANCE: + continue + path = [i] + get_all_edges(path_copy, i, j) + [j] + num_path = len(path) - 1 + for k in range(num_path): + edge_fea_all[i, j, k, :] = edge_feat_copy[path[k], path[k+1], :] + + return edge_fea_all diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/graphormer/collating_graphormer.py b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/graphormer/collating_graphormer.py new file mode 100644 index 0000000000000000000000000000000000000000..58ce602ea28de1a3f5f45c40a9ffb1a0e4f0fdcf --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/graphormer/collating_graphormer.py @@ -0,0 +1,134 @@ +# Copyright (c) Microsoft Corporation and HuggingFace +# Licensed under the MIT License. + +from typing import Any, Dict, List, Mapping + +import numpy as np +import torch + +from ...utils import is_cython_available, requires_backends + + +if is_cython_available(): + import pyximport + + pyximport.install(setup_args={"include_dirs": np.get_include()}) + from . 
import algos_graphormer # noqa E402 + + +def convert_to_single_emb(x, offset: int = 512): + feature_num = x.shape[1] if len(x.shape) > 1 else 1 + feature_offset = 1 + np.arange(0, feature_num * offset, offset, dtype=np.int64) + x = x + feature_offset + return x + + +def preprocess_item(item, keep_features=True): + requires_backends(preprocess_item, ["cython"]) + + if keep_features and "edge_attr" in item.keys(): # edge_attr + edge_attr = np.asarray(item["edge_attr"], dtype=np.int64) + else: + edge_attr = np.ones((len(item["edge_index"][0]), 1), dtype=np.int64) # same embedding for all + + if keep_features and "node_feat" in item.keys(): # input_nodes + node_feature = np.asarray(item["node_feat"], dtype=np.int64) + else: + node_feature = np.ones((item["num_nodes"], 1), dtype=np.int64) # same embedding for all + + edge_index = np.asarray(item["edge_index"], dtype=np.int64) + + input_nodes = convert_to_single_emb(node_feature) + 1 + num_nodes = item["num_nodes"] + + if len(edge_attr.shape) == 1: + edge_attr = edge_attr[:, None] + attn_edge_type = np.zeros([num_nodes, num_nodes, edge_attr.shape[-1]], dtype=np.int64) + attn_edge_type[edge_index[0], edge_index[1]] = convert_to_single_emb(edge_attr) + 1 + + # node adj matrix [num_nodes, num_nodes] bool + adj = np.zeros([num_nodes, num_nodes], dtype=bool) + adj[edge_index[0], edge_index[1]] = True + + shortest_path_result, path = algos_graphormer.floyd_warshall(adj) + max_dist = np.amax(shortest_path_result) + + input_edges = algos_graphormer.gen_edge_input(max_dist, path, attn_edge_type) + attn_bias = np.zeros([num_nodes + 1, num_nodes + 1], dtype=np.single) # with graph token + + # combine + item["input_nodes"] = input_nodes + 1 # we shift all indices by one for padding + item["attn_bias"] = attn_bias + item["attn_edge_type"] = attn_edge_type + item["spatial_pos"] = shortest_path_result.astype(np.int64) + 1 # we shift all indices by one for padding + item["in_degree"] = np.sum(adj, axis=1).reshape(-1) + 1 # we shift all indices by one for padding + item["out_degree"] = item["in_degree"] # for undirected graph + item["input_edges"] = input_edges + 1 # we shift all indices by one for padding + if "labels" not in item: + item["labels"] = item["y"] + + return item + + +class GraphormerDataCollator: + def __init__(self, spatial_pos_max=20, on_the_fly_processing=False): + if not is_cython_available(): + raise ImportError("Graphormer preprocessing needs Cython (pyximport)") + + self.spatial_pos_max = spatial_pos_max + self.on_the_fly_processing = on_the_fly_processing + + def __call__(self, features: List[dict]) -> Dict[str, Any]: + if self.on_the_fly_processing: + features = [preprocess_item(i) for i in features] + + if not isinstance(features[0], Mapping): + features = [vars(f) for f in features] + batch = {} + + max_node_num = max(len(i["input_nodes"]) for i in features) + node_feat_size = len(features[0]["input_nodes"][0]) + edge_feat_size = len(features[0]["attn_edge_type"][0][0]) + max_dist = max(len(i["input_edges"][0][0]) for i in features) + edge_input_size = len(features[0]["input_edges"][0][0][0]) + batch_size = len(features) + + batch["attn_bias"] = torch.zeros(batch_size, max_node_num + 1, max_node_num + 1, dtype=torch.float) + batch["attn_edge_type"] = torch.zeros(batch_size, max_node_num, max_node_num, edge_feat_size, dtype=torch.long) + batch["spatial_pos"] = torch.zeros(batch_size, max_node_num, max_node_num, dtype=torch.long) + batch["in_degree"] = torch.zeros(batch_size, max_node_num, dtype=torch.long) + batch["input_nodes"] = 
torch.zeros(batch_size, max_node_num, node_feat_size, dtype=torch.long) + batch["input_edges"] = torch.zeros( + batch_size, max_node_num, max_node_num, max_dist, edge_input_size, dtype=torch.long + ) + + for ix, f in enumerate(features): + for k in ["attn_bias", "attn_edge_type", "spatial_pos", "in_degree", "input_nodes", "input_edges"]: + f[k] = torch.tensor(f[k]) + + if len(f["attn_bias"][1:, 1:][f["spatial_pos"] >= self.spatial_pos_max]) > 0: + f["attn_bias"][1:, 1:][f["spatial_pos"] >= self.spatial_pos_max] = float("-inf") + + batch["attn_bias"][ix, : f["attn_bias"].shape[0], : f["attn_bias"].shape[1]] = f["attn_bias"] + batch["attn_edge_type"][ix, : f["attn_edge_type"].shape[0], : f["attn_edge_type"].shape[1], :] = f[ + "attn_edge_type" + ] + batch["spatial_pos"][ix, : f["spatial_pos"].shape[0], : f["spatial_pos"].shape[1]] = f["spatial_pos"] + batch["in_degree"][ix, : f["in_degree"].shape[0]] = f["in_degree"] + batch["input_nodes"][ix, : f["input_nodes"].shape[0], :] = f["input_nodes"] + batch["input_edges"][ + ix, : f["input_edges"].shape[0], : f["input_edges"].shape[1], : f["input_edges"].shape[2], : + ] = f["input_edges"] + + batch["out_degree"] = batch["in_degree"] + + sample = features[0]["labels"] + if len(sample) == 1: # one task + if isinstance(sample[0], float): # regression + batch["labels"] = torch.from_numpy(np.concatenate([i["labels"] for i in features])) + else: # binary classification + batch["labels"] = torch.from_numpy(np.concatenate([i["labels"] for i in features])) + else: # multi task classification, left to float to keep the NaNs + batch["labels"] = torch.from_numpy(np.stack([i["labels"] for i in features], axis=0)) + + return batch diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/graphormer/configuration_graphormer.py b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/graphormer/configuration_graphormer.py new file mode 100644 index 0000000000000000000000000000000000000000..9d49fbea29448d5ebb52c26724c145442b8b0a95 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/graphormer/configuration_graphormer.py @@ -0,0 +1,221 @@ +# coding=utf-8 +# Copyright 2022 Microsoft, clefourrier and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Graphormer model configuration""" + +from ...configuration_utils import PretrainedConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + +GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = { + # pcqm4mv1 now deprecated + "graphormer-base": "https://huggingface.co/clefourrier/graphormer-base-pcqm4mv2/resolve/main/config.json", + # See all Graphormer models at https://huggingface.co/models?filter=graphormer +} + + +class GraphormerConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`~GraphormerModel`]. It is used to instantiate an + Graphormer model according to the specified arguments, defining the model architecture. 
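(Editor's note on the preprocessing above: `convert_to_single_emb` shifts each feature column into its own disjoint id range — column `c` gets offset `1 + c * 512` by default — so that all categorical node and edge features can share a single embedding table. A small, self-contained illustration of that offsetting, using made-up feature values:)

```python
import numpy as np


def convert_to_single_emb(x, offset: int = 512):
    # same logic as the collator above: column c is shifted by 1 + c * offset
    feature_num = x.shape[1] if len(x.shape) > 1 else 1
    feature_offset = 1 + np.arange(0, feature_num * offset, offset, dtype=np.int64)
    return x + feature_offset


node_feat = np.array([[3, 7], [1, 0]])  # two nodes, two categorical features each (toy values)
print(convert_to_single_emb(node_feat))
# [[  4 520]
#  [  2 513]]
```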
Instantiating a + configuration with the defaults will yield a similar configuration to that of the Graphormer + [graphormer-base-pcqm4mv1](https://huggingface.co/graphormer-base-pcqm4mv1) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + num_classes (`int`, *optional*, defaults to 1): + Number of target classes or labels, set to n for binary classification of n tasks. + num_atoms (`int`, *optional*, defaults to 512*9): + Number of node types in the graphs. + num_edges (`int`, *optional*, defaults to 512*3): + Number of edges types in the graph. + num_in_degree (`int`, *optional*, defaults to 512): + Number of in degrees types in the input graphs. + num_out_degree (`int`, *optional*, defaults to 512): + Number of out degrees types in the input graphs. + num_edge_dis (`int`, *optional*, defaults to 128): + Number of edge dis in the input graphs. + multi_hop_max_dist (`int`, *optional*, defaults to 20): + Maximum distance of multi hop edges between two nodes. + spatial_pos_max (`int`, *optional*, defaults to 1024): + Maximum distance between nodes in the graph attention bias matrices, used during preprocessing and + collation. + edge_type (`str`, *optional*, defaults to multihop): + Type of edge relation chosen. + max_nodes (`int`, *optional*, defaults to 512): + Maximum number of nodes which can be parsed for the input graphs. + share_input_output_embed (`bool`, *optional*, defaults to `False`): + Shares the embedding layer between encoder and decoder - careful, True is not implemented. + num_layers (`int`, *optional*, defaults to 12): + Number of layers. + embedding_dim (`int`, *optional*, defaults to 768): + Dimension of the embedding layer in encoder. + ffn_embedding_dim (`int`, *optional*, defaults to 768): + Dimension of the "intermediate" (often named feed-forward) layer in encoder. + num_attention_heads (`int`, *optional*, defaults to 32): + Number of attention heads in the encoder. + self_attention (`bool`, *optional*, defaults to `True`): + Model is self attentive (False not implemented). + activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"silu"` and `"gelu_new"` are supported. + dropout (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_dropout (`float`, *optional*, defaults to 0.1): + The dropout probability for the attention weights. + activation_dropout (`float`, *optional*, defaults to 0.1): + The dropout probability for the activation of the linear transformer layer. + layerdrop (`float`, *optional*, defaults to 0.0): + The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) + for more details. + bias (`bool`, *optional*, defaults to `True`): + Uses bias in the attention module - unsupported at the moment. + embed_scale(`float`, *optional*, defaults to None): + Scaling factor for the node embeddings. + num_trans_layers_to_freeze (`int`, *optional*, defaults to 0): + Number of transformer layers to freeze. + encoder_normalize_before (`bool`, *optional*, defaults to `False`): + Normalize features before encoding the graph. 
+ pre_layernorm (`bool`, *optional*, defaults to `False`): + Apply layernorm before self attention and the feed forward network. Without this, post layernorm will be + used. + apply_graphormer_init (`bool`, *optional*, defaults to `False`): + Apply a custom graphormer initialisation to the model before training. + freeze_embeddings (`bool`, *optional*, defaults to `False`): + Freeze the embedding layer, or train it along the model. + encoder_normalize_before (`bool`, *optional*, defaults to `False`): + Apply the layer norm before each encoder block. + q_noise (`float`, *optional*, defaults to 0.0): + Amount of quantization noise (see "Training with Quantization Noise for Extreme Model Compression"). (For + more detail, see fairseq's documentation on quant_noise). + qn_block_size (`int`, *optional*, defaults to 8): + Size of the blocks for subsequent quantization with iPQ (see q_noise). + kdim (`int`, *optional*, defaults to None): + Dimension of the key in the attention, if different from the other values. + vdim (`int`, *optional*, defaults to None): + Dimension of the value in the attention, if different from the other values. + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). + traceable (`bool`, *optional*, defaults to `False`): + Changes return value of the encoder's inner_state to stacked tensors. + + Example: + ```python + >>> from transformers import GraphormerForGraphClassification, GraphormerConfig + + >>> # Initializing a Graphormer graphormer-base-pcqm4mv2 style configuration + >>> configuration = GraphormerConfig() + + >>> # Initializing a model from the graphormer-base-pcqm4mv1 style configuration + >>> model = GraphormerForGraphClassification(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ``` + """ + + model_type = "graphormer" + keys_to_ignore_at_inference = ["past_key_values"] + + def __init__( + self, + num_classes: int = 1, + num_atoms: int = 512 * 9, + num_edges: int = 512 * 3, + num_in_degree: int = 512, + num_out_degree: int = 512, + num_spatial: int = 512, + num_edge_dis: int = 128, + multi_hop_max_dist: int = 5, # sometimes is 20 + spatial_pos_max: int = 1024, + edge_type: str = "multi_hop", + max_nodes: int = 512, + share_input_output_embed: bool = False, + num_hidden_layers: int = 12, + embedding_dim: int = 768, + ffn_embedding_dim: int = 768, + num_attention_heads: int = 32, + dropout: float = 0.1, + attention_dropout: float = 0.1, + activation_dropout: float = 0.1, + layerdrop: float = 0.0, + encoder_normalize_before: bool = False, + pre_layernorm: bool = False, + apply_graphormer_init: bool = False, + activation_fn: str = "gelu", + embed_scale: float = None, + freeze_embeddings: bool = False, + num_trans_layers_to_freeze: int = 0, + traceable: bool = False, + q_noise: float = 0.0, + qn_block_size: int = 8, + kdim: int = None, + vdim: int = None, + bias: bool = True, + self_attention: bool = True, + pad_token_id=0, + bos_token_id=1, + eos_token_id=2, + **kwargs, + ): + self.num_classes = num_classes + self.num_atoms = num_atoms + self.num_in_degree = num_in_degree + self.num_out_degree = num_out_degree + self.num_edges = num_edges + self.num_spatial = num_spatial + self.num_edge_dis = num_edge_dis + self.edge_type = edge_type + self.multi_hop_max_dist = multi_hop_max_dist + self.spatial_pos_max = spatial_pos_max + self.max_nodes = max_nodes + self.num_hidden_layers = num_hidden_layers + self.embedding_dim = 
embedding_dim + self.hidden_size = embedding_dim + self.ffn_embedding_dim = ffn_embedding_dim + self.num_attention_heads = num_attention_heads + self.dropout = dropout + self.attention_dropout = attention_dropout + self.activation_dropout = activation_dropout + self.layerdrop = layerdrop + self.encoder_normalize_before = encoder_normalize_before + self.pre_layernorm = pre_layernorm + self.apply_graphormer_init = apply_graphormer_init + self.activation_fn = activation_fn + self.embed_scale = embed_scale + self.freeze_embeddings = freeze_embeddings + self.num_trans_layers_to_freeze = num_trans_layers_to_freeze + self.share_input_output_embed = share_input_output_embed + self.traceable = traceable + self.q_noise = q_noise + self.qn_block_size = qn_block_size + + # These parameters are here for future extensions + # atm, the model only supports self attention + self.kdim = kdim + self.vdim = vdim + self.self_attention = self_attention + self.bias = bias + + super().__init__( + pad_token_id=pad_token_id, + bos_token_id=bos_token_id, + eos_token_id=eos_token_id, + **kwargs, + ) diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/roc_bert/__pycache__/configuration_roc_bert.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/roc_bert/__pycache__/configuration_roc_bert.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e273ac67d743874df7dfb76619d2ce3c29ca646 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/roc_bert/__pycache__/configuration_roc_bert.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vitmatte/__pycache__/__init__.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vitmatte/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..366ca50a9ba58b321a96cdd5ed27a7afe3111408 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vitmatte/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vitmatte/__pycache__/configuration_vitmatte.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vitmatte/__pycache__/configuration_vitmatte.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..966c50f9a33ad28ed74126d866578332bc59f135 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vitmatte/__pycache__/configuration_vitmatte.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vitmatte/__pycache__/convert_vitmatte_to_hf.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vitmatte/__pycache__/convert_vitmatte_to_hf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0cc13c45a2c1bd07b7bf1796d2963318484161d3 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vitmatte/__pycache__/convert_vitmatte_to_hf.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vitmatte/__pycache__/image_processing_vitmatte.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vitmatte/__pycache__/image_processing_vitmatte.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..1fa36daeea055f60477ad4d769eea4832c403637 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vitmatte/__pycache__/image_processing_vitmatte.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vitmatte/__pycache__/modeling_vitmatte.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vitmatte/__pycache__/modeling_vitmatte.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52162fe5dcea781238eeccbae46533851603e2d0 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vitmatte/__pycache__/modeling_vitmatte.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vitmatte/configuration_vitmatte.py b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vitmatte/configuration_vitmatte.py new file mode 100644 index 0000000000000000000000000000000000000000..562abbe5e5ae9fb11851abd0e4addd073fbaf703 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vitmatte/configuration_vitmatte.py @@ -0,0 +1,108 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" VitMatte model configuration""" + +import copy +from typing import List + +from ...configuration_utils import PretrainedConfig +from ...utils import logging +from ..auto.configuration_auto import CONFIG_MAPPING + + +logger = logging.get_logger(__name__) + +VITMATTE_PRETRAINED_CONFIG_ARCHIVE_MAP = { + "hustvl/vitmatte-small-composition-1k": "https://huggingface.co/hustvl/vitmatte-small-composition-1k/resolve/main/config.json", +} + + +class VitMatteConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of [`VitMatteForImageMatting`]. It is used to + instantiate a ViTMatte model according to the specified arguments, defining the model architecture. Instantiating a + configuration with the defaults will yield a similar configuration to that of the ViTMatte + [hustvl/vitmatte-small-composition-1k](https://huggingface.co/hustvl/vitmatte-small-composition-1k) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + backbone_config (`PretrainedConfig` or `dict`, *optional*, defaults to `VitDetConfig()`): + The configuration of the backbone model. + hidden_size (`int`, *optional*, defaults to 384): + The number of input channels of the decoder. + batch_norm_eps (`float`, *optional*, defaults to 1e-05): + The epsilon used by the batch norm layers. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
+ convstream_hidden_sizes (`List[int]`, *optional*, defaults to `[48, 96, 192]`): + The output channels of the ConvStream module. + fusion_hidden_sizes (`List[int]`, *optional*, defaults to `[256, 128, 64, 32]`): + The output channels of the Fusion blocks. + + Example: + + ```python + >>> from transformers import VitMatteConfig, VitMatteForImageMatting + + >>> # Initializing a ViTMatte hustvl/vitmatte-small-composition-1k style configuration + >>> configuration = VitMatteConfig() + + >>> # Initializing a model (with random weights) from the hustvl/vitmatte-small-composition-1k style configuration + >>> model = VitMatteForImageMatting(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "vitmatte" + + def __init__( + self, + backbone_config: PretrainedConfig = None, + hidden_size: int = 384, + batch_norm_eps: float = 1e-5, + initializer_range: float = 0.02, + convstream_hidden_sizes: List[int] = [48, 96, 192], + fusion_hidden_sizes: List[int] = [256, 128, 64, 32], + **kwargs, + ): + super().__init__(**kwargs) + + if backbone_config is None: + logger.info("`backbone_config` is `None`. Initializing the config with the default `VitDet` backbone.") + backbone_config = CONFIG_MAPPING["vitdet"](out_features=["stage4"]) + elif isinstance(backbone_config, dict): + backbone_model_type = backbone_config.get("model_type") + config_class = CONFIG_MAPPING[backbone_model_type] + backbone_config = config_class.from_dict(backbone_config) + + self.backbone_config = backbone_config + self.batch_norm_eps = batch_norm_eps + self.hidden_size = hidden_size + self.initializer_range = initializer_range + self.convstream_hidden_sizes = convstream_hidden_sizes + self.fusion_hidden_sizes = fusion_hidden_sizes + + def to_dict(self): + """ + Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`]. Returns: + `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance, + """ + output = copy.deepcopy(self.__dict__) + output["backbone_config"] = self.backbone_config.to_dict() + output["model_type"] = self.__class__.model_type + return output diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vitmatte/convert_vitmatte_to_hf.py b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vitmatte/convert_vitmatte_to_hf.py new file mode 100644 index 0000000000000000000000000000000000000000..bcc05563337198021c91f56356533bf87c1e6e9f --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vitmatte/convert_vitmatte_to_hf.py @@ -0,0 +1,170 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Convert VitMatte checkpoints from the original repository. 
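(Editor's note, before the conversion script: the `VitMatteConfig.to_dict` override defined above serializes the nested backbone config to a plain dict, and the constructor rebuilds it through `CONFIG_MAPPING` when a dict is passed back in. A minimal round-trip sketch; the argument values are arbitrary:)

```python
from transformers import VitDetConfig, VitMatteConfig

# build a config with an explicit backbone, then serialize it
config = VitMatteConfig(backbone_config=VitDetConfig(out_features=["stage4"]), hidden_size=384)
as_dict = config.to_dict()
print(as_dict["model_type"], type(as_dict["backbone_config"]))  # vitmatte <class 'dict'>

# feeding the dict back in re-instantiates the backbone config via CONFIG_MAPPING
restored = VitMatteConfig.from_dict(as_dict)
print(type(restored.backbone_config).__name__)  # VitDetConfig
```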
+ +URL: https://github.com/hustvl/ViTMatte +""" + +import argparse + +import requests +import torch +from huggingface_hub import hf_hub_download +from PIL import Image + +from transformers import VitDetConfig, VitMatteConfig, VitMatteForImageMatting, VitMatteImageProcessor + + +def get_config(model_name): + hidden_size = 384 if "small" in model_name else 768 + num_attention_heads = 6 if "small" in model_name else 12 + + backbone_config = VitDetConfig( + num_channels=4, + image_size=512, + pretrain_image_size=224, + patch_size=16, + hidden_size=hidden_size, + num_attention_heads=num_attention_heads, + use_absolute_position_embeddings=True, + use_relative_position_embeddings=True, + window_size=14, + # 2, 5, 8, 11 for global attention + window_block_indices=[0, 1, 3, 4, 6, 7, 9, 10], + residual_block_indices=[2, 5, 8, 11], + out_features=["stage12"], + ) + + return VitMatteConfig(backbone_config=backbone_config, hidden_size=hidden_size) + + +# here we list all keys to be renamed (original name on the left, our name on the right) +def create_rename_keys(config): + rename_keys = [] + + # fmt: off + # stem + rename_keys.append(("backbone.pos_embed", "backbone.embeddings.position_embeddings")) + rename_keys.append(("backbone.patch_embed.proj.weight", "backbone.embeddings.projection.weight")) + rename_keys.append(("backbone.patch_embed.proj.bias", "backbone.embeddings.projection.bias")) + # fmt: on + + return rename_keys + + +def rename_key(dct, old, new): + val = dct.pop(old) + dct[new] = val + + +def convert_vitmatte_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub): + config = get_config(model_name) + + # load original state dict + model_name_to_filename = { + "vitmatte-small-composition-1k": "ViTMatte_S_Com.pth", + "vitmatte-base-composition-1k": "ViTMatte_B_Com.pth", + "vitmatte-small-distinctions-646": "ViTMatte_S_DIS.pth", + "vitmatte-base-distinctions-646": "ViTMatte_B_DIS.pth", + } + + filename = model_name_to_filename[model_name] + filepath = hf_hub_download(repo_id="nielsr/vitmatte-checkpoints", filename=filename, repo_type="model") + state_dict = torch.load(filepath, map_location="cpu") + + # rename keys + for key in state_dict.copy().keys(): + val = state_dict.pop(key) + if "backbone.blocks" in key: + key = key.replace("backbone.blocks", "backbone.encoder.layer") + if "attn" in key: + key = key.replace("attn", "attention") + if "fusion_blks" in key: + key = key.replace("fusion_blks", "fusion_blocks") + if "bn" in key: + key = key.replace("bn", "batch_norm") + state_dict[key] = val + + # rename keys + rename_keys = create_rename_keys(config) + for src, dest in rename_keys: + rename_key(state_dict, src, dest) + + # create model + processor = VitMatteImageProcessor() + model = VitMatteForImageMatting(config) + model.eval() + + # load state dict + model.load_state_dict(state_dict) + + # verify on dummy image + trimap + url = "https://github.com/hustvl/ViTMatte/blob/main/demo/bulb_rgb.png?raw=true" + image = Image.open(requests.get(url, stream=True).raw).convert("RGB") + url = "https://github.com/hustvl/ViTMatte/blob/main/demo/bulb_trimap.png?raw=true" + trimap = Image.open(requests.get(url, stream=True).raw) + + pixel_values = processor(images=image, trimaps=trimap.convert("L"), return_tensors="pt").pixel_values + + with torch.no_grad(): + alphas = model(pixel_values).alphas + + if model_name == "vitmatte-small-composition-1k": + expected_slice = torch.tensor([[0.9977, 0.9987, 0.9990], [0.9980, 0.9998, 0.9998], [0.9983, 0.9998, 0.9998]]) + elif model_name == 
"vitmatte-base-composition-1k": + expected_slice = torch.tensor([[0.9972, 0.9971, 0.9981], [0.9948, 0.9987, 0.9994], [0.9963, 0.9992, 0.9995]]) + elif model_name == "vitmatte-small-distinctions-646": + expected_slice = torch.tensor([[0.9880, 0.9970, 0.9972], [0.9960, 0.9996, 0.9997], [0.9963, 0.9996, 0.9997]]) + elif model_name == "vitmatte-base-distinctions-646": + expected_slice = torch.tensor([[0.9963, 0.9998, 0.9999], [0.9995, 1.0000, 1.0000], [0.9992, 0.9999, 1.0000]]) + + assert torch.allclose(alphas[0, 0, :3, :3], expected_slice, atol=1e-4) + print("Looks ok!") + + if pytorch_dump_folder_path is not None: + print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}") + model.save_pretrained(pytorch_dump_folder_path) + processor.save_pretrained(pytorch_dump_folder_path) + + if push_to_hub: + print(f"Pushing model and processor for {model_name} to hub") + model.push_to_hub(f"hustvl/{model_name}") + processor.push_to_hub(f"hustvl/{model_name}") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--model_name", + default="vitmatte-small-composition-1k", + type=str, + choices=[ + "vitmatte-small-composition-1k", + "vitmatte-base-composition-1k", + "vitmatte-small-distinctions-646", + "vitmatte-base-distinctions-646", + ], + help="Name of the VitMatte model you'd like to convert.", + ) + parser.add_argument( + "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." + ) + parser.add_argument( + "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." + ) + + args = parser.parse_args() + convert_vitmatte_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vitmatte/image_processing_vitmatte.py b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vitmatte/image_processing_vitmatte.py new file mode 100644 index 0000000000000000000000000000000000000000..602b1fbefa8ceab4ffd4a900d5c0cade2308f3e5 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vitmatte/image_processing_vitmatte.py @@ -0,0 +1,267 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Image processor class for ViTMatte.""" + +from typing import List, Optional, Union + +import numpy as np + +from ...image_processing_utils import BaseImageProcessor, BatchFeature +from ...image_transforms import pad, to_channel_dimension_format +from ...image_utils import ( + IMAGENET_STANDARD_MEAN, + IMAGENET_STANDARD_STD, + ChannelDimension, + ImageInput, + get_image_size, + infer_channel_dimension_format, + is_scaled_image, + make_list_of_images, + to_numpy_array, + valid_images, +) +from ...utils import TensorType, logging + + +logger = logging.get_logger(__name__) + + +class VitMatteImageProcessor(BaseImageProcessor): + r""" + Constructs a ViTMatte image processor. + + Args: + do_rescale (`bool`, *optional*, defaults to `True`): + Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` + parameter in the `preprocess` method. + rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): + Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the + `preprocess` method. + do_normalize (`bool`, *optional*, defaults to `True`): + Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` + method. + image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): + Mean to use if normalizing the image. This is a float or list of floats the length of the number of + channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. + image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): + Standard deviation to use if normalizing the image. This is a float or list of floats the length of the + number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. + do_pad (`bool`, *optional*, defaults to `True`): + Whether to pad the image to make the width and height divisible by `size_divisibility`. Can be overridden + by the `do_pad` parameter in the `preprocess` method. + size_divisibility (`int`, *optional*, defaults to 32): + The width and height of the image will be padded to be divisible by this number. + """ + + model_input_names = ["pixel_values"] + + def __init__( + self, + do_rescale: bool = True, + rescale_factor: Union[int, float] = 1 / 255, + do_normalize: bool = True, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + do_pad: bool = True, + size_divisibility: int = 32, + **kwargs, + ) -> None: + super().__init__(**kwargs) + self.do_rescale = do_rescale + self.do_normalize = do_normalize + self.do_pad = do_pad + self.rescale_factor = rescale_factor + self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN + self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD + self.size_divisibility = size_divisibility + + def pad_image( + self, + image: np.ndarray, + size_divisibility: int = 32, + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + ) -> np.ndarray: + """ + Args: + image (`np.ndarray`): + Image to pad. + size_divisibility (`int`, *optional*, defaults to 32): + The width and height of the image will be padded to be divisible by this number. + data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): + The channel dimension format for the output image. 
Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - Unset: Use the channel dimension format of the input image. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. + """ + if input_data_format is None: + input_data_format = infer_channel_dimension_format(image) + + height, width = get_image_size(image, input_data_format) + + if height % size_divisibility != 0 or width % size_divisibility != 0: + pad_height = size_divisibility - height % size_divisibility + pad_width = size_divisibility - width % size_divisibility + padding = ((0, pad_height), (0, pad_width)) + image = pad(image, padding=padding, data_format=data_format, input_data_format=input_data_format) + + if data_format is not None: + image = to_channel_dimension_format(image, data_format, input_data_format) + + return image + + def preprocess( + self, + images: ImageInput, + trimaps: ImageInput, + do_rescale: Optional[bool] = None, + rescale_factor: Optional[float] = None, + do_normalize: Optional[bool] = None, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + do_pad: Optional[bool] = None, + size_divisibility: Optional[int] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ): + """ + Preprocess an image or batch of images. + + Args: + images (`ImageInput`): + Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If + passing in images with pixel values between 0 and 1, set `do_rescale=False`. + trimaps (`ImageInput`): + Trimap to preprocess. + do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): + Whether to rescale the image values between [0 - 1]. + rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): + Rescale factor to rescale the image by if `do_rescale` is set to `True`. + do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): + Whether to normalize the image. + image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): + Image mean to use if `do_normalize` is set to `True`. + image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): + Image standard deviation to use if `do_normalize` is set to `True`. + do_pad (`bool`, *optional*, defaults to `self.do_pad`): + Whether to pad the image. + size_divisibility (`int`, *optional*, defaults to `self.size_divisibility`): + The size divisibility to pad the image to if `do_pad` is set to `True`. + return_tensors (`str` or `TensorType`, *optional*): + The type of tensors to return. Can be one of: + - Unset: Return a list of `np.ndarray`. + - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. + - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. 
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. + - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. + data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): + The channel dimension format for the output image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - Unset: Use the channel dimension format of the input image. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. + """ + do_rescale = do_rescale if do_rescale is not None else self.do_rescale + do_normalize = do_normalize if do_normalize is not None else self.do_normalize + do_pad = do_pad if do_pad is not None else self.do_pad + rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor + image_mean = image_mean if image_mean is not None else self.image_mean + image_std = image_std if image_std is not None else self.image_std + size_divisibility = size_divisibility if size_divisibility is not None else self.size_divisibility + + images = make_list_of_images(images) + trimaps = make_list_of_images(trimaps, expected_ndims=2) + + if not valid_images(images): + raise ValueError( + "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " + "torch.Tensor, tf.Tensor or jax.ndarray." + ) + if not valid_images(trimaps): + raise ValueError( + "Invalid trimap type. Must be of type PIL.Image.Image, numpy.ndarray, " + "torch.Tensor, tf.Tensor or jax.ndarray." + ) + + if do_rescale and rescale_factor is None: + raise ValueError("Rescale factor must be specified if do_rescale is True.") + + if do_pad and size_divisibility is None: + raise ValueError("Size divisibility must be specified if do_pad is True.") + + if do_normalize and (image_mean is None or image_std is None): + raise ValueError("Image mean and std must be specified if do_normalize is True.") + + # All transformations expect numpy arrays. + images = [to_numpy_array(image) for image in images] + trimaps = [to_numpy_array(trimap) for trimap in trimaps] + + if is_scaled_image(images[0]) and do_rescale: + logger.warning_once( + "It looks like you are trying to rescale already rescaled images. If the input" + " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." + ) + + if input_data_format is None: + # We assume that all images have the same channel dimension format.
+ input_data_format = infer_channel_dimension_format(images[0]) + + if do_rescale: + images = [ + self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) + for image in images + ] + trimaps = [ + self.rescale(image=trimap, scale=rescale_factor, input_data_format=input_data_format) + for trimap in trimaps + ] + + if do_normalize: + images = [ + self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) + for image in images + ] + + # concatenate images and trimaps + images = [ + np.concatenate([image, np.expand_dims(trimap, axis=-1)], axis=-1) for image, trimap in zip(images, trimaps) + ] + + if do_pad: + images = [ + self.pad_image(image, size_divisibility=size_divisibility, input_data_format=input_data_format) + for image in images + ] + + images = [ + to_channel_dimension_format(image=image, channel_dim=data_format, input_channel_dim=input_data_format) + for image in images + ] + + data = {"pixel_values": images} + return BatchFeature(data=data, tensor_type=return_tensors) diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vitmatte/modeling_vitmatte.py b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vitmatte/modeling_vitmatte.py new file mode 100644 index 0000000000000000000000000000000000000000..01e6ed5aa0a3d822d918497f0c10953de9614484 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vitmatte/modeling_vitmatte.py @@ -0,0 +1,338 @@ +# coding=utf-8 +# Copyright 2023 HUST-VL and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch ViTMatte model.""" + +from dataclasses import dataclass +from typing import Optional, Tuple + +import torch +from torch import nn + +from ... import AutoBackbone +from ...modeling_utils import PreTrainedModel +from ...utils import ( + ModelOutput, + add_start_docstrings, + add_start_docstrings_to_model_forward, + replace_return_docstrings, +) +from .configuration_vitmatte import VitMatteConfig + + +VITMATTE_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "hustvl/vitmatte-small-composition-1k", + # See all VitMatte models at https://huggingface.co/models?filter=vitmatte +] + + +# General docstring +_CONFIG_FOR_DOC = "VitMatteConfig" + + +@dataclass +class ImageMattingOutput(ModelOutput): + """ + Class for outputs of image matting models. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Loss. + alphas (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Estimated alpha values. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. 
Hidden-states + (also called feature maps) of the model at the output of each stage. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + alphas: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +class VitMattePreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = VitMatteConfig + main_input_name = "pixel_values" + supports_gradient_checkpointing = True + + def _init_weights(self, module): + if isinstance(module, nn.Conv2d): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + + +class VitMatteBasicConv3x3(nn.Module): + """ + Basic convolution layers including: Conv3x3, BatchNorm2d, ReLU layers. + """ + + def __init__(self, config, in_channels, out_channels, stride=2, padding=1): + super().__init__() + self.conv = nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=3, + stride=stride, + padding=padding, + bias=False, + ) + self.batch_norm = nn.BatchNorm2d(out_channels, eps=config.batch_norm_eps) + self.relu = nn.ReLU() + + def forward(self, hidden_state): + hidden_state = self.conv(hidden_state) + hidden_state = self.batch_norm(hidden_state) + hidden_state = self.relu(hidden_state) + + return hidden_state + + +class VitMatteConvStream(nn.Module): + """ + Simple ConvStream containing a series of basic conv3x3 layers to extract detail features. + """ + + def __init__(self, config): + super().__init__() + + in_channels = config.backbone_config.num_channels + out_channels = config.convstream_hidden_sizes + + self.convs = nn.ModuleList() + self.conv_chans = [in_channels] + out_channels + + for i in range(len(self.conv_chans) - 1): + in_chan_ = self.conv_chans[i] + out_chan_ = self.conv_chans[i + 1] + self.convs.append(VitMatteBasicConv3x3(config, in_chan_, out_chan_)) + + def forward(self, pixel_values): + out_dict = {"detailed_feature_map_0": pixel_values} + embeddings = pixel_values + for i in range(len(self.convs)): + embeddings = self.convs[i](embeddings) + name_ = "detailed_feature_map_" + str(i + 1) + out_dict[name_] = embeddings + + return out_dict + + +class VitMatteFusionBlock(nn.Module): + """ + Simple fusion block to fuse features from ConvStream and Plain Vision Transformer. + """ + + def __init__(self, config, in_channels, out_channels): + super().__init__() + self.conv = VitMatteBasicConv3x3(config, in_channels, out_channels, stride=1, padding=1) + + def forward(self, features, detailed_feature_map): + upscaled_features = nn.functional.interpolate(features, scale_factor=2, mode="bilinear", align_corners=False) + out = torch.cat([detailed_feature_map, upscaled_features], dim=1) + out = self.conv(out) + + return out + + +class VitMatteHead(nn.Module): + """ + Simple Matting Head, containing only conv3x3 and conv1x1 layers. 
+ """ + + def __init__(self, config): + super().__init__() + + in_channels = config.fusion_hidden_sizes[-1] + mid_channels = 16 + + self.matting_convs = nn.Sequential( + nn.Conv2d(in_channels, mid_channels, kernel_size=3, stride=1, padding=1), + nn.BatchNorm2d(mid_channels), + nn.ReLU(True), + nn.Conv2d(mid_channels, 1, kernel_size=1, stride=1, padding=0), + ) + + def forward(self, hidden_state): + hidden_state = self.matting_convs(hidden_state) + + return hidden_state + + +class VitMatteDetailCaptureModule(nn.Module): + """ + Simple and lightweight Detail Capture Module for ViT Matting. + """ + + def __init__(self, config): + super().__init__() + if len(config.fusion_hidden_sizes) != len(config.convstream_hidden_sizes) + 1: + raise ValueError( + "The length of fusion_hidden_sizes should be equal to the length of convstream_hidden_sizes + 1." + ) + + self.config = config + self.convstream = VitMatteConvStream(config) + self.conv_chans = self.convstream.conv_chans + + self.fusion_blocks = nn.ModuleList() + self.fusion_channels = [config.hidden_size] + config.fusion_hidden_sizes + + for i in range(len(self.fusion_channels) - 1): + self.fusion_blocks.append( + VitMatteFusionBlock( + config=config, + in_channels=self.fusion_channels[i] + self.conv_chans[-(i + 1)], + out_channels=self.fusion_channels[i + 1], + ) + ) + + self.matting_head = VitMatteHead(config) + + def forward(self, features, pixel_values): + detail_features = self.convstream(pixel_values) + for i in range(len(self.fusion_blocks)): + detailed_feature_map_name = "detailed_feature_map_" + str(len(self.fusion_blocks) - i - 1) + features = self.fusion_blocks[i](features, detail_features[detailed_feature_map_name]) + + alphas = torch.sigmoid(self.matting_head(features)) + + return alphas + + +VITMATTE_START_DOCSTRING = r""" + Parameters: + This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use + it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and + behavior. + config ([`UperNetConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +VITMATTE_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using + [`AutoImageProcessor`]. See [`VitMatteImageProcessor.__call__`] for details. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See + `attentions` under returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under + returned tensors for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + """ViTMatte framework leveraging any vision backbone e.g. 
for ADE20k, CityScapes.""", + VITMATTE_START_DOCSTRING, +) +class VitMatteForImageMatting(VitMattePreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.config = config + + self.backbone = AutoBackbone.from_config(config.backbone_config) + self.decoder = VitMatteDetailCaptureModule(config) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(VITMATTE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @replace_return_docstrings(output_type=ImageMattingOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + pixel_values: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + labels: Optional[torch.Tensor] = None, + return_dict: Optional[bool] = None, + ): + """ + labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): + Ground truth image matting for computing the loss. + + Returns: + + Examples: + + ```python + >>> from transformers import VitMatteImageProcessor, VitMatteForImageMatting + >>> import torch + >>> from PIL import Image + >>> from huggingface_hub import hf_hub_download + + >>> processor = VitMatteImageProcessor.from_pretrained("hustvl/vitmatte-small-composition-1k") + >>> model = VitMatteForImageMatting.from_pretrained("hustvl/vitmatte-small-composition-1k") + + >>> filepath = hf_hub_download( + ... repo_id="hf-internal-testing/image-matting-fixtures", filename="image.png", repo_type="dataset" + ... ) + >>> image = Image.open(filepath).convert("RGB") + >>> filepath = hf_hub_download( + ... repo_id="hf-internal-testing/image-matting-fixtures", filename="trimap.png", repo_type="dataset" + ... ) + >>> trimap = Image.open(filepath).convert("L") + + >>> # prepare image + trimap for the model + >>> inputs = processor(images=image, trimaps=trimap, return_tensors="pt") + + >>> with torch.no_grad(): + ... 
alphas = model(**inputs).alphas + >>> print(alphas.shape) + torch.Size([1, 1, 640, 960]) + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + + outputs = self.backbone.forward_with_filtered_kwargs( + pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions + ) + + features = outputs.feature_maps[-1] + alphas = self.decoder(features, pixel_values) + + loss = None + if labels is not None: + raise NotImplementedError("Training is not yet supported") + + if not return_dict: + output = (alphas,) + outputs[1:] + return ((loss,) + output) if loss is not None else output + + return ImageMattingOutput( + loss=loss, + alphas=alphas, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/__pycache__/configuration_xlm_roberta_xl.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/__pycache__/configuration_xlm_roberta_xl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89a726a3b171061f8b6ed9ac4c9385d641f2c68d Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/__pycache__/configuration_xlm_roberta_xl.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/configuration_xlm_roberta_xl.py b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/configuration_xlm_roberta_xl.py new file mode 100644 index 0000000000000000000000000000000000000000..e2dee1cbe4e11b74defe20c4fdc2f736a9800803 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/configuration_xlm_roberta_xl.py @@ -0,0 +1,156 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" XLM_ROBERTa_XL configuration""" + +from collections import OrderedDict +from typing import Mapping + +from ...configuration_utils import PretrainedConfig +from ...onnx import OnnxConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + +XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = { + "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json", + "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json", + # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl +} + + +class XLMRobertaXLConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`XLMRobertaXLModel`] or a [`TFXLMRobertaXLModel`]. 
+ It is used to instantiate a XLM_ROBERTA_XL model according to the specified arguments, defining the model + architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the + XLM_ROBERTA_XL [facebook/xlm-roberta-xl](https://huggingface.co/facebook/xlm-roberta-xl) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 250880): + Vocabulary size of the XLM_ROBERTA_XL model. Defines the number of different tokens that can be represented + by the `inputs_ids` passed when calling [`XLMRobertaXLModel`]. + hidden_size (`int`, *optional*, defaults to 2560): + Dimensionality of the encoder layers and the pooler layer. + num_hidden_layers (`int`, *optional*, defaults to 36): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 32): + Number of attention heads for each attention layer in the Transformer encoder. + intermediate_size (`int`, *optional*, defaults to 10240): + Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. + hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"silu"` and `"gelu_new"` are supported. + hidden_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout ratio for the attention probabilities. + max_position_embeddings (`int`, *optional*, defaults to 514): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + type_vocab_size (`int`, *optional*, defaults to 1): + The vocabulary size of the `token_type_ids` passed when calling [`XLMRobertaXLModel`] or + [`TFXLMRobertaXLModel`]. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + layer_norm_eps (`float`, *optional*, defaults to 1e-5): + The epsilon used by the layer normalization layers. + position_embedding_type (`str`, *optional*, defaults to `"absolute"`): + Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For + positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to + [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155). + For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models + with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658). + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). Only + relevant if `config.is_decoder=True`. + classifier_dropout (`float`, *optional*): + The dropout ratio for the classification head. 
+ + Examples: + + ```python + >>> from transformers import XLMRobertaXLConfig, XLMRobertaXLModel + + >>> # Initializing a XLM_ROBERTA_XL bert-base-uncased style configuration + >>> configuration = XLMRobertaXLConfig() + + >>> # Initializing a model (with random weights) from the bert-base-uncased style configuration + >>> model = XLMRobertaXLModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "xlm-roberta-xl" + + def __init__( + self, + vocab_size=250880, + hidden_size=2560, + num_hidden_layers=36, + num_attention_heads=32, + intermediate_size=10240, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=514, + type_vocab_size=1, + initializer_range=0.02, + layer_norm_eps=1e-05, + pad_token_id=1, + bos_token_id=0, + eos_token_id=2, + position_embedding_type="absolute", + use_cache=True, + classifier_dropout=None, + **kwargs, + ): + super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.hidden_act = hidden_act + self.intermediate_size = intermediate_size + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.type_vocab_size = type_vocab_size + self.initializer_range = initializer_range + self.layer_norm_eps = layer_norm_eps + self.position_embedding_type = position_embedding_type + self.use_cache = use_cache + self.classifier_dropout = classifier_dropout + + +# Copied from transformers.models.roberta.configuration_roberta.RobertaOnnxConfig with Roberta->XLMRobertaXL +class XLMRobertaXLOnnxConfig(OnnxConfig): + @property + def inputs(self) -> Mapping[str, Mapping[int, str]]: + if self.task == "multiple-choice": + dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"} + else: + dynamic_axis = {0: "batch", 1: "sequence"} + return OrderedDict( + [ + ("input_ids", dynamic_axis), + ("attention_mask", dynamic_axis), + ] + ) diff --git a/infer_4_47_1/lib/python3.10/site-packages/fontTools/designspaceLib/__main__.py b/infer_4_47_1/lib/python3.10/site-packages/fontTools/designspaceLib/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..8f5e44ea9e432213593c9270284ea7fd860bb63d --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/fontTools/designspaceLib/__main__.py @@ -0,0 +1,6 @@ +import sys +from fontTools.designspaceLib import main + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/infer_4_47_1/lib/python3.10/site-packages/fontTools/designspaceLib/__pycache__/__init__.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/fontTools/designspaceLib/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e011e12b4f5f41b3eada49c3758f710353d3931 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/fontTools/designspaceLib/__pycache__/__init__.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/fontTools/designspaceLib/__pycache__/statNames.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/fontTools/designspaceLib/__pycache__/statNames.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d032279965978a89d5d3d3cc7b538032ba357f99 Binary files /dev/null 
and b/infer_4_47_1/lib/python3.10/site-packages/fontTools/designspaceLib/__pycache__/statNames.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/fontTools/designspaceLib/__pycache__/types.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/fontTools/designspaceLib/__pycache__/types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..835418c67a35a18f8e65cf36ef5a21b011d80d59 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/fontTools/designspaceLib/__pycache__/types.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/fontTools/designspaceLib/split.py b/infer_4_47_1/lib/python3.10/site-packages/fontTools/designspaceLib/split.py new file mode 100644 index 0000000000000000000000000000000000000000..0b7cdf4be05dea1e810b4fddf4bf026bc1a50a85 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/fontTools/designspaceLib/split.py @@ -0,0 +1,475 @@ +"""Allows building all the variable fonts of a DesignSpace version 5 by +splitting the document into interpolable sub-space, then into each VF. +""" + +from __future__ import annotations + +import itertools +import logging +import math +from typing import Any, Callable, Dict, Iterator, List, Tuple, cast + +from fontTools.designspaceLib import ( + AxisDescriptor, + AxisMappingDescriptor, + DesignSpaceDocument, + DiscreteAxisDescriptor, + InstanceDescriptor, + RuleDescriptor, + SimpleLocationDict, + SourceDescriptor, + VariableFontDescriptor, +) +from fontTools.designspaceLib.statNames import StatNames, getStatNames +from fontTools.designspaceLib.types import ( + ConditionSet, + Range, + Region, + getVFUserRegion, + locationInRegion, + regionInRegion, + userRegionToDesignRegion, +) + +LOGGER = logging.getLogger(__name__) + +MakeInstanceFilenameCallable = Callable[ + [DesignSpaceDocument, InstanceDescriptor, StatNames], str +] + + +def defaultMakeInstanceFilename( + doc: DesignSpaceDocument, instance: InstanceDescriptor, statNames: StatNames +) -> str: + """Default callable to synthesize an instance filename + when makeNames=True, for instances that don't specify an instance name + in the designspace. This part of the name generation can be overriden + because it's not specified by the STAT table. + """ + familyName = instance.familyName or statNames.familyNames.get("en") + styleName = instance.styleName or statNames.styleNames.get("en") + return f"{familyName}-{styleName}.ttf" + + +def splitInterpolable( + doc: DesignSpaceDocument, + makeNames: bool = True, + expandLocations: bool = True, + makeInstanceFilename: MakeInstanceFilenameCallable = defaultMakeInstanceFilename, +) -> Iterator[Tuple[SimpleLocationDict, DesignSpaceDocument]]: + """Split the given DS5 into several interpolable sub-designspaces. + There are as many interpolable sub-spaces as there are combinations of + discrete axis values. + + E.g. with axes: + - italic (discrete) Upright or Italic + - style (discrete) Sans or Serif + - weight (continuous) 100 to 900 + + There are 4 sub-spaces in which the Weight axis should interpolate: + (Upright, Sans), (Upright, Serif), (Italic, Sans) and (Italic, Serif). + + The sub-designspaces still include the full axis definitions and STAT data, + but the rules, sources, variable fonts, instances are trimmed down to only + keep what falls within the interpolable sub-space. + + Args: + - ``makeNames``: Whether to compute the instance family and style + names using the STAT data. 
+ - ``expandLocations``: Whether to turn all locations into "full" + locations, including implicit default axis values where missing. + - ``makeInstanceFilename``: Callable to synthesize an instance filename + when makeNames=True, for instances that don't specify an instance name + in the designspace. This part of the name generation can be overridden + because it's not specified by the STAT table. + + .. versionadded:: 5.0 + """ + discreteAxes = [] + interpolableUserRegion: Region = {} + for axis in doc.axes: + if hasattr(axis, "values"): + # Mypy doesn't support narrowing union types via hasattr() + # TODO(Python 3.10): use TypeGuard + # https://mypy.readthedocs.io/en/stable/type_narrowing.html + axis = cast(DiscreteAxisDescriptor, axis) + discreteAxes.append(axis) + else: + axis = cast(AxisDescriptor, axis) + interpolableUserRegion[axis.name] = Range( + axis.minimum, + axis.maximum, + axis.default, + ) + valueCombinations = itertools.product(*[axis.values for axis in discreteAxes]) + for values in valueCombinations: + discreteUserLocation = { + discreteAxis.name: value + for discreteAxis, value in zip(discreteAxes, values) + } + subDoc = _extractSubSpace( + doc, + {**interpolableUserRegion, **discreteUserLocation}, + keepVFs=True, + makeNames=makeNames, + expandLocations=expandLocations, + makeInstanceFilename=makeInstanceFilename, + ) + yield discreteUserLocation, subDoc + + +def splitVariableFonts( + doc: DesignSpaceDocument, + makeNames: bool = False, + expandLocations: bool = False, + makeInstanceFilename: MakeInstanceFilenameCallable = defaultMakeInstanceFilename, +) -> Iterator[Tuple[str, DesignSpaceDocument]]: + """Convert each variable font listed in this document into a standalone + designspace. This can be used to compile all the variable fonts from a + format 5 designspace using tools that can only deal with 1 VF at a time. + + Args: + - ``makeNames``: Whether to compute the instance family and style + names using the STAT data. + - ``expandLocations``: Whether to turn all locations into "full" + locations, including implicit default axis values where missing. + - ``makeInstanceFilename``: Callable to synthesize an instance filename + when makeNames=True, for instances that don't specify an instance name + in the designspace. This part of the name generation can be overridden + because it's not specified by the STAT table. + + .. versionadded:: 5.0 + """ + # Make one DesignspaceDoc v5 for each variable font + for vf in doc.getVariableFonts(): + vfUserRegion = getVFUserRegion(doc, vf) + vfDoc = _extractSubSpace( + doc, + vfUserRegion, + keepVFs=False, + makeNames=makeNames, + expandLocations=expandLocations, + makeInstanceFilename=makeInstanceFilename, + ) + vfDoc.lib = {**vfDoc.lib, **vf.lib} + yield vf.name, vfDoc + + +def convert5to4( + doc: DesignSpaceDocument, +) -> Dict[str, DesignSpaceDocument]: + """Convert each variable font listed in this document into a standalone + format 4 designspace. This can be used to compile all the variable fonts + from a format 5 designspace using tools that only know about format 4. + + .. 
versionadded:: 5.0 + """ + vfs = {} + for _location, subDoc in splitInterpolable(doc): + for vfName, vfDoc in splitVariableFonts(subDoc): + vfDoc.formatVersion = "4.1" + vfs[vfName] = vfDoc + return vfs + + +def _extractSubSpace( + doc: DesignSpaceDocument, + userRegion: Region, + *, + keepVFs: bool, + makeNames: bool, + expandLocations: bool, + makeInstanceFilename: MakeInstanceFilenameCallable, +) -> DesignSpaceDocument: + subDoc = DesignSpaceDocument() + # Don't include STAT info + # FIXME: (Jany) let's think about it. Not include = OK because the point of + # the splitting is to build VFs and we'll use the STAT data of the full + # document to generate the STAT of the VFs, so "no need" to have STAT data + # in sub-docs. Counterpoint: what if someone wants to split this DS for + # other purposes? Maybe for that it would be useful to also subset the STAT + # data? + # subDoc.elidedFallbackName = doc.elidedFallbackName + + def maybeExpandDesignLocation(object): + if expandLocations: + return object.getFullDesignLocation(doc) + else: + return object.designLocation + + for axis in doc.axes: + range = userRegion[axis.name] + if isinstance(range, Range) and hasattr(axis, "minimum"): + # Mypy doesn't support narrowing union types via hasattr() + # TODO(Python 3.10): use TypeGuard + # https://mypy.readthedocs.io/en/stable/type_narrowing.html + axis = cast(AxisDescriptor, axis) + subDoc.addAxis( + AxisDescriptor( + # Same info + tag=axis.tag, + name=axis.name, + labelNames=axis.labelNames, + hidden=axis.hidden, + # Subset range + minimum=max(range.minimum, axis.minimum), + default=range.default or axis.default, + maximum=min(range.maximum, axis.maximum), + map=[ + (user, design) + for user, design in axis.map + if range.minimum <= user <= range.maximum + ], + # Don't include STAT info + axisOrdering=None, + axisLabels=None, + ) + ) + + subDoc.axisMappings = mappings = [] + subDocAxes = {axis.name for axis in subDoc.axes} + for mapping in doc.axisMappings: + if not all(axis in subDocAxes for axis in mapping.inputLocation.keys()): + continue + if not all(axis in subDocAxes for axis in mapping.outputLocation.keys()): + LOGGER.error( + "In axis mapping from input %s, some output axes are not in the variable-font: %s", + mapping.inputLocation, + mapping.outputLocation, + ) + continue + + mappingAxes = set() + mappingAxes.update(mapping.inputLocation.keys()) + mappingAxes.update(mapping.outputLocation.keys()) + for axis in doc.axes: + if axis.name not in mappingAxes: + continue + range = userRegion[axis.name] + if ( + range.minimum != axis.minimum + or (range.default is not None and range.default != axis.default) + or range.maximum != axis.maximum + ): + LOGGER.error( + "Limiting axis ranges used in elements not supported: %s", + axis.name, + ) + continue + + mappings.append( + AxisMappingDescriptor( + inputLocation=mapping.inputLocation, + outputLocation=mapping.outputLocation, + ) + ) + + # Don't include STAT info + # subDoc.locationLabels = doc.locationLabels + + # Rules: subset them based on conditions + designRegion = userRegionToDesignRegion(doc, userRegion) + subDoc.rules = _subsetRulesBasedOnConditions(doc.rules, designRegion) + subDoc.rulesProcessingLast = doc.rulesProcessingLast + + # Sources: keep only the ones that fall within the kept axis ranges + for source in doc.sources: + if not locationInRegion(doc.map_backward(source.designLocation), userRegion): + continue + + subDoc.addSource( + SourceDescriptor( + filename=source.filename, + path=source.path, + font=source.font, + 
name=source.name, + designLocation=_filterLocation( + userRegion, maybeExpandDesignLocation(source) + ), + layerName=source.layerName, + familyName=source.familyName, + styleName=source.styleName, + muteKerning=source.muteKerning, + muteInfo=source.muteInfo, + mutedGlyphNames=source.mutedGlyphNames, + ) + ) + + # Copy family name translations from the old default source to the new default + vfDefault = subDoc.findDefault() + oldDefault = doc.findDefault() + if vfDefault is not None and oldDefault is not None: + vfDefault.localisedFamilyName = oldDefault.localisedFamilyName + + # Variable fonts: keep only the ones that fall within the kept axis ranges + if keepVFs: + # Note: call getVariableFont() to make the implicit VFs explicit + for vf in doc.getVariableFonts(): + vfUserRegion = getVFUserRegion(doc, vf) + if regionInRegion(vfUserRegion, userRegion): + subDoc.addVariableFont( + VariableFontDescriptor( + name=vf.name, + filename=vf.filename, + axisSubsets=[ + axisSubset + for axisSubset in vf.axisSubsets + if isinstance(userRegion[axisSubset.name], Range) + ], + lib=vf.lib, + ) + ) + + # Instances: same as Sources + compute missing names + for instance in doc.instances: + if not locationInRegion(instance.getFullUserLocation(doc), userRegion): + continue + + if makeNames: + statNames = getStatNames(doc, instance.getFullUserLocation(doc)) + familyName = instance.familyName or statNames.familyNames.get("en") + styleName = instance.styleName or statNames.styleNames.get("en") + subDoc.addInstance( + InstanceDescriptor( + filename=instance.filename + or makeInstanceFilename(doc, instance, statNames), + path=instance.path, + font=instance.font, + name=instance.name or f"{familyName} {styleName}", + userLocation={} if expandLocations else instance.userLocation, + designLocation=_filterLocation( + userRegion, maybeExpandDesignLocation(instance) + ), + familyName=familyName, + styleName=styleName, + postScriptFontName=instance.postScriptFontName + or statNames.postScriptFontName, + styleMapFamilyName=instance.styleMapFamilyName + or statNames.styleMapFamilyNames.get("en"), + styleMapStyleName=instance.styleMapStyleName + or statNames.styleMapStyleName, + localisedFamilyName=instance.localisedFamilyName + or statNames.familyNames, + localisedStyleName=instance.localisedStyleName + or statNames.styleNames, + localisedStyleMapFamilyName=instance.localisedStyleMapFamilyName + or statNames.styleMapFamilyNames, + localisedStyleMapStyleName=instance.localisedStyleMapStyleName + or {}, + lib=instance.lib, + ) + ) + else: + subDoc.addInstance( + InstanceDescriptor( + filename=instance.filename, + path=instance.path, + font=instance.font, + name=instance.name, + userLocation={} if expandLocations else instance.userLocation, + designLocation=_filterLocation( + userRegion, maybeExpandDesignLocation(instance) + ), + familyName=instance.familyName, + styleName=instance.styleName, + postScriptFontName=instance.postScriptFontName, + styleMapFamilyName=instance.styleMapFamilyName, + styleMapStyleName=instance.styleMapStyleName, + localisedFamilyName=instance.localisedFamilyName, + localisedStyleName=instance.localisedStyleName, + localisedStyleMapFamilyName=instance.localisedStyleMapFamilyName, + localisedStyleMapStyleName=instance.localisedStyleMapStyleName, + lib=instance.lib, + ) + ) + + subDoc.lib = doc.lib + + return subDoc + + +def _conditionSetFrom(conditionSet: List[Dict[str, Any]]) -> ConditionSet: + c: Dict[str, Range] = {} + for condition in conditionSet: + minimum, maximum = condition.get("minimum"), 
condition.get("maximum") + c[condition["name"]] = Range( + minimum if minimum is not None else -math.inf, + maximum if maximum is not None else math.inf, + ) + return c + + +def _subsetRulesBasedOnConditions( + rules: List[RuleDescriptor], designRegion: Region +) -> List[RuleDescriptor]: + # What rules to keep: + # - Keep the rule if any conditionset is relevant. + # - A conditionset is relevant if all conditions are relevant or it is empty. + # - A condition is relevant if + # - axis is point (C-AP), + # - and point in condition's range (C-AP-in) + # (in this case remove the condition because it's always true) + # - else (C-AP-out) whole conditionset can be discarded (condition false + # => conditionset false) + # - axis is range (C-AR), + # - (C-AR-all) and axis range fully contained in condition range: we can + # scrap the condition because it's always true + # - (C-AR-inter) and intersection(axis range, condition range) not empty: + # keep the condition with the smaller range (= intersection) + # - (C-AR-none) else, whole conditionset can be discarded + newRules: List[RuleDescriptor] = [] + for rule in rules: + newRule: RuleDescriptor = RuleDescriptor( + name=rule.name, conditionSets=[], subs=rule.subs + ) + for conditionset in rule.conditionSets: + cs = _conditionSetFrom(conditionset) + newConditionset: List[Dict[str, Any]] = [] + discardConditionset = False + for selectionName, selectionValue in designRegion.items(): + # TODO: Ensure that all(key in conditionset for key in region.keys())? + if selectionName not in cs: + # raise Exception("Selection has different axes than the rules") + continue + if isinstance(selectionValue, (float, int)): # is point + # Case C-AP-in + if selectionValue in cs[selectionName]: + pass # always matches, conditionset can stay empty for this one. + # Case C-AP-out + else: + discardConditionset = True + else: # is range + # Case C-AR-all + if selectionValue in cs[selectionName]: + pass # always matches, conditionset can stay empty for this one. 
+ else: + intersection = cs[selectionName].intersection(selectionValue) + # Case C-AR-inter + if intersection is not None: + newConditionset.append( + { + "name": selectionName, + "minimum": intersection.minimum, + "maximum": intersection.maximum, + } + ) + # Case C-AR-none + else: + discardConditionset = True + if not discardConditionset: + newRule.conditionSets.append(newConditionset) + if newRule.conditionSets: + newRules.append(newRule) + + return newRules + + +def _filterLocation( + userRegion: Region, + location: Dict[str, float], +) -> Dict[str, float]: + return { + name: value + for name, value in location.items() + if name in userRegion and isinstance(userRegion[name], Range) + } diff --git a/infer_4_47_1/lib/python3.10/site-packages/fontTools/designspaceLib/types.py b/infer_4_47_1/lib/python3.10/site-packages/fontTools/designspaceLib/types.py new file mode 100644 index 0000000000000000000000000000000000000000..80ba9d6d7b44f58773f42107d672c13651c166a9 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/fontTools/designspaceLib/types.py @@ -0,0 +1,147 @@ +from __future__ import annotations + +from dataclasses import dataclass +from typing import Dict, List, Optional, Union, cast + +from fontTools.designspaceLib import ( + AxisDescriptor, + DesignSpaceDocument, + DesignSpaceDocumentError, + RangeAxisSubsetDescriptor, + SimpleLocationDict, + ValueAxisSubsetDescriptor, + VariableFontDescriptor, +) + + +def clamp(value, minimum, maximum): + return min(max(value, minimum), maximum) + + +@dataclass +class Range: + minimum: float + """Inclusive minimum of the range.""" + maximum: float + """Inclusive maximum of the range.""" + default: float = 0 + """Default value""" + + def __post_init__(self): + self.minimum, self.maximum = sorted((self.minimum, self.maximum)) + self.default = clamp(self.default, self.minimum, self.maximum) + + def __contains__(self, value: Union[float, Range]) -> bool: + if isinstance(value, Range): + return self.minimum <= value.minimum and value.maximum <= self.maximum + return self.minimum <= value <= self.maximum + + def intersection(self, other: Range) -> Optional[Range]: + if self.maximum < other.minimum or self.minimum > other.maximum: + return None + else: + return Range( + max(self.minimum, other.minimum), + min(self.maximum, other.maximum), + self.default, # We don't care about the default in this use-case + ) + + +# A region selection is either a range or a single value, as a Designspace v5 +# axis-subset element only allows a single discrete value or a range for a +# variable-font element. +Region = Dict[str, Union[Range, float]] + +# A conditionset is a set of named ranges. +ConditionSet = Dict[str, Range] + +# A rule is a list of conditionsets where any has to be relevant for the whole rule to be relevant. 
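+# For example (values purely illustrative, not from this file): the conditionset
+# {"weight": Range(400, 700)} is relevant whenever the weight axis stays within
+# 400..700, and a rule such as [{"weight": Range(400, 700)}, {"width": Range(50, 100)}]
+# applies as soon as either of its two conditionsets matches.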
+Rule = List[ConditionSet] +Rules = Dict[str, Rule] + + +def locationInRegion(location: SimpleLocationDict, region: Region) -> bool: + for name, value in location.items(): + if name not in region: + return False + regionValue = region[name] + if isinstance(regionValue, (float, int)): + if value != regionValue: + return False + else: + if value not in regionValue: + return False + return True + + +def regionInRegion(region: Region, superRegion: Region) -> bool: + for name, value in region.items(): + if not name in superRegion: + return False + superValue = superRegion[name] + if isinstance(superValue, (float, int)): + if value != superValue: + return False + else: + if value not in superValue: + return False + return True + + +def userRegionToDesignRegion(doc: DesignSpaceDocument, userRegion: Region) -> Region: + designRegion = {} + for name, value in userRegion.items(): + axis = doc.getAxis(name) + if axis is None: + raise DesignSpaceDocumentError( + f"Cannot find axis named '{name}' for region." + ) + if isinstance(value, (float, int)): + designRegion[name] = axis.map_forward(value) + else: + designRegion[name] = Range( + axis.map_forward(value.minimum), + axis.map_forward(value.maximum), + axis.map_forward(value.default), + ) + return designRegion + + +def getVFUserRegion(doc: DesignSpaceDocument, vf: VariableFontDescriptor) -> Region: + vfUserRegion: Region = {} + # For each axis, 2 cases: + # - it has a range = it's an axis in the VF DS + # - it's a single location = use it to know which rules should apply in the VF + for axisSubset in vf.axisSubsets: + axis = doc.getAxis(axisSubset.name) + if axis is None: + raise DesignSpaceDocumentError( + f"Cannot find axis named '{axisSubset.name}' for variable font '{vf.name}'." + ) + if hasattr(axisSubset, "userMinimum"): + # Mypy doesn't support narrowing union types via hasattr() + # TODO(Python 3.10): use TypeGuard + # https://mypy.readthedocs.io/en/stable/type_narrowing.html + axisSubset = cast(RangeAxisSubsetDescriptor, axisSubset) + if not hasattr(axis, "minimum"): + raise DesignSpaceDocumentError( + f"Cannot select a range over '{axis.name}' for variable font '{vf.name}' " + "because it's a discrete axis, use only 'userValue' instead." + ) + axis = cast(AxisDescriptor, axis) + vfUserRegion[axis.name] = Range( + max(axisSubset.userMinimum, axis.minimum), + min(axisSubset.userMaximum, axis.maximum), + axisSubset.userDefault or axis.default, + ) + else: + axisSubset = cast(ValueAxisSubsetDescriptor, axisSubset) + vfUserRegion[axis.name] = axisSubset.userValue + # Any axis not mentioned explicitly has a single location = default value + for axis in doc.axes: + if axis.name not in vfUserRegion: + assert isinstance( + axis.default, (int, float) + ), f"Axis '{axis.name}' has no valid default value." 
+ vfUserRegion[axis.name] = axis.default + return vfUserRegion diff --git a/infer_4_47_1/lib/python3.10/site-packages/fontTools/feaLib/__pycache__/builder.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/fontTools/feaLib/__pycache__/builder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19af4d69a0fe4683f2de2aabb556a86436423105 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/fontTools/feaLib/__pycache__/builder.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/fontTools/feaLib/ast.py b/infer_4_47_1/lib/python3.10/site-packages/fontTools/feaLib/ast.py new file mode 100644 index 0000000000000000000000000000000000000000..17c6cc3fbe494a076d2b59f4664ab9fe56ecd20f --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/fontTools/feaLib/ast.py @@ -0,0 +1,2134 @@ +from fontTools.feaLib.error import FeatureLibError +from fontTools.feaLib.location import FeatureLibLocation +from fontTools.misc.encodingTools import getEncoding +from fontTools.misc.textTools import byteord, tobytes +from collections import OrderedDict +import itertools + +SHIFT = " " * 4 + +__all__ = [ + "Element", + "FeatureFile", + "Comment", + "GlyphName", + "GlyphClass", + "GlyphClassName", + "MarkClassName", + "AnonymousBlock", + "Block", + "FeatureBlock", + "NestedBlock", + "LookupBlock", + "GlyphClassDefinition", + "GlyphClassDefStatement", + "MarkClass", + "MarkClassDefinition", + "AlternateSubstStatement", + "Anchor", + "AnchorDefinition", + "AttachStatement", + "AxisValueLocationStatement", + "BaseAxis", + "CVParametersNameStatement", + "ChainContextPosStatement", + "ChainContextSubstStatement", + "CharacterStatement", + "ConditionsetStatement", + "CursivePosStatement", + "ElidedFallbackName", + "ElidedFallbackNameID", + "Expression", + "FeatureNameStatement", + "FeatureReferenceStatement", + "FontRevisionStatement", + "HheaField", + "IgnorePosStatement", + "IgnoreSubstStatement", + "IncludeStatement", + "LanguageStatement", + "LanguageSystemStatement", + "LigatureCaretByIndexStatement", + "LigatureCaretByPosStatement", + "LigatureSubstStatement", + "LookupFlagStatement", + "LookupReferenceStatement", + "MarkBasePosStatement", + "MarkLigPosStatement", + "MarkMarkPosStatement", + "MultipleSubstStatement", + "NameRecord", + "OS2Field", + "PairPosStatement", + "ReverseChainSingleSubstStatement", + "ScriptStatement", + "SinglePosStatement", + "SingleSubstStatement", + "SizeParameters", + "Statement", + "STATAxisValueStatement", + "STATDesignAxisStatement", + "STATNameStatement", + "SubtableStatement", + "TableBlock", + "ValueRecord", + "ValueRecordDefinition", + "VheaField", +] + + +def deviceToString(device): + if device is None: + return "" + else: + return "" % ", ".join("%d %d" % t for t in device) + + +fea_keywords = set( + [ + "anchor", + "anchordef", + "anon", + "anonymous", + "by", + "contour", + "cursive", + "device", + "enum", + "enumerate", + "excludedflt", + "exclude_dflt", + "feature", + "from", + "ignore", + "ignorebaseglyphs", + "ignoreligatures", + "ignoremarks", + "include", + "includedflt", + "include_dflt", + "language", + "languagesystem", + "lookup", + "lookupflag", + "mark", + "markattachmenttype", + "markclass", + "nameid", + "null", + "parameters", + "pos", + "position", + "required", + "righttoleft", + "reversesub", + "rsub", + "script", + "sub", + "substitute", + "subtable", + "table", + "usemarkfilteringset", + "useextension", + "valuerecorddef", + "base", + "gdef", + "head", + "hhea", + "name", + 
"vhea", + "vmtx", + ] +) + + +def asFea(g): + if hasattr(g, "asFea"): + return g.asFea() + elif isinstance(g, tuple) and len(g) == 2: + return asFea(g[0]) + " - " + asFea(g[1]) # a range + elif g.lower() in fea_keywords: + return "\\" + g + else: + return g + + +class Element(object): + """A base class representing "something" in a feature file.""" + + def __init__(self, location=None): + #: location of this element as a `FeatureLibLocation` object. + if location and not isinstance(location, FeatureLibLocation): + location = FeatureLibLocation(*location) + self.location = location + + def build(self, builder): + pass + + def asFea(self, indent=""): + """Returns this element as a string of feature code. For block-type + elements (such as :class:`FeatureBlock`), the `indent` string is + added to the start of each line in the output.""" + raise NotImplementedError + + def __str__(self): + return self.asFea() + + +class Statement(Element): + pass + + +class Expression(Element): + pass + + +class Comment(Element): + """A comment in a feature file.""" + + def __init__(self, text, location=None): + super(Comment, self).__init__(location) + #: Text of the comment + self.text = text + + def asFea(self, indent=""): + return self.text + + +class NullGlyph(Expression): + """The NULL glyph, used in glyph deletion substitutions.""" + + def __init__(self, location=None): + Expression.__init__(self, location) + #: The name itself as a string + + def glyphSet(self): + """The glyphs in this class as a tuple of :class:`GlyphName` objects.""" + return () + + def asFea(self, indent=""): + return "NULL" + + +class GlyphName(Expression): + """A single glyph name, such as ``cedilla``.""" + + def __init__(self, glyph, location=None): + Expression.__init__(self, location) + #: The name itself as a string + self.glyph = glyph + + def glyphSet(self): + """The glyphs in this class as a tuple of :class:`GlyphName` objects.""" + return (self.glyph,) + + def asFea(self, indent=""): + return asFea(self.glyph) + + +class GlyphClass(Expression): + """A glyph class, such as ``[acute cedilla grave]``.""" + + def __init__(self, glyphs=None, location=None): + Expression.__init__(self, location) + #: The list of glyphs in this class, as :class:`GlyphName` objects. + self.glyphs = glyphs if glyphs is not None else [] + self.original = [] + self.curr = 0 + + def glyphSet(self): + """The glyphs in this class as a tuple of :class:`GlyphName` objects.""" + return tuple(self.glyphs) + + def asFea(self, indent=""): + if len(self.original): + if self.curr < len(self.glyphs): + self.original.extend(self.glyphs[self.curr :]) + self.curr = len(self.glyphs) + return "[" + " ".join(map(asFea, self.original)) + "]" + else: + return "[" + " ".join(map(asFea, self.glyphs)) + "]" + + def extend(self, glyphs): + """Add a list of :class:`GlyphName` objects to the class.""" + self.glyphs.extend(glyphs) + + def append(self, glyph): + """Add a single :class:`GlyphName` object to the class.""" + self.glyphs.append(glyph) + + def add_range(self, start, end, glyphs): + """Add a range (e.g. ``A-Z``) to the class. 
``start`` and ``end`` + are either :class:`GlyphName` objects or strings representing the + start and end glyphs in the class, and ``glyphs`` is the full list of + :class:`GlyphName` objects in the range.""" + if self.curr < len(self.glyphs): + self.original.extend(self.glyphs[self.curr :]) + self.original.append((start, end)) + self.glyphs.extend(glyphs) + self.curr = len(self.glyphs) + + def add_cid_range(self, start, end, glyphs): + """Add a range to the class by glyph ID. ``start`` and ``end`` are the + initial and final IDs, and ``glyphs`` is the full list of + :class:`GlyphName` objects in the range.""" + if self.curr < len(self.glyphs): + self.original.extend(self.glyphs[self.curr :]) + self.original.append(("\\{}".format(start), "\\{}".format(end))) + self.glyphs.extend(glyphs) + self.curr = len(self.glyphs) + + def add_class(self, gc): + """Add glyphs from the given :class:`GlyphClassName` object to the + class.""" + if self.curr < len(self.glyphs): + self.original.extend(self.glyphs[self.curr :]) + self.original.append(gc) + self.glyphs.extend(gc.glyphSet()) + self.curr = len(self.glyphs) + + +class GlyphClassName(Expression): + """A glyph class name, such as ``@FRENCH_MARKS``. This must be instantiated + with a :class:`GlyphClassDefinition` object.""" + + def __init__(self, glyphclass, location=None): + Expression.__init__(self, location) + assert isinstance(glyphclass, GlyphClassDefinition) + self.glyphclass = glyphclass + + def glyphSet(self): + """The glyphs in this class as a tuple of :class:`GlyphName` objects.""" + return tuple(self.glyphclass.glyphSet()) + + def asFea(self, indent=""): + return "@" + self.glyphclass.name + + +class MarkClassName(Expression): + """A mark class name, such as ``@FRENCH_MARKS`` defined with ``markClass``. 
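+    For example, ``MarkClassName(MarkClass("FRENCH_MARKS")).asFea()`` returns ``@FRENCH_MARKS``.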
+ This must be instantiated with a :class:`MarkClass` object.""" + + def __init__(self, markClass, location=None): + Expression.__init__(self, location) + assert isinstance(markClass, MarkClass) + self.markClass = markClass + + def glyphSet(self): + """The glyphs in this class as a tuple of :class:`GlyphName` objects.""" + return self.markClass.glyphSet() + + def asFea(self, indent=""): + return "@" + self.markClass.name + + +class AnonymousBlock(Statement): + """An anonymous data block.""" + + def __init__(self, tag, content, location=None): + Statement.__init__(self, location) + self.tag = tag #: string containing the block's "tag" + self.content = content #: block data as string + + def asFea(self, indent=""): + res = "anon {} {{\n".format(self.tag) + res += self.content + res += "}} {};\n\n".format(self.tag) + return res + + +class Block(Statement): + """A block of statements: feature, lookup, etc.""" + + def __init__(self, location=None): + Statement.__init__(self, location) + self.statements = [] #: Statements contained in the block + + def build(self, builder): + """When handed a 'builder' object of comparable interface to + :class:`fontTools.feaLib.builder`, walks the statements in this + block, calling the builder callbacks.""" + for s in self.statements: + s.build(builder) + + def asFea(self, indent=""): + indent += SHIFT + return ( + indent + + ("\n" + indent).join([s.asFea(indent=indent) for s in self.statements]) + + "\n" + ) + + +class FeatureFile(Block): + """The top-level element of the syntax tree, containing the whole feature + file in its ``statements`` attribute.""" + + def __init__(self): + Block.__init__(self, location=None) + self.markClasses = {} # name --> ast.MarkClass + + def asFea(self, indent=""): + return "\n".join(s.asFea(indent=indent) for s in self.statements) + + +class FeatureBlock(Block): + """A named feature block.""" + + def __init__(self, name, use_extension=False, location=None): + Block.__init__(self, location) + self.name, self.use_extension = name, use_extension + + def build(self, builder): + """Call the ``start_feature`` callback on the builder object, visit + all the statements in this feature, and then call ``end_feature``.""" + # TODO(sascha): Handle use_extension. 
+ builder.start_feature(self.location, self.name) + # language exclude_dflt statements modify builder.features_ + # limit them to this block with temporary builder.features_ + features = builder.features_ + builder.features_ = {} + Block.build(self, builder) + for key, value in builder.features_.items(): + features.setdefault(key, []).extend(value) + builder.features_ = features + builder.end_feature() + + def asFea(self, indent=""): + res = indent + "feature %s " % self.name.strip() + if self.use_extension: + res += "useExtension " + res += "{\n" + res += Block.asFea(self, indent=indent) + res += indent + "} %s;\n" % self.name.strip() + return res + + +class NestedBlock(Block): + """A block inside another block, for example when found inside a + ``cvParameters`` block.""" + + def __init__(self, tag, block_name, location=None): + Block.__init__(self, location) + self.tag = tag + self.block_name = block_name + + def build(self, builder): + Block.build(self, builder) + if self.block_name == "ParamUILabelNameID": + builder.add_to_cv_num_named_params(self.tag) + + def asFea(self, indent=""): + res = "{}{} {{\n".format(indent, self.block_name) + res += Block.asFea(self, indent=indent) + res += "{}}};\n".format(indent) + return res + + +class LookupBlock(Block): + """A named lookup, containing ``statements``.""" + + def __init__(self, name, use_extension=False, location=None): + Block.__init__(self, location) + self.name, self.use_extension = name, use_extension + + def build(self, builder): + # TODO(sascha): Handle use_extension. + builder.start_lookup_block(self.location, self.name) + Block.build(self, builder) + builder.end_lookup_block() + + def asFea(self, indent=""): + res = "lookup {} ".format(self.name) + if self.use_extension: + res += "useExtension " + res += "{\n" + res += Block.asFea(self, indent=indent) + res += "{}}} {};\n".format(indent, self.name) + return res + + +class TableBlock(Block): + """A ``table ... { }`` block.""" + + def __init__(self, name, location=None): + Block.__init__(self, location) + self.name = name + + def asFea(self, indent=""): + res = "table {} {{\n".format(self.name.strip()) + res += super(TableBlock, self).asFea(indent=indent) + res += "}} {};\n".format(self.name.strip()) + return res + + +class GlyphClassDefinition(Statement): + """Example: ``@UPPERCASE = [A-Z];``.""" + + def __init__(self, name, glyphs, location=None): + Statement.__init__(self, location) + self.name = name #: class name as a string, without initial ``@`` + self.glyphs = glyphs #: a :class:`GlyphClass` object + + def glyphSet(self): + """The glyphs in this class as a tuple of :class:`GlyphName` objects.""" + return tuple(self.glyphs.glyphSet()) + + def asFea(self, indent=""): + return "@" + self.name + " = " + self.glyphs.asFea() + ";" + + +class GlyphClassDefStatement(Statement): + """Example: ``GlyphClassDef @UPPERCASE, [B], [C], [D];``. 
The parameters + must be either :class:`GlyphClass` or :class:`GlyphClassName` objects, or + ``None``.""" + + def __init__( + self, baseGlyphs, markGlyphs, ligatureGlyphs, componentGlyphs, location=None + ): + Statement.__init__(self, location) + self.baseGlyphs, self.markGlyphs = (baseGlyphs, markGlyphs) + self.ligatureGlyphs = ligatureGlyphs + self.componentGlyphs = componentGlyphs + + def build(self, builder): + """Calls the builder's ``add_glyphClassDef`` callback.""" + base = self.baseGlyphs.glyphSet() if self.baseGlyphs else tuple() + liga = self.ligatureGlyphs.glyphSet() if self.ligatureGlyphs else tuple() + mark = self.markGlyphs.glyphSet() if self.markGlyphs else tuple() + comp = self.componentGlyphs.glyphSet() if self.componentGlyphs else tuple() + builder.add_glyphClassDef(self.location, base, liga, mark, comp) + + def asFea(self, indent=""): + return "GlyphClassDef {}, {}, {}, {};".format( + self.baseGlyphs.asFea() if self.baseGlyphs else "", + self.ligatureGlyphs.asFea() if self.ligatureGlyphs else "", + self.markGlyphs.asFea() if self.markGlyphs else "", + self.componentGlyphs.asFea() if self.componentGlyphs else "", + ) + + +class MarkClass(object): + """One `or more` ``markClass`` statements for the same mark class. + + While glyph classes can be defined only once, the feature file format + allows expanding mark classes with multiple definitions, each using + different glyphs and anchors. The following are two ``MarkClassDefinitions`` + for the same ``MarkClass``:: + + markClass [acute grave] @FRENCH_ACCENTS; + markClass [cedilla] @FRENCH_ACCENTS; + + The ``MarkClass`` object is therefore just a container for a list of + :class:`MarkClassDefinition` statements. + """ + + def __init__(self, name): + self.name = name + self.definitions = [] + self.glyphs = OrderedDict() # glyph --> ast.MarkClassDefinitions + + def addDefinition(self, definition): + """Add a :class:`MarkClassDefinition` statement to this mark class.""" + assert isinstance(definition, MarkClassDefinition) + self.definitions.append(definition) + for glyph in definition.glyphSet(): + if glyph in self.glyphs: + otherLoc = self.glyphs[glyph].location + if otherLoc is None: + end = "" + else: + end = f" at {otherLoc}" + raise FeatureLibError( + "Glyph %s already defined%s" % (glyph, end), definition.location + ) + self.glyphs[glyph] = definition + + def glyphSet(self): + """The glyphs in this class as a tuple of :class:`GlyphName` objects.""" + return tuple(self.glyphs.keys()) + + def asFea(self, indent=""): + res = "\n".join(d.asFea() for d in self.definitions) + return res + + +class MarkClassDefinition(Statement): + """A single ``markClass`` statement. The ``markClass`` should be a + :class:`MarkClass` object, the ``anchor`` an :class:`Anchor` object, + and the ``glyphs`` parameter should be a `glyph-containing object`_ . + + Example: + + .. 
code:: python + + mc = MarkClass("FRENCH_ACCENTS") + mc.addDefinition( MarkClassDefinition(mc, Anchor(350, 800), + GlyphClass([ GlyphName("acute"), GlyphName("grave") ]) + ) ) + mc.addDefinition( MarkClassDefinition(mc, Anchor(350, -200), + GlyphClass([ GlyphName("cedilla") ]) + ) ) + + mc.asFea() + # markClass [acute grave] @FRENCH_ACCENTS; + # markClass [cedilla] @FRENCH_ACCENTS; + + """ + + def __init__(self, markClass, anchor, glyphs, location=None): + Statement.__init__(self, location) + assert isinstance(markClass, MarkClass) + assert isinstance(anchor, Anchor) and isinstance(glyphs, Expression) + self.markClass, self.anchor, self.glyphs = markClass, anchor, glyphs + + def glyphSet(self): + """The glyphs in this class as a tuple of :class:`GlyphName` objects.""" + return self.glyphs.glyphSet() + + def asFea(self, indent=""): + return "markClass {} {} @{};".format( + self.glyphs.asFea(), self.anchor.asFea(), self.markClass.name + ) + + +class AlternateSubstStatement(Statement): + """A ``sub ... from ...`` statement. + + ``prefix``, ``glyph``, ``suffix`` and ``replacement`` should be lists of + `glyph-containing objects`_. ``glyph`` should be a `one element list`.""" + + def __init__(self, prefix, glyph, suffix, replacement, location=None): + Statement.__init__(self, location) + self.prefix, self.glyph, self.suffix = (prefix, glyph, suffix) + self.replacement = replacement + + def build(self, builder): + """Calls the builder's ``add_alternate_subst`` callback.""" + glyph = self.glyph.glyphSet() + assert len(glyph) == 1, glyph + glyph = list(glyph)[0] + prefix = [p.glyphSet() for p in self.prefix] + suffix = [s.glyphSet() for s in self.suffix] + replacement = self.replacement.glyphSet() + builder.add_alternate_subst(self.location, prefix, glyph, suffix, replacement) + + def asFea(self, indent=""): + res = "sub " + if len(self.prefix) or len(self.suffix): + if len(self.prefix): + res += " ".join(map(asFea, self.prefix)) + " " + res += asFea(self.glyph) + "'" # even though we really only use 1 + if len(self.suffix): + res += " " + " ".join(map(asFea, self.suffix)) + else: + res += asFea(self.glyph) + res += " from " + res += asFea(self.replacement) + res += ";" + return res + + +class Anchor(Expression): + """An ``Anchor`` element, used inside a ``pos`` rule. + + If a ``name`` is given, this will be used in preference to the coordinates. + Other values should be integer. 
+ """ + + def __init__( + self, + x, + y, + name=None, + contourpoint=None, + xDeviceTable=None, + yDeviceTable=None, + location=None, + ): + Expression.__init__(self, location) + self.name = name + self.x, self.y, self.contourpoint = x, y, contourpoint + self.xDeviceTable, self.yDeviceTable = xDeviceTable, yDeviceTable + + def asFea(self, indent=""): + if self.name is not None: + return "".format(self.name) + res = "" + exit = self.exitAnchor.asFea() if self.exitAnchor else "" + return "pos cursive {} {} {};".format(self.glyphclass.asFea(), entry, exit) + + +class FeatureReferenceStatement(Statement): + """Example: ``feature salt;``""" + + def __init__(self, featureName, location=None): + Statement.__init__(self, location) + self.location, self.featureName = (location, featureName) + + def build(self, builder): + """Calls the builder object's ``add_feature_reference`` callback.""" + builder.add_feature_reference(self.location, self.featureName) + + def asFea(self, indent=""): + return "feature {};".format(self.featureName) + + +class IgnorePosStatement(Statement): + """An ``ignore pos`` statement, containing `one or more` contexts to ignore. + + ``chainContexts`` should be a list of ``(prefix, glyphs, suffix)`` tuples, + with each of ``prefix``, ``glyphs`` and ``suffix`` being + `glyph-containing objects`_ .""" + + def __init__(self, chainContexts, location=None): + Statement.__init__(self, location) + self.chainContexts = chainContexts + + def build(self, builder): + """Calls the builder object's ``add_chain_context_pos`` callback on each + rule context.""" + for prefix, glyphs, suffix in self.chainContexts: + prefix = [p.glyphSet() for p in prefix] + glyphs = [g.glyphSet() for g in glyphs] + suffix = [s.glyphSet() for s in suffix] + builder.add_chain_context_pos(self.location, prefix, glyphs, suffix, []) + + def asFea(self, indent=""): + contexts = [] + for prefix, glyphs, suffix in self.chainContexts: + res = "" + if len(prefix) or len(suffix): + if len(prefix): + res += " ".join(map(asFea, prefix)) + " " + res += " ".join(g.asFea() + "'" for g in glyphs) + if len(suffix): + res += " " + " ".join(map(asFea, suffix)) + else: + res += " ".join(map(asFea, glyphs)) + contexts.append(res) + return "ignore pos " + ", ".join(contexts) + ";" + + +class IgnoreSubstStatement(Statement): + """An ``ignore sub`` statement, containing `one or more` contexts to ignore. 
+ + ``chainContexts`` should be a list of ``(prefix, glyphs, suffix)`` tuples, + with each of ``prefix``, ``glyphs`` and ``suffix`` being + `glyph-containing objects`_ .""" + + def __init__(self, chainContexts, location=None): + Statement.__init__(self, location) + self.chainContexts = chainContexts + + def build(self, builder): + """Calls the builder object's ``add_chain_context_subst`` callback on + each rule context.""" + for prefix, glyphs, suffix in self.chainContexts: + prefix = [p.glyphSet() for p in prefix] + glyphs = [g.glyphSet() for g in glyphs] + suffix = [s.glyphSet() for s in suffix] + builder.add_chain_context_subst(self.location, prefix, glyphs, suffix, []) + + def asFea(self, indent=""): + contexts = [] + for prefix, glyphs, suffix in self.chainContexts: + res = "" + if len(prefix): + res += " ".join(map(asFea, prefix)) + " " + res += " ".join(g.asFea() + "'" for g in glyphs) + if len(suffix): + res += " " + " ".join(map(asFea, suffix)) + contexts.append(res) + return "ignore sub " + ", ".join(contexts) + ";" + + +class IncludeStatement(Statement): + """An ``include()`` statement.""" + + def __init__(self, filename, location=None): + super(IncludeStatement, self).__init__(location) + self.filename = filename #: String containing name of file to include + + def build(self): + # TODO: consider lazy-loading the including parser/lexer? + raise FeatureLibError( + "Building an include statement is not implemented yet. " + "Instead, use Parser(..., followIncludes=True) for building.", + self.location, + ) + + def asFea(self, indent=""): + return indent + "include(%s);" % self.filename + + +class LanguageStatement(Statement): + """A ``language`` statement within a feature.""" + + def __init__(self, language, include_default=True, required=False, location=None): + Statement.__init__(self, location) + assert len(language) == 4 + self.language = language #: A four-character language tag + self.include_default = include_default #: If false, "exclude_dflt" + self.required = required + + def build(self, builder): + """Call the builder object's ``set_language`` callback.""" + builder.set_language( + location=self.location, + language=self.language, + include_default=self.include_default, + required=self.required, + ) + + def asFea(self, indent=""): + res = "language {}".format(self.language.strip()) + if not self.include_default: + res += " exclude_dflt" + if self.required: + res += " required" + res += ";" + return res + + +class LanguageSystemStatement(Statement): + """A top-level ``languagesystem`` statement.""" + + def __init__(self, script, language, location=None): + Statement.__init__(self, location) + self.script, self.language = (script, language) + + def build(self, builder): + """Calls the builder object's ``add_language_system`` callback.""" + builder.add_language_system(self.location, self.script, self.language) + + def asFea(self, indent=""): + return "languagesystem {} {};".format(self.script, self.language.strip()) + + +class FontRevisionStatement(Statement): + """A ``head`` table ``FontRevision`` statement. 
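+    For example, ``FontRevisionStatement(2.5).asFea()`` produces ``FontRevision 2.500;``.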
``revision`` should be a + number, and will be formatted to three significant decimal places.""" + + def __init__(self, revision, location=None): + Statement.__init__(self, location) + self.revision = revision + + def build(self, builder): + builder.set_font_revision(self.location, self.revision) + + def asFea(self, indent=""): + return "FontRevision {:.3f};".format(self.revision) + + +class LigatureCaretByIndexStatement(Statement): + """A ``GDEF`` table ``LigatureCaretByIndex`` statement. ``glyphs`` should be + a `glyph-containing object`_, and ``carets`` should be a list of integers.""" + + def __init__(self, glyphs, carets, location=None): + Statement.__init__(self, location) + self.glyphs, self.carets = (glyphs, carets) + + def build(self, builder): + """Calls the builder object's ``add_ligatureCaretByIndex_`` callback.""" + glyphs = self.glyphs.glyphSet() + builder.add_ligatureCaretByIndex_(self.location, glyphs, set(self.carets)) + + def asFea(self, indent=""): + return "LigatureCaretByIndex {} {};".format( + self.glyphs.asFea(), " ".join(str(x) for x in self.carets) + ) + + +class LigatureCaretByPosStatement(Statement): + """A ``GDEF`` table ``LigatureCaretByPos`` statement. ``glyphs`` should be + a `glyph-containing object`_, and ``carets`` should be a list of integers.""" + + def __init__(self, glyphs, carets, location=None): + Statement.__init__(self, location) + self.glyphs, self.carets = (glyphs, carets) + + def build(self, builder): + """Calls the builder object's ``add_ligatureCaretByPos_`` callback.""" + glyphs = self.glyphs.glyphSet() + builder.add_ligatureCaretByPos_(self.location, glyphs, set(self.carets)) + + def asFea(self, indent=""): + return "LigatureCaretByPos {} {};".format( + self.glyphs.asFea(), " ".join(str(x) for x in self.carets) + ) + + +class LigatureSubstStatement(Statement): + """A chained contextual substitution statement. + + ``prefix``, ``glyphs``, and ``suffix`` should be lists of + `glyph-containing objects`_; ``replacement`` should be a single + `glyph-containing object`_. + + If ``forceChain`` is True, this is expressed as a chaining rule + (e.g. ``sub f' i' by f_i``) even when no context is given.""" + + def __init__(self, prefix, glyphs, suffix, replacement, forceChain, location=None): + Statement.__init__(self, location) + self.prefix, self.glyphs, self.suffix = (prefix, glyphs, suffix) + self.replacement, self.forceChain = replacement, forceChain + + def build(self, builder): + prefix = [p.glyphSet() for p in self.prefix] + glyphs = [g.glyphSet() for g in self.glyphs] + suffix = [s.glyphSet() for s in self.suffix] + builder.add_ligature_subst( + self.location, prefix, glyphs, suffix, self.replacement, self.forceChain + ) + + def asFea(self, indent=""): + res = "sub " + if len(self.prefix) or len(self.suffix) or self.forceChain: + if len(self.prefix): + res += " ".join(g.asFea() for g in self.prefix) + " " + res += " ".join(g.asFea() + "'" for g in self.glyphs) + if len(self.suffix): + res += " " + " ".join(g.asFea() for g in self.suffix) + else: + res += " ".join(g.asFea() for g in self.glyphs) + res += " by " + res += asFea(self.replacement) + res += ";" + return res + + +class LookupFlagStatement(Statement): + """A ``lookupflag`` statement. 
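+    For example, ``LookupFlagStatement(value=8).asFea()`` returns ``lookupflag IgnoreMarks;``.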
The ``value`` should be an integer value + representing the flags in use, but not including the ``markAttachment`` + class and ``markFilteringSet`` values, which must be specified as + glyph-containing objects.""" + + def __init__( + self, value=0, markAttachment=None, markFilteringSet=None, location=None + ): + Statement.__init__(self, location) + self.value = value + self.markAttachment = markAttachment + self.markFilteringSet = markFilteringSet + + def build(self, builder): + """Calls the builder object's ``set_lookup_flag`` callback.""" + markAttach = None + if self.markAttachment is not None: + markAttach = self.markAttachment.glyphSet() + markFilter = None + if self.markFilteringSet is not None: + markFilter = self.markFilteringSet.glyphSet() + builder.set_lookup_flag(self.location, self.value, markAttach, markFilter) + + def asFea(self, indent=""): + res = [] + flags = ["RightToLeft", "IgnoreBaseGlyphs", "IgnoreLigatures", "IgnoreMarks"] + curr = 1 + for i in range(len(flags)): + if self.value & curr != 0: + res.append(flags[i]) + curr = curr << 1 + if self.markAttachment is not None: + res.append("MarkAttachmentType {}".format(self.markAttachment.asFea())) + if self.markFilteringSet is not None: + res.append("UseMarkFilteringSet {}".format(self.markFilteringSet.asFea())) + if not res: + res = ["0"] + return "lookupflag {};".format(" ".join(res)) + + +class LookupReferenceStatement(Statement): + """Represents a ``lookup ...;`` statement to include a lookup in a feature. + + The ``lookup`` should be a :class:`LookupBlock` object.""" + + def __init__(self, lookup, location=None): + Statement.__init__(self, location) + self.location, self.lookup = (location, lookup) + + def build(self, builder): + """Calls the builder object's ``add_lookup_call`` callback.""" + builder.add_lookup_call(self.lookup.name) + + def asFea(self, indent=""): + return "lookup {};".format(self.lookup.name) + + +class MarkBasePosStatement(Statement): + """A mark-to-base positioning rule. The ``base`` should be a + `glyph-containing object`_. The ``marks`` should be a list of + (:class:`Anchor`, :class:`MarkClass`) tuples.""" + + def __init__(self, base, marks, location=None): + Statement.__init__(self, location) + self.base, self.marks = base, marks + + def build(self, builder): + """Calls the builder object's ``add_mark_base_pos`` callback.""" + builder.add_mark_base_pos(self.location, self.base.glyphSet(), self.marks) + + def asFea(self, indent=""): + res = "pos base {}".format(self.base.asFea()) + for a, m in self.marks: + res += "\n" + indent + SHIFT + "{} mark @{}".format(a.asFea(), m.name) + res += ";" + return res + + +class MarkLigPosStatement(Statement): + """A mark-to-ligature positioning rule. The ``ligatures`` must be a + `glyph-containing object`_. The ``marks`` should be a list of lists: each + element in the top-level list represents a component glyph, and is made + up of a list of (:class:`Anchor`, :class:`MarkClass`) tuples representing + mark attachment points for that position. + + Example:: + + m1 = MarkClass("TOP_MARKS") + m2 = MarkClass("BOTTOM_MARKS") + # ... add definitions to mark classes... 
+ + glyph = GlyphName("lam_meem_jeem") + marks = [ + [ (Anchor(625,1800), m1) ], # Attachments on 1st component (lam) + [ (Anchor(376,-378), m2) ], # Attachments on 2nd component (meem) + [ ] # No attachments on the jeem + ] + mlp = MarkLigPosStatement(glyph, marks) + + mlp.asFea() + # pos ligature lam_meem_jeem mark @TOP_MARKS + # ligComponent mark @BOTTOM_MARKS; + + """ + + def __init__(self, ligatures, marks, location=None): + Statement.__init__(self, location) + self.ligatures, self.marks = ligatures, marks + + def build(self, builder): + """Calls the builder object's ``add_mark_lig_pos`` callback.""" + builder.add_mark_lig_pos(self.location, self.ligatures.glyphSet(), self.marks) + + def asFea(self, indent=""): + res = "pos ligature {}".format(self.ligatures.asFea()) + ligs = [] + for l in self.marks: + temp = "" + if l is None or not len(l): + temp = "\n" + indent + SHIFT * 2 + "" + else: + for a, m in l: + temp += ( + "\n" + + indent + + SHIFT * 2 + + "{} mark @{}".format(a.asFea(), m.name) + ) + ligs.append(temp) + res += ("\n" + indent + SHIFT + "ligComponent").join(ligs) + res += ";" + return res + + +class MarkMarkPosStatement(Statement): + """A mark-to-mark positioning rule. The ``baseMarks`` must be a + `glyph-containing object`_. The ``marks`` should be a list of + (:class:`Anchor`, :class:`MarkClass`) tuples.""" + + def __init__(self, baseMarks, marks, location=None): + Statement.__init__(self, location) + self.baseMarks, self.marks = baseMarks, marks + + def build(self, builder): + """Calls the builder object's ``add_mark_mark_pos`` callback.""" + builder.add_mark_mark_pos(self.location, self.baseMarks.glyphSet(), self.marks) + + def asFea(self, indent=""): + res = "pos mark {}".format(self.baseMarks.asFea()) + for a, m in self.marks: + res += "\n" + indent + SHIFT + "{} mark @{}".format(a.asFea(), m.name) + res += ";" + return res + + +class MultipleSubstStatement(Statement): + """A multiple substitution statement. + + Args: + prefix: a list of `glyph-containing objects`_. + glyph: a single glyph-containing object. + suffix: a list of glyph-containing objects. + replacement: a list of glyph-containing objects. + forceChain: If true, the statement is expressed as a chaining rule + (e.g. ``sub f' i' by f_i``) even when no context is given. 
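+
+    Example (an illustrative sketch; the glyph names are hypothetical)::
+
+        glyph = GlyphName("f_f_i")
+        replacement = [GlyphName("f"), GlyphName("f"), GlyphName("i")]
+        ms = MultipleSubstStatement([], glyph, [], replacement)
+
+        ms.asFea()
+        # sub f_f_i by f f i;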
+ """ + + def __init__( + self, prefix, glyph, suffix, replacement, forceChain=False, location=None + ): + Statement.__init__(self, location) + self.prefix, self.glyph, self.suffix = prefix, glyph, suffix + self.replacement = replacement + self.forceChain = forceChain + + def build(self, builder): + """Calls the builder object's ``add_multiple_subst`` callback.""" + prefix = [p.glyphSet() for p in self.prefix] + suffix = [s.glyphSet() for s in self.suffix] + if hasattr(self.glyph, "glyphSet"): + originals = self.glyph.glyphSet() + else: + originals = [self.glyph] + count = len(originals) + replaces = [] + for r in self.replacement: + if hasattr(r, "glyphSet"): + replace = r.glyphSet() + else: + replace = [r] + if len(replace) == 1 and len(replace) != count: + replace = replace * count + replaces.append(replace) + replaces = list(zip(*replaces)) + + seen_originals = set() + for i, original in enumerate(originals): + if original not in seen_originals: + seen_originals.add(original) + builder.add_multiple_subst( + self.location, + prefix, + original, + suffix, + replaces and replaces[i] or (), + self.forceChain, + ) + + def asFea(self, indent=""): + res = "sub " + if len(self.prefix) or len(self.suffix) or self.forceChain: + if len(self.prefix): + res += " ".join(map(asFea, self.prefix)) + " " + res += asFea(self.glyph) + "'" + if len(self.suffix): + res += " " + " ".join(map(asFea, self.suffix)) + else: + res += asFea(self.glyph) + replacement = self.replacement or [NullGlyph()] + res += " by " + res += " ".join(map(asFea, replacement)) + res += ";" + return res + + +class PairPosStatement(Statement): + """A pair positioning statement. + + ``glyphs1`` and ``glyphs2`` should be `glyph-containing objects`_. + ``valuerecord1`` should be a :class:`ValueRecord` object; + ``valuerecord2`` should be either a :class:`ValueRecord` object or ``None``. + If ``enumerated`` is true, then this is expressed as an + `enumerated pair `_. + """ + + def __init__( + self, + glyphs1, + valuerecord1, + glyphs2, + valuerecord2, + enumerated=False, + location=None, + ): + Statement.__init__(self, location) + self.enumerated = enumerated + self.glyphs1, self.valuerecord1 = glyphs1, valuerecord1 + self.glyphs2, self.valuerecord2 = glyphs2, valuerecord2 + + def build(self, builder): + """Calls a callback on the builder object: + + * If the rule is enumerated, calls ``add_specific_pair_pos`` on each + combination of first and second glyphs. + * If the glyphs are both single :class:`GlyphName` objects, calls + ``add_specific_pair_pos``. + * Else, calls ``add_class_pair_pos``. 
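+
+        As an illustration, an enumerated rule whose two classes contain
+        ``[T V]`` and ``[o e]`` results in four ``add_specific_pair_pos``
+        calls, one per (first, second) glyph combination.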
+ """ + if self.enumerated: + g = [self.glyphs1.glyphSet(), self.glyphs2.glyphSet()] + seen_pair = False + for glyph1, glyph2 in itertools.product(*g): + seen_pair = True + builder.add_specific_pair_pos( + self.location, glyph1, self.valuerecord1, glyph2, self.valuerecord2 + ) + if not seen_pair: + raise FeatureLibError( + "Empty glyph class in positioning rule", self.location + ) + return + + is_specific = isinstance(self.glyphs1, GlyphName) and isinstance( + self.glyphs2, GlyphName + ) + if is_specific: + builder.add_specific_pair_pos( + self.location, + self.glyphs1.glyph, + self.valuerecord1, + self.glyphs2.glyph, + self.valuerecord2, + ) + else: + builder.add_class_pair_pos( + self.location, + self.glyphs1.glyphSet(), + self.valuerecord1, + self.glyphs2.glyphSet(), + self.valuerecord2, + ) + + def asFea(self, indent=""): + res = "enum " if self.enumerated else "" + if self.valuerecord2: + res += "pos {} {} {} {};".format( + self.glyphs1.asFea(), + self.valuerecord1.asFea(), + self.glyphs2.asFea(), + self.valuerecord2.asFea(), + ) + else: + res += "pos {} {} {};".format( + self.glyphs1.asFea(), self.glyphs2.asFea(), self.valuerecord1.asFea() + ) + return res + + +class ReverseChainSingleSubstStatement(Statement): + """A reverse chaining substitution statement. You don't see those every day. + + Note the unusual argument order: ``suffix`` comes `before` ``glyphs``. + ``old_prefix``, ``old_suffix``, ``glyphs`` and ``replacements`` should be + lists of `glyph-containing objects`_. ``glyphs`` and ``replacements`` should + be one-item lists. + """ + + def __init__(self, old_prefix, old_suffix, glyphs, replacements, location=None): + Statement.__init__(self, location) + self.old_prefix, self.old_suffix = old_prefix, old_suffix + self.glyphs = glyphs + self.replacements = replacements + + def build(self, builder): + prefix = [p.glyphSet() for p in self.old_prefix] + suffix = [s.glyphSet() for s in self.old_suffix] + originals = self.glyphs[0].glyphSet() + replaces = self.replacements[0].glyphSet() + if len(replaces) == 1: + replaces = replaces * len(originals) + builder.add_reverse_chain_single_subst( + self.location, prefix, suffix, dict(zip(originals, replaces)) + ) + + def asFea(self, indent=""): + res = "rsub " + if len(self.old_prefix) or len(self.old_suffix): + if len(self.old_prefix): + res += " ".join(asFea(g) for g in self.old_prefix) + " " + res += " ".join(asFea(g) + "'" for g in self.glyphs) + if len(self.old_suffix): + res += " " + " ".join(asFea(g) for g in self.old_suffix) + else: + res += " ".join(map(asFea, self.glyphs)) + res += " by {};".format(" ".join(asFea(g) for g in self.replacements)) + return res + + +class SingleSubstStatement(Statement): + """A single substitution statement. + + Note the unusual argument order: ``prefix`` and suffix come `after` + the replacement ``glyphs``. ``prefix``, ``suffix``, ``glyphs`` and + ``replace`` should be lists of `glyph-containing objects`_. ``glyphs`` and + ``replace`` should be one-item lists. 
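+
+    Example (an illustrative sketch; the glyph names are hypothetical)::
+
+        ss = SingleSubstStatement(
+            [GlyphName("a")], [GlyphName("a.sc")], [], [], False
+        )
+
+        ss.asFea()
+        # sub a by a.sc;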
+ """ + + def __init__(self, glyphs, replace, prefix, suffix, forceChain, location=None): + Statement.__init__(self, location) + self.prefix, self.suffix = prefix, suffix + self.forceChain = forceChain + self.glyphs = glyphs + self.replacements = replace + + def build(self, builder): + """Calls the builder object's ``add_single_subst`` callback.""" + prefix = [p.glyphSet() for p in self.prefix] + suffix = [s.glyphSet() for s in self.suffix] + originals = self.glyphs[0].glyphSet() + replaces = self.replacements[0].glyphSet() + if len(replaces) == 1: + replaces = replaces * len(originals) + builder.add_single_subst( + self.location, + prefix, + suffix, + OrderedDict(zip(originals, replaces)), + self.forceChain, + ) + + def asFea(self, indent=""): + res = "sub " + if len(self.prefix) or len(self.suffix) or self.forceChain: + if len(self.prefix): + res += " ".join(asFea(g) for g in self.prefix) + " " + res += " ".join(asFea(g) + "'" for g in self.glyphs) + if len(self.suffix): + res += " " + " ".join(asFea(g) for g in self.suffix) + else: + res += " ".join(asFea(g) for g in self.glyphs) + res += " by {};".format(" ".join(asFea(g) for g in self.replacements)) + return res + + +class ScriptStatement(Statement): + """A ``script`` statement.""" + + def __init__(self, script, location=None): + Statement.__init__(self, location) + self.script = script #: the script code + + def build(self, builder): + """Calls the builder's ``set_script`` callback.""" + builder.set_script(self.location, self.script) + + def asFea(self, indent=""): + return "script {};".format(self.script.strip()) + + +class SinglePosStatement(Statement): + """A single position statement. ``prefix`` and ``suffix`` should be + lists of `glyph-containing objects`_. + + ``pos`` should be a one-element list containing a (`glyph-containing object`_, + :class:`ValueRecord`) tuple.""" + + def __init__(self, pos, prefix, suffix, forceChain, location=None): + Statement.__init__(self, location) + self.pos, self.prefix, self.suffix = pos, prefix, suffix + self.forceChain = forceChain + + def build(self, builder): + """Calls the builder object's ``add_single_pos`` callback.""" + prefix = [p.glyphSet() for p in self.prefix] + suffix = [s.glyphSet() for s in self.suffix] + pos = [(g.glyphSet(), value) for g, value in self.pos] + builder.add_single_pos(self.location, prefix, suffix, pos, self.forceChain) + + def asFea(self, indent=""): + res = "pos " + if len(self.prefix) or len(self.suffix) or self.forceChain: + if len(self.prefix): + res += " ".join(map(asFea, self.prefix)) + " " + res += " ".join( + [ + asFea(x[0]) + "'" + ((" " + x[1].asFea()) if x[1] else "") + for x in self.pos + ] + ) + if len(self.suffix): + res += " " + " ".join(map(asFea, self.suffix)) + else: + res += " ".join( + [asFea(x[0]) + " " + (x[1].asFea() if x[1] else "") for x in self.pos] + ) + res += ";" + return res + + +class SubtableStatement(Statement): + """Represents a subtable break.""" + + def __init__(self, location=None): + Statement.__init__(self, location) + + def build(self, builder): + """Calls the builder objects's ``add_subtable_break`` callback.""" + builder.add_subtable_break(self.location) + + def asFea(self, indent=""): + return "subtable;" + + +class ValueRecord(Expression): + """Represents a value record.""" + + def __init__( + self, + xPlacement=None, + yPlacement=None, + xAdvance=None, + yAdvance=None, + xPlaDevice=None, + yPlaDevice=None, + xAdvDevice=None, + yAdvDevice=None, + vertical=False, + location=None, + ): + Expression.__init__(self, 
location) + self.xPlacement, self.yPlacement = (xPlacement, yPlacement) + self.xAdvance, self.yAdvance = (xAdvance, yAdvance) + self.xPlaDevice, self.yPlaDevice = (xPlaDevice, yPlaDevice) + self.xAdvDevice, self.yAdvDevice = (xAdvDevice, yAdvDevice) + self.vertical = vertical + + def __eq__(self, other): + return ( + self.xPlacement == other.xPlacement + and self.yPlacement == other.yPlacement + and self.xAdvance == other.xAdvance + and self.yAdvance == other.yAdvance + and self.xPlaDevice == other.xPlaDevice + and self.xAdvDevice == other.xAdvDevice + ) + + def __ne__(self, other): + return not self.__eq__(other) + + def __hash__(self): + return ( + hash(self.xPlacement) + ^ hash(self.yPlacement) + ^ hash(self.xAdvance) + ^ hash(self.yAdvance) + ^ hash(self.xPlaDevice) + ^ hash(self.yPlaDevice) + ^ hash(self.xAdvDevice) + ^ hash(self.yAdvDevice) + ) + + def asFea(self, indent=""): + if not self: + return "" + + x, y = self.xPlacement, self.yPlacement + xAdvance, yAdvance = self.xAdvance, self.yAdvance + xPlaDevice, yPlaDevice = self.xPlaDevice, self.yPlaDevice + xAdvDevice, yAdvDevice = self.xAdvDevice, self.yAdvDevice + vertical = self.vertical + + # Try format A, if possible. + if x is None and y is None: + if xAdvance is None and vertical: + return str(yAdvance) + elif yAdvance is None and not vertical: + return str(xAdvance) + + # Make any remaining None value 0 to avoid generating invalid records. + x = x or 0 + y = y or 0 + xAdvance = xAdvance or 0 + yAdvance = yAdvance or 0 + + # Try format B, if possible. + if ( + xPlaDevice is None + and yPlaDevice is None + and xAdvDevice is None + and yAdvDevice is None + ): + return "<%s %s %s %s>" % (x, y, xAdvance, yAdvance) + + # Last resort is format C. + return "<%s %s %s %s %s %s %s %s>" % ( + x, + y, + xAdvance, + yAdvance, + deviceToString(xPlaDevice), + deviceToString(yPlaDevice), + deviceToString(xAdvDevice), + deviceToString(yAdvDevice), + ) + + def __bool__(self): + return any( + getattr(self, v) is not None + for v in [ + "xPlacement", + "yPlacement", + "xAdvance", + "yAdvance", + "xPlaDevice", + "yPlaDevice", + "xAdvDevice", + "yAdvDevice", + ] + ) + + __nonzero__ = __bool__ + + +class ValueRecordDefinition(Statement): + """Represents a named value record definition.""" + + def __init__(self, name, value, location=None): + Statement.__init__(self, location) + self.name = name #: Value record name as string + self.value = value #: :class:`ValueRecord` object + + def asFea(self, indent=""): + return "valueRecordDef {} {};".format(self.value.asFea(), self.name) + + +def simplify_name_attributes(pid, eid, lid): + if pid == 3 and eid == 1 and lid == 1033: + return "" + elif pid == 1 and eid == 0 and lid == 0: + return "1" + else: + return "{} {} {}".format(pid, eid, lid) + + +class NameRecord(Statement): + """Represents a name record. (`Section 9.e. `_)""" + + def __init__(self, nameID, platformID, platEncID, langID, string, location=None): + Statement.__init__(self, location) + self.nameID = nameID #: Name ID as integer (e.g. 
9 for designer's name) + self.platformID = platformID #: Platform ID as integer + self.platEncID = platEncID #: Platform encoding ID as integer + self.langID = langID #: Language ID as integer + self.string = string #: Name record value + + def build(self, builder): + """Calls the builder object's ``add_name_record`` callback.""" + builder.add_name_record( + self.location, + self.nameID, + self.platformID, + self.platEncID, + self.langID, + self.string, + ) + + def asFea(self, indent=""): + def escape(c, escape_pattern): + # Also escape U+0022 QUOTATION MARK and U+005C REVERSE SOLIDUS + if c >= 0x20 and c <= 0x7E and c not in (0x22, 0x5C): + return chr(c) + else: + return escape_pattern % c + + encoding = getEncoding(self.platformID, self.platEncID, self.langID) + if encoding is None: + raise FeatureLibError("Unsupported encoding", self.location) + s = tobytes(self.string, encoding=encoding) + if encoding == "utf_16_be": + escaped_string = "".join( + [ + escape(byteord(s[i]) * 256 + byteord(s[i + 1]), r"\%04x") + for i in range(0, len(s), 2) + ] + ) + else: + escaped_string = "".join([escape(byteord(b), r"\%02x") for b in s]) + plat = simplify_name_attributes(self.platformID, self.platEncID, self.langID) + if plat != "": + plat += " " + return 'nameid {} {}"{}";'.format(self.nameID, plat, escaped_string) + + +class FeatureNameStatement(NameRecord): + """Represents a ``sizemenuname`` or ``name`` statement.""" + + def build(self, builder): + """Calls the builder object's ``add_featureName`` callback.""" + NameRecord.build(self, builder) + builder.add_featureName(self.nameID) + + def asFea(self, indent=""): + if self.nameID == "size": + tag = "sizemenuname" + else: + tag = "name" + plat = simplify_name_attributes(self.platformID, self.platEncID, self.langID) + if plat != "": + plat += " " + return '{} {}"{}";'.format(tag, plat, self.string) + + +class STATNameStatement(NameRecord): + """Represents a STAT table ``name`` statement.""" + + def asFea(self, indent=""): + plat = simplify_name_attributes(self.platformID, self.platEncID, self.langID) + if plat != "": + plat += " " + return 'name {}"{}";'.format(plat, self.string) + + +class SizeParameters(Statement): + """A ``parameters`` statement.""" + + def __init__(self, DesignSize, SubfamilyID, RangeStart, RangeEnd, location=None): + Statement.__init__(self, location) + self.DesignSize = DesignSize + self.SubfamilyID = SubfamilyID + self.RangeStart = RangeStart + self.RangeEnd = RangeEnd + + def build(self, builder): + """Calls the builder object's ``set_size_parameters`` callback.""" + builder.set_size_parameters( + self.location, + self.DesignSize, + self.SubfamilyID, + self.RangeStart, + self.RangeEnd, + ) + + def asFea(self, indent=""): + res = "parameters {:.1f} {}".format(self.DesignSize, self.SubfamilyID) + if self.RangeStart != 0 or self.RangeEnd != 0: + res += " {} {}".format(int(self.RangeStart * 10), int(self.RangeEnd * 10)) + return res + ";" + + +class CVParametersNameStatement(NameRecord): + """Represent a name statement inside a ``cvParameters`` block.""" + + def __init__( + self, nameID, platformID, platEncID, langID, string, block_name, location=None + ): + NameRecord.__init__( + self, nameID, platformID, platEncID, langID, string, location=location + ) + self.block_name = block_name + + def build(self, builder): + """Calls the builder object's ``add_cv_parameter`` callback.""" + item = "" + if self.block_name == "ParamUILabelNameID": + item = "_{}".format(builder.cv_num_named_params_.get(self.nameID, 0)) + 
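+        # Note: at this point ``self.nameID`` is still the feature tag string;
+        # it is registered with the builder and then replaced by a
+        # (tag, block name) tuple, which ``build_name()`` later resolves to a
+        # font-specific name table ID.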
builder.add_cv_parameter(self.nameID) + self.nameID = (self.nameID, self.block_name + item) + NameRecord.build(self, builder) + + def asFea(self, indent=""): + plat = simplify_name_attributes(self.platformID, self.platEncID, self.langID) + if plat != "": + plat += " " + return 'name {}"{}";'.format(plat, self.string) + + +class CharacterStatement(Statement): + """ + Statement used in cvParameters blocks of Character Variant features (cvXX). + The Unicode value may be written with either decimal or hexadecimal + notation. The value must be preceded by '0x' if it is a hexadecimal value. + The largest Unicode value allowed is 0xFFFFFF. + """ + + def __init__(self, character, tag, location=None): + Statement.__init__(self, location) + self.character = character + self.tag = tag + + def build(self, builder): + """Calls the builder object's ``add_cv_character`` callback.""" + builder.add_cv_character(self.character, self.tag) + + def asFea(self, indent=""): + return "Character {:#x};".format(self.character) + + +class BaseAxis(Statement): + """An axis definition, being either a ``VertAxis.BaseTagList/BaseScriptList`` + pair or a ``HorizAxis.BaseTagList/BaseScriptList`` pair.""" + + def __init__(self, bases, scripts, vertical, location=None): + Statement.__init__(self, location) + self.bases = bases #: A list of baseline tag names as strings + self.scripts = scripts #: A list of script record tuplets (script tag, default baseline tag, base coordinate) + self.vertical = vertical #: Boolean; VertAxis if True, HorizAxis if False + + def build(self, builder): + """Calls the builder object's ``set_base_axis`` callback.""" + builder.set_base_axis(self.bases, self.scripts, self.vertical) + + def asFea(self, indent=""): + direction = "Vert" if self.vertical else "Horiz" + scripts = [ + "{} {} {}".format(a[0], a[1], " ".join(map(str, a[2]))) + for a in self.scripts + ] + return "{}Axis.BaseTagList {};\n{}{}Axis.BaseScriptList {};".format( + direction, " ".join(self.bases), indent, direction, ", ".join(scripts) + ) + + +class OS2Field(Statement): + """An entry in the ``OS/2`` table. 
Most ``values`` should be numbers or + strings, apart from when the key is ``UnicodeRange``, ``CodePageRange`` + or ``Panose``, in which case it should be an array of integers.""" + + def __init__(self, key, value, location=None): + Statement.__init__(self, location) + self.key = key + self.value = value + + def build(self, builder): + """Calls the builder object's ``add_os2_field`` callback.""" + builder.add_os2_field(self.key, self.value) + + def asFea(self, indent=""): + def intarr2str(x): + return " ".join(map(str, x)) + + numbers = ( + "FSType", + "TypoAscender", + "TypoDescender", + "TypoLineGap", + "winAscent", + "winDescent", + "XHeight", + "CapHeight", + "WeightClass", + "WidthClass", + "LowerOpSize", + "UpperOpSize", + ) + ranges = ("UnicodeRange", "CodePageRange") + keywords = dict([(x.lower(), [x, str]) for x in numbers]) + keywords.update([(x.lower(), [x, intarr2str]) for x in ranges]) + keywords["panose"] = ["Panose", intarr2str] + keywords["vendor"] = ["Vendor", lambda y: '"{}"'.format(y)] + if self.key in keywords: + return "{} {};".format( + keywords[self.key][0], keywords[self.key][1](self.value) + ) + return "" # should raise exception + + +class HheaField(Statement): + """An entry in the ``hhea`` table.""" + + def __init__(self, key, value, location=None): + Statement.__init__(self, location) + self.key = key + self.value = value + + def build(self, builder): + """Calls the builder object's ``add_hhea_field`` callback.""" + builder.add_hhea_field(self.key, self.value) + + def asFea(self, indent=""): + fields = ("CaretOffset", "Ascender", "Descender", "LineGap") + keywords = dict([(x.lower(), x) for x in fields]) + return "{} {};".format(keywords[self.key], self.value) + + +class VheaField(Statement): + """An entry in the ``vhea`` table.""" + + def __init__(self, key, value, location=None): + Statement.__init__(self, location) + self.key = key + self.value = value + + def build(self, builder): + """Calls the builder object's ``add_vhea_field`` callback.""" + builder.add_vhea_field(self.key, self.value) + + def asFea(self, indent=""): + fields = ("VertTypoAscender", "VertTypoDescender", "VertTypoLineGap") + keywords = dict([(x.lower(), x) for x in fields]) + return "{} {};".format(keywords[self.key], self.value) + + +class STATDesignAxisStatement(Statement): + """A STAT table Design Axis + + Args: + tag (str): a 4 letter axis tag + axisOrder (int): an int + names (list): a list of :class:`STATNameStatement` objects + """ + + def __init__(self, tag, axisOrder, names, location=None): + Statement.__init__(self, location) + self.tag = tag + self.axisOrder = axisOrder + self.names = names + self.location = location + + def build(self, builder): + builder.addDesignAxis(self, self.location) + + def asFea(self, indent=""): + indent += SHIFT + res = f"DesignAxis {self.tag} {self.axisOrder} {{ \n" + res += ("\n" + indent).join([s.asFea(indent=indent) for s in self.names]) + "\n" + res += "};" + return res + + +class ElidedFallbackName(Statement): + """STAT table ElidedFallbackName + + Args: + names: a list of :class:`STATNameStatement` objects + """ + + def __init__(self, names, location=None): + Statement.__init__(self, location) + self.names = names + self.location = location + + def build(self, builder): + builder.setElidedFallbackName(self.names, self.location) + + def asFea(self, indent=""): + indent += SHIFT + res = "ElidedFallbackName { \n" + res += ("\n" + indent).join([s.asFea(indent=indent) for s in self.names]) + "\n" + res += "};" + return res + + +class 
ElidedFallbackNameID(Statement): + """STAT table ElidedFallbackNameID + + Args: + value: an int pointing to an existing name table name ID + """ + + def __init__(self, value, location=None): + Statement.__init__(self, location) + self.value = value + self.location = location + + def build(self, builder): + builder.setElidedFallbackName(self.value, self.location) + + def asFea(self, indent=""): + return f"ElidedFallbackNameID {self.value};" + + +class STATAxisValueStatement(Statement): + """A STAT table Axis Value Record + + Args: + names (list): a list of :class:`STATNameStatement` objects + locations (list): a list of :class:`AxisValueLocationStatement` objects + flags (int): an int + """ + + def __init__(self, names, locations, flags, location=None): + Statement.__init__(self, location) + self.names = names + self.locations = locations + self.flags = flags + + def build(self, builder): + builder.addAxisValueRecord(self, self.location) + + def asFea(self, indent=""): + res = "AxisValue {\n" + for location in self.locations: + res += location.asFea() + + for nameRecord in self.names: + res += nameRecord.asFea() + res += "\n" + + if self.flags: + flags = ["OlderSiblingFontAttribute", "ElidableAxisValueName"] + flagStrings = [] + curr = 1 + for i in range(len(flags)): + if self.flags & curr != 0: + flagStrings.append(flags[i]) + curr = curr << 1 + res += f"flag {' '.join(flagStrings)};\n" + res += "};" + return res + + +class AxisValueLocationStatement(Statement): + """ + A STAT table Axis Value Location + + Args: + tag (str): a 4 letter axis tag + values (list): a list of ints and/or floats + """ + + def __init__(self, tag, values, location=None): + Statement.__init__(self, location) + self.tag = tag + self.values = values + + def asFea(self, res=""): + res += f"location {self.tag} " + res += f"{' '.join(str(i) for i in self.values)};\n" + return res + + +class ConditionsetStatement(Statement): + """ + A variable layout conditionset + + Args: + name (str): the name of this conditionset + conditions (dict): a dictionary mapping axis tags to a + tuple of (min,max) userspace coordinates. 
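+
+    Example (an illustrative sketch; the name and axis range are hypothetical)::
+
+        cs = ConditionsetStatement("heavy", {"wght": (700, 900)})
+
+        cs.asFea()
+        # conditionset heavy {
+        #     wght 700 900;
+        # } heavy;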
+ """ + + def __init__(self, name, conditions, location=None): + Statement.__init__(self, location) + self.name = name + self.conditions = conditions + + def build(self, builder): + builder.add_conditionset(self.location, self.name, self.conditions) + + def asFea(self, res="", indent=""): + res += indent + f"conditionset {self.name} " + "{\n" + for tag, (minvalue, maxvalue) in self.conditions.items(): + res += indent + SHIFT + f"{tag} {minvalue} {maxvalue};\n" + res += indent + "}" + f" {self.name};\n" + return res + + +class VariationBlock(Block): + """A variation feature block, applicable in a given set of conditions.""" + + def __init__(self, name, conditionset, use_extension=False, location=None): + Block.__init__(self, location) + self.name, self.conditionset, self.use_extension = ( + name, + conditionset, + use_extension, + ) + + def build(self, builder): + """Call the ``start_feature`` callback on the builder object, visit + all the statements in this feature, and then call ``end_feature``.""" + builder.start_feature(self.location, self.name) + if ( + self.conditionset != "NULL" + and self.conditionset not in builder.conditionsets_ + ): + raise FeatureLibError( + f"variation block used undefined conditionset {self.conditionset}", + self.location, + ) + + # language exclude_dflt statements modify builder.features_ + # limit them to this block with temporary builder.features_ + features = builder.features_ + builder.features_ = {} + Block.build(self, builder) + for key, value in builder.features_.items(): + items = builder.feature_variations_.setdefault(key, {}).setdefault( + self.conditionset, [] + ) + items.extend(value) + if key not in features: + features[key] = [] # Ensure we make a feature record + builder.features_ = features + builder.end_feature() + + def asFea(self, indent=""): + res = indent + "variation %s " % self.name.strip() + res += self.conditionset + " " + if self.use_extension: + res += "useExtension " + res += "{\n" + res += Block.asFea(self, indent=indent) + res += indent + "} %s;\n" % self.name.strip() + return res diff --git a/infer_4_47_1/lib/python3.10/site-packages/fontTools/feaLib/builder.py b/infer_4_47_1/lib/python3.10/site-packages/fontTools/feaLib/builder.py new file mode 100644 index 0000000000000000000000000000000000000000..1cfe1c3a137ebfc1c1b4bd8b437266638b284cab --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/fontTools/feaLib/builder.py @@ -0,0 +1,1741 @@ +from fontTools.misc import sstruct +from fontTools.misc.textTools import Tag, tostr, binary2num, safeEval +from fontTools.feaLib.error import FeatureLibError +from fontTools.feaLib.lookupDebugInfo import ( + LookupDebugInfo, + LOOKUP_DEBUG_INFO_KEY, + LOOKUP_DEBUG_ENV_VAR, +) +from fontTools.feaLib.parser import Parser +from fontTools.feaLib.ast import FeatureFile +from fontTools.feaLib.variableScalar import VariableScalar +from fontTools.otlLib import builder as otl +from fontTools.otlLib.maxContextCalc import maxCtxFont +from fontTools.ttLib import newTable, getTableModule +from fontTools.ttLib.tables import otBase, otTables +from fontTools.otlLib.builder import ( + AlternateSubstBuilder, + ChainContextPosBuilder, + ChainContextSubstBuilder, + LigatureSubstBuilder, + MultipleSubstBuilder, + CursivePosBuilder, + MarkBasePosBuilder, + MarkLigPosBuilder, + MarkMarkPosBuilder, + ReverseChainSingleSubstBuilder, + SingleSubstBuilder, + ClassPairPosSubtableBuilder, + PairPosBuilder, + SinglePosBuilder, + ChainContextualRule, +) +from fontTools.otlLib.error import OpenTypeLibError +from 
fontTools.varLib.varStore import OnlineVarStoreBuilder +from fontTools.varLib.builder import buildVarDevTable +from fontTools.varLib.featureVars import addFeatureVariationsRaw +from fontTools.varLib.models import normalizeValue, piecewiseLinearMap +from collections import defaultdict +import copy +import itertools +from io import StringIO +import logging +import warnings +import os + + +log = logging.getLogger(__name__) + + +def addOpenTypeFeatures(font, featurefile, tables=None, debug=False): + """Add features from a file to a font. Note that this replaces any features + currently present. + + Args: + font (feaLib.ttLib.TTFont): The font object. + featurefile: Either a path or file object (in which case we + parse it into an AST), or a pre-parsed AST instance. + tables: If passed, restrict the set of affected tables to those in the + list. + debug: Whether to add source debugging information to the font in the + ``Debg`` table + + """ + builder = Builder(font, featurefile) + builder.build(tables=tables, debug=debug) + + +def addOpenTypeFeaturesFromString( + font, features, filename=None, tables=None, debug=False +): + """Add features from a string to a font. Note that this replaces any + features currently present. + + Args: + font (feaLib.ttLib.TTFont): The font object. + features: A string containing feature code. + filename: The directory containing ``filename`` is used as the root of + relative ``include()`` paths; if ``None`` is provided, the current + directory is assumed. + tables: If passed, restrict the set of affected tables to those in the + list. + debug: Whether to add source debugging information to the font in the + ``Debg`` table + + """ + + featurefile = StringIO(tostr(features)) + if filename: + featurefile.name = filename + addOpenTypeFeatures(font, featurefile, tables=tables, debug=debug) + + +class Builder(object): + supportedTables = frozenset( + Tag(tag) + for tag in [ + "BASE", + "GDEF", + "GPOS", + "GSUB", + "OS/2", + "head", + "hhea", + "name", + "vhea", + "STAT", + ] + ) + + def __init__(self, font, featurefile): + self.font = font + # 'featurefile' can be either a path or file object (in which case we + # parse it into an AST), or a pre-parsed AST instance + if isinstance(featurefile, FeatureFile): + self.parseTree, self.file = featurefile, None + else: + self.parseTree, self.file = None, featurefile + self.glyphMap = font.getReverseGlyphMap() + self.varstorebuilder = None + if "fvar" in font: + self.axes = font["fvar"].axes + self.varstorebuilder = OnlineVarStoreBuilder( + [ax.axisTag for ax in self.axes] + ) + self.default_language_systems_ = set() + self.script_ = None + self.lookupflag_ = 0 + self.lookupflag_markFilterSet_ = None + self.language_systems = set() + self.seen_non_DFLT_script_ = False + self.named_lookups_ = {} + self.cur_lookup_ = None + self.cur_lookup_name_ = None + self.cur_feature_name_ = None + self.lookups_ = [] + self.lookup_locations = {"GSUB": {}, "GPOS": {}} + self.features_ = {} # ('latn', 'DEU ', 'smcp') --> [LookupBuilder*] + self.required_features_ = {} # ('latn', 'DEU ') --> 'scmp' + self.feature_variations_ = {} + # for feature 'aalt' + self.aalt_features_ = [] # [(location, featureName)*], for 'aalt' + self.aalt_location_ = None + self.aalt_alternates_ = {} + # for 'featureNames' + self.featureNames_ = set() + self.featureNames_ids_ = {} + # for 'cvParameters' + self.cv_parameters_ = set() + self.cv_parameters_ids_ = {} + self.cv_num_named_params_ = {} + self.cv_characters_ = defaultdict(list) + # for feature 'size' + 
self.size_parameters_ = None + # for table 'head' + self.fontRevision_ = None # 2.71 + # for table 'name' + self.names_ = [] + # for table 'BASE' + self.base_horiz_axis_ = None + self.base_vert_axis_ = None + # for table 'GDEF' + self.attachPoints_ = {} # "a" --> {3, 7} + self.ligCaretCoords_ = {} # "f_f_i" --> {300, 600} + self.ligCaretPoints_ = {} # "f_f_i" --> {3, 7} + self.glyphClassDefs_ = {} # "fi" --> (2, (file, line, column)) + self.markAttach_ = {} # "acute" --> (4, (file, line, column)) + self.markAttachClassID_ = {} # frozenset({"acute", "grave"}) --> 4 + self.markFilterSets_ = {} # frozenset({"acute", "grave"}) --> 4 + # for table 'OS/2' + self.os2_ = {} + # for table 'hhea' + self.hhea_ = {} + # for table 'vhea' + self.vhea_ = {} + # for table 'STAT' + self.stat_ = {} + # for conditionsets + self.conditionsets_ = {} + # We will often use exactly the same locations (i.e. the font's masters) + # for a large number of variable scalars. Instead of creating a model + # for each, let's share the models. + self.model_cache = {} + + def build(self, tables=None, debug=False): + if self.parseTree is None: + self.parseTree = Parser(self.file, self.glyphMap).parse() + self.parseTree.build(self) + # by default, build all the supported tables + if tables is None: + tables = self.supportedTables + else: + tables = frozenset(tables) + unsupported = tables - self.supportedTables + if unsupported: + unsupported_string = ", ".join(sorted(unsupported)) + raise NotImplementedError( + "The following tables were requested but are unsupported: " + f"{unsupported_string}." + ) + if "GSUB" in tables: + self.build_feature_aalt_() + if "head" in tables: + self.build_head() + if "hhea" in tables: + self.build_hhea() + if "vhea" in tables: + self.build_vhea() + if "name" in tables: + self.build_name() + if "OS/2" in tables: + self.build_OS_2() + if "STAT" in tables: + self.build_STAT() + for tag in ("GPOS", "GSUB"): + if tag not in tables: + continue + table = self.makeTable(tag) + if self.feature_variations_: + self.makeFeatureVariations(table, tag) + if ( + table.ScriptList.ScriptCount > 0 + or table.FeatureList.FeatureCount > 0 + or table.LookupList.LookupCount > 0 + ): + fontTable = self.font[tag] = newTable(tag) + fontTable.table = table + elif tag in self.font: + del self.font[tag] + if any(tag in self.font for tag in ("GPOS", "GSUB")) and "OS/2" in self.font: + self.font["OS/2"].usMaxContext = maxCtxFont(self.font) + if "GDEF" in tables: + gdef = self.buildGDEF() + if gdef: + self.font["GDEF"] = gdef + elif "GDEF" in self.font: + del self.font["GDEF"] + if "BASE" in tables: + base = self.buildBASE() + if base: + self.font["BASE"] = base + elif "BASE" in self.font: + del self.font["BASE"] + if debug or os.environ.get(LOOKUP_DEBUG_ENV_VAR): + self.buildDebg() + + def get_chained_lookup_(self, location, builder_class): + result = builder_class(self.font, location) + result.lookupflag = self.lookupflag_ + result.markFilterSet = self.lookupflag_markFilterSet_ + self.lookups_.append(result) + return result + + def add_lookup_to_feature_(self, lookup, feature_name): + for script, lang in self.language_systems: + key = (script, lang, feature_name) + self.features_.setdefault(key, []).append(lookup) + + def get_lookup_(self, location, builder_class): + if ( + self.cur_lookup_ + and type(self.cur_lookup_) == builder_class + and self.cur_lookup_.lookupflag == self.lookupflag_ + and self.cur_lookup_.markFilterSet == self.lookupflag_markFilterSet_ + ): + return self.cur_lookup_ + if self.cur_lookup_name_ and 
self.cur_lookup_: + raise FeatureLibError( + "Within a named lookup block, all rules must be of " + "the same lookup type and flag", + location, + ) + self.cur_lookup_ = builder_class(self.font, location) + self.cur_lookup_.lookupflag = self.lookupflag_ + self.cur_lookup_.markFilterSet = self.lookupflag_markFilterSet_ + self.lookups_.append(self.cur_lookup_) + if self.cur_lookup_name_: + # We are starting a lookup rule inside a named lookup block. + self.named_lookups_[self.cur_lookup_name_] = self.cur_lookup_ + if self.cur_feature_name_: + # We are starting a lookup rule inside a feature. This includes + # lookup rules inside named lookups inside features. + self.add_lookup_to_feature_(self.cur_lookup_, self.cur_feature_name_) + return self.cur_lookup_ + + def build_feature_aalt_(self): + if not self.aalt_features_ and not self.aalt_alternates_: + return + # > alternate glyphs will be sorted in the order that the source features + # > are named in the aalt definition, not the order of the feature definitions + # > in the file. Alternates defined explicitly ... will precede all others. + # https://github.com/fonttools/fonttools/issues/836 + alternates = {g: list(a) for g, a in self.aalt_alternates_.items()} + for location, name in self.aalt_features_ + [(None, "aalt")]: + feature = [ + (script, lang, feature, lookups) + for (script, lang, feature), lookups in self.features_.items() + if feature == name + ] + # "aalt" does not have to specify its own lookups, but it might. + if not feature and name != "aalt": + warnings.warn("%s: Feature %s has not been defined" % (location, name)) + continue + for script, lang, feature, lookups in feature: + for lookuplist in lookups: + if not isinstance(lookuplist, list): + lookuplist = [lookuplist] + for lookup in lookuplist: + for glyph, alts in lookup.getAlternateGlyphs().items(): + alts_for_glyph = alternates.setdefault(glyph, []) + alts_for_glyph.extend( + g for g in alts if g not in alts_for_glyph + ) + single = { + glyph: repl[0] for glyph, repl in alternates.items() if len(repl) == 1 + } + multi = {glyph: repl for glyph, repl in alternates.items() if len(repl) > 1} + if not single and not multi: + return + self.features_ = { + (script, lang, feature): lookups + for (script, lang, feature), lookups in self.features_.items() + if feature != "aalt" + } + old_lookups = self.lookups_ + self.lookups_ = [] + self.start_feature(self.aalt_location_, "aalt") + if single: + single_lookup = self.get_lookup_(location, SingleSubstBuilder) + single_lookup.mapping = single + if multi: + multi_lookup = self.get_lookup_(location, AlternateSubstBuilder) + multi_lookup.alternates = multi + self.end_feature() + self.lookups_.extend(old_lookups) + + def build_head(self): + if not self.fontRevision_: + return + table = self.font.get("head") + if not table: # this only happens for unit tests + table = self.font["head"] = newTable("head") + table.decompile(b"\0" * 54, self.font) + table.tableVersion = 1.0 + table.created = table.modified = 3406620153 # 2011-12-13 11:22:33 + table.fontRevision = self.fontRevision_ + + def build_hhea(self): + if not self.hhea_: + return + table = self.font.get("hhea") + if not table: # this only happens for unit tests + table = self.font["hhea"] = newTable("hhea") + table.decompile(b"\0" * 36, self.font) + table.tableVersion = 0x00010000 + if "caretoffset" in self.hhea_: + table.caretOffset = self.hhea_["caretoffset"] + if "ascender" in self.hhea_: + table.ascent = self.hhea_["ascender"] + if "descender" in self.hhea_: + table.descent = 
self.hhea_["descender"] + if "linegap" in self.hhea_: + table.lineGap = self.hhea_["linegap"] + + def build_vhea(self): + if not self.vhea_: + return + table = self.font.get("vhea") + if not table: # this only happens for unit tests + table = self.font["vhea"] = newTable("vhea") + table.decompile(b"\0" * 36, self.font) + table.tableVersion = 0x00011000 + if "verttypoascender" in self.vhea_: + table.ascent = self.vhea_["verttypoascender"] + if "verttypodescender" in self.vhea_: + table.descent = self.vhea_["verttypodescender"] + if "verttypolinegap" in self.vhea_: + table.lineGap = self.vhea_["verttypolinegap"] + + def get_user_name_id(self, table): + # Try to find first unused font-specific name id + nameIDs = [name.nameID for name in table.names] + for user_name_id in range(256, 32767): + if user_name_id not in nameIDs: + return user_name_id + + def buildFeatureParams(self, tag): + params = None + if tag == "size": + params = otTables.FeatureParamsSize() + ( + params.DesignSize, + params.SubfamilyID, + params.RangeStart, + params.RangeEnd, + ) = self.size_parameters_ + if tag in self.featureNames_ids_: + params.SubfamilyNameID = self.featureNames_ids_[tag] + else: + params.SubfamilyNameID = 0 + elif tag in self.featureNames_: + if not self.featureNames_ids_: + # name table wasn't selected among the tables to build; skip + pass + else: + assert tag in self.featureNames_ids_ + params = otTables.FeatureParamsStylisticSet() + params.Version = 0 + params.UINameID = self.featureNames_ids_[tag] + elif tag in self.cv_parameters_: + params = otTables.FeatureParamsCharacterVariants() + params.Format = 0 + params.FeatUILabelNameID = self.cv_parameters_ids_.get( + (tag, "FeatUILabelNameID"), 0 + ) + params.FeatUITooltipTextNameID = self.cv_parameters_ids_.get( + (tag, "FeatUITooltipTextNameID"), 0 + ) + params.SampleTextNameID = self.cv_parameters_ids_.get( + (tag, "SampleTextNameID"), 0 + ) + params.NumNamedParameters = self.cv_num_named_params_.get(tag, 0) + params.FirstParamUILabelNameID = self.cv_parameters_ids_.get( + (tag, "ParamUILabelNameID_0"), 0 + ) + params.CharCount = len(self.cv_characters_[tag]) + params.Character = self.cv_characters_[tag] + return params + + def build_name(self): + if not self.names_: + return + table = self.font.get("name") + if not table: # this only happens for unit tests + table = self.font["name"] = newTable("name") + table.names = [] + for name in self.names_: + nameID, platformID, platEncID, langID, string = name + # For featureNames block, nameID is 'feature tag' + # For cvParameters blocks, nameID is ('feature tag', 'block name') + if not isinstance(nameID, int): + tag = nameID + if tag in self.featureNames_: + if tag not in self.featureNames_ids_: + self.featureNames_ids_[tag] = self.get_user_name_id(table) + assert self.featureNames_ids_[tag] is not None + nameID = self.featureNames_ids_[tag] + elif tag[0] in self.cv_parameters_: + if tag not in self.cv_parameters_ids_: + self.cv_parameters_ids_[tag] = self.get_user_name_id(table) + assert self.cv_parameters_ids_[tag] is not None + nameID = self.cv_parameters_ids_[tag] + table.setName(string, nameID, platformID, platEncID, langID) + table.names.sort() + + def build_OS_2(self): + if not self.os2_: + return + table = self.font.get("OS/2") + if not table: # this only happens for unit tests + table = self.font["OS/2"] = newTable("OS/2") + data = b"\0" * sstruct.calcsize(getTableModule("OS/2").OS2_format_0) + table.decompile(data, self.font) + version = 0 + if "fstype" in self.os2_: + table.fsType = 
self.os2_["fstype"] + if "panose" in self.os2_: + panose = getTableModule("OS/2").Panose() + ( + panose.bFamilyType, + panose.bSerifStyle, + panose.bWeight, + panose.bProportion, + panose.bContrast, + panose.bStrokeVariation, + panose.bArmStyle, + panose.bLetterForm, + panose.bMidline, + panose.bXHeight, + ) = self.os2_["panose"] + table.panose = panose + if "typoascender" in self.os2_: + table.sTypoAscender = self.os2_["typoascender"] + if "typodescender" in self.os2_: + table.sTypoDescender = self.os2_["typodescender"] + if "typolinegap" in self.os2_: + table.sTypoLineGap = self.os2_["typolinegap"] + if "winascent" in self.os2_: + table.usWinAscent = self.os2_["winascent"] + if "windescent" in self.os2_: + table.usWinDescent = self.os2_["windescent"] + if "vendor" in self.os2_: + table.achVendID = safeEval("'''" + self.os2_["vendor"] + "'''") + if "weightclass" in self.os2_: + table.usWeightClass = self.os2_["weightclass"] + if "widthclass" in self.os2_: + table.usWidthClass = self.os2_["widthclass"] + if "unicoderange" in self.os2_: + table.setUnicodeRanges(self.os2_["unicoderange"]) + if "codepagerange" in self.os2_: + pages = self.build_codepages_(self.os2_["codepagerange"]) + table.ulCodePageRange1, table.ulCodePageRange2 = pages + version = 1 + if "xheight" in self.os2_: + table.sxHeight = self.os2_["xheight"] + version = 2 + if "capheight" in self.os2_: + table.sCapHeight = self.os2_["capheight"] + version = 2 + if "loweropsize" in self.os2_: + table.usLowerOpticalPointSize = self.os2_["loweropsize"] + version = 5 + if "upperopsize" in self.os2_: + table.usUpperOpticalPointSize = self.os2_["upperopsize"] + version = 5 + + def checkattr(table, attrs): + for attr in attrs: + if not hasattr(table, attr): + setattr(table, attr, 0) + + table.version = max(version, table.version) + # this only happens for unit tests + if version >= 1: + checkattr(table, ("ulCodePageRange1", "ulCodePageRange2")) + if version >= 2: + checkattr( + table, + ( + "sxHeight", + "sCapHeight", + "usDefaultChar", + "usBreakChar", + "usMaxContext", + ), + ) + if version >= 5: + checkattr(table, ("usLowerOpticalPointSize", "usUpperOpticalPointSize")) + + def setElidedFallbackName(self, value, location): + # ElidedFallbackName is a convenience method for setting + # ElidedFallbackNameID so only one can be allowed + for token in ("ElidedFallbackName", "ElidedFallbackNameID"): + if token in self.stat_: + raise FeatureLibError( + f"{token} is already set.", + location, + ) + if isinstance(value, int): + self.stat_["ElidedFallbackNameID"] = value + elif isinstance(value, list): + self.stat_["ElidedFallbackName"] = value + else: + raise AssertionError(value) + + def addDesignAxis(self, designAxis, location): + if "DesignAxes" not in self.stat_: + self.stat_["DesignAxes"] = [] + if designAxis.tag in (r.tag for r in self.stat_["DesignAxes"]): + raise FeatureLibError( + f'DesignAxis already defined for tag "{designAxis.tag}".', + location, + ) + if designAxis.axisOrder in (r.axisOrder for r in self.stat_["DesignAxes"]): + raise FeatureLibError( + f"DesignAxis already defined for axis number {designAxis.axisOrder}.", + location, + ) + self.stat_["DesignAxes"].append(designAxis) + + def addAxisValueRecord(self, axisValueRecord, location): + if "AxisValueRecords" not in self.stat_: + self.stat_["AxisValueRecords"] = [] + # Check for duplicate AxisValueRecords + for record_ in self.stat_["AxisValueRecords"]: + if ( + {n.asFea() for n in record_.names} + == {n.asFea() for n in axisValueRecord.names} + and {n.asFea() for n in 
record_.locations} + == {n.asFea() for n in axisValueRecord.locations} + and record_.flags == axisValueRecord.flags + ): + raise FeatureLibError( + "An AxisValueRecord with these values is already defined.", + location, + ) + self.stat_["AxisValueRecords"].append(axisValueRecord) + + def build_STAT(self): + if not self.stat_: + return + + axes = self.stat_.get("DesignAxes") + if not axes: + raise FeatureLibError("DesignAxes not defined", None) + axisValueRecords = self.stat_.get("AxisValueRecords") + axisValues = {} + format4_locations = [] + for tag in axes: + axisValues[tag.tag] = [] + if axisValueRecords is not None: + for avr in axisValueRecords: + valuesDict = {} + if avr.flags > 0: + valuesDict["flags"] = avr.flags + if len(avr.locations) == 1: + location = avr.locations[0] + values = location.values + if len(values) == 1: # format1 + valuesDict.update({"value": values[0], "name": avr.names}) + if len(values) == 2: # format3 + valuesDict.update( + { + "value": values[0], + "linkedValue": values[1], + "name": avr.names, + } + ) + if len(values) == 3: # format2 + nominal, minVal, maxVal = values + valuesDict.update( + { + "nominalValue": nominal, + "rangeMinValue": minVal, + "rangeMaxValue": maxVal, + "name": avr.names, + } + ) + axisValues[location.tag].append(valuesDict) + else: + valuesDict.update( + { + "location": {i.tag: i.values[0] for i in avr.locations}, + "name": avr.names, + } + ) + format4_locations.append(valuesDict) + + designAxes = [ + { + "ordering": a.axisOrder, + "tag": a.tag, + "name": a.names, + "values": axisValues[a.tag], + } + for a in axes + ] + + nameTable = self.font.get("name") + if not nameTable: # this only happens for unit tests + nameTable = self.font["name"] = newTable("name") + nameTable.names = [] + + if "ElidedFallbackNameID" in self.stat_: + nameID = self.stat_["ElidedFallbackNameID"] + name = nameTable.getDebugName(nameID) + if not name: + raise FeatureLibError( + f"ElidedFallbackNameID {nameID} points " + "to a nameID that does not exist in the " + '"name" table', + None, + ) + elif "ElidedFallbackName" in self.stat_: + nameID = self.stat_["ElidedFallbackName"] + + otl.buildStatTable( + self.font, + designAxes, + locations=format4_locations, + elidedFallbackName=nameID, + ) + + def build_codepages_(self, pages): + pages2bits = { + 1252: 0, + 1250: 1, + 1251: 2, + 1253: 3, + 1254: 4, + 1255: 5, + 1256: 6, + 1257: 7, + 1258: 8, + 874: 16, + 932: 17, + 936: 18, + 949: 19, + 950: 20, + 1361: 21, + 869: 48, + 866: 49, + 865: 50, + 864: 51, + 863: 52, + 862: 53, + 861: 54, + 860: 55, + 857: 56, + 855: 57, + 852: 58, + 775: 59, + 737: 60, + 708: 61, + 850: 62, + 437: 63, + } + bits = [pages2bits[p] for p in pages if p in pages2bits] + pages = [] + for i in range(2): + pages.append("") + for j in range(i * 32, (i + 1) * 32): + if j in bits: + pages[i] += "1" + else: + pages[i] += "0" + return [binary2num(p[::-1]) for p in pages] + + def buildBASE(self): + if not self.base_horiz_axis_ and not self.base_vert_axis_: + return None + base = otTables.BASE() + base.Version = 0x00010000 + base.HorizAxis = self.buildBASEAxis(self.base_horiz_axis_) + base.VertAxis = self.buildBASEAxis(self.base_vert_axis_) + + result = newTable("BASE") + result.table = base + return result + + def buildBASEAxis(self, axis): + if not axis: + return + bases, scripts = axis + axis = otTables.Axis() + axis.BaseTagList = otTables.BaseTagList() + axis.BaseTagList.BaselineTag = bases + axis.BaseTagList.BaseTagCount = len(bases) + axis.BaseScriptList = otTables.BaseScriptList() + 
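+        # Each script record built below references its default baseline by
+        # index into the BaseTagList above and stores one format-1 BaseCoord
+        # per coordinate listed for that script.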
axis.BaseScriptList.BaseScriptRecord = [] + axis.BaseScriptList.BaseScriptCount = len(scripts) + for script in sorted(scripts): + record = otTables.BaseScriptRecord() + record.BaseScriptTag = script[0] + record.BaseScript = otTables.BaseScript() + record.BaseScript.BaseLangSysCount = 0 + record.BaseScript.BaseValues = otTables.BaseValues() + record.BaseScript.BaseValues.DefaultIndex = bases.index(script[1]) + record.BaseScript.BaseValues.BaseCoord = [] + record.BaseScript.BaseValues.BaseCoordCount = len(script[2]) + for c in script[2]: + coord = otTables.BaseCoord() + coord.Format = 1 + coord.Coordinate = c + record.BaseScript.BaseValues.BaseCoord.append(coord) + axis.BaseScriptList.BaseScriptRecord.append(record) + return axis + + def buildGDEF(self): + gdef = otTables.GDEF() + gdef.GlyphClassDef = self.buildGDEFGlyphClassDef_() + gdef.AttachList = otl.buildAttachList(self.attachPoints_, self.glyphMap) + gdef.LigCaretList = otl.buildLigCaretList( + self.ligCaretCoords_, self.ligCaretPoints_, self.glyphMap + ) + gdef.MarkAttachClassDef = self.buildGDEFMarkAttachClassDef_() + gdef.MarkGlyphSetsDef = self.buildGDEFMarkGlyphSetsDef_() + gdef.Version = 0x00010002 if gdef.MarkGlyphSetsDef else 0x00010000 + if self.varstorebuilder: + store = self.varstorebuilder.finish() + if store: + gdef.Version = 0x00010003 + gdef.VarStore = store + varidx_map = store.optimize() + + gdef.remap_device_varidxes(varidx_map) + if "GPOS" in self.font: + self.font["GPOS"].table.remap_device_varidxes(varidx_map) + self.model_cache.clear() + if any( + ( + gdef.GlyphClassDef, + gdef.AttachList, + gdef.LigCaretList, + gdef.MarkAttachClassDef, + gdef.MarkGlyphSetsDef, + ) + ) or hasattr(gdef, "VarStore"): + result = newTable("GDEF") + result.table = gdef + return result + else: + return None + + def buildGDEFGlyphClassDef_(self): + if self.glyphClassDefs_: + classes = {g: c for (g, (c, _)) in self.glyphClassDefs_.items()} + else: + classes = {} + for lookup in self.lookups_: + classes.update(lookup.inferGlyphClasses()) + for markClass in self.parseTree.markClasses.values(): + for markClassDef in markClass.definitions: + for glyph in markClassDef.glyphSet(): + classes[glyph] = 3 + if classes: + result = otTables.GlyphClassDef() + result.classDefs = classes + return result + else: + return None + + def buildGDEFMarkAttachClassDef_(self): + classDefs = {g: c for g, (c, _) in self.markAttach_.items()} + if not classDefs: + return None + result = otTables.MarkAttachClassDef() + result.classDefs = classDefs + return result + + def buildGDEFMarkGlyphSetsDef_(self): + sets = [] + for glyphs, id_ in sorted( + self.markFilterSets_.items(), key=lambda item: item[1] + ): + sets.append(glyphs) + return otl.buildMarkGlyphSetsDef(sets, self.glyphMap) + + def buildDebg(self): + if "Debg" not in self.font: + self.font["Debg"] = newTable("Debg") + self.font["Debg"].data = {} + self.font["Debg"].data[LOOKUP_DEBUG_INFO_KEY] = self.lookup_locations + + def buildLookups_(self, tag): + assert tag in ("GPOS", "GSUB"), tag + for lookup in self.lookups_: + lookup.lookup_index = None + lookups = [] + for lookup in self.lookups_: + if lookup.table != tag: + continue + lookup.lookup_index = len(lookups) + self.lookup_locations[tag][str(lookup.lookup_index)] = LookupDebugInfo( + location=str(lookup.location), + name=self.get_lookup_name_(lookup), + feature=None, + ) + lookups.append(lookup) + otLookups = [] + for l in lookups: + try: + otLookups.append(l.build()) + except OpenTypeLibError as e: + raise FeatureLibError(str(e), e.location) from e + 
except Exception as e: + location = self.lookup_locations[tag][str(l.lookup_index)].location + raise FeatureLibError(str(e), location) from e + return otLookups + + def makeTable(self, tag): + table = getattr(otTables, tag, None)() + table.Version = 0x00010000 + table.ScriptList = otTables.ScriptList() + table.ScriptList.ScriptRecord = [] + table.FeatureList = otTables.FeatureList() + table.FeatureList.FeatureRecord = [] + table.LookupList = otTables.LookupList() + table.LookupList.Lookup = self.buildLookups_(tag) + + # Build a table for mapping (tag, lookup_indices) to feature_index. + # For example, ('liga', (2,3,7)) --> 23. + feature_indices = {} + required_feature_indices = {} # ('latn', 'DEU') --> 23 + scripts = {} # 'latn' --> {'DEU': [23, 24]} for feature #23,24 + # Sort the feature table by feature tag: + # https://github.com/fonttools/fonttools/issues/568 + sortFeatureTag = lambda f: (f[0][2], f[0][1], f[0][0], f[1]) + for key, lookups in sorted(self.features_.items(), key=sortFeatureTag): + script, lang, feature_tag = key + # l.lookup_index will be None when a lookup is not needed + # for the table under construction. For example, substitution + # rules will have no lookup_index while building GPOS tables. + # We also deduplicate lookup indices, as they only get applied once + # within a given feature: + # https://github.com/fonttools/fonttools/issues/2946 + lookup_indices = tuple( + dict.fromkeys( + l.lookup_index for l in lookups if l.lookup_index is not None + ) + ) + + size_feature = tag == "GPOS" and feature_tag == "size" + force_feature = self.any_feature_variations(feature_tag, tag) + if len(lookup_indices) == 0 and not size_feature and not force_feature: + continue + + for ix in lookup_indices: + try: + self.lookup_locations[tag][str(ix)] = self.lookup_locations[tag][ + str(ix) + ]._replace(feature=key) + except KeyError: + warnings.warn( + "feaLib.Builder subclass needs upgrading to " + "stash debug information. See fonttools#2065." + ) + + feature_key = (feature_tag, lookup_indices) + feature_index = feature_indices.get(feature_key) + if feature_index is None: + feature_index = len(table.FeatureList.FeatureRecord) + frec = otTables.FeatureRecord() + frec.FeatureTag = feature_tag + frec.Feature = otTables.Feature() + frec.Feature.FeatureParams = self.buildFeatureParams(feature_tag) + frec.Feature.LookupListIndex = list(lookup_indices) + frec.Feature.LookupCount = len(lookup_indices) + table.FeatureList.FeatureRecord.append(frec) + feature_indices[feature_key] = feature_index + scripts.setdefault(script, {}).setdefault(lang, []).append(feature_index) + if self.required_features_.get((script, lang)) == feature_tag: + required_feature_indices[(script, lang)] = feature_index + + # Build ScriptList. 
+ for script, lang_features in sorted(scripts.items()): + srec = otTables.ScriptRecord() + srec.ScriptTag = script + srec.Script = otTables.Script() + srec.Script.DefaultLangSys = None + srec.Script.LangSysRecord = [] + for lang, feature_indices in sorted(lang_features.items()): + langrec = otTables.LangSysRecord() + langrec.LangSys = otTables.LangSys() + langrec.LangSys.LookupOrder = None + + req_feature_index = required_feature_indices.get((script, lang)) + if req_feature_index is None: + langrec.LangSys.ReqFeatureIndex = 0xFFFF + else: + langrec.LangSys.ReqFeatureIndex = req_feature_index + + langrec.LangSys.FeatureIndex = [ + i for i in feature_indices if i != req_feature_index + ] + langrec.LangSys.FeatureCount = len(langrec.LangSys.FeatureIndex) + + if lang == "dflt": + srec.Script.DefaultLangSys = langrec.LangSys + else: + langrec.LangSysTag = lang + srec.Script.LangSysRecord.append(langrec) + srec.Script.LangSysCount = len(srec.Script.LangSysRecord) + table.ScriptList.ScriptRecord.append(srec) + + table.ScriptList.ScriptCount = len(table.ScriptList.ScriptRecord) + table.FeatureList.FeatureCount = len(table.FeatureList.FeatureRecord) + table.LookupList.LookupCount = len(table.LookupList.Lookup) + return table + + def makeFeatureVariations(self, table, table_tag): + feature_vars = {} + has_any_variations = False + # Sort out which lookups to build, gather their indices + for (_, _, feature_tag), variations in self.feature_variations_.items(): + feature_vars[feature_tag] = [] + for conditionset, builders in variations.items(): + raw_conditionset = self.conditionsets_[conditionset] + indices = [] + for b in builders: + if b.table != table_tag: + continue + assert b.lookup_index is not None + indices.append(b.lookup_index) + has_any_variations = True + feature_vars[feature_tag].append((raw_conditionset, indices)) + + if has_any_variations: + for feature_tag, conditions_and_lookups in feature_vars.items(): + addFeatureVariationsRaw( + self.font, table, conditions_and_lookups, feature_tag + ) + + def any_feature_variations(self, feature_tag, table_tag): + for (_, _, feature), variations in self.feature_variations_.items(): + if feature != feature_tag: + continue + for conditionset, builders in variations.items(): + if any(b.table == table_tag for b in builders): + return True + return False + + def get_lookup_name_(self, lookup): + rev = {v: k for k, v in self.named_lookups_.items()} + if lookup in rev: + return rev[lookup] + return None + + def add_language_system(self, location, script, language): + # OpenType Feature File Specification, section 4.b.i + if script == "DFLT" and language == "dflt" and self.default_language_systems_: + raise FeatureLibError( + 'If "languagesystem DFLT dflt" is present, it must be ' + "the first of the languagesystem statements", + location, + ) + if script == "DFLT": + if self.seen_non_DFLT_script_: + raise FeatureLibError( + 'languagesystems using the "DFLT" script tag must ' + "precede all other languagesystems", + location, + ) + else: + self.seen_non_DFLT_script_ = True + if (script, language) in self.default_language_systems_: + raise FeatureLibError( + '"languagesystem %s %s" has already been specified' + % (script.strip(), language.strip()), + location, + ) + self.default_language_systems_.add((script, language)) + + def get_default_language_systems_(self): + # OpenType Feature File specification, 4.b.i. 
languagesystem: + # If no "languagesystem" statement is present, then the + # implementation must behave exactly as though the following + # statement were present at the beginning of the feature file: + # languagesystem DFLT dflt; + if self.default_language_systems_: + return frozenset(self.default_language_systems_) + else: + return frozenset({("DFLT", "dflt")}) + + def start_feature(self, location, name): + self.language_systems = self.get_default_language_systems_() + self.script_ = "DFLT" + self.cur_lookup_ = None + self.cur_feature_name_ = name + self.lookupflag_ = 0 + self.lookupflag_markFilterSet_ = None + if name == "aalt": + self.aalt_location_ = location + + def end_feature(self): + assert self.cur_feature_name_ is not None + self.cur_feature_name_ = None + self.language_systems = None + self.cur_lookup_ = None + self.lookupflag_ = 0 + self.lookupflag_markFilterSet_ = None + + def start_lookup_block(self, location, name): + if name in self.named_lookups_: + raise FeatureLibError( + 'Lookup "%s" has already been defined' % name, location + ) + if self.cur_feature_name_ == "aalt": + raise FeatureLibError( + "Lookup blocks cannot be placed inside 'aalt' features; " + "move it out, and then refer to it with a lookup statement", + location, + ) + self.cur_lookup_name_ = name + self.named_lookups_[name] = None + self.cur_lookup_ = None + if self.cur_feature_name_ is None: + self.lookupflag_ = 0 + self.lookupflag_markFilterSet_ = None + + def end_lookup_block(self): + assert self.cur_lookup_name_ is not None + self.cur_lookup_name_ = None + self.cur_lookup_ = None + if self.cur_feature_name_ is None: + self.lookupflag_ = 0 + self.lookupflag_markFilterSet_ = None + + def add_lookup_call(self, lookup_name): + assert lookup_name in self.named_lookups_, lookup_name + self.cur_lookup_ = None + lookup = self.named_lookups_[lookup_name] + if lookup is not None: # skip empty named lookup + self.add_lookup_to_feature_(lookup, self.cur_feature_name_) + + def set_font_revision(self, location, revision): + self.fontRevision_ = revision + + def set_language(self, location, language, include_default, required): + assert len(language) == 4 + if self.cur_feature_name_ in ("aalt", "size"): + raise FeatureLibError( + "Language statements are not allowed " + 'within "feature %s"' % self.cur_feature_name_, + location, + ) + if self.cur_feature_name_ is None: + raise FeatureLibError( + "Language statements are not allowed " + "within standalone lookup blocks", + location, + ) + self.cur_lookup_ = None + + key = (self.script_, language, self.cur_feature_name_) + lookups = self.features_.get((key[0], "dflt", key[2])) + if (language == "dflt" or include_default) and lookups: + self.features_[key] = lookups[:] + else: + self.features_[key] = [] + self.language_systems = frozenset([(self.script_, language)]) + + if required: + key = (self.script_, language) + if key in self.required_features_: + raise FeatureLibError( + "Language %s (script %s) has already " + "specified feature %s as its required feature" + % ( + language.strip(), + self.script_.strip(), + self.required_features_[key].strip(), + ), + location, + ) + self.required_features_[key] = self.cur_feature_name_ + + def getMarkAttachClass_(self, location, glyphs): + glyphs = frozenset(glyphs) + id_ = self.markAttachClassID_.get(glyphs) + if id_ is not None: + return id_ + id_ = len(self.markAttachClassID_) + 1 + self.markAttachClassID_[glyphs] = id_ + for glyph in glyphs: + if glyph in self.markAttach_: + _, loc = self.markAttach_[glyph] + raise 
FeatureLibError( + "Glyph %s already has been assigned " + "a MarkAttachmentType at %s" % (glyph, loc), + location, + ) + self.markAttach_[glyph] = (id_, location) + return id_ + + def getMarkFilterSet_(self, location, glyphs): + glyphs = frozenset(glyphs) + id_ = self.markFilterSets_.get(glyphs) + if id_ is not None: + return id_ + id_ = len(self.markFilterSets_) + self.markFilterSets_[glyphs] = id_ + return id_ + + def set_lookup_flag(self, location, value, markAttach, markFilter): + value = value & 0xFF + if markAttach: + markAttachClass = self.getMarkAttachClass_(location, markAttach) + value = value | (markAttachClass << 8) + if markFilter: + markFilterSet = self.getMarkFilterSet_(location, markFilter) + value = value | 0x10 + self.lookupflag_markFilterSet_ = markFilterSet + else: + self.lookupflag_markFilterSet_ = None + self.lookupflag_ = value + + def set_script(self, location, script): + if self.cur_feature_name_ in ("aalt", "size"): + raise FeatureLibError( + "Script statements are not allowed " + 'within "feature %s"' % self.cur_feature_name_, + location, + ) + if self.cur_feature_name_ is None: + raise FeatureLibError( + "Script statements are not allowed " "within standalone lookup blocks", + location, + ) + if self.language_systems == {(script, "dflt")}: + # Nothing to do. + return + self.cur_lookup_ = None + self.script_ = script + self.lookupflag_ = 0 + self.lookupflag_markFilterSet_ = None + self.set_language(location, "dflt", include_default=True, required=False) + + def find_lookup_builders_(self, lookups): + """Helper for building chain contextual substitutions + + Given a list of lookup names, finds the LookupBuilder for each name. + If an input name is None, it gets mapped to a None LookupBuilder. + """ + lookup_builders = [] + for lookuplist in lookups: + if lookuplist is not None: + lookup_builders.append( + [self.named_lookups_.get(l.name) for l in lookuplist] + ) + else: + lookup_builders.append(None) + return lookup_builders + + def add_attach_points(self, location, glyphs, contourPoints): + for glyph in glyphs: + self.attachPoints_.setdefault(glyph, set()).update(contourPoints) + + def add_feature_reference(self, location, featureName): + if self.cur_feature_name_ != "aalt": + raise FeatureLibError( + 'Feature references are only allowed inside "feature aalt"', location + ) + self.aalt_features_.append((location, featureName)) + + def add_featureName(self, tag): + self.featureNames_.add(tag) + + def add_cv_parameter(self, tag): + self.cv_parameters_.add(tag) + + def add_to_cv_num_named_params(self, tag): + """Adds new items to ``self.cv_num_named_params_`` + or increments the count of existing items.""" + if tag in self.cv_num_named_params_: + self.cv_num_named_params_[tag] += 1 + else: + self.cv_num_named_params_[tag] = 1 + + def add_cv_character(self, character, tag): + self.cv_characters_[tag].append(character) + + def set_base_axis(self, bases, scripts, vertical): + if vertical: + self.base_vert_axis_ = (bases, scripts) + else: + self.base_horiz_axis_ = (bases, scripts) + + def set_size_parameters( + self, location, DesignSize, SubfamilyID, RangeStart, RangeEnd + ): + if self.cur_feature_name_ != "size": + raise FeatureLibError( + "Parameters statements are not allowed " + 'within "feature %s"' % self.cur_feature_name_, + location, + ) + self.size_parameters_ = [DesignSize, SubfamilyID, RangeStart, RangeEnd] + for script, lang in self.language_systems: + key = (script, lang, self.cur_feature_name_) + self.features_.setdefault(key, []) + + # GSUB rules + + # 
GSUB 1 + def add_single_subst(self, location, prefix, suffix, mapping, forceChain): + if self.cur_feature_name_ == "aalt": + for from_glyph, to_glyph in mapping.items(): + alts = self.aalt_alternates_.setdefault(from_glyph, []) + if to_glyph not in alts: + alts.append(to_glyph) + return + if prefix or suffix or forceChain: + self.add_single_subst_chained_(location, prefix, suffix, mapping) + return + lookup = self.get_lookup_(location, SingleSubstBuilder) + for from_glyph, to_glyph in mapping.items(): + if from_glyph in lookup.mapping: + if to_glyph == lookup.mapping[from_glyph]: + log.info( + "Removing duplicate single substitution from glyph" + ' "%s" to "%s" at %s', + from_glyph, + to_glyph, + location, + ) + else: + raise FeatureLibError( + 'Already defined rule for replacing glyph "%s" by "%s"' + % (from_glyph, lookup.mapping[from_glyph]), + location, + ) + lookup.mapping[from_glyph] = to_glyph + + # GSUB 2 + def add_multiple_subst( + self, location, prefix, glyph, suffix, replacements, forceChain=False + ): + if prefix or suffix or forceChain: + self.add_multi_subst_chained_(location, prefix, glyph, suffix, replacements) + return + lookup = self.get_lookup_(location, MultipleSubstBuilder) + if glyph in lookup.mapping: + if replacements == lookup.mapping[glyph]: + log.info( + "Removing duplicate multiple substitution from glyph" + ' "%s" to %s%s', + glyph, + replacements, + f" at {location}" if location else "", + ) + else: + raise FeatureLibError( + 'Already defined substitution for glyph "%s"' % glyph, location + ) + lookup.mapping[glyph] = replacements + + # GSUB 3 + def add_alternate_subst(self, location, prefix, glyph, suffix, replacement): + if self.cur_feature_name_ == "aalt": + alts = self.aalt_alternates_.setdefault(glyph, []) + alts.extend(g for g in replacement if g not in alts) + return + if prefix or suffix: + chain = self.get_lookup_(location, ChainContextSubstBuilder) + lookup = self.get_chained_lookup_(location, AlternateSubstBuilder) + chain.rules.append(ChainContextualRule(prefix, [{glyph}], suffix, [lookup])) + else: + lookup = self.get_lookup_(location, AlternateSubstBuilder) + if glyph in lookup.alternates: + raise FeatureLibError( + 'Already defined alternates for glyph "%s"' % glyph, location + ) + # We allow empty replacement glyphs here. 
+ lookup.alternates[glyph] = replacement + + # GSUB 4 + def add_ligature_subst( + self, location, prefix, glyphs, suffix, replacement, forceChain + ): + if prefix or suffix or forceChain: + self.add_ligature_subst_chained_( + location, prefix, glyphs, suffix, replacement + ) + return + else: + lookup = self.get_lookup_(location, LigatureSubstBuilder) + + if not all(glyphs): + raise FeatureLibError("Empty glyph class in substitution", location) + + # OpenType feature file syntax, section 5.d, "Ligature substitution": + # "Since the OpenType specification does not allow ligature + # substitutions to be specified on target sequences that contain + # glyph classes, the implementation software will enumerate + # all specific glyph sequences if glyph classes are detected" + for g in itertools.product(*glyphs): + lookup.ligatures[g] = replacement + + # GSUB 5/6 + def add_chain_context_subst(self, location, prefix, glyphs, suffix, lookups): + if not all(glyphs) or not all(prefix) or not all(suffix): + raise FeatureLibError( + "Empty glyph class in contextual substitution", location + ) + lookup = self.get_lookup_(location, ChainContextSubstBuilder) + lookup.rules.append( + ChainContextualRule( + prefix, glyphs, suffix, self.find_lookup_builders_(lookups) + ) + ) + + def add_single_subst_chained_(self, location, prefix, suffix, mapping): + if not mapping or not all(prefix) or not all(suffix): + raise FeatureLibError( + "Empty glyph class in contextual substitution", location + ) + # https://github.com/fonttools/fonttools/issues/512 + # https://github.com/fonttools/fonttools/issues/2150 + chain = self.get_lookup_(location, ChainContextSubstBuilder) + sub = chain.find_chainable_subst(mapping, SingleSubstBuilder) + if sub is None: + sub = self.get_chained_lookup_(location, SingleSubstBuilder) + sub.mapping.update(mapping) + chain.rules.append( + ChainContextualRule(prefix, [list(mapping.keys())], suffix, [sub]) + ) + + def add_multi_subst_chained_(self, location, prefix, glyph, suffix, replacements): + if not all(prefix) or not all(suffix): + raise FeatureLibError( + "Empty glyph class in contextual substitution", location + ) + # https://github.com/fonttools/fonttools/issues/3551 + chain = self.get_lookup_(location, ChainContextSubstBuilder) + sub = chain.find_chainable_subst({glyph: replacements}, MultipleSubstBuilder) + if sub is None: + sub = self.get_chained_lookup_(location, MultipleSubstBuilder) + sub.mapping[glyph] = replacements + chain.rules.append(ChainContextualRule(prefix, [{glyph}], suffix, [sub])) + + def add_ligature_subst_chained_( + self, location, prefix, glyphs, suffix, replacement + ): + # https://github.com/fonttools/fonttools/issues/3701 + if not all(prefix) or not all(suffix): + raise FeatureLibError( + "Empty glyph class in contextual substitution", location + ) + chain = self.get_lookup_(location, ChainContextSubstBuilder) + sub = chain.find_chainable_ligature_subst(glyphs, replacement) + if sub is None: + sub = self.get_chained_lookup_(location, LigatureSubstBuilder) + + for g in itertools.product(*glyphs): + sub.ligatures[g] = replacement + + chain.rules.append(ChainContextualRule(prefix, glyphs, suffix, [sub])) + + # GSUB 8 + def add_reverse_chain_single_subst(self, location, old_prefix, old_suffix, mapping): + if not mapping: + raise FeatureLibError("Empty glyph class in substitution", location) + lookup = self.get_lookup_(location, ReverseChainSingleSubstBuilder) + lookup.rules.append((old_prefix, old_suffix, mapping)) + + # GPOS rules + + # GPOS 1 + def 
add_single_pos(self, location, prefix, suffix, pos, forceChain): + if prefix or suffix or forceChain: + self.add_single_pos_chained_(location, prefix, suffix, pos) + else: + lookup = self.get_lookup_(location, SinglePosBuilder) + for glyphs, value in pos: + if not glyphs: + raise FeatureLibError( + "Empty glyph class in positioning rule", location + ) + otValueRecord = self.makeOpenTypeValueRecord( + location, value, pairPosContext=False + ) + for glyph in glyphs: + try: + lookup.add_pos(location, glyph, otValueRecord) + except OpenTypeLibError as e: + raise FeatureLibError(str(e), e.location) from e + + # GPOS 2 + def add_class_pair_pos(self, location, glyphclass1, value1, glyphclass2, value2): + if not glyphclass1 or not glyphclass2: + raise FeatureLibError("Empty glyph class in positioning rule", location) + lookup = self.get_lookup_(location, PairPosBuilder) + v1 = self.makeOpenTypeValueRecord(location, value1, pairPosContext=True) + v2 = self.makeOpenTypeValueRecord(location, value2, pairPosContext=True) + lookup.addClassPair(location, glyphclass1, v1, glyphclass2, v2) + + def add_specific_pair_pos(self, location, glyph1, value1, glyph2, value2): + if not glyph1 or not glyph2: + raise FeatureLibError("Empty glyph class in positioning rule", location) + lookup = self.get_lookup_(location, PairPosBuilder) + v1 = self.makeOpenTypeValueRecord(location, value1, pairPosContext=True) + v2 = self.makeOpenTypeValueRecord(location, value2, pairPosContext=True) + lookup.addGlyphPair(location, glyph1, v1, glyph2, v2) + + # GPOS 3 + def add_cursive_pos(self, location, glyphclass, entryAnchor, exitAnchor): + if not glyphclass: + raise FeatureLibError("Empty glyph class in positioning rule", location) + lookup = self.get_lookup_(location, CursivePosBuilder) + lookup.add_attachment( + location, + glyphclass, + self.makeOpenTypeAnchor(location, entryAnchor), + self.makeOpenTypeAnchor(location, exitAnchor), + ) + + # GPOS 4 + def add_mark_base_pos(self, location, bases, marks): + builder = self.get_lookup_(location, MarkBasePosBuilder) + self.add_marks_(location, builder, marks) + if not bases: + raise FeatureLibError("Empty glyph class in positioning rule", location) + for baseAnchor, markClass in marks: + otBaseAnchor = self.makeOpenTypeAnchor(location, baseAnchor) + for base in bases: + builder.bases.setdefault(base, {})[markClass.name] = otBaseAnchor + + # GPOS 5 + def add_mark_lig_pos(self, location, ligatures, components): + builder = self.get_lookup_(location, MarkLigPosBuilder) + componentAnchors = [] + if not ligatures: + raise FeatureLibError("Empty glyph class in positioning rule", location) + for marks in components: + anchors = {} + self.add_marks_(location, builder, marks) + for ligAnchor, markClass in marks: + anchors[markClass.name] = self.makeOpenTypeAnchor(location, ligAnchor) + componentAnchors.append(anchors) + for glyph in ligatures: + builder.ligatures[glyph] = componentAnchors + + # GPOS 6 + def add_mark_mark_pos(self, location, baseMarks, marks): + builder = self.get_lookup_(location, MarkMarkPosBuilder) + self.add_marks_(location, builder, marks) + if not baseMarks: + raise FeatureLibError("Empty glyph class in positioning rule", location) + for baseAnchor, markClass in marks: + otBaseAnchor = self.makeOpenTypeAnchor(location, baseAnchor) + for baseMark in baseMarks: + builder.baseMarks.setdefault(baseMark, {})[ + markClass.name + ] = otBaseAnchor + + # GPOS 7/8 + def add_chain_context_pos(self, location, prefix, glyphs, suffix, lookups): + if not all(glyphs) or not all(prefix) 
or not all(suffix): + raise FeatureLibError( + "Empty glyph class in contextual positioning rule", location + ) + lookup = self.get_lookup_(location, ChainContextPosBuilder) + lookup.rules.append( + ChainContextualRule( + prefix, glyphs, suffix, self.find_lookup_builders_(lookups) + ) + ) + + def add_single_pos_chained_(self, location, prefix, suffix, pos): + if not pos or not all(prefix) or not all(suffix): + raise FeatureLibError( + "Empty glyph class in contextual positioning rule", location + ) + # https://github.com/fonttools/fonttools/issues/514 + chain = self.get_lookup_(location, ChainContextPosBuilder) + targets = [] + for _, _, _, lookups in chain.rules: + targets.extend(lookups) + subs = [] + for glyphs, value in pos: + if value is None: + subs.append(None) + continue + otValue = self.makeOpenTypeValueRecord( + location, value, pairPosContext=False + ) + sub = chain.find_chainable_single_pos(targets, glyphs, otValue) + if sub is None: + sub = self.get_chained_lookup_(location, SinglePosBuilder) + targets.append(sub) + for glyph in glyphs: + sub.add_pos(location, glyph, otValue) + subs.append(sub) + assert len(pos) == len(subs), (pos, subs) + chain.rules.append( + ChainContextualRule(prefix, [g for g, v in pos], suffix, subs) + ) + + def add_marks_(self, location, lookupBuilder, marks): + """Helper for add_mark_{base,liga,mark}_pos.""" + for _, markClass in marks: + for markClassDef in markClass.definitions: + for mark in markClassDef.glyphs.glyphSet(): + if mark not in lookupBuilder.marks: + otMarkAnchor = self.makeOpenTypeAnchor( + location, copy.deepcopy(markClassDef.anchor) + ) + lookupBuilder.marks[mark] = (markClass.name, otMarkAnchor) + else: + existingMarkClass = lookupBuilder.marks[mark][0] + if markClass.name != existingMarkClass: + raise FeatureLibError( + "Glyph %s cannot be in both @%s and @%s" + % (mark, existingMarkClass, markClass.name), + location, + ) + + def add_subtable_break(self, location): + self.cur_lookup_.add_subtable_break(location) + + def setGlyphClass_(self, location, glyph, glyphClass): + oldClass, oldLocation = self.glyphClassDefs_.get(glyph, (None, None)) + if oldClass and oldClass != glyphClass: + raise FeatureLibError( + "Glyph %s was assigned to a different class at %s" + % (glyph, oldLocation), + location, + ) + self.glyphClassDefs_[glyph] = (glyphClass, location) + + def add_glyphClassDef( + self, location, baseGlyphs, ligatureGlyphs, markGlyphs, componentGlyphs + ): + for glyph in baseGlyphs: + self.setGlyphClass_(location, glyph, 1) + for glyph in ligatureGlyphs: + self.setGlyphClass_(location, glyph, 2) + for glyph in markGlyphs: + self.setGlyphClass_(location, glyph, 3) + for glyph in componentGlyphs: + self.setGlyphClass_(location, glyph, 4) + + def add_ligatureCaretByIndex_(self, location, glyphs, carets): + for glyph in glyphs: + if glyph not in self.ligCaretPoints_: + self.ligCaretPoints_[glyph] = carets + + def makeLigCaret(self, location, caret): + if not isinstance(caret, VariableScalar): + return caret + default, device = self.makeVariablePos(location, caret) + if device is not None: + return (default, device) + return default + + def add_ligatureCaretByPos_(self, location, glyphs, carets): + carets = [self.makeLigCaret(location, caret) for caret in carets] + for glyph in glyphs: + if glyph not in self.ligCaretCoords_: + self.ligCaretCoords_[glyph] = carets + + def add_name_record(self, location, nameID, platformID, platEncID, langID, string): + self.names_.append([nameID, platformID, platEncID, langID, string]) + + def 
add_os2_field(self, key, value): + self.os2_[key] = value + + def add_hhea_field(self, key, value): + self.hhea_[key] = value + + def add_vhea_field(self, key, value): + self.vhea_[key] = value + + def add_conditionset(self, location, key, value): + if "fvar" not in self.font: + raise FeatureLibError( + "Cannot add feature variations to a font without an 'fvar' table", + location, + ) + + # Normalize + axisMap = { + axis.axisTag: (axis.minValue, axis.defaultValue, axis.maxValue) + for axis in self.axes + } + + value = { + tag: ( + normalizeValue(bottom, axisMap[tag]), + normalizeValue(top, axisMap[tag]), + ) + for tag, (bottom, top) in value.items() + } + + # NOTE: This might result in rounding errors (off-by-ones) compared to + # rules in Designspace files, since we're working with what's in the + # `avar` table rather than the original values. + if "avar" in self.font: + mapping = self.font["avar"].segments + value = { + axis: tuple( + piecewiseLinearMap(v, mapping[axis]) if axis in mapping else v + for v in condition_range + ) + for axis, condition_range in value.items() + } + + self.conditionsets_[key] = value + + def makeVariablePos(self, location, varscalar): + if not self.varstorebuilder: + raise FeatureLibError( + "Can't define a variable scalar in a non-variable font", location + ) + + varscalar.axes = self.axes + if not varscalar.does_vary: + return varscalar.default, None + + default, index = varscalar.add_to_variation_store( + self.varstorebuilder, self.model_cache, self.font.get("avar") + ) + + device = None + if index is not None and index != 0xFFFFFFFF: + device = buildVarDevTable(index) + + return default, device + + def makeAnchorPos(self, varscalar, deviceTable, location): + device = None + if not isinstance(varscalar, VariableScalar): + if deviceTable is not None: + device = otl.buildDevice(dict(deviceTable)) + return varscalar, device + default, device = self.makeVariablePos(location, varscalar) + if device is not None and deviceTable is not None: + raise FeatureLibError( + "Can't define a device coordinate and variable scalar", location + ) + return default, device + + def makeOpenTypeAnchor(self, location, anchor): + """ast.Anchor --> otTables.Anchor""" + if anchor is None: + return None + deviceX, deviceY = None, None + if anchor.xDeviceTable is not None: + deviceX = otl.buildDevice(dict(anchor.xDeviceTable)) + if anchor.yDeviceTable is not None: + deviceY = otl.buildDevice(dict(anchor.yDeviceTable)) + x, deviceX = self.makeAnchorPos(anchor.x, anchor.xDeviceTable, location) + y, deviceY = self.makeAnchorPos(anchor.y, anchor.yDeviceTable, location) + otlanchor = otl.buildAnchor(x, y, anchor.contourpoint, deviceX, deviceY) + return otlanchor + + _VALUEREC_ATTRS = { + name[0].lower() + name[1:]: (name, isDevice) + for _, name, isDevice, _ in otBase.valueRecordFormat + if not name.startswith("Reserved") + } + + def makeOpenTypeValueRecord(self, location, v, pairPosContext): + """ast.ValueRecord --> otBase.ValueRecord""" + if not v: + return None + + vr = {} + for astName, (otName, isDevice) in self._VALUEREC_ATTRS.items(): + val = getattr(v, astName, None) + if not val: + continue + if isDevice: + vr[otName] = otl.buildDevice(dict(val)) + elif isinstance(val, VariableScalar): + otDeviceName = otName[0:4] + "Device" + feaDeviceName = otDeviceName[0].lower() + otDeviceName[1:] + if getattr(v, feaDeviceName): + raise FeatureLibError( + "Can't define a device coordinate and variable scalar", location + ) + vr[otName], device = self.makeVariablePos(location, val) + if device 
is not None: + vr[otDeviceName] = device + else: + vr[otName] = val + + if pairPosContext and not vr: + vr = {"YAdvance": 0} if v.vertical else {"XAdvance": 0} + valRec = otl.buildValue(vr) + return valRec diff --git a/infer_4_47_1/lib/python3.10/site-packages/fontTools/feaLib/lexer.c b/infer_4_47_1/lib/python3.10/site-packages/fontTools/feaLib/lexer.c new file mode 100644 index 0000000000000000000000000000000000000000..f8ab1ee3da183eb48f18fe018f4dece051118a42 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/fontTools/feaLib/lexer.c @@ -0,0 +1,17986 @@ +/* Generated by Cython 3.0.11 */ + +/* BEGIN: Cython Metadata +{ + "distutils": { + "name": "fontTools.feaLib.lexer", + "sources": [ + "Lib/fontTools/feaLib/lexer.py" + ] + }, + "module_name": "fontTools.feaLib.lexer" +} +END: Cython Metadata */ + +#ifndef PY_SSIZE_T_CLEAN +#define PY_SSIZE_T_CLEAN +#endif /* PY_SSIZE_T_CLEAN */ +#if defined(CYTHON_LIMITED_API) && 0 + #ifndef Py_LIMITED_API + #if CYTHON_LIMITED_API+0 > 0x03030000 + #define Py_LIMITED_API CYTHON_LIMITED_API + #else + #define Py_LIMITED_API 0x03030000 + #endif + #endif +#endif + +#include "Python.h" +#ifndef Py_PYTHON_H + #error Python headers needed to compile C extensions, please install development version of Python. +#elif PY_VERSION_HEX < 0x02070000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) + #error Cython requires Python 2.7+ or Python 3.3+. +#else +#if defined(CYTHON_LIMITED_API) && CYTHON_LIMITED_API +#define __PYX_EXTRA_ABI_MODULE_NAME "limited" +#else +#define __PYX_EXTRA_ABI_MODULE_NAME "" +#endif +#define CYTHON_ABI "3_0_11" __PYX_EXTRA_ABI_MODULE_NAME +#define __PYX_ABI_MODULE_NAME "_cython_" CYTHON_ABI +#define __PYX_TYPE_MODULE_PREFIX __PYX_ABI_MODULE_NAME "." +#define CYTHON_HEX_VERSION 0x03000BF0 +#define CYTHON_FUTURE_DIVISION 1 +#include +#ifndef offsetof + #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) +#endif +#if !defined(_WIN32) && !defined(WIN32) && !defined(MS_WINDOWS) + #ifndef __stdcall + #define __stdcall + #endif + #ifndef __cdecl + #define __cdecl + #endif + #ifndef __fastcall + #define __fastcall + #endif +#endif +#ifndef DL_IMPORT + #define DL_IMPORT(t) t +#endif +#ifndef DL_EXPORT + #define DL_EXPORT(t) t +#endif +#define __PYX_COMMA , +#ifndef HAVE_LONG_LONG + #define HAVE_LONG_LONG +#endif +#ifndef PY_LONG_LONG + #define PY_LONG_LONG LONG_LONG +#endif +#ifndef Py_HUGE_VAL + #define Py_HUGE_VAL HUGE_VAL +#endif +#define __PYX_LIMITED_VERSION_HEX PY_VERSION_HEX +#if defined(GRAALVM_PYTHON) + /* For very preliminary testing purposes. Most variables are set the same as PyPy. 
+ The existence of this section does not imply that anything works or is even tested */ + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #define CYTHON_COMPILING_IN_LIMITED_API 0 + #define CYTHON_COMPILING_IN_GRAAL 1 + #define CYTHON_COMPILING_IN_NOGIL 0 + #undef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 0 + #undef CYTHON_USE_TYPE_SPECS + #define CYTHON_USE_TYPE_SPECS 0 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #if PY_VERSION_HEX < 0x03050000 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #undef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #undef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 1 + #undef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 0 + #undef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 0 + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_GIL + #define CYTHON_FAST_GIL 0 + #undef CYTHON_METH_FASTCALL + #define CYTHON_METH_FASTCALL 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #ifndef CYTHON_PEP487_INIT_SUBCLASS + #define CYTHON_PEP487_INIT_SUBCLASS (PY_MAJOR_VERSION >= 3) + #endif + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 1 + #undef CYTHON_USE_MODULE_STATE + #define CYTHON_USE_MODULE_STATE 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 + #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC + #define CYTHON_UPDATE_DESCRIPTOR_DOC 0 + #endif + #undef CYTHON_USE_FREELISTS + #define CYTHON_USE_FREELISTS 0 +#elif defined(PYPY_VERSION) + #define CYTHON_COMPILING_IN_PYPY 1 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #define CYTHON_COMPILING_IN_LIMITED_API 0 + #define CYTHON_COMPILING_IN_GRAAL 0 + #define CYTHON_COMPILING_IN_NOGIL 0 + #undef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 0 + #ifndef CYTHON_USE_TYPE_SPECS + #define CYTHON_USE_TYPE_SPECS 0 + #endif + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #if PY_VERSION_HEX < 0x03050000 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #undef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #undef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 1 + #undef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 0 + #undef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 0 + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_GIL + #define CYTHON_FAST_GIL 0 + #undef CYTHON_METH_FASTCALL + #define CYTHON_METH_FASTCALL 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #ifndef CYTHON_PEP487_INIT_SUBCLASS + #define CYTHON_PEP487_INIT_SUBCLASS (PY_MAJOR_VERSION >= 3) + 
#endif + #if PY_VERSION_HEX < 0x03090000 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #elif !defined(CYTHON_PEP489_MULTI_PHASE_INIT) + #define CYTHON_PEP489_MULTI_PHASE_INIT 1 + #endif + #undef CYTHON_USE_MODULE_STATE + #define CYTHON_USE_MODULE_STATE 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1 && PYPY_VERSION_NUM >= 0x07030C00) + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 + #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC + #define CYTHON_UPDATE_DESCRIPTOR_DOC 0 + #endif + #undef CYTHON_USE_FREELISTS + #define CYTHON_USE_FREELISTS 0 +#elif defined(CYTHON_LIMITED_API) + #ifdef Py_LIMITED_API + #undef __PYX_LIMITED_VERSION_HEX + #define __PYX_LIMITED_VERSION_HEX Py_LIMITED_API + #endif + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #define CYTHON_COMPILING_IN_LIMITED_API 1 + #define CYTHON_COMPILING_IN_GRAAL 0 + #define CYTHON_COMPILING_IN_NOGIL 0 + #undef CYTHON_CLINE_IN_TRACEBACK + #define CYTHON_CLINE_IN_TRACEBACK 0 + #undef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 0 + #undef CYTHON_USE_TYPE_SPECS + #define CYTHON_USE_TYPE_SPECS 1 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #undef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 0 + #ifndef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #endif + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #undef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 0 + #undef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 0 + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_GIL + #define CYTHON_FAST_GIL 0 + #undef CYTHON_METH_FASTCALL + #define CYTHON_METH_FASTCALL 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #ifndef CYTHON_PEP487_INIT_SUBCLASS + #define CYTHON_PEP487_INIT_SUBCLASS 1 + #endif + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_MODULE_STATE + #define CYTHON_USE_MODULE_STATE 1 + #ifndef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 + #endif + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 + #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC + #define CYTHON_UPDATE_DESCRIPTOR_DOC 0 + #endif + #undef CYTHON_USE_FREELISTS + #define CYTHON_USE_FREELISTS 0 +#elif defined(Py_GIL_DISABLED) || defined(Py_NOGIL) + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #define CYTHON_COMPILING_IN_LIMITED_API 0 + #define CYTHON_COMPILING_IN_GRAAL 0 + #define CYTHON_COMPILING_IN_NOGIL 1 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #ifndef CYTHON_USE_TYPE_SPECS + #define CYTHON_USE_TYPE_SPECS 0 + #endif + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #ifndef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #ifndef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #endif + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #ifndef 
CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_GIL + #define CYTHON_FAST_GIL 0 + #ifndef CYTHON_METH_FASTCALL + #define CYTHON_METH_FASTCALL 1 + #endif + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #ifndef CYTHON_PEP487_INIT_SUBCLASS + #define CYTHON_PEP487_INIT_SUBCLASS 1 + #endif + #ifndef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 1 + #endif + #ifndef CYTHON_USE_MODULE_STATE + #define CYTHON_USE_MODULE_STATE 0 + #endif + #ifndef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 1 + #endif + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 + #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC + #define CYTHON_UPDATE_DESCRIPTOR_DOC 1 + #endif + #ifndef CYTHON_USE_FREELISTS + #define CYTHON_USE_FREELISTS 0 + #endif +#else + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_CPYTHON 1 + #define CYTHON_COMPILING_IN_LIMITED_API 0 + #define CYTHON_COMPILING_IN_GRAAL 0 + #define CYTHON_COMPILING_IN_NOGIL 0 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #ifndef CYTHON_USE_TYPE_SPECS + #define CYTHON_USE_TYPE_SPECS 0 + #endif + #ifndef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 1 + #endif + #if PY_MAJOR_VERSION < 3 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #ifndef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 1 + #endif + #ifndef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 1 + #endif + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #if PY_VERSION_HEX < 0x030300F0 || PY_VERSION_HEX >= 0x030B00A2 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #elif !defined(CYTHON_USE_UNICODE_WRITER) + #define CYTHON_USE_UNICODE_WRITER 1 + #endif + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #ifndef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 1 + #endif + #ifndef CYTHON_FAST_GIL + #define CYTHON_FAST_GIL (PY_MAJOR_VERSION < 3 || PY_VERSION_HEX >= 0x03060000 && PY_VERSION_HEX < 0x030C00A6) + #endif + #ifndef CYTHON_METH_FASTCALL + #define CYTHON_METH_FASTCALL (PY_VERSION_HEX >= 0x030700A1) + #endif + #ifndef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 1 + #endif + #ifndef CYTHON_PEP487_INIT_SUBCLASS + #define CYTHON_PEP487_INIT_SUBCLASS 1 + #endif + #if PY_VERSION_HEX < 0x03050000 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #elif !defined(CYTHON_PEP489_MULTI_PHASE_INIT) + #define CYTHON_PEP489_MULTI_PHASE_INIT 1 + #endif + #ifndef CYTHON_USE_MODULE_STATE + #define CYTHON_USE_MODULE_STATE 0 + #endif + #if PY_VERSION_HEX < 0x030400a1 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 + #elif 
!defined(CYTHON_USE_TP_FINALIZE) + #define CYTHON_USE_TP_FINALIZE 1 + #endif + #if PY_VERSION_HEX < 0x030600B1 + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #elif !defined(CYTHON_USE_DICT_VERSIONS) + #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX < 0x030C00A5) + #endif + #if PY_VERSION_HEX < 0x030700A3 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 + #elif !defined(CYTHON_USE_EXC_INFO_STACK) + #define CYTHON_USE_EXC_INFO_STACK 1 + #endif + #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC + #define CYTHON_UPDATE_DESCRIPTOR_DOC 1 + #endif + #ifndef CYTHON_USE_FREELISTS + #define CYTHON_USE_FREELISTS 1 + #endif +#endif +#if !defined(CYTHON_FAST_PYCCALL) +#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) +#endif +#if !defined(CYTHON_VECTORCALL) +#define CYTHON_VECTORCALL (CYTHON_FAST_PYCCALL && PY_VERSION_HEX >= 0x030800B1) +#endif +#define CYTHON_BACKPORT_VECTORCALL (CYTHON_METH_FASTCALL && PY_VERSION_HEX < 0x030800B1) +#if CYTHON_USE_PYLONG_INTERNALS + #if PY_MAJOR_VERSION < 3 + #include "longintrepr.h" + #endif + #undef SHIFT + #undef BASE + #undef MASK + #ifdef SIZEOF_VOID_P + enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; + #endif +#endif +#ifndef __has_attribute + #define __has_attribute(x) 0 +#endif +#ifndef __has_cpp_attribute + #define __has_cpp_attribute(x) 0 +#endif +#ifndef CYTHON_RESTRICT + #if defined(__GNUC__) + #define CYTHON_RESTRICT __restrict__ + #elif defined(_MSC_VER) && _MSC_VER >= 1400 + #define CYTHON_RESTRICT __restrict + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define CYTHON_RESTRICT restrict + #else + #define CYTHON_RESTRICT + #endif +#endif +#ifndef CYTHON_UNUSED + #if defined(__cplusplus) + /* for clang __has_cpp_attribute(maybe_unused) is true even before C++17 + * but leads to warnings with -pedantic, since it is a C++17 feature */ + #if ((defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) || __cplusplus >= 201703L) + #if __has_cpp_attribute(maybe_unused) + #define CYTHON_UNUSED [[maybe_unused]] + #endif + #endif + #endif +#endif +#ifndef CYTHON_UNUSED +# if defined(__GNUC__) +# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +#endif +#ifndef CYTHON_UNUSED_VAR +# if defined(__cplusplus) + template void CYTHON_UNUSED_VAR( const T& ) { } +# else +# define CYTHON_UNUSED_VAR(x) (void)(x) +# endif +#endif +#ifndef CYTHON_MAYBE_UNUSED_VAR + #define CYTHON_MAYBE_UNUSED_VAR(x) CYTHON_UNUSED_VAR(x) +#endif +#ifndef CYTHON_NCP_UNUSED +# if CYTHON_COMPILING_IN_CPYTHON +# define CYTHON_NCP_UNUSED +# else +# define CYTHON_NCP_UNUSED CYTHON_UNUSED +# endif +#endif +#ifndef CYTHON_USE_CPP_STD_MOVE + #if defined(__cplusplus) && (\ + __cplusplus >= 201103L || (defined(_MSC_VER) && _MSC_VER >= 1600)) + #define CYTHON_USE_CPP_STD_MOVE 1 + #else + #define CYTHON_USE_CPP_STD_MOVE 0 + #endif +#endif +#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) +#ifdef _MSC_VER + #ifndef _MSC_STDINT_H_ + #if _MSC_VER < 1300 + typedef unsigned char uint8_t; + typedef unsigned short uint16_t; + typedef unsigned int uint32_t; + #else + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 
uint32_t; + #endif + #endif + #if _MSC_VER < 1300 + #ifdef _WIN64 + typedef unsigned long long __pyx_uintptr_t; + #else + typedef unsigned int __pyx_uintptr_t; + #endif + #else + #ifdef _WIN64 + typedef unsigned __int64 __pyx_uintptr_t; + #else + typedef unsigned __int32 __pyx_uintptr_t; + #endif + #endif +#else + #include + typedef uintptr_t __pyx_uintptr_t; +#endif +#ifndef CYTHON_FALLTHROUGH + #if defined(__cplusplus) + /* for clang __has_cpp_attribute(fallthrough) is true even before C++17 + * but leads to warnings with -pedantic, since it is a C++17 feature */ + #if ((defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) || __cplusplus >= 201703L) + #if __has_cpp_attribute(fallthrough) + #define CYTHON_FALLTHROUGH [[fallthrough]] + #endif + #endif + #ifndef CYTHON_FALLTHROUGH + #if __has_cpp_attribute(clang::fallthrough) + #define CYTHON_FALLTHROUGH [[clang::fallthrough]] + #elif __has_cpp_attribute(gnu::fallthrough) + #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] + #endif + #endif + #endif + #ifndef CYTHON_FALLTHROUGH + #if __has_attribute(fallthrough) + #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) + #else + #define CYTHON_FALLTHROUGH + #endif + #endif + #if defined(__clang__) && defined(__apple_build_version__) + #if __apple_build_version__ < 7000000 + #undef CYTHON_FALLTHROUGH + #define CYTHON_FALLTHROUGH + #endif + #endif +#endif +#ifdef __cplusplus + template + struct __PYX_IS_UNSIGNED_IMPL {static const bool value = T(0) < T(-1);}; + #define __PYX_IS_UNSIGNED(type) (__PYX_IS_UNSIGNED_IMPL::value) +#else + #define __PYX_IS_UNSIGNED(type) (((type)-1) > 0) +#endif +#if CYTHON_COMPILING_IN_PYPY == 1 + #define __PYX_NEED_TP_PRINT_SLOT (PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x030A0000) +#else + #define __PYX_NEED_TP_PRINT_SLOT (PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000) +#endif +#define __PYX_REINTERPRET_FUNCION(func_pointer, other_pointer) ((func_pointer)(void(*)(void))(other_pointer)) + +#ifndef CYTHON_INLINE + #if defined(__clang__) + #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) + #elif defined(__GNUC__) + #define CYTHON_INLINE __inline__ + #elif defined(_MSC_VER) + #define CYTHON_INLINE __inline + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define CYTHON_INLINE inline + #else + #define CYTHON_INLINE + #endif +#endif + +#define __PYX_BUILD_PY_SSIZE_T "n" +#define CYTHON_FORMAT_SSIZE_T "z" +#if PY_MAJOR_VERSION < 3 + #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" + #define __Pyx_DefaultClassType PyClass_Type + #define __Pyx_PyCode_New(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) +#else + #define __Pyx_BUILTIN_MODULE_NAME "builtins" + #define __Pyx_DefaultClassType PyType_Type +#if CYTHON_COMPILING_IN_LIMITED_API + static CYTHON_INLINE PyObject* __Pyx_PyCode_New(int a, int p, int k, int l, int s, int f, + PyObject *code, PyObject *c, PyObject* n, PyObject *v, + PyObject *fv, PyObject *cell, PyObject* fn, + PyObject *name, int fline, PyObject *lnos) { + PyObject *exception_table = NULL; + PyObject *types_module=NULL, *code_type=NULL, *result=NULL; + #if __PYX_LIMITED_VERSION_HEX < 0x030B0000 + PyObject *version_info; + PyObject *py_minor_version = NULL; + #endif + long minor_version = 0; + PyObject *type, *value, *traceback; + PyErr_Fetch(&type, &value, &traceback); + #if __PYX_LIMITED_VERSION_HEX >= 0x030B0000 + minor_version = 11; + #else + if (!(version_info = PySys_GetObject("version_info"))) goto end; + if 
(!(py_minor_version = PySequence_GetItem(version_info, 1))) goto end; + minor_version = PyLong_AsLong(py_minor_version); + Py_DECREF(py_minor_version); + if (minor_version == -1 && PyErr_Occurred()) goto end; + #endif + if (!(types_module = PyImport_ImportModule("types"))) goto end; + if (!(code_type = PyObject_GetAttrString(types_module, "CodeType"))) goto end; + if (minor_version <= 7) { + (void)p; + result = PyObject_CallFunction(code_type, "iiiiiOOOOOOiOO", a, k, l, s, f, code, + c, n, v, fn, name, fline, lnos, fv, cell); + } else if (minor_version <= 10) { + result = PyObject_CallFunction(code_type, "iiiiiiOOOOOOiOO", a,p, k, l, s, f, code, + c, n, v, fn, name, fline, lnos, fv, cell); + } else { + if (!(exception_table = PyBytes_FromStringAndSize(NULL, 0))) goto end; + result = PyObject_CallFunction(code_type, "iiiiiiOOOOOOOiOO", a,p, k, l, s, f, code, + c, n, v, fn, name, name, fline, lnos, exception_table, fv, cell); + } + end: + Py_XDECREF(code_type); + Py_XDECREF(exception_table); + Py_XDECREF(types_module); + if (type) { + PyErr_Restore(type, value, traceback); + } + return result; + } + #ifndef CO_OPTIMIZED + #define CO_OPTIMIZED 0x0001 + #endif + #ifndef CO_NEWLOCALS + #define CO_NEWLOCALS 0x0002 + #endif + #ifndef CO_VARARGS + #define CO_VARARGS 0x0004 + #endif + #ifndef CO_VARKEYWORDS + #define CO_VARKEYWORDS 0x0008 + #endif + #ifndef CO_ASYNC_GENERATOR + #define CO_ASYNC_GENERATOR 0x0200 + #endif + #ifndef CO_GENERATOR + #define CO_GENERATOR 0x0020 + #endif + #ifndef CO_COROUTINE + #define CO_COROUTINE 0x0080 + #endif +#elif PY_VERSION_HEX >= 0x030B0000 + static CYTHON_INLINE PyCodeObject* __Pyx_PyCode_New(int a, int p, int k, int l, int s, int f, + PyObject *code, PyObject *c, PyObject* n, PyObject *v, + PyObject *fv, PyObject *cell, PyObject* fn, + PyObject *name, int fline, PyObject *lnos) { + PyCodeObject *result; + PyObject *empty_bytes = PyBytes_FromStringAndSize("", 0); + if (!empty_bytes) return NULL; + result = + #if PY_VERSION_HEX >= 0x030C0000 + PyUnstable_Code_NewWithPosOnlyArgs + #else + PyCode_NewWithPosOnlyArgs + #endif + (a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, name, fline, lnos, empty_bytes); + Py_DECREF(empty_bytes); + return result; + } +#elif PY_VERSION_HEX >= 0x030800B2 && !CYTHON_COMPILING_IN_PYPY + #define __Pyx_PyCode_New(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_NewWithPosOnlyArgs(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) +#else + #define __Pyx_PyCode_New(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) +#endif +#endif +#if PY_VERSION_HEX >= 0x030900A4 || defined(Py_IS_TYPE) + #define __Pyx_IS_TYPE(ob, type) Py_IS_TYPE(ob, type) +#else + #define __Pyx_IS_TYPE(ob, type) (((const PyObject*)ob)->ob_type == (type)) +#endif +#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_Is) + #define __Pyx_Py_Is(x, y) Py_Is(x, y) +#else + #define __Pyx_Py_Is(x, y) ((x) == (y)) +#endif +#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_IsNone) + #define __Pyx_Py_IsNone(ob) Py_IsNone(ob) +#else + #define __Pyx_Py_IsNone(ob) __Pyx_Py_Is((ob), Py_None) +#endif +#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_IsTrue) + #define __Pyx_Py_IsTrue(ob) Py_IsTrue(ob) +#else + #define __Pyx_Py_IsTrue(ob) __Pyx_Py_Is((ob), Py_True) +#endif +#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_IsFalse) + #define __Pyx_Py_IsFalse(ob) Py_IsFalse(ob) +#else + #define __Pyx_Py_IsFalse(ob) __Pyx_Py_Is((ob), Py_False) +#endif +#define 
__Pyx_NoneAsNull(obj) (__Pyx_Py_IsNone(obj) ? NULL : (obj)) +#if PY_VERSION_HEX >= 0x030900F0 && !CYTHON_COMPILING_IN_PYPY + #define __Pyx_PyObject_GC_IsFinalized(o) PyObject_GC_IsFinalized(o) +#else + #define __Pyx_PyObject_GC_IsFinalized(o) _PyGC_FINALIZED(o) +#endif +#ifndef CO_COROUTINE + #define CO_COROUTINE 0x80 +#endif +#ifndef CO_ASYNC_GENERATOR + #define CO_ASYNC_GENERATOR 0x200 +#endif +#ifndef Py_TPFLAGS_CHECKTYPES + #define Py_TPFLAGS_CHECKTYPES 0 +#endif +#ifndef Py_TPFLAGS_HAVE_INDEX + #define Py_TPFLAGS_HAVE_INDEX 0 +#endif +#ifndef Py_TPFLAGS_HAVE_NEWBUFFER + #define Py_TPFLAGS_HAVE_NEWBUFFER 0 +#endif +#ifndef Py_TPFLAGS_HAVE_FINALIZE + #define Py_TPFLAGS_HAVE_FINALIZE 0 +#endif +#ifndef Py_TPFLAGS_SEQUENCE + #define Py_TPFLAGS_SEQUENCE 0 +#endif +#ifndef Py_TPFLAGS_MAPPING + #define Py_TPFLAGS_MAPPING 0 +#endif +#ifndef METH_STACKLESS + #define METH_STACKLESS 0 +#endif +#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) + #ifndef METH_FASTCALL + #define METH_FASTCALL 0x80 + #endif + typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); + typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, + Py_ssize_t nargs, PyObject *kwnames); +#else + #if PY_VERSION_HEX >= 0x030d00A4 + # define __Pyx_PyCFunctionFast PyCFunctionFast + # define __Pyx_PyCFunctionFastWithKeywords PyCFunctionFastWithKeywords + #else + # define __Pyx_PyCFunctionFast _PyCFunctionFast + # define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords + #endif +#endif +#if CYTHON_METH_FASTCALL + #define __Pyx_METH_FASTCALL METH_FASTCALL + #define __Pyx_PyCFunction_FastCall __Pyx_PyCFunctionFast + #define __Pyx_PyCFunction_FastCallWithKeywords __Pyx_PyCFunctionFastWithKeywords +#else + #define __Pyx_METH_FASTCALL METH_VARARGS + #define __Pyx_PyCFunction_FastCall PyCFunction + #define __Pyx_PyCFunction_FastCallWithKeywords PyCFunctionWithKeywords +#endif +#if CYTHON_VECTORCALL + #define __pyx_vectorcallfunc vectorcallfunc + #define __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET PY_VECTORCALL_ARGUMENTS_OFFSET + #define __Pyx_PyVectorcall_NARGS(n) PyVectorcall_NARGS((size_t)(n)) +#elif CYTHON_BACKPORT_VECTORCALL + typedef PyObject *(*__pyx_vectorcallfunc)(PyObject *callable, PyObject *const *args, + size_t nargsf, PyObject *kwnames); + #define __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET ((size_t)1 << (8 * sizeof(size_t) - 1)) + #define __Pyx_PyVectorcall_NARGS(n) ((Py_ssize_t)(((size_t)(n)) & ~__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)) +#else + #define __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET 0 + #define __Pyx_PyVectorcall_NARGS(n) ((Py_ssize_t)(n)) +#endif +#if PY_MAJOR_VERSION >= 0x030900B1 +#define __Pyx_PyCFunction_CheckExact(func) PyCFunction_CheckExact(func) +#else +#define __Pyx_PyCFunction_CheckExact(func) PyCFunction_Check(func) +#endif +#define __Pyx_CyOrPyCFunction_Check(func) PyCFunction_Check(func) +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_CyOrPyCFunction_GET_FUNCTION(func) (((PyCFunctionObject*)(func))->m_ml->ml_meth) +#elif !CYTHON_COMPILING_IN_LIMITED_API +#define __Pyx_CyOrPyCFunction_GET_FUNCTION(func) PyCFunction_GET_FUNCTION(func) +#endif +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_CyOrPyCFunction_GET_FLAGS(func) (((PyCFunctionObject*)(func))->m_ml->ml_flags) +static CYTHON_INLINE PyObject* __Pyx_CyOrPyCFunction_GET_SELF(PyObject *func) { + return (__Pyx_CyOrPyCFunction_GET_FLAGS(func) & METH_STATIC) ? 
NULL : ((PyCFunctionObject*)func)->m_self; +} +#endif +static CYTHON_INLINE int __Pyx__IsSameCFunction(PyObject *func, void *cfunc) { +#if CYTHON_COMPILING_IN_LIMITED_API + return PyCFunction_Check(func) && PyCFunction_GetFunction(func) == (PyCFunction) cfunc; +#else + return PyCFunction_Check(func) && PyCFunction_GET_FUNCTION(func) == (PyCFunction) cfunc; +#endif +} +#define __Pyx_IsSameCFunction(func, cfunc) __Pyx__IsSameCFunction(func, cfunc) +#if __PYX_LIMITED_VERSION_HEX < 0x030900B1 + #define __Pyx_PyType_FromModuleAndSpec(m, s, b) ((void)m, PyType_FromSpecWithBases(s, b)) + typedef PyObject *(*__Pyx_PyCMethod)(PyObject *, PyTypeObject *, PyObject *const *, size_t, PyObject *); +#else + #define __Pyx_PyType_FromModuleAndSpec(m, s, b) PyType_FromModuleAndSpec(m, s, b) + #define __Pyx_PyCMethod PyCMethod +#endif +#ifndef METH_METHOD + #define METH_METHOD 0x200 +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) + #define PyObject_Malloc(s) PyMem_Malloc(s) + #define PyObject_Free(p) PyMem_Free(p) + #define PyObject_Realloc(p) PyMem_Realloc(p) +#endif +#if CYTHON_COMPILING_IN_LIMITED_API + #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) +#else + #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) +#endif +#if CYTHON_COMPILING_IN_LIMITED_API + #define __Pyx_PyThreadState_Current PyThreadState_Get() +#elif !CYTHON_FAST_THREAD_STATE + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#elif PY_VERSION_HEX >= 0x030d00A1 + #define __Pyx_PyThreadState_Current PyThreadState_GetUnchecked() +#elif PY_VERSION_HEX >= 0x03060000 + #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() +#elif PY_VERSION_HEX >= 0x03000000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#else + #define __Pyx_PyThreadState_Current _PyThreadState_Current +#endif +#if CYTHON_COMPILING_IN_LIMITED_API +static CYTHON_INLINE void *__Pyx_PyModule_GetState(PyObject *op) +{ + void *result; + result = PyModule_GetState(op); + if (!result) + Py_FatalError("Couldn't find the module state"); + return result; +} +#endif +#define __Pyx_PyObject_GetSlot(obj, name, func_ctype) __Pyx_PyType_GetSlot(Py_TYPE(obj), name, func_ctype) +#if CYTHON_COMPILING_IN_LIMITED_API + #define __Pyx_PyType_GetSlot(type, name, func_ctype) ((func_ctype) PyType_GetSlot((type), Py_##name)) +#else + #define __Pyx_PyType_GetSlot(type, name, func_ctype) ((type)->name) +#endif +#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) +#include "pythread.h" +#define Py_tss_NEEDS_INIT 0 +typedef int Py_tss_t; +static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { + *key = PyThread_create_key(); + return 0; +} +static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { + Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); + *key = Py_tss_NEEDS_INIT; + return key; +} +static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { + PyObject_Free(key); +} +static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { + return *key != Py_tss_NEEDS_INIT; +} +static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { + PyThread_delete_key(*key); + *key = Py_tss_NEEDS_INIT; +} +static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { + return PyThread_set_key_value(*key, value); +} +static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { + return PyThread_get_key_value(*key); +} 
+#endif +#if PY_MAJOR_VERSION < 3 + #if CYTHON_COMPILING_IN_PYPY + #if PYPY_VERSION_NUM < 0x07030600 + #if defined(__cplusplus) && __cplusplus >= 201402L + [[deprecated("`with nogil:` inside a nogil function will not release the GIL in PyPy2 < 7.3.6")]] + #elif defined(__GNUC__) || defined(__clang__) + __attribute__ ((__deprecated__("`with nogil:` inside a nogil function will not release the GIL in PyPy2 < 7.3.6"))) + #elif defined(_MSC_VER) + __declspec(deprecated("`with nogil:` inside a nogil function will not release the GIL in PyPy2 < 7.3.6")) + #endif + static CYTHON_INLINE int PyGILState_Check(void) { + return 0; + } + #else // PYPY_VERSION_NUM < 0x07030600 + #endif // PYPY_VERSION_NUM < 0x07030600 + #else + static CYTHON_INLINE int PyGILState_Check(void) { + PyThreadState * tstate = _PyThreadState_Current; + return tstate && (tstate == PyGILState_GetThisThreadState()); + } + #endif +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030d0000 || defined(_PyDict_NewPresized) +#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n)) +#else +#define __Pyx_PyDict_NewPresized(n) PyDict_New() +#endif +#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION + #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) +#else + #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX > 0x030600B4 && PY_VERSION_HEX < 0x030d0000 && CYTHON_USE_UNICODE_INTERNALS +#define __Pyx_PyDict_GetItemStrWithError(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) +static CYTHON_INLINE PyObject * __Pyx_PyDict_GetItemStr(PyObject *dict, PyObject *name) { + PyObject *res = __Pyx_PyDict_GetItemStrWithError(dict, name); + if (res == NULL) PyErr_Clear(); + return res; +} +#elif PY_MAJOR_VERSION >= 3 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07020000) +#define __Pyx_PyDict_GetItemStrWithError PyDict_GetItemWithError +#define __Pyx_PyDict_GetItemStr PyDict_GetItem +#else +static CYTHON_INLINE PyObject * __Pyx_PyDict_GetItemStrWithError(PyObject *dict, PyObject *name) { +#if CYTHON_COMPILING_IN_PYPY + return PyDict_GetItem(dict, name); +#else + PyDictEntry *ep; + PyDictObject *mp = (PyDictObject*) dict; + long hash = ((PyStringObject *) name)->ob_shash; + assert(hash != -1); + ep = (mp->ma_lookup)(mp, name, hash); + if (ep == NULL) { + return NULL; + } + return ep->me_value; +#endif +} +#define __Pyx_PyDict_GetItemStr PyDict_GetItem +#endif +#if CYTHON_USE_TYPE_SLOTS + #define __Pyx_PyType_GetFlags(tp) (((PyTypeObject *)tp)->tp_flags) + #define __Pyx_PyType_HasFeature(type, feature) ((__Pyx_PyType_GetFlags(type) & (feature)) != 0) + #define __Pyx_PyObject_GetIterNextFunc(obj) (Py_TYPE(obj)->tp_iternext) +#else + #define __Pyx_PyType_GetFlags(tp) (PyType_GetFlags((PyTypeObject *)tp)) + #define __Pyx_PyType_HasFeature(type, feature) PyType_HasFeature(type, feature) + #define __Pyx_PyObject_GetIterNextFunc(obj) PyIter_Next +#endif +#if CYTHON_COMPILING_IN_LIMITED_API + #define __Pyx_SetItemOnTypeDict(tp, k, v) PyObject_GenericSetAttr((PyObject*)tp, k, v) +#else + #define __Pyx_SetItemOnTypeDict(tp, k, v) PyDict_SetItem(tp->tp_dict, k, v) +#endif +#if CYTHON_USE_TYPE_SPECS && PY_VERSION_HEX >= 0x03080000 +#define __Pyx_PyHeapTypeObject_GC_Del(obj) {\ + PyTypeObject *type = Py_TYPE((PyObject*)obj);\ + assert(__Pyx_PyType_HasFeature(type, 
Py_TPFLAGS_HEAPTYPE));\ + PyObject_GC_Del(obj);\ + Py_DECREF(type);\ +} +#else +#define __Pyx_PyHeapTypeObject_GC_Del(obj) PyObject_GC_Del(obj) +#endif +#if CYTHON_COMPILING_IN_LIMITED_API + #define CYTHON_PEP393_ENABLED 1 + #define __Pyx_PyUnicode_READY(op) (0) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GetLength(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_ReadChar(u, i) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((void)u, 1114111U) + #define __Pyx_PyUnicode_KIND(u) ((void)u, (0)) + #define __Pyx_PyUnicode_DATA(u) ((void*)u) + #define __Pyx_PyUnicode_READ(k, d, i) ((void)k, PyUnicode_ReadChar((PyObject*)(d), i)) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GetLength(u)) +#elif PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) + #define CYTHON_PEP393_ENABLED 1 + #if PY_VERSION_HEX >= 0x030C0000 + #define __Pyx_PyUnicode_READY(op) (0) + #else + #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ + 0 : _PyUnicode_Ready((PyObject *)(op))) + #endif + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) + #define __Pyx_PyUnicode_KIND(u) ((int)PyUnicode_KIND(u)) + #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) + #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, (Py_UCS4) ch) + #if PY_VERSION_HEX >= 0x030C0000 + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) + #else + #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03090000 + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : ((PyCompactUnicodeObject *)(u))->wstr_length)) + #else + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) + #endif + #endif +#else + #define CYTHON_PEP393_ENABLED 0 + #define PyUnicode_1BYTE_KIND 1 + #define PyUnicode_2BYTE_KIND 2 + #define PyUnicode_4BYTE_KIND 4 + #define __Pyx_PyUnicode_READY(op) (0) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 
65535U : 1114111U) + #define __Pyx_PyUnicode_KIND(u) ((int)sizeof(Py_UNICODE)) + #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) + #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = (Py_UNICODE) ch) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) +#endif +#if CYTHON_COMPILING_IN_PYPY + #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) +#else + #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ + PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) +#endif +#if CYTHON_COMPILING_IN_PYPY + #if !defined(PyUnicode_DecodeUnicodeEscape) + #define PyUnicode_DecodeUnicodeEscape(s, size, errors) PyUnicode_Decode(s, size, "unicode_escape", errors) + #endif + #if !defined(PyUnicode_Contains) || (PY_MAJOR_VERSION == 2 && PYPY_VERSION_NUM < 0x07030500) + #undef PyUnicode_Contains + #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) + #endif + #if !defined(PyByteArray_Check) + #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) + #endif + #if !defined(PyObject_Format) + #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) + #endif +#endif +#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) +#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) +#else + #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) +#endif +#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) + #define PyObject_ASCII(o) PyObject_Repr(o) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBaseString_Type PyUnicode_Type + #define PyStringObject PyUnicodeObject + #define PyString_Type PyUnicode_Type + #define PyString_Check PyUnicode_Check + #define PyString_CheckExact PyUnicode_CheckExact +#ifndef PyObject_Unicode + #define PyObject_Unicode PyObject_Str +#endif +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) + #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) +#else + #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) + #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) +#endif +#if CYTHON_COMPILING_IN_CPYTHON + #define __Pyx_PySequence_ListKeepNew(obj)\ + (likely(PyList_CheckExact(obj) && Py_REFCNT(obj) == 1) ? 
__Pyx_NewRef(obj) : PySequence_List(obj)) +#else + #define __Pyx_PySequence_ListKeepNew(obj) PySequence_List(obj) +#endif +#ifndef PySet_CheckExact + #define PySet_CheckExact(obj) __Pyx_IS_TYPE(obj, &PySet_Type) +#endif +#if PY_VERSION_HEX >= 0x030900A4 + #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt) + #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size) +#else + #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) + #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size) +#endif +#if CYTHON_ASSUME_SAFE_MACROS + #define __Pyx_PySequence_ITEM(o, i) PySequence_ITEM(o, i) + #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) + #define __Pyx_PyTuple_SET_ITEM(o, i, v) (PyTuple_SET_ITEM(o, i, v), (0)) + #define __Pyx_PyList_SET_ITEM(o, i, v) (PyList_SET_ITEM(o, i, v), (0)) + #define __Pyx_PyTuple_GET_SIZE(o) PyTuple_GET_SIZE(o) + #define __Pyx_PyList_GET_SIZE(o) PyList_GET_SIZE(o) + #define __Pyx_PySet_GET_SIZE(o) PySet_GET_SIZE(o) + #define __Pyx_PyBytes_GET_SIZE(o) PyBytes_GET_SIZE(o) + #define __Pyx_PyByteArray_GET_SIZE(o) PyByteArray_GET_SIZE(o) +#else + #define __Pyx_PySequence_ITEM(o, i) PySequence_GetItem(o, i) + #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) + #define __Pyx_PyTuple_SET_ITEM(o, i, v) PyTuple_SetItem(o, i, v) + #define __Pyx_PyList_SET_ITEM(o, i, v) PyList_SetItem(o, i, v) + #define __Pyx_PyTuple_GET_SIZE(o) PyTuple_Size(o) + #define __Pyx_PyList_GET_SIZE(o) PyList_Size(o) + #define __Pyx_PySet_GET_SIZE(o) PySet_Size(o) + #define __Pyx_PyBytes_GET_SIZE(o) PyBytes_Size(o) + #define __Pyx_PyByteArray_GET_SIZE(o) PyByteArray_Size(o) +#endif +#if __PYX_LIMITED_VERSION_HEX >= 0x030d00A1 + #define __Pyx_PyImport_AddModuleRef(name) PyImport_AddModuleRef(name) +#else + static CYTHON_INLINE PyObject *__Pyx_PyImport_AddModuleRef(const char *name) { + PyObject *module = PyImport_AddModule(name); + Py_XINCREF(module); + return module; + } +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyIntObject PyLongObject + #define PyInt_Type PyLong_Type + #define PyInt_Check(op) PyLong_Check(op) + #define PyInt_CheckExact(op) PyLong_CheckExact(op) + #define __Pyx_Py3Int_Check(op) PyLong_Check(op) + #define __Pyx_Py3Int_CheckExact(op) PyLong_CheckExact(op) + #define PyInt_FromString PyLong_FromString + #define PyInt_FromUnicode PyLong_FromUnicode + #define PyInt_FromLong PyLong_FromLong + #define PyInt_FromSize_t PyLong_FromSize_t + #define PyInt_FromSsize_t PyLong_FromSsize_t + #define PyInt_AsLong PyLong_AsLong + #define PyInt_AS_LONG PyLong_AS_LONG + #define PyInt_AsSsize_t PyLong_AsSsize_t + #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask + #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask + #define PyNumber_Int PyNumber_Long +#else + #define __Pyx_Py3Int_Check(op) (PyLong_Check(op) || PyInt_Check(op)) + #define __Pyx_Py3Int_CheckExact(op) (PyLong_CheckExact(op) || PyInt_CheckExact(op)) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBoolObject PyLongObject +#endif +#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY + #ifndef PyUnicode_InternFromString + #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) + #endif +#endif +#if PY_VERSION_HEX < 0x030200A4 + typedef long Py_hash_t; + #define __Pyx_PyInt_FromHash_t PyInt_FromLong + #define __Pyx_PyInt_AsHash_t __Pyx_PyIndex_AsHash_t +#else + #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t + #define __Pyx_PyInt_AsHash_t __Pyx_PyIndex_AsSsize_t +#endif +#if CYTHON_USE_ASYNC_SLOTS + #if PY_VERSION_HEX >= 0x030500B1 + #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods + 
#define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) + #else + #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) + #endif +#else + #define __Pyx_PyType_AsAsync(obj) NULL +#endif +#ifndef __Pyx_PyAsyncMethodsStruct + typedef struct { + unaryfunc am_await; + unaryfunc am_aiter; + unaryfunc am_anext; + } __Pyx_PyAsyncMethodsStruct; +#endif + +#if defined(_WIN32) || defined(WIN32) || defined(MS_WINDOWS) + #if !defined(_USE_MATH_DEFINES) + #define _USE_MATH_DEFINES + #endif +#endif +#include <math.h> +#ifdef NAN +#define __PYX_NAN() ((float) NAN) +#else +static CYTHON_INLINE float __PYX_NAN() { + float value; + memset(&value, 0xFF, sizeof(value)); + return value; +} +#endif +#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) +#define __Pyx_truncl trunc +#else +#define __Pyx_truncl truncl +#endif + +#define __PYX_MARK_ERR_POS(f_index, lineno) \ + { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; } +#define __PYX_ERR(f_index, lineno, Ln_error) \ + { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; } + +#ifdef CYTHON_EXTERN_C + #undef __PYX_EXTERN_C + #define __PYX_EXTERN_C CYTHON_EXTERN_C +#elif defined(__PYX_EXTERN_C) + #ifdef _MSC_VER + #pragma message ("Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead.") + #else + #warning Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead. + #endif +#else + #ifdef __cplusplus + #define __PYX_EXTERN_C extern "C" + #else + #define __PYX_EXTERN_C extern + #endif +#endif + +#define __PYX_HAVE__fontTools__feaLib__lexer +#define __PYX_HAVE_API__fontTools__feaLib__lexer +/* Early includes */ +#ifdef _OPENMP +#include <omp.h> +#endif /* _OPENMP */ + +#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) +#define CYTHON_WITHOUT_ASSERTIONS +#endif + +typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; + const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; + +#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 +#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 +#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) +#define __PYX_DEFAULT_STRING_ENCODING "" +#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString +#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#define __Pyx_uchar_cast(c) ((unsigned char)c) +#define __Pyx_long_cast(x) ((long)x) +#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ + (sizeof(type) < sizeof(Py_ssize_t)) ||\ + (sizeof(type) > sizeof(Py_ssize_t) &&\ + likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX) &&\ + (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ + v == (type)PY_SSIZE_T_MIN))) ||\ + (sizeof(type) == sizeof(Py_ssize_t) &&\ + (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX))) ) +static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { + return (size_t) i < (size_t) limit; +} +#if defined (__cplusplus) && __cplusplus >= 201103L + #include <cstdlib> + #define __Pyx_sst_abs(value) std::abs(value) +#elif SIZEOF_INT >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) abs(value) +#elif SIZEOF_LONG >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) labs(value) +#elif defined (_MSC_VER) + #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) +#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + 
#define __Pyx_sst_abs(value) llabs(value) +#elif defined (__GNUC__) + #define __Pyx_sst_abs(value) __builtin_llabs(value) +#else + #define __Pyx_sst_abs(value) ((value<0) ? -value : value) +#endif +static CYTHON_INLINE Py_ssize_t __Pyx_ssize_strlen(const char *s); +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); +static CYTHON_INLINE PyObject* __Pyx_PyByteArray_FromString(const char*); +#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) +#define __Pyx_PyBytes_FromString PyBytes_FromString +#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); +#if PY_MAJOR_VERSION < 3 + #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#else + #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize +#endif +#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyObject_AsWritableString(s) ((char*)(__pyx_uintptr_t) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableSString(s) ((signed char*)(__pyx_uintptr_t) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*)(__pyx_uintptr_t) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) +#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) +#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) +#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) +#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) +#define __Pyx_PyUnicode_FromOrdinal(o) PyUnicode_FromOrdinal((int)o) +#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode +#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) +#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) +static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); +static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); +#define __Pyx_PySequence_Tuple(obj)\ + (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); +static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject*); +#if CYTHON_ASSUME_SAFE_MACROS +#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? 
PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) +#else +#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) +#endif +#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) +#if PY_MAJOR_VERSION >= 3 +#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) +#else +#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) +#endif +#if CYTHON_USE_PYLONG_INTERNALS + #if PY_VERSION_HEX >= 0x030C00A7 + #ifndef _PyLong_SIGN_MASK + #define _PyLong_SIGN_MASK 3 + #endif + #ifndef _PyLong_NON_SIZE_BITS + #define _PyLong_NON_SIZE_BITS 3 + #endif + #define __Pyx_PyLong_Sign(x) (((PyLongObject*)x)->long_value.lv_tag & _PyLong_SIGN_MASK) + #define __Pyx_PyLong_IsNeg(x) ((__Pyx_PyLong_Sign(x) & 2) != 0) + #define __Pyx_PyLong_IsNonNeg(x) (!__Pyx_PyLong_IsNeg(x)) + #define __Pyx_PyLong_IsZero(x) (__Pyx_PyLong_Sign(x) & 1) + #define __Pyx_PyLong_IsPos(x) (__Pyx_PyLong_Sign(x) == 0) + #define __Pyx_PyLong_CompactValueUnsigned(x) (__Pyx_PyLong_Digits(x)[0]) + #define __Pyx_PyLong_DigitCount(x) ((Py_ssize_t) (((PyLongObject*)x)->long_value.lv_tag >> _PyLong_NON_SIZE_BITS)) + #define __Pyx_PyLong_SignedDigitCount(x)\ + ((1 - (Py_ssize_t) __Pyx_PyLong_Sign(x)) * __Pyx_PyLong_DigitCount(x)) + #if defined(PyUnstable_Long_IsCompact) && defined(PyUnstable_Long_CompactValue) + #define __Pyx_PyLong_IsCompact(x) PyUnstable_Long_IsCompact((PyLongObject*) x) + #define __Pyx_PyLong_CompactValue(x) PyUnstable_Long_CompactValue((PyLongObject*) x) + #else + #define __Pyx_PyLong_IsCompact(x) (((PyLongObject*)x)->long_value.lv_tag < (2 << _PyLong_NON_SIZE_BITS)) + #define __Pyx_PyLong_CompactValue(x) ((1 - (Py_ssize_t) __Pyx_PyLong_Sign(x)) * (Py_ssize_t) __Pyx_PyLong_Digits(x)[0]) + #endif + typedef Py_ssize_t __Pyx_compact_pylong; + typedef size_t __Pyx_compact_upylong; + #else + #define __Pyx_PyLong_IsNeg(x) (Py_SIZE(x) < 0) + #define __Pyx_PyLong_IsNonNeg(x) (Py_SIZE(x) >= 0) + #define __Pyx_PyLong_IsZero(x) (Py_SIZE(x) == 0) + #define __Pyx_PyLong_IsPos(x) (Py_SIZE(x) > 0) + #define __Pyx_PyLong_CompactValueUnsigned(x) ((Py_SIZE(x) == 0) ? 0 : __Pyx_PyLong_Digits(x)[0]) + #define __Pyx_PyLong_DigitCount(x) __Pyx_sst_abs(Py_SIZE(x)) + #define __Pyx_PyLong_SignedDigitCount(x) Py_SIZE(x) + #define __Pyx_PyLong_IsCompact(x) (Py_SIZE(x) == 0 || Py_SIZE(x) == 1 || Py_SIZE(x) == -1) + #define __Pyx_PyLong_CompactValue(x)\ + ((Py_SIZE(x) == 0) ? (sdigit) 0 : ((Py_SIZE(x) < 0) ? 
-(sdigit)__Pyx_PyLong_Digits(x)[0] : (sdigit)__Pyx_PyLong_Digits(x)[0])) + typedef sdigit __Pyx_compact_pylong; + typedef digit __Pyx_compact_upylong; + #endif + #if PY_VERSION_HEX >= 0x030C00A5 + #define __Pyx_PyLong_Digits(x) (((PyLongObject*)x)->long_value.ob_digit) + #else + #define __Pyx_PyLong_Digits(x) (((PyLongObject*)x)->ob_digit) + #endif +#endif +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII +#include <string.h> +static int __Pyx_sys_getdefaultencoding_not_ascii; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + PyObject* ascii_chars_u = NULL; + PyObject* ascii_chars_b = NULL; + const char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + if (strcmp(default_encoding_c, "ascii") == 0) { + __Pyx_sys_getdefaultencoding_not_ascii = 0; + } else { + char ascii_chars[128]; + int c; + for (c = 0; c < 128; c++) { + ascii_chars[c] = (char) c; + } + __Pyx_sys_getdefaultencoding_not_ascii = 1; + ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); + if (!ascii_chars_u) goto bad; + ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); + if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { + PyErr_Format( + PyExc_ValueError, + "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", + default_encoding_c); + goto bad; + } + Py_DECREF(ascii_chars_u); + Py_DECREF(ascii_chars_b); + } + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + Py_XDECREF(ascii_chars_u); + Py_XDECREF(ascii_chars_b); + return -1; +} +#endif +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) +#else +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +#include <string.h> +static char* __PYX_DEFAULT_STRING_ENCODING; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); + if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; + strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + return -1; +} +#endif +#endif + + +/* Test for GCC > 2.95 */ +#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) + #define likely(x) __builtin_expect(!!(x), 1) + #define unlikely(x) __builtin_expect(!!(x), 0) +#else /* !__GNUC__ or GCC < 2.95 */ + #define likely(x) (x) + #define unlikely(x) (x) +#endif /* __GNUC__ */ +static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } + +#if !CYTHON_USE_MODULE_STATE 
+static PyObject *__pyx_m = NULL; +#endif +static int __pyx_lineno; +static int __pyx_clineno = 0; +static const char * __pyx_cfilenm = __FILE__; +static const char *__pyx_filename; + +/* #### Code section: filename_table ### */ + +static const char *__pyx_f[] = { + "Lib/fontTools/feaLib/lexer.py", +}; +/* #### Code section: utility_code_proto_before_types ### */ +/* ForceInitThreads.proto */ +#ifndef __PYX_FORCE_INIT_THREADS + #define __PYX_FORCE_INIT_THREADS 0 +#endif + +/* #### Code section: numeric_typedefs ### */ +/* #### Code section: complex_type_declarations ### */ +/* #### Code section: type_declarations ### */ + +/*--- Type declarations ---*/ +/* #### Code section: utility_code_proto ### */ + +/* --- Runtime support code (head) --- */ +/* Refnanny.proto */ +#ifndef CYTHON_REFNANNY + #define CYTHON_REFNANNY 0 +#endif +#if CYTHON_REFNANNY + typedef struct { + void (*INCREF)(void*, PyObject*, Py_ssize_t); + void (*DECREF)(void*, PyObject*, Py_ssize_t); + void (*GOTREF)(void*, PyObject*, Py_ssize_t); + void (*GIVEREF)(void*, PyObject*, Py_ssize_t); + void* (*SetupContext)(const char*, Py_ssize_t, const char*); + void (*FinishContext)(void**); + } __Pyx_RefNannyAPIStruct; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); + #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; +#ifdef WITH_THREAD + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + if (acquire_gil) {\ + PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), (__LINE__), (__FILE__));\ + PyGILState_Release(__pyx_gilstate_save);\ + } else {\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), (__LINE__), (__FILE__));\ + } + #define __Pyx_RefNannyFinishContextNogil() {\ + PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ + __Pyx_RefNannyFinishContext();\ + PyGILState_Release(__pyx_gilstate_save);\ + } +#else + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), (__LINE__), (__FILE__)) + #define __Pyx_RefNannyFinishContextNogil() __Pyx_RefNannyFinishContext() +#endif + #define __Pyx_RefNannyFinishContextNogil() {\ + PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ + __Pyx_RefNannyFinishContext();\ + PyGILState_Release(__pyx_gilstate_save);\ + } + #define __Pyx_RefNannyFinishContext()\ + __Pyx_RefNanny->FinishContext(&__pyx_refnanny) + #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), (__LINE__)) + #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), (__LINE__)) + #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), (__LINE__)) + #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), (__LINE__)) + #define __Pyx_XINCREF(r) do { if((r) == NULL); else {__Pyx_INCREF(r); }} while(0) + #define __Pyx_XDECREF(r) do { if((r) == NULL); else {__Pyx_DECREF(r); }} while(0) + #define __Pyx_XGOTREF(r) do { if((r) == NULL); else {__Pyx_GOTREF(r); }} while(0) + #define __Pyx_XGIVEREF(r) do { if((r) == NULL); else {__Pyx_GIVEREF(r);}} while(0) +#else + #define __Pyx_RefNannyDeclarations + #define __Pyx_RefNannySetupContext(name, acquire_gil) + #define __Pyx_RefNannyFinishContextNogil() + #define __Pyx_RefNannyFinishContext() + #define __Pyx_INCREF(r) Py_INCREF(r) + #define __Pyx_DECREF(r) Py_DECREF(r) + #define __Pyx_GOTREF(r) + #define __Pyx_GIVEREF(r) + #define __Pyx_XINCREF(r) 
Py_XINCREF(r) + #define __Pyx_XDECREF(r) Py_XDECREF(r) + #define __Pyx_XGOTREF(r) + #define __Pyx_XGIVEREF(r) +#endif +#define __Pyx_Py_XDECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; Py_XDECREF(tmp);\ + } while (0) +#define __Pyx_XDECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_XDECREF(tmp);\ + } while (0) +#define __Pyx_DECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_DECREF(tmp);\ + } while (0) +#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) +#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) + +/* PyErrExceptionMatches.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) +static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); +#else +#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) +#endif + +/* PyThreadStateGet.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; +#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; +#if PY_VERSION_HEX >= 0x030C00A6 +#define __Pyx_PyErr_Occurred() (__pyx_tstate->current_exception != NULL) +#define __Pyx_PyErr_CurrentExceptionType() (__pyx_tstate->current_exception ? (PyObject*) Py_TYPE(__pyx_tstate->current_exception) : (PyObject*) NULL) +#else +#define __Pyx_PyErr_Occurred() (__pyx_tstate->curexc_type != NULL) +#define __Pyx_PyErr_CurrentExceptionType() (__pyx_tstate->curexc_type) +#endif +#else +#define __Pyx_PyThreadState_declare +#define __Pyx_PyThreadState_assign +#define __Pyx_PyErr_Occurred() (PyErr_Occurred() != NULL) +#define __Pyx_PyErr_CurrentExceptionType() PyErr_Occurred() +#endif + +/* PyErrFetchRestore.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) +#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030C00A6 +#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) +#else +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#endif +#else +#define __Pyx_PyErr_Clear() PyErr_Clear() +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) +#endif + +/* PyObjectGetAttrStr.proto */ +#if CYTHON_USE_TYPE_SLOTS +static 
CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); +#else +#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) +#endif + +/* PyObjectGetAttrStrNoError.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name); + +/* GetBuiltinName.proto */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name); + +/* TupleAndListFromArray.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyList_FromArray(PyObject *const *src, Py_ssize_t n); +static CYTHON_INLINE PyObject* __Pyx_PyTuple_FromArray(PyObject *const *src, Py_ssize_t n); +#endif + +/* IncludeStringH.proto */ +#include <string.h> + +/* BytesEquals.proto */ +static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); + +/* UnicodeEquals.proto */ +static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); + +/* fastcall.proto */ +#if CYTHON_AVOID_BORROWED_REFS + #define __Pyx_Arg_VARARGS(args, i) PySequence_GetItem(args, i) +#elif CYTHON_ASSUME_SAFE_MACROS + #define __Pyx_Arg_VARARGS(args, i) PyTuple_GET_ITEM(args, i) +#else + #define __Pyx_Arg_VARARGS(args, i) PyTuple_GetItem(args, i) +#endif +#if CYTHON_AVOID_BORROWED_REFS + #define __Pyx_Arg_NewRef_VARARGS(arg) __Pyx_NewRef(arg) + #define __Pyx_Arg_XDECREF_VARARGS(arg) Py_XDECREF(arg) +#else + #define __Pyx_Arg_NewRef_VARARGS(arg) arg + #define __Pyx_Arg_XDECREF_VARARGS(arg) +#endif +#define __Pyx_NumKwargs_VARARGS(kwds) PyDict_Size(kwds) +#define __Pyx_KwValues_VARARGS(args, nargs) NULL +#define __Pyx_GetKwValue_VARARGS(kw, kwvalues, s) __Pyx_PyDict_GetItemStrWithError(kw, s) +#define __Pyx_KwargsAsDict_VARARGS(kw, kwvalues) PyDict_Copy(kw) +#if CYTHON_METH_FASTCALL + #define __Pyx_Arg_FASTCALL(args, i) args[i] + #define __Pyx_NumKwargs_FASTCALL(kwds) PyTuple_GET_SIZE(kwds) + #define __Pyx_KwValues_FASTCALL(args, nargs) ((args) + (nargs)) + static CYTHON_INLINE PyObject * __Pyx_GetKwValue_FASTCALL(PyObject *kwnames, PyObject *const *kwvalues, PyObject *s); +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030d0000 + CYTHON_UNUSED static PyObject *__Pyx_KwargsAsDict_FASTCALL(PyObject *kwnames, PyObject *const *kwvalues); + #else + #define __Pyx_KwargsAsDict_FASTCALL(kw, kwvalues) _PyStack_AsDict(kwvalues, kw) + #endif + #define __Pyx_Arg_NewRef_FASTCALL(arg) arg /* no-op, __Pyx_Arg_FASTCALL is direct and this needs + to have the same reference counting */ + #define __Pyx_Arg_XDECREF_FASTCALL(arg) +#else + #define __Pyx_Arg_FASTCALL __Pyx_Arg_VARARGS + #define __Pyx_NumKwargs_FASTCALL __Pyx_NumKwargs_VARARGS + #define __Pyx_KwValues_FASTCALL __Pyx_KwValues_VARARGS + #define __Pyx_GetKwValue_FASTCALL __Pyx_GetKwValue_VARARGS + #define __Pyx_KwargsAsDict_FASTCALL __Pyx_KwargsAsDict_VARARGS + #define __Pyx_Arg_NewRef_FASTCALL(arg) __Pyx_Arg_NewRef_VARARGS(arg) + #define __Pyx_Arg_XDECREF_FASTCALL(arg) __Pyx_Arg_XDECREF_VARARGS(arg) +#endif +#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS +#define __Pyx_ArgsSlice_VARARGS(args, start, stop) __Pyx_PyTuple_FromArray(&__Pyx_Arg_VARARGS(args, start), stop - start) +#define __Pyx_ArgsSlice_FASTCALL(args, start, stop) __Pyx_PyTuple_FromArray(&__Pyx_Arg_FASTCALL(args, start), stop - start) +#else +#define __Pyx_ArgsSlice_VARARGS(args, start, stop) PyTuple_GetSlice(args, start, stop) +#define __Pyx_ArgsSlice_FASTCALL(args, start, stop) PyTuple_GetSlice(args, start, stop) +#endif + +/* RaiseArgTupleInvalid.proto */ +static void 
__Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, + Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); + +/* RaiseDoubleKeywords.proto */ +static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); + +/* ParseKeywords.proto */ +static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject *const *kwvalues, + PyObject **argnames[], + PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, + const char* function_name); + +/* PyObjectSetAttrStr.proto */ +#if CYTHON_USE_TYPE_SLOTS +#define __Pyx_PyObject_DelAttrStr(o,n) __Pyx_PyObject_SetAttrStr(o, n, NULL) +static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value); +#else +#define __Pyx_PyObject_DelAttrStr(o,n) PyObject_DelAttr(o,n) +#define __Pyx_PyObject_SetAttrStr(o,n,v) PyObject_SetAttr(o,n,v) +#endif + +/* PyDictVersioning.proto */ +#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS +#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) +#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) +#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ + (version_var) = __PYX_GET_DICT_VERSION(dict);\ + (cache_var) = (value); +#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ + static PY_UINT64_T __pyx_dict_version = 0;\ + static PyObject *__pyx_dict_cached_value = NULL;\ + if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ + (VAR) = __pyx_dict_cached_value;\ + } else {\ + (VAR) = __pyx_dict_cached_value = (LOOKUP);\ + __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ + }\ +} +static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); +static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); +static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); +#else +#define __PYX_GET_DICT_VERSION(dict) (0) +#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) +#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); +#endif + +/* GetModuleGlobalName.proto */ +#if CYTHON_USE_DICT_VERSIONS +#define __Pyx_GetModuleGlobalName(var, name) do {\ + static PY_UINT64_T __pyx_dict_version = 0;\ + static PyObject *__pyx_dict_cached_value = NULL;\ + (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ + (likely(__pyx_dict_cached_value) ? 
__Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ + __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ +} while(0) +#define __Pyx_GetModuleGlobalNameUncached(var, name) do {\ + PY_UINT64_T __pyx_dict_version;\ + PyObject *__pyx_dict_cached_value;\ + (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ +} while(0) +static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); +#else +#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) +#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) +static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); +#endif + +/* PyFunctionFastCall.proto */ +#if CYTHON_FAST_PYCALL +#if !CYTHON_VECTORCALL +#define __Pyx_PyFunction_FastCall(func, args, nargs)\ + __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) +static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); +#endif +#define __Pyx_BUILD_ASSERT_EXPR(cond)\ + (sizeof(char [1 - 2*!(cond)]) - 1) +#ifndef Py_MEMBER_SIZE +#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) +#endif +#if !CYTHON_VECTORCALL +#if PY_VERSION_HEX >= 0x03080000 + #include "frameobject.h" +#if PY_VERSION_HEX >= 0x030b00a6 && !CYTHON_COMPILING_IN_LIMITED_API + #ifndef Py_BUILD_CORE + #define Py_BUILD_CORE 1 + #endif + #include "internal/pycore_frame.h" +#endif + #define __Pxy_PyFrame_Initialize_Offsets() + #define __Pyx_PyFrame_GetLocalsplus(frame) ((frame)->f_localsplus) +#else + static size_t __pyx_pyframe_localsplus_offset = 0; + #include "frameobject.h" + #define __Pxy_PyFrame_Initialize_Offsets()\ + ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ + (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) + #define __Pyx_PyFrame_GetLocalsplus(frame)\ + (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) +#endif +#endif +#endif + +/* PyObjectCall.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); +#else +#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) +#endif + +/* PyObjectCallMethO.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); +#endif + +/* PyObjectFastCall.proto */ +#define __Pyx_PyObject_FastCall(func, args, nargs) __Pyx_PyObject_FastCallDict(func, args, (size_t)(nargs), NULL) +static CYTHON_INLINE PyObject* __Pyx_PyObject_FastCallDict(PyObject *func, PyObject **args, size_t nargs, PyObject *kwargs); + +/* RaiseTooManyValuesToUnpack.proto */ +static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); + +/* RaiseNeedMoreValuesToUnpack.proto */ +static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); + +/* IterFinish.proto */ +static CYTHON_INLINE int __Pyx_IterFinish(void); + +/* UnpackItemEndCheck.proto */ +static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); + +/* PyIntBinop.proto */ +#if !CYTHON_COMPILING_IN_PYPY +static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); +#else 
+#define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\ + (inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) +#endif + +/* PyObjectCallNoArg.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func); + +/* RaiseException.proto */ +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); + +/* GetItemInt.proto */ +#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ + __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ + (is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ + __Pyx_GetItemInt_Generic(o, to_py_func(i)))) +#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ + __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ + (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, + int wraparound, int boundscheck); +#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ + __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ + (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, + int wraparound, int boundscheck); +static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, + int is_list, int wraparound, int boundscheck); + +/* PyObjectCallOneArg.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); + +/* ObjectGetItem.proto */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject *key); +#else +#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) +#endif + +/* SliceObject.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice( + PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop, + PyObject** py_start, PyObject** py_stop, PyObject** py_slice, + int has_cstart, int has_cstop, int wraparound); + +/* PyIntBinop.proto */ +#if !CYTHON_COMPILING_IN_PYPY +static PyObject* __Pyx_PyInt_SubtractObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); +#else +#define __Pyx_PyInt_SubtractObjC(op1, op2, intval, inplace, zerodivision_check)\ + (inplace ? PyNumber_InPlaceSubtract(op1, op2) : PyNumber_Subtract(op1, op2)) +#endif + +/* PySequenceContains.proto */ +static CYTHON_INLINE int __Pyx_PySequence_ContainsTF(PyObject* item, PyObject* seq, int eq) { + int result = PySequence_Contains(seq, item); + return unlikely(result < 0) ? result : (result == (eq == Py_EQ)); +} + +/* PyUnicodeContains.proto */ +static CYTHON_INLINE int __Pyx_PyUnicode_ContainsTF(PyObject* substring, PyObject* text, int eq) { + int result = PyUnicode_Contains(text, substring); + return unlikely(result < 0) ? 
result : (result == (eq == Py_EQ)); +} + +/* pybytes_as_double.proto */ +static double __Pyx_SlowPyString_AsDouble(PyObject *obj); +static double __Pyx__PyBytes_AsDouble(PyObject *obj, const char* start, Py_ssize_t length); +static CYTHON_INLINE double __Pyx_PyBytes_AsDouble(PyObject *obj) { + char* as_c_string; + Py_ssize_t size; +#if CYTHON_ASSUME_SAFE_MACROS + as_c_string = PyBytes_AS_STRING(obj); + size = PyBytes_GET_SIZE(obj); +#else + if (PyBytes_AsStringAndSize(obj, &as_c_string, &size) < 0) { + return (double)-1; + } +#endif + return __Pyx__PyBytes_AsDouble(obj, as_c_string, size); +} +static CYTHON_INLINE double __Pyx_PyByteArray_AsDouble(PyObject *obj) { + char* as_c_string; + Py_ssize_t size; +#if CYTHON_ASSUME_SAFE_MACROS + as_c_string = PyByteArray_AS_STRING(obj); + size = PyByteArray_GET_SIZE(obj); +#else + as_c_string = PyByteArray_AsString(obj); + if (as_c_string == NULL) { + return (double)-1; + } + size = PyByteArray_Size(obj); +#endif + return __Pyx__PyBytes_AsDouble(obj, as_c_string, size); +} + +/* pyunicode_as_double.proto */ +#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY && CYTHON_ASSUME_SAFE_MACROS +static const char* __Pyx__PyUnicode_AsDouble_Copy(const void* data, const int kind, char* buffer, Py_ssize_t start, Py_ssize_t end) { + int last_was_punctuation; + Py_ssize_t i; + last_was_punctuation = 1; + for (i=start; i <= end; i++) { + Py_UCS4 chr = PyUnicode_READ(kind, data, i); + int is_punctuation = (chr == '_') | (chr == '.'); + *buffer = (char)chr; + buffer += (chr != '_'); + if (unlikely(chr > 127)) goto parse_failure; + if (unlikely(last_was_punctuation & is_punctuation)) goto parse_failure; + last_was_punctuation = is_punctuation; + } + if (unlikely(last_was_punctuation)) goto parse_failure; + *buffer = '\0'; + return buffer; +parse_failure: + return NULL; +} +static double __Pyx__PyUnicode_AsDouble_inf_nan(const void* data, int kind, Py_ssize_t start, Py_ssize_t length) { + int matches = 1; + Py_UCS4 chr; + Py_UCS4 sign = PyUnicode_READ(kind, data, start); + int is_signed = (sign == '-') | (sign == '+'); + start += is_signed; + length -= is_signed; + switch (PyUnicode_READ(kind, data, start)) { + #ifdef Py_NAN + case 'n': + case 'N': + if (unlikely(length != 3)) goto parse_failure; + chr = PyUnicode_READ(kind, data, start+1); + matches &= (chr == 'a') | (chr == 'A'); + chr = PyUnicode_READ(kind, data, start+2); + matches &= (chr == 'n') | (chr == 'N'); + if (unlikely(!matches)) goto parse_failure; + return (sign == '-') ? -Py_NAN : Py_NAN; + #endif + case 'i': + case 'I': + if (unlikely(length < 3)) goto parse_failure; + chr = PyUnicode_READ(kind, data, start+1); + matches &= (chr == 'n') | (chr == 'N'); + chr = PyUnicode_READ(kind, data, start+2); + matches &= (chr == 'f') | (chr == 'F'); + if (likely(length == 3 && matches)) + return (sign == '-') ? -Py_HUGE_VAL : Py_HUGE_VAL; + if (unlikely(length != 8)) goto parse_failure; + chr = PyUnicode_READ(kind, data, start+3); + matches &= (chr == 'i') | (chr == 'I'); + chr = PyUnicode_READ(kind, data, start+4); + matches &= (chr == 'n') | (chr == 'N'); + chr = PyUnicode_READ(kind, data, start+5); + matches &= (chr == 'i') | (chr == 'I'); + chr = PyUnicode_READ(kind, data, start+6); + matches &= (chr == 't') | (chr == 'T'); + chr = PyUnicode_READ(kind, data, start+7); + matches &= (chr == 'y') | (chr == 'Y'); + if (unlikely(!matches)) goto parse_failure; + return (sign == '-') ? 
-Py_HUGE_VAL : Py_HUGE_VAL; + case '.': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': + break; + default: + goto parse_failure; + } + return 0.0; +parse_failure: + return -1.0; +} +static double __Pyx_PyUnicode_AsDouble_WithSpaces(PyObject *obj) { + double value; + const char *last; + char *end; + Py_ssize_t start, length = PyUnicode_GET_LENGTH(obj); + const int kind = PyUnicode_KIND(obj); + const void* data = PyUnicode_DATA(obj); + start = 0; + while (Py_UNICODE_ISSPACE(PyUnicode_READ(kind, data, start))) + start++; + while (start < length - 1 && Py_UNICODE_ISSPACE(PyUnicode_READ(kind, data, length - 1))) + length--; + length -= start; + if (unlikely(length <= 0)) goto fallback; + value = __Pyx__PyUnicode_AsDouble_inf_nan(data, kind, start, length); + if (unlikely(value == -1.0)) goto fallback; + if (value != 0.0) return value; + if (length < 40) { + char number[40]; + last = __Pyx__PyUnicode_AsDouble_Copy(data, kind, number, start, start + length); + if (unlikely(!last)) goto fallback; + value = PyOS_string_to_double(number, &end, NULL); + } else { + char *number = (char*) PyMem_Malloc((length + 1) * sizeof(char)); + if (unlikely(!number)) goto fallback; + last = __Pyx__PyUnicode_AsDouble_Copy(data, kind, number, start, start + length); + if (unlikely(!last)) { + PyMem_Free(number); + goto fallback; + } + value = PyOS_string_to_double(number, &end, NULL); + PyMem_Free(number); + } + if (likely(end == last) || (value == (double)-1 && PyErr_Occurred())) { + return value; + } +fallback: + return __Pyx_SlowPyString_AsDouble(obj); +} +#endif +static CYTHON_INLINE double __Pyx_PyUnicode_AsDouble(PyObject *obj) { +#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY && CYTHON_ASSUME_SAFE_MACROS + if (unlikely(__Pyx_PyUnicode_READY(obj) == -1)) + return (double)-1; + if (likely(PyUnicode_IS_ASCII(obj))) { + const char *s; + Py_ssize_t length; + s = PyUnicode_AsUTF8AndSize(obj, &length); + return __Pyx__PyBytes_AsDouble(obj, s, length); + } + return __Pyx_PyUnicode_AsDouble_WithSpaces(obj); +#else + return __Pyx_SlowPyString_AsDouble(obj); +#endif +} + +/* pynumber_float.proto */ +static CYTHON_INLINE PyObject* __Pyx__PyNumber_Float(PyObject* obj); +#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? 
__Pyx_NewRef(x) : __Pyx__PyNumber_Float(x)) + +/* IterNext.proto */ +#define __Pyx_PyIter_Next(obj) __Pyx_PyIter_Next2(obj, NULL) +static CYTHON_INLINE PyObject *__Pyx_PyIter_Next2(PyObject *, PyObject *); + +/* GetTopmostException.proto */ +#if CYTHON_USE_EXC_INFO_STACK && CYTHON_FAST_THREAD_STATE +static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); +#endif + +/* SaveResetException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +#else +#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) +#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) +#endif + +/* GetException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) +static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#else +static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); +#endif + +/* PyObjectGetMethod.proto */ +static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method); + +/* PyObjectCallMethod0.proto */ +static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name); + +/* pop.proto */ +static CYTHON_INLINE PyObject* __Pyx__PyObject_Pop(PyObject* L); +#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS +static CYTHON_INLINE PyObject* __Pyx_PyList_Pop(PyObject* L); +#define __Pyx_PyObject_Pop(L) (likely(PyList_CheckExact(L)) ?\ + __Pyx_PyList_Pop(L) : __Pyx__PyObject_Pop(L)) +#else +#define __Pyx_PyList_Pop(L) __Pyx__PyObject_Pop(L) +#define __Pyx_PyObject_Pop(L) __Pyx__PyObject_Pop(L) +#endif + +/* UnpackUnboundCMethod.proto */ +typedef struct { + PyObject *type; + PyObject **method_name; + PyCFunction func; + PyObject *method; + int flag; +} __Pyx_CachedCFunction; + +/* CallUnboundCMethod0.proto */ +static PyObject* __Pyx__CallUnboundCMethod0(__Pyx_CachedCFunction* cfunc, PyObject* self); +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_CallUnboundCMethod0(cfunc, self)\ + (likely((cfunc)->func) ?\ + (likely((cfunc)->flag == METH_NOARGS) ? (*((cfunc)->func))(self, NULL) :\ + (PY_VERSION_HEX >= 0x030600B1 && likely((cfunc)->flag == METH_FASTCALL) ?\ + (PY_VERSION_HEX >= 0x030700A0 ?\ + (*(__Pyx_PyCFunctionFast)(void*)(PyCFunction)(cfunc)->func)(self, &__pyx_empty_tuple, 0) :\ + (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)(cfunc)->func)(self, &__pyx_empty_tuple, 0, NULL)) :\ + (PY_VERSION_HEX >= 0x030700A0 && (cfunc)->flag == (METH_FASTCALL | METH_KEYWORDS) ?\ + (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)(cfunc)->func)(self, &__pyx_empty_tuple, 0, NULL) :\ + (likely((cfunc)->flag == (METH_VARARGS | METH_KEYWORDS)) ? ((*(PyCFunctionWithKeywords)(void*)(PyCFunction)(cfunc)->func)(self, __pyx_empty_tuple, NULL)) :\ + ((cfunc)->flag == METH_VARARGS ? 
(*((cfunc)->func))(self, __pyx_empty_tuple) :\ + __Pyx__CallUnboundCMethod0(cfunc, self)))))) :\ + __Pyx__CallUnboundCMethod0(cfunc, self)) +#else +#define __Pyx_CallUnboundCMethod0(cfunc, self) __Pyx__CallUnboundCMethod0(cfunc, self) +#endif + +/* ListAppend.proto */ +#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS +static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { + PyListObject* L = (PyListObject*) list; + Py_ssize_t len = Py_SIZE(list); + if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { + Py_INCREF(x); + #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030d0000 + L->ob_item[len] = x; + #else + PyList_SET_ITEM(list, len, x); + #endif + __Pyx_SET_SIZE(list, len + 1); + return 0; + } + return PyList_Append(list, x); +} +#else +#define __Pyx_PyList_Append(L,x) PyList_Append(L,x) +#endif + +/* PyObjectCall2Args.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); + +/* PyObjectCallMethod1.proto */ +static PyObject* __Pyx_PyObject_CallMethod1(PyObject* obj, PyObject* method_name, PyObject* arg); + +/* append.proto */ +static CYTHON_INLINE int __Pyx_PyObject_Append(PyObject* L, PyObject* x); + +/* FastTypeChecks.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) +#define __Pyx_TypeCheck2(obj, type1, type2) __Pyx_IsAnySubtype2(Py_TYPE(obj), (PyTypeObject *)type1, (PyTypeObject *)type2) +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); +static CYTHON_INLINE int __Pyx_IsAnySubtype2(PyTypeObject *cls, PyTypeObject *a, PyTypeObject *b); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); +#else +#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) +#define __Pyx_TypeCheck2(obj, type1, type2) (PyObject_TypeCheck(obj, (PyTypeObject *)type1) || PyObject_TypeCheck(obj, (PyTypeObject *)type2)) +#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) +#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) +#endif +#define __Pyx_PyErr_ExceptionMatches2(err1, err2) __Pyx_PyErr_GivenExceptionMatches2(__Pyx_PyErr_CurrentExceptionType(), err1, err2) +#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) + +/* SwapException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#else +static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); +#endif + +/* GetAttr.proto */ +static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); + +/* HasAttr.proto */ +static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *); + +/* GetAttr3.proto */ +static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); + +/* Import.proto */ +static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); + +/* ImportFrom.proto */ +static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); + +/* ImportDottedModule.proto */ +static PyObject *__Pyx_ImportDottedModule(PyObject 
*name, PyObject *parts_tuple); +#if PY_MAJOR_VERSION >= 3 +static PyObject *__Pyx_ImportDottedModule_WalkParts(PyObject *module, PyObject *name, PyObject *parts_tuple); +#endif + +/* Py3UpdateBases.proto */ +static PyObject* __Pyx_PEP560_update_bases(PyObject *bases); + +/* CalculateMetaclass.proto */ +static PyObject *__Pyx_CalculateMetaclass(PyTypeObject *metaclass, PyObject *bases); + +/* SetNameInClass.proto */ +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && PY_VERSION_HEX < 0x030d0000 +#define __Pyx_SetNameInClass(ns, name, value)\ + (likely(PyDict_CheckExact(ns)) ? _PyDict_SetItem_KnownHash(ns, name, value, ((PyASCIIObject *) name)->hash) : PyObject_SetItem(ns, name, value)) +#elif CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_SetNameInClass(ns, name, value)\ + (likely(PyDict_CheckExact(ns)) ? PyDict_SetItem(ns, name, value) : PyObject_SetItem(ns, name, value)) +#else +#define __Pyx_SetNameInClass(ns, name, value) PyObject_SetItem(ns, name, value) +#endif + +/* IncludeStructmemberH.proto */ +#include <structmember.h> + +/* FixUpExtensionType.proto */ +#if CYTHON_USE_TYPE_SPECS +static int __Pyx_fix_up_extension_type_from_spec(PyType_Spec *spec, PyTypeObject *type); +#endif + +/* FetchSharedCythonModule.proto */ +static PyObject *__Pyx_FetchSharedCythonABIModule(void); + +/* FetchCommonType.proto */ +#if !CYTHON_USE_TYPE_SPECS +static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type); +#else +static PyTypeObject* __Pyx_FetchCommonTypeFromSpec(PyObject *module, PyType_Spec *spec, PyObject *bases); +#endif + +/* PyMethodNew.proto */ +#if CYTHON_COMPILING_IN_LIMITED_API +static PyObject *__Pyx_PyMethod_New(PyObject *func, PyObject *self, PyObject *typ) { + PyObject *typesModule=NULL, *methodType=NULL, *result=NULL; + CYTHON_UNUSED_VAR(typ); + if (!self) + return __Pyx_NewRef(func); + typesModule = PyImport_ImportModule("types"); + if (!typesModule) return NULL; + methodType = PyObject_GetAttrString(typesModule, "MethodType"); + Py_DECREF(typesModule); + if (!methodType) return NULL; + result = PyObject_CallFunctionObjArgs(methodType, func, self, NULL); + Py_DECREF(methodType); + return result; +} +#elif PY_MAJOR_VERSION >= 3 +static PyObject *__Pyx_PyMethod_New(PyObject *func, PyObject *self, PyObject *typ) { + CYTHON_UNUSED_VAR(typ); + if (!self) + return __Pyx_NewRef(func); + return PyMethod_New(func, self); +} +#else + #define __Pyx_PyMethod_New PyMethod_New +#endif + +/* PyVectorcallFastCallDict.proto */ +#if CYTHON_METH_FASTCALL +static CYTHON_INLINE PyObject *__Pyx_PyVectorcall_FastCallDict(PyObject *func, __pyx_vectorcallfunc vc, PyObject *const *args, size_t nargs, PyObject *kw); +#endif + +/* CythonFunctionShared.proto */ +#define __Pyx_CyFunction_USED +#define __Pyx_CYFUNCTION_STATICMETHOD 0x01 +#define __Pyx_CYFUNCTION_CLASSMETHOD 0x02 +#define __Pyx_CYFUNCTION_CCLASS 0x04 +#define __Pyx_CYFUNCTION_COROUTINE 0x08 +#define __Pyx_CyFunction_GetClosure(f)\ + (((__pyx_CyFunctionObject *) (f))->func_closure) +#if PY_VERSION_HEX < 0x030900B1 || CYTHON_COMPILING_IN_LIMITED_API + #define __Pyx_CyFunction_GetClassObj(f)\ + (((__pyx_CyFunctionObject *) (f))->func_classobj) +#else + #define __Pyx_CyFunction_GetClassObj(f)\ + ((PyObject*) ((PyCMethodObject *) (f))->mm_class) +#endif +#define __Pyx_CyFunction_SetClassObj(f, classobj)\ + __Pyx__CyFunction_SetClassObj((__pyx_CyFunctionObject *) (f), (classobj)) +#define __Pyx_CyFunction_Defaults(type, f)\ + ((type *)(((__pyx_CyFunctionObject *) (f))->defaults)) +#define __Pyx_CyFunction_SetDefaultsGetter(f, g)\ + 
((__pyx_CyFunctionObject *) (f))->defaults_getter = (g) +typedef struct { +#if CYTHON_COMPILING_IN_LIMITED_API + PyObject_HEAD + PyObject *func; +#elif PY_VERSION_HEX < 0x030900B1 + PyCFunctionObject func; +#else + PyCMethodObject func; +#endif +#if CYTHON_BACKPORT_VECTORCALL + __pyx_vectorcallfunc func_vectorcall; +#endif +#if PY_VERSION_HEX < 0x030500A0 || CYTHON_COMPILING_IN_LIMITED_API + PyObject *func_weakreflist; +#endif + PyObject *func_dict; + PyObject *func_name; + PyObject *func_qualname; + PyObject *func_doc; + PyObject *func_globals; + PyObject *func_code; + PyObject *func_closure; +#if PY_VERSION_HEX < 0x030900B1 || CYTHON_COMPILING_IN_LIMITED_API + PyObject *func_classobj; +#endif + void *defaults; + int defaults_pyobjects; + size_t defaults_size; + int flags; + PyObject *defaults_tuple; + PyObject *defaults_kwdict; + PyObject *(*defaults_getter)(PyObject *); + PyObject *func_annotations; + PyObject *func_is_coroutine; +} __pyx_CyFunctionObject; +#undef __Pyx_CyOrPyCFunction_Check +#define __Pyx_CyFunction_Check(obj) __Pyx_TypeCheck(obj, __pyx_CyFunctionType) +#define __Pyx_CyOrPyCFunction_Check(obj) __Pyx_TypeCheck2(obj, __pyx_CyFunctionType, &PyCFunction_Type) +#define __Pyx_CyFunction_CheckExact(obj) __Pyx_IS_TYPE(obj, __pyx_CyFunctionType) +static CYTHON_INLINE int __Pyx__IsSameCyOrCFunction(PyObject *func, void *cfunc); +#undef __Pyx_IsSameCFunction +#define __Pyx_IsSameCFunction(func, cfunc) __Pyx__IsSameCyOrCFunction(func, cfunc) +static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject* op, PyMethodDef *ml, + int flags, PyObject* qualname, + PyObject *closure, + PyObject *module, PyObject *globals, + PyObject* code); +static CYTHON_INLINE void __Pyx__CyFunction_SetClassObj(__pyx_CyFunctionObject* f, PyObject* classobj); +static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *m, + size_t size, + int pyobjects); +static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *m, + PyObject *tuple); +static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *m, + PyObject *dict); +static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *m, + PyObject *dict); +static int __pyx_CyFunction_init(PyObject *module); +#if CYTHON_METH_FASTCALL +static PyObject * __Pyx_CyFunction_Vectorcall_NOARGS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames); +static PyObject * __Pyx_CyFunction_Vectorcall_O(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames); +static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames); +static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS_METHOD(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames); +#if CYTHON_BACKPORT_VECTORCALL +#define __Pyx_CyFunction_func_vectorcall(f) (((__pyx_CyFunctionObject*)f)->func_vectorcall) +#else +#define __Pyx_CyFunction_func_vectorcall(f) (((PyCFunctionObject*)f)->vectorcall) +#endif +#endif + +/* CythonFunction.proto */ +static PyObject *__Pyx_CyFunction_New(PyMethodDef *ml, + int flags, PyObject* qualname, + PyObject *closure, + PyObject *module, PyObject *globals, + PyObject* code); + +/* PyObjectLookupSpecial.proto */ +#if CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS +#define __Pyx_PyObject_LookupSpecialNoError(obj, attr_name) __Pyx__PyObject_LookupSpecial(obj, attr_name, 0) +#define __Pyx_PyObject_LookupSpecial(obj, attr_name) __Pyx__PyObject_LookupSpecial(obj, attr_name, 1) +static CYTHON_INLINE PyObject* 
__Pyx__PyObject_LookupSpecial(PyObject* obj, PyObject* attr_name, int with_error); +#else +#define __Pyx_PyObject_LookupSpecialNoError(o,n) __Pyx_PyObject_GetAttrStrNoError(o,n) +#define __Pyx_PyObject_LookupSpecial(o,n) __Pyx_PyObject_GetAttrStr(o,n) +#endif + +/* Py3ClassCreate.proto */ +static PyObject *__Pyx_Py3MetaclassPrepare(PyObject *metaclass, PyObject *bases, PyObject *name, PyObject *qualname, + PyObject *mkw, PyObject *modname, PyObject *doc); +static PyObject *__Pyx_Py3ClassCreate(PyObject *metaclass, PyObject *name, PyObject *bases, PyObject *dict, + PyObject *mkw, int calculate_metaclass, int allow_py2_metaclass); + +/* CLineInTraceback.proto */ +#ifdef CYTHON_CLINE_IN_TRACEBACK +#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0) +#else +static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); +#endif + +/* CodeObjectCache.proto */ +#if !CYTHON_COMPILING_IN_LIMITED_API +typedef struct { + PyCodeObject* code_object; + int code_line; +} __Pyx_CodeObjectCacheEntry; +struct __Pyx_CodeObjectCache { + int count; + int max_count; + __Pyx_CodeObjectCacheEntry* entries; +}; +static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; +static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); +static PyCodeObject *__pyx_find_code_object(int code_line); +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); +#endif + +/* AddTraceback.proto */ +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename); + +/* GCCDiagnostics.proto */ +#if !defined(__INTEL_COMPILER) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) +#define __Pyx_HAS_GCC_DIAGNOSTIC +#endif + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); + +/* FormatTypeName.proto */ +#if CYTHON_COMPILING_IN_LIMITED_API +typedef PyObject *__Pyx_TypeName; +#define __Pyx_FMT_TYPENAME "%U" +static __Pyx_TypeName __Pyx_PyType_GetName(PyTypeObject* tp); +#define __Pyx_DECREF_TypeName(obj) Py_XDECREF(obj) +#else +typedef const char *__Pyx_TypeName; +#define __Pyx_FMT_TYPENAME "%.200s" +#define __Pyx_PyType_GetName(tp) ((tp)->tp_name) +#define __Pyx_DECREF_TypeName(obj) +#endif + +/* CIntFromPy.proto */ +static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); + +/* CIntFromPy.proto */ +static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); + +/* CheckBinaryVersion.proto */ +static unsigned long __Pyx_get_runtime_version(void); +static int __Pyx_check_binary_version(unsigned long ct_version, unsigned long rt_version, int allow_newer); + +/* InitStrings.proto */ +static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); + +/* #### Code section: module_declarations ### */ + +/* Module declarations from "cython" */ + +/* Module declarations from "fontTools.feaLib.lexer" */ +/* #### Code section: typeinfo ### */ +/* #### Code section: before_global_var ### */ +#define __Pyx_MODULE_NAME "fontTools.feaLib.lexer" +extern int __pyx_module_is_main_fontTools__feaLib__lexer; +int __pyx_module_is_main_fontTools__feaLib__lexer = 0; + +/* Implementation of "fontTools.feaLib.lexer" */ +/* #### Code section: global_var ### */ +static PyObject *__pyx_builtin_ImportError; +static PyObject *__pyx_builtin_object; +static PyObject *__pyx_builtin_staticmethod; +static PyObject *__pyx_builtin_StopIteration; +static PyObject *__pyx_builtin_open; +/* #### Code section: string_decls ### */ +static const char __pyx_k_[] = "\n"; 
+static const char __pyx_k_0[] = "0"; +static const char __pyx_k_p[] = "p"; +static const char __pyx_k_r[] = "r"; +static const char __pyx_k_s[] = "}\\s*"; +static const char __pyx_k__2[] = "\r"; +static const char __pyx_k__3[] = "#"; +static const char __pyx_k__4[] = "("; +static const char __pyx_k__5[] = ")"; +static const char __pyx_k__6[] = "\\"; +static const char __pyx_k__7[] = "@"; +static const char __pyx_k__8[] = "."; +static const char __pyx_k__9[] = "-"; +static const char __pyx_k_os[] = "os"; +static const char __pyx_k_re[] = "re"; +static const char __pyx_k_xX[] = "xX"; +static const char __pyx_k_CID[] = "CID"; +static const char __pyx_k__10[] = "\""; +static const char __pyx_k__11[] = "[\r\n]"; +static const char __pyx_k__12[] = ""; +static const char __pyx_k__13[] = "*"; +static const char __pyx_k__16[] = " \t"; +static const char __pyx_k__17[] = "\r\n"; +static const char __pyx_k__18[] = ",;:-+'{}[]<>()="; +static const char __pyx_k__19[] = "_+*:.^~!\\"; +static const char __pyx_k__20[] = "_.+*:^~!/-"; +static const char __pyx_k__51[] = "?"; +static const char __pyx_k_doc[] = "__doc__"; +static const char __pyx_k_err[] = "err"; +static const char __pyx_k_pop[] = "pop"; +static const char __pyx_k_pos[] = "pos_"; +static const char __pyx_k_s_2[] = "\\s*;"; +static const char __pyx_k_sub[] = "sub"; +static const char __pyx_k_tag[] = "tag"; +static const char __pyx_k_NAME[] = "NAME"; +static const char __pyx_k_data[] = "data"; +static const char __pyx_k_dict[] = "__dict__"; +static const char __pyx_k_init[] = "__init__"; +static const char __pyx_k_iter[] = "__iter__"; +static const char __pyx_k_join[] = "join"; +static const char __pyx_k_line[] = "line_"; +static const char __pyx_k_main[] = "__main__"; +static const char __pyx_k_mode[] = "mode_"; +static const char __pyx_k_name[] = "name"; +static const char __pyx_k_next[] = "__next__"; +static const char __pyx_k_open[] = "open"; +static const char __pyx_k_path[] = "path"; +static const char __pyx_k_read[] = "read"; +static const char __pyx_k_self[] = "self"; +static const char __pyx_k_spec[] = "__spec__"; +static const char __pyx_k_test[] = "__test__"; +static const char __pyx_k_text[] = "text"; +static const char __pyx_k_FLOAT[] = "FLOAT"; +static const char __pyx_k_Lexer[] = "Lexer"; +static const char __pyx_k_OCTAL[] = "OCTAL"; +static const char __pyx_k_close[] = "close"; +static const char __pyx_k_isabs[] = "isabs"; +static const char __pyx_k_lexer[] = "lexer"; +static const char __pyx_k_limit[] = "limit"; +static const char __pyx_k_match[] = "match"; +static const char __pyx_k_split[] = "split"; +static const char __pyx_k_start[] = "start"; +static const char __pyx_k_strip[] = "strip"; +static const char __pyx_k_super[] = "super"; +static const char __pyx_k_token[] = "token"; +static const char __pyx_k_valid[] = "valid"; +static const char __pyx_k_NORMAL[] = "NORMAL"; +static const char __pyx_k_NUMBER[] = "NUMBER"; +static const char __pyx_k_STRING[] = "STRING"; +static const char __pyx_k_SYMBOL[] = "SYMBOL"; +static const char __pyx_k_append[] = "append"; +static const char __pyx_k_column[] = "column"; +static const char __pyx_k_getcwd[] = "getcwd"; +static const char __pyx_k_import[] = "__import__"; +static const char __pyx_k_lexers[] = "lexers_"; +static const char __pyx_k_module[] = "__module__"; +static const char __pyx_k_name_2[] = "__name__"; +static const char __pyx_k_next_2[] = "next_"; +static const char __pyx_k_next_3[] = "next"; +static const char __pyx_k_object[] = "object"; +static const char 
__pyx_k_regexp[] = "regexp"; +static const char __pyx_k_string[] = "string"; +static const char __pyx_k_text_2[] = "text_"; +static const char __pyx_k_COMMENT[] = "COMMENT"; +static const char __pyx_k_NEWLINE[] = "NEWLINE"; +static const char __pyx_k_NUMBERS[] = "NUMBERS"; +static const char __pyx_k_closing[] = "closing"; +static const char __pyx_k_compile[] = "compile"; +static const char __pyx_k_curpath[] = "curpath"; +static const char __pyx_k_dirname[] = "dirname"; +static const char __pyx_k_fileobj[] = "fileobj"; +static const char __pyx_k_include[] = "include"; +static const char __pyx_k_prepare[] = "__prepare__"; +static const char __pyx_k_stop_at[] = "stop_at"; +static const char __pyx_k_FILENAME[] = "FILENAME"; +static const char __pyx_k_cur_char[] = "cur_char"; +static const char __pyx_k_encoding[] = "encoding"; +static const char __pyx_k_features[] = "<features>"; +static const char __pyx_k_filename[] = "filename"; +static const char __pyx_k_location[] = "location_"; +static const char __pyx_k_maxsplit[] = "maxsplit"; +static const char __pyx_k_qualname[] = "__qualname__"; +static const char __pyx_k_set_name[] = "__set_name__"; +static const char __pyx_k_metaclass[] = "__metaclass__"; +static const char __pyx_k_next_char[] = "next_char"; +static const char __pyx_k_scan_over[] = "scan_over_"; +static const char __pyx_k_utf_8_sig[] = "utf-8-sig"; +static const char __pyx_k_0123456789[] = "0123456789"; +static const char __pyx_k_A_Za_z_0_9[] = "^[A-Za-z_0-9.\\-]+$"; +static const char __pyx_k_CHAR_DIGIT[] = "CHAR_DIGIT_"; +static const char __pyx_k_GLYPHCLASS[] = "GLYPHCLASS"; +static const char __pyx_k_Lexer_next[] = "Lexer.next"; +static const char __pyx_k_filename_2[] = "filename_"; +static const char __pyx_k_fname_type[] = "fname_type"; +static const char __pyx_k_glyphclass[] = "glyphclass"; +static const char __pyx_k_includeDir[] = "includeDir"; +static const char __pyx_k_line_start[] = "line_start_"; +static const char __pyx_k_location_2[] = "location"; +static const char __pyx_k_make_lexer[] = "make_lexer_"; +static const char __pyx_k_scan_until[] = "scan_until_"; +static const char __pyx_k_token_type[] = "token_type"; +static const char __pyx_k_CHAR_LETTER[] = "CHAR_LETTER_"; +static const char __pyx_k_CHAR_SYMBOL[] = "CHAR_SYMBOL_"; +static const char __pyx_k_HEXADECIMAL[] = "HEXADECIMAL"; +static const char __pyx_k_ImportError[] = "ImportError"; +static const char __pyx_k_MODE_NORMAL[] = "MODE_NORMAL_"; +static const char __pyx_k_featurefile[] = "featurefile"; +static const char __pyx_k_fname_token[] = "fname_token"; +static const char __pyx_k_mro_entries[] = "__mro_entries__"; +static const char __pyx_k_text_length[] = "text_length_"; +static const char __pyx_k_CHAR_NEWLINE[] = "CHAR_NEWLINE_"; +static const char __pyx_k_Lexer___init[] = "Lexer.__init__"; +static const char __pyx_k_Lexer___iter[] = "Lexer.__iter__"; +static const char __pyx_k_Lexer___next[] = "Lexer.__next__"; +static const char __pyx_k_Lexer_next_2[] = "Lexer.next_"; +static const char __pyx_k_file_or_path[] = "file_or_path"; +static const char __pyx_k_initializing[] = "_initializing"; +static const char __pyx_k_is_coroutine[] = "_is_coroutine"; +static const char __pyx_k_staticmethod[] = "staticmethod"; +static const char __pyx_k_CHAR_HEXDIGIT[] = "CHAR_HEXDIGIT_"; +static const char __pyx_k_MODE_FILENAME[] = "MODE_FILENAME_"; +static const char __pyx_k_RE_GLYPHCLASS[] = "RE_GLYPHCLASS"; +static const char __pyx_k_StopIteration[] = "StopIteration"; +static const char __pyx_k_class_getitem[] = 
"__class_getitem__"; +static const char __pyx_k_init_subclass[] = "__init_subclass__"; +static const char __pyx_k_IncludingLexer[] = "IncludingLexer"; +static const char __pyx_k_Lexer_location[] = "Lexer.location_"; +static const char __pyx_k_fname_location[] = "fname_location"; +static const char __pyx_k_ANONYMOUS_BLOCK[] = "ANONYMOUS_BLOCK"; +static const char __pyx_k_CHAR_NAME_START[] = "CHAR_NAME_START_"; +static const char __pyx_k_CHAR_WHITESPACE[] = "CHAR_WHITESPACE_"; +static const char __pyx_k_FeatureLibError[] = "FeatureLibError"; +static const char __pyx_k_Lexer_scan_over[] = "Lexer.scan_over_"; +static const char __pyx_k_featurefilepath[] = "featurefilepath"; +static const char __pyx_k_Lexer_scan_until[] = "Lexer.scan_until_"; +static const char __pyx_k_FileNotFoundError[] = "FileNotFoundError"; +static const char __pyx_k_NonIncludingLexer[] = "NonIncludingLexer"; +static const char __pyx_k_Expected_file_name[] = "Expected file name"; +static const char __pyx_k_FeatureLibLocation[] = "FeatureLibLocation"; +static const char __pyx_k_asyncio_coroutines[] = "asyncio.coroutines"; +static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; +static const char __pyx_k_IncludedFeaNotFound[] = "IncludedFeaNotFound"; +static const char __pyx_k_IncludingLexer_next[] = "IncludingLexer.next"; +static const char __pyx_k_scan_anonymous_block[] = "scan_anonymous_block"; +static const char __pyx_k_IncludingLexer___init[] = "IncludingLexer.__init__"; +static const char __pyx_k_IncludingLexer___iter[] = "IncludingLexer.__iter__"; +static const char __pyx_k_IncludingLexer___next[] = "IncludingLexer.__next__"; +static const char __pyx_k_0123456789ABCDEFabcdef[] = "0123456789ABCDEFabcdef"; +static const char __pyx_k_CHAR_NAME_CONTINUATION[] = "CHAR_NAME_CONTINUATION_"; +static const char __pyx_k_Unexpected_character_r[] = "Unexpected character: %r"; +static const char __pyx_k_fontTools_feaLib_error[] = "fontTools.feaLib.error"; +static const char __pyx_k_fontTools_feaLib_lexer[] = "fontTools.feaLib.lexer"; +static const char __pyx_k_Expected_after_file_name[] = "Expected ')' after file name"; +static const char __pyx_k_NonIncludingLexer___next[] = "NonIncludingLexer.__next__"; +static const char __pyx_k_Expected_before_file_name[] = "Expected '(' before file name"; +static const char __pyx_k_Expected_glyph_class_name[] = "Expected glyph class name"; +static const char __pyx_k_IncludingLexer_make_lexer[] = "IncludingLexer.make_lexer_"; +static const char __pyx_k_fontTools_feaLib_location[] = "fontTools.feaLib.location"; +static const char __pyx_k_Lexer_scan_anonymous_block[] = "Lexer.scan_anonymous_block"; +static const char __pyx_k_Too_many_recursive_includes[] = "Too many recursive includes"; +static const char __pyx_k_Expected_to_terminate_string[] = "Expected '\"' to terminate string"; +static const char __pyx_k_Lib_fontTools_feaLib_lexer_py[] = "Lib/fontTools/feaLib/lexer.py"; +static const char __pyx_k_ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"; +static const char __pyx_k_A_Lexer_that_follows_include_sta[] = "A Lexer that follows include statements.\n\n The OpenType feature file specification states that due to\n historical reasons, relative imports should be resolved in this\n order:\n\n 1. If the source font is UFO format, then relative to the UFO's\n font directory\n 2. relative to the top-level include file\n 3. 
relative to the parent include file\n\n We only support 1 (via includeDir) and 2.\n "; +static const char __pyx_k_Expected_s_to_terminate_anonymou[] = "Expected '} %s;' to terminate anonymous block"; +static const char __pyx_k_Glyph_class_names_must_consist_o[] = "Glyph class names must consist of letters, digits, underscore, period or hyphen"; +static const char __pyx_k_IncludingLexer_scan_anonymous_bl[] = "IncludingLexer.scan_anonymous_block"; +static const char __pyx_k_Lexer_that_does_not_follow_inclu[] = "Lexer that does not follow `include` statements, emits them as-is."; +/* #### Code section: decls ### */ +static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_5Lexer___init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_text, PyObject *__pyx_v_filename); /* proto */ +static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_2__iter__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_4next(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_6__next__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_8location_(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_10next_(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_12scan_over_(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_valid); /* proto */ +static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_14scan_until_(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_stop_at); /* proto */ +static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_16scan_anonymous_block(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_tag); /* proto */ +static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_14IncludingLexer___init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_featurefile, PyObject *__pyx_v_includeDir); /* proto */ +static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_14IncludingLexer_2__iter__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_14IncludingLexer_4next(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_14IncludingLexer_6__next__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_14IncludingLexer_8make_lexer_(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_file_or_path); /* proto */ +static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_14IncludingLexer_10scan_anonymous_block(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_tag); /* proto */ +static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_17NonIncludingLexer___next__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self); /* proto */ +static __Pyx_CachedCFunction __pyx_umethod_PyList_Type_pop = {0, 0, 0, 0, 0}; +/* #### Code section: late_includes ### */ +/* #### Code section: module_state ### */ +typedef struct { + PyObject *__pyx_d; + PyObject *__pyx_b; + PyObject *__pyx_cython_runtime; + PyObject *__pyx_empty_tuple; + PyObject *__pyx_empty_bytes; + 
PyObject *__pyx_empty_unicode; + #ifdef __Pyx_CyFunction_USED + PyTypeObject *__pyx_CyFunctionType; + #endif + #ifdef __Pyx_FusedFunction_USED + PyTypeObject *__pyx_FusedFunctionType; + #endif + #ifdef __Pyx_Generator_USED + PyTypeObject *__pyx_GeneratorType; + #endif + #ifdef __Pyx_IterableCoroutine_USED + PyTypeObject *__pyx_IterableCoroutineType; + #endif + #ifdef __Pyx_Coroutine_USED + PyTypeObject *__pyx_CoroutineAwaitType; + #endif + #ifdef __Pyx_Coroutine_USED + PyTypeObject *__pyx_CoroutineType; + #endif + #if CYTHON_USE_MODULE_STATE + #endif + #if CYTHON_USE_MODULE_STATE + #endif + PyObject *__pyx_kp_u_; + PyObject *__pyx_kp_u_0; + PyObject *__pyx_kp_u_0123456789; + PyObject *__pyx_kp_u_0123456789ABCDEFabcdef; + PyObject *__pyx_n_u_ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef; + PyObject *__pyx_n_s_ANONYMOUS_BLOCK; + PyObject *__pyx_n_u_ANONYMOUS_BLOCK; + PyObject *__pyx_kp_s_A_Lexer_that_follows_include_sta; + PyObject *__pyx_kp_u_A_Za_z_0_9; + PyObject *__pyx_n_s_CHAR_DIGIT; + PyObject *__pyx_n_s_CHAR_HEXDIGIT; + PyObject *__pyx_n_s_CHAR_LETTER; + PyObject *__pyx_n_s_CHAR_NAME_CONTINUATION; + PyObject *__pyx_n_s_CHAR_NAME_START; + PyObject *__pyx_n_s_CHAR_NEWLINE; + PyObject *__pyx_n_s_CHAR_SYMBOL; + PyObject *__pyx_n_s_CHAR_WHITESPACE; + PyObject *__pyx_n_s_CID; + PyObject *__pyx_n_u_CID; + PyObject *__pyx_n_s_COMMENT; + PyObject *__pyx_n_u_COMMENT; + PyObject *__pyx_kp_u_Expected_after_file_name; + PyObject *__pyx_kp_u_Expected_before_file_name; + PyObject *__pyx_kp_u_Expected_file_name; + PyObject *__pyx_kp_u_Expected_glyph_class_name; + PyObject *__pyx_kp_u_Expected_s_to_terminate_anonymou; + PyObject *__pyx_kp_u_Expected_to_terminate_string; + PyObject *__pyx_n_s_FILENAME; + PyObject *__pyx_n_u_FILENAME; + PyObject *__pyx_n_s_FLOAT; + PyObject *__pyx_n_u_FLOAT; + PyObject *__pyx_n_s_FeatureLibError; + PyObject *__pyx_n_s_FeatureLibLocation; + PyObject *__pyx_n_s_FileNotFoundError; + PyObject *__pyx_n_s_GLYPHCLASS; + PyObject *__pyx_n_u_GLYPHCLASS; + PyObject *__pyx_kp_u_Glyph_class_names_must_consist_o; + PyObject *__pyx_n_s_HEXADECIMAL; + PyObject *__pyx_n_u_HEXADECIMAL; + PyObject *__pyx_n_s_ImportError; + PyObject *__pyx_n_s_IncludedFeaNotFound; + PyObject *__pyx_n_s_IncludingLexer; + PyObject *__pyx_n_s_IncludingLexer___init; + PyObject *__pyx_n_s_IncludingLexer___iter; + PyObject *__pyx_n_s_IncludingLexer___next; + PyObject *__pyx_n_s_IncludingLexer_make_lexer; + PyObject *__pyx_n_s_IncludingLexer_next; + PyObject *__pyx_n_s_IncludingLexer_scan_anonymous_bl; + PyObject *__pyx_n_s_Lexer; + PyObject *__pyx_n_s_Lexer___init; + PyObject *__pyx_n_s_Lexer___iter; + PyObject *__pyx_n_s_Lexer___next; + PyObject *__pyx_n_s_Lexer_location; + PyObject *__pyx_n_s_Lexer_next; + PyObject *__pyx_n_s_Lexer_next_2; + PyObject *__pyx_n_s_Lexer_scan_anonymous_block; + PyObject *__pyx_n_s_Lexer_scan_over; + PyObject *__pyx_n_s_Lexer_scan_until; + PyObject *__pyx_kp_s_Lexer_that_does_not_follow_inclu; + PyObject *__pyx_kp_s_Lib_fontTools_feaLib_lexer_py; + PyObject *__pyx_n_s_MODE_FILENAME; + PyObject *__pyx_n_s_MODE_NORMAL; + PyObject *__pyx_n_s_NAME; + PyObject *__pyx_n_u_NAME; + PyObject *__pyx_n_s_NEWLINE; + PyObject *__pyx_n_u_NEWLINE; + PyObject *__pyx_n_u_NORMAL; + PyObject *__pyx_n_s_NUMBER; + PyObject *__pyx_n_u_NUMBER; + PyObject *__pyx_n_s_NUMBERS; + PyObject *__pyx_n_s_NonIncludingLexer; + PyObject *__pyx_n_s_NonIncludingLexer___next; + PyObject *__pyx_n_s_OCTAL; + PyObject *__pyx_n_u_OCTAL; + PyObject *__pyx_n_s_RE_GLYPHCLASS; + PyObject *__pyx_n_s_STRING; + PyObject *__pyx_n_u_STRING; + 
PyObject *__pyx_n_s_SYMBOL; + PyObject *__pyx_n_u_SYMBOL; + PyObject *__pyx_n_s_StopIteration; + PyObject *__pyx_kp_u_Too_many_recursive_includes; + PyObject *__pyx_kp_u_Unexpected_character_r; + PyObject *__pyx_kp_u__10; + PyObject *__pyx_kp_u__11; + PyObject *__pyx_kp_u__12; + PyObject *__pyx_n_s__13; + PyObject *__pyx_kp_u__16; + PyObject *__pyx_kp_u__17; + PyObject *__pyx_kp_u__18; + PyObject *__pyx_kp_u__19; + PyObject *__pyx_kp_u__2; + PyObject *__pyx_kp_u__20; + PyObject *__pyx_kp_u__3; + PyObject *__pyx_kp_u__4; + PyObject *__pyx_kp_u__5; + PyObject *__pyx_n_s__51; + PyObject *__pyx_kp_u__6; + PyObject *__pyx_kp_u__7; + PyObject *__pyx_kp_u__8; + PyObject *__pyx_kp_u__9; + PyObject *__pyx_n_s_append; + PyObject *__pyx_n_s_asyncio_coroutines; + PyObject *__pyx_n_s_class_getitem; + PyObject *__pyx_n_s_cline_in_traceback; + PyObject *__pyx_n_s_close; + PyObject *__pyx_n_s_closing; + PyObject *__pyx_n_s_column; + PyObject *__pyx_n_s_compile; + PyObject *__pyx_n_s_cur_char; + PyObject *__pyx_n_s_curpath; + PyObject *__pyx_n_s_data; + PyObject *__pyx_n_s_dict; + PyObject *__pyx_n_s_dirname; + PyObject *__pyx_n_s_doc; + PyObject *__pyx_n_s_encoding; + PyObject *__pyx_n_s_err; + PyObject *__pyx_n_s_featurefile; + PyObject *__pyx_n_s_featurefilepath; + PyObject *__pyx_kp_u_features; + PyObject *__pyx_n_s_file_or_path; + PyObject *__pyx_n_s_filename; + PyObject *__pyx_n_s_filename_2; + PyObject *__pyx_n_s_fileobj; + PyObject *__pyx_n_s_fname_location; + PyObject *__pyx_n_s_fname_token; + PyObject *__pyx_n_s_fname_type; + PyObject *__pyx_n_s_fontTools_feaLib_error; + PyObject *__pyx_n_s_fontTools_feaLib_lexer; + PyObject *__pyx_n_s_fontTools_feaLib_location; + PyObject *__pyx_n_s_getcwd; + PyObject *__pyx_n_s_glyphclass; + PyObject *__pyx_n_s_import; + PyObject *__pyx_n_u_include; + PyObject *__pyx_n_s_includeDir; + PyObject *__pyx_n_s_init; + PyObject *__pyx_n_s_init_subclass; + PyObject *__pyx_n_s_initializing; + PyObject *__pyx_n_s_is_coroutine; + PyObject *__pyx_n_s_isabs; + PyObject *__pyx_n_s_iter; + PyObject *__pyx_n_s_join; + PyObject *__pyx_n_s_lexer; + PyObject *__pyx_n_s_lexers; + PyObject *__pyx_n_s_limit; + PyObject *__pyx_n_s_line; + PyObject *__pyx_n_s_line_start; + PyObject *__pyx_n_s_location; + PyObject *__pyx_n_s_location_2; + PyObject *__pyx_n_s_main; + PyObject *__pyx_n_s_make_lexer; + PyObject *__pyx_n_s_match; + PyObject *__pyx_n_s_maxsplit; + PyObject *__pyx_n_s_metaclass; + PyObject *__pyx_n_s_mode; + PyObject *__pyx_n_s_module; + PyObject *__pyx_n_s_mro_entries; + PyObject *__pyx_n_u_name; + PyObject *__pyx_n_s_name_2; + PyObject *__pyx_n_s_next; + PyObject *__pyx_n_s_next_2; + PyObject *__pyx_n_s_next_3; + PyObject *__pyx_n_s_next_char; + PyObject *__pyx_n_s_object; + PyObject *__pyx_n_s_open; + PyObject *__pyx_n_s_os; + PyObject *__pyx_n_s_p; + PyObject *__pyx_n_s_path; + PyObject *__pyx_n_s_pop; + PyObject *__pyx_n_s_pos; + PyObject *__pyx_n_s_prepare; + PyObject *__pyx_n_s_qualname; + PyObject *__pyx_n_u_r; + PyObject *__pyx_n_s_re; + PyObject *__pyx_n_s_read; + PyObject *__pyx_n_u_read; + PyObject *__pyx_n_s_regexp; + PyObject *__pyx_kp_u_s; + PyObject *__pyx_kp_u_s_2; + PyObject *__pyx_n_s_scan_anonymous_block; + PyObject *__pyx_n_s_scan_over; + PyObject *__pyx_n_s_scan_until; + PyObject *__pyx_n_s_self; + PyObject *__pyx_n_s_set_name; + PyObject *__pyx_n_s_spec; + PyObject *__pyx_n_s_split; + PyObject *__pyx_n_s_start; + PyObject *__pyx_n_s_staticmethod; + PyObject *__pyx_n_s_stop_at; + PyObject *__pyx_n_s_string; + PyObject *__pyx_n_s_strip; + PyObject 
*__pyx_n_s_sub; + PyObject *__pyx_n_s_super; + PyObject *__pyx_n_s_tag; + PyObject *__pyx_n_s_test; + PyObject *__pyx_n_s_text; + PyObject *__pyx_n_s_text_2; + PyObject *__pyx_n_s_text_length; + PyObject *__pyx_n_s_token; + PyObject *__pyx_n_s_token_type; + PyObject *__pyx_kp_u_utf_8_sig; + PyObject *__pyx_n_s_valid; + PyObject *__pyx_n_u_xX; + PyObject *__pyx_int_0; + PyObject *__pyx_int_1; + PyObject *__pyx_int_2; + PyObject *__pyx_int_8; + PyObject *__pyx_int_10; + PyObject *__pyx_int_16; + PyObject *__pyx_tuple__14; + PyObject *__pyx_tuple__15; + PyObject *__pyx_tuple__21; + PyObject *__pyx_tuple__23; + PyObject *__pyx_tuple__26; + PyObject *__pyx_tuple__28; + PyObject *__pyx_tuple__30; + PyObject *__pyx_tuple__32; + PyObject *__pyx_tuple__34; + PyObject *__pyx_tuple__36; + PyObject *__pyx_tuple__38; + PyObject *__pyx_tuple__39; + PyObject *__pyx_tuple__40; + PyObject *__pyx_tuple__44; + PyObject *__pyx_tuple__46; + PyObject *__pyx_tuple__48; + PyObject *__pyx_codeobj__22; + PyObject *__pyx_codeobj__24; + PyObject *__pyx_codeobj__25; + PyObject *__pyx_codeobj__27; + PyObject *__pyx_codeobj__29; + PyObject *__pyx_codeobj__31; + PyObject *__pyx_codeobj__33; + PyObject *__pyx_codeobj__35; + PyObject *__pyx_codeobj__37; + PyObject *__pyx_codeobj__41; + PyObject *__pyx_codeobj__42; + PyObject *__pyx_codeobj__43; + PyObject *__pyx_codeobj__45; + PyObject *__pyx_codeobj__47; + PyObject *__pyx_codeobj__49; + PyObject *__pyx_codeobj__50; +} __pyx_mstate; + +#if CYTHON_USE_MODULE_STATE +#ifdef __cplusplus +namespace { + extern struct PyModuleDef __pyx_moduledef; +} /* anonymous namespace */ +#else +static struct PyModuleDef __pyx_moduledef; +#endif + +#define __pyx_mstate(o) ((__pyx_mstate *)__Pyx_PyModule_GetState(o)) + +#define __pyx_mstate_global (__pyx_mstate(PyState_FindModule(&__pyx_moduledef))) + +#define __pyx_m (PyState_FindModule(&__pyx_moduledef)) +#else +static __pyx_mstate __pyx_mstate_global_static = +#ifdef __cplusplus + {}; +#else + {0}; +#endif +static __pyx_mstate *__pyx_mstate_global = &__pyx_mstate_global_static; +#endif +/* #### Code section: module_state_clear ### */ +#if CYTHON_USE_MODULE_STATE +static int __pyx_m_clear(PyObject *m) { + __pyx_mstate *clear_module_state = __pyx_mstate(m); + if (!clear_module_state) return 0; + Py_CLEAR(clear_module_state->__pyx_d); + Py_CLEAR(clear_module_state->__pyx_b); + Py_CLEAR(clear_module_state->__pyx_cython_runtime); + Py_CLEAR(clear_module_state->__pyx_empty_tuple); + Py_CLEAR(clear_module_state->__pyx_empty_bytes); + Py_CLEAR(clear_module_state->__pyx_empty_unicode); + #ifdef __Pyx_CyFunction_USED + Py_CLEAR(clear_module_state->__pyx_CyFunctionType); + #endif + #ifdef __Pyx_FusedFunction_USED + Py_CLEAR(clear_module_state->__pyx_FusedFunctionType); + #endif + Py_CLEAR(clear_module_state->__pyx_kp_u_); + Py_CLEAR(clear_module_state->__pyx_kp_u_0); + Py_CLEAR(clear_module_state->__pyx_kp_u_0123456789); + Py_CLEAR(clear_module_state->__pyx_kp_u_0123456789ABCDEFabcdef); + Py_CLEAR(clear_module_state->__pyx_n_u_ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef); + Py_CLEAR(clear_module_state->__pyx_n_s_ANONYMOUS_BLOCK); + Py_CLEAR(clear_module_state->__pyx_n_u_ANONYMOUS_BLOCK); + Py_CLEAR(clear_module_state->__pyx_kp_s_A_Lexer_that_follows_include_sta); + Py_CLEAR(clear_module_state->__pyx_kp_u_A_Za_z_0_9); + Py_CLEAR(clear_module_state->__pyx_n_s_CHAR_DIGIT); + Py_CLEAR(clear_module_state->__pyx_n_s_CHAR_HEXDIGIT); + Py_CLEAR(clear_module_state->__pyx_n_s_CHAR_LETTER); + Py_CLEAR(clear_module_state->__pyx_n_s_CHAR_NAME_CONTINUATION); + 
Py_CLEAR(clear_module_state->__pyx_n_s_CHAR_NAME_START); + Py_CLEAR(clear_module_state->__pyx_n_s_CHAR_NEWLINE); + Py_CLEAR(clear_module_state->__pyx_n_s_CHAR_SYMBOL); + Py_CLEAR(clear_module_state->__pyx_n_s_CHAR_WHITESPACE); + Py_CLEAR(clear_module_state->__pyx_n_s_CID); + Py_CLEAR(clear_module_state->__pyx_n_u_CID); + Py_CLEAR(clear_module_state->__pyx_n_s_COMMENT); + Py_CLEAR(clear_module_state->__pyx_n_u_COMMENT); + Py_CLEAR(clear_module_state->__pyx_kp_u_Expected_after_file_name); + Py_CLEAR(clear_module_state->__pyx_kp_u_Expected_before_file_name); + Py_CLEAR(clear_module_state->__pyx_kp_u_Expected_file_name); + Py_CLEAR(clear_module_state->__pyx_kp_u_Expected_glyph_class_name); + Py_CLEAR(clear_module_state->__pyx_kp_u_Expected_s_to_terminate_anonymou); + Py_CLEAR(clear_module_state->__pyx_kp_u_Expected_to_terminate_string); + Py_CLEAR(clear_module_state->__pyx_n_s_FILENAME); + Py_CLEAR(clear_module_state->__pyx_n_u_FILENAME); + Py_CLEAR(clear_module_state->__pyx_n_s_FLOAT); + Py_CLEAR(clear_module_state->__pyx_n_u_FLOAT); + Py_CLEAR(clear_module_state->__pyx_n_s_FeatureLibError); + Py_CLEAR(clear_module_state->__pyx_n_s_FeatureLibLocation); + Py_CLEAR(clear_module_state->__pyx_n_s_FileNotFoundError); + Py_CLEAR(clear_module_state->__pyx_n_s_GLYPHCLASS); + Py_CLEAR(clear_module_state->__pyx_n_u_GLYPHCLASS); + Py_CLEAR(clear_module_state->__pyx_kp_u_Glyph_class_names_must_consist_o); + Py_CLEAR(clear_module_state->__pyx_n_s_HEXADECIMAL); + Py_CLEAR(clear_module_state->__pyx_n_u_HEXADECIMAL); + Py_CLEAR(clear_module_state->__pyx_n_s_ImportError); + Py_CLEAR(clear_module_state->__pyx_n_s_IncludedFeaNotFound); + Py_CLEAR(clear_module_state->__pyx_n_s_IncludingLexer); + Py_CLEAR(clear_module_state->__pyx_n_s_IncludingLexer___init); + Py_CLEAR(clear_module_state->__pyx_n_s_IncludingLexer___iter); + Py_CLEAR(clear_module_state->__pyx_n_s_IncludingLexer___next); + Py_CLEAR(clear_module_state->__pyx_n_s_IncludingLexer_make_lexer); + Py_CLEAR(clear_module_state->__pyx_n_s_IncludingLexer_next); + Py_CLEAR(clear_module_state->__pyx_n_s_IncludingLexer_scan_anonymous_bl); + Py_CLEAR(clear_module_state->__pyx_n_s_Lexer); + Py_CLEAR(clear_module_state->__pyx_n_s_Lexer___init); + Py_CLEAR(clear_module_state->__pyx_n_s_Lexer___iter); + Py_CLEAR(clear_module_state->__pyx_n_s_Lexer___next); + Py_CLEAR(clear_module_state->__pyx_n_s_Lexer_location); + Py_CLEAR(clear_module_state->__pyx_n_s_Lexer_next); + Py_CLEAR(clear_module_state->__pyx_n_s_Lexer_next_2); + Py_CLEAR(clear_module_state->__pyx_n_s_Lexer_scan_anonymous_block); + Py_CLEAR(clear_module_state->__pyx_n_s_Lexer_scan_over); + Py_CLEAR(clear_module_state->__pyx_n_s_Lexer_scan_until); + Py_CLEAR(clear_module_state->__pyx_kp_s_Lexer_that_does_not_follow_inclu); + Py_CLEAR(clear_module_state->__pyx_kp_s_Lib_fontTools_feaLib_lexer_py); + Py_CLEAR(clear_module_state->__pyx_n_s_MODE_FILENAME); + Py_CLEAR(clear_module_state->__pyx_n_s_MODE_NORMAL); + Py_CLEAR(clear_module_state->__pyx_n_s_NAME); + Py_CLEAR(clear_module_state->__pyx_n_u_NAME); + Py_CLEAR(clear_module_state->__pyx_n_s_NEWLINE); + Py_CLEAR(clear_module_state->__pyx_n_u_NEWLINE); + Py_CLEAR(clear_module_state->__pyx_n_u_NORMAL); + Py_CLEAR(clear_module_state->__pyx_n_s_NUMBER); + Py_CLEAR(clear_module_state->__pyx_n_u_NUMBER); + Py_CLEAR(clear_module_state->__pyx_n_s_NUMBERS); + Py_CLEAR(clear_module_state->__pyx_n_s_NonIncludingLexer); + Py_CLEAR(clear_module_state->__pyx_n_s_NonIncludingLexer___next); + Py_CLEAR(clear_module_state->__pyx_n_s_OCTAL); + 
Py_CLEAR(clear_module_state->__pyx_n_u_OCTAL); + Py_CLEAR(clear_module_state->__pyx_n_s_RE_GLYPHCLASS); + Py_CLEAR(clear_module_state->__pyx_n_s_STRING); + Py_CLEAR(clear_module_state->__pyx_n_u_STRING); + Py_CLEAR(clear_module_state->__pyx_n_s_SYMBOL); + Py_CLEAR(clear_module_state->__pyx_n_u_SYMBOL); + Py_CLEAR(clear_module_state->__pyx_n_s_StopIteration); + Py_CLEAR(clear_module_state->__pyx_kp_u_Too_many_recursive_includes); + Py_CLEAR(clear_module_state->__pyx_kp_u_Unexpected_character_r); + Py_CLEAR(clear_module_state->__pyx_kp_u__10); + Py_CLEAR(clear_module_state->__pyx_kp_u__11); + Py_CLEAR(clear_module_state->__pyx_kp_u__12); + Py_CLEAR(clear_module_state->__pyx_n_s__13); + Py_CLEAR(clear_module_state->__pyx_kp_u__16); + Py_CLEAR(clear_module_state->__pyx_kp_u__17); + Py_CLEAR(clear_module_state->__pyx_kp_u__18); + Py_CLEAR(clear_module_state->__pyx_kp_u__19); + Py_CLEAR(clear_module_state->__pyx_kp_u__2); + Py_CLEAR(clear_module_state->__pyx_kp_u__20); + Py_CLEAR(clear_module_state->__pyx_kp_u__3); + Py_CLEAR(clear_module_state->__pyx_kp_u__4); + Py_CLEAR(clear_module_state->__pyx_kp_u__5); + Py_CLEAR(clear_module_state->__pyx_n_s__51); + Py_CLEAR(clear_module_state->__pyx_kp_u__6); + Py_CLEAR(clear_module_state->__pyx_kp_u__7); + Py_CLEAR(clear_module_state->__pyx_kp_u__8); + Py_CLEAR(clear_module_state->__pyx_kp_u__9); + Py_CLEAR(clear_module_state->__pyx_n_s_append); + Py_CLEAR(clear_module_state->__pyx_n_s_asyncio_coroutines); + Py_CLEAR(clear_module_state->__pyx_n_s_class_getitem); + Py_CLEAR(clear_module_state->__pyx_n_s_cline_in_traceback); + Py_CLEAR(clear_module_state->__pyx_n_s_close); + Py_CLEAR(clear_module_state->__pyx_n_s_closing); + Py_CLEAR(clear_module_state->__pyx_n_s_column); + Py_CLEAR(clear_module_state->__pyx_n_s_compile); + Py_CLEAR(clear_module_state->__pyx_n_s_cur_char); + Py_CLEAR(clear_module_state->__pyx_n_s_curpath); + Py_CLEAR(clear_module_state->__pyx_n_s_data); + Py_CLEAR(clear_module_state->__pyx_n_s_dict); + Py_CLEAR(clear_module_state->__pyx_n_s_dirname); + Py_CLEAR(clear_module_state->__pyx_n_s_doc); + Py_CLEAR(clear_module_state->__pyx_n_s_encoding); + Py_CLEAR(clear_module_state->__pyx_n_s_err); + Py_CLEAR(clear_module_state->__pyx_n_s_featurefile); + Py_CLEAR(clear_module_state->__pyx_n_s_featurefilepath); + Py_CLEAR(clear_module_state->__pyx_kp_u_features); + Py_CLEAR(clear_module_state->__pyx_n_s_file_or_path); + Py_CLEAR(clear_module_state->__pyx_n_s_filename); + Py_CLEAR(clear_module_state->__pyx_n_s_filename_2); + Py_CLEAR(clear_module_state->__pyx_n_s_fileobj); + Py_CLEAR(clear_module_state->__pyx_n_s_fname_location); + Py_CLEAR(clear_module_state->__pyx_n_s_fname_token); + Py_CLEAR(clear_module_state->__pyx_n_s_fname_type); + Py_CLEAR(clear_module_state->__pyx_n_s_fontTools_feaLib_error); + Py_CLEAR(clear_module_state->__pyx_n_s_fontTools_feaLib_lexer); + Py_CLEAR(clear_module_state->__pyx_n_s_fontTools_feaLib_location); + Py_CLEAR(clear_module_state->__pyx_n_s_getcwd); + Py_CLEAR(clear_module_state->__pyx_n_s_glyphclass); + Py_CLEAR(clear_module_state->__pyx_n_s_import); + Py_CLEAR(clear_module_state->__pyx_n_u_include); + Py_CLEAR(clear_module_state->__pyx_n_s_includeDir); + Py_CLEAR(clear_module_state->__pyx_n_s_init); + Py_CLEAR(clear_module_state->__pyx_n_s_init_subclass); + Py_CLEAR(clear_module_state->__pyx_n_s_initializing); + Py_CLEAR(clear_module_state->__pyx_n_s_is_coroutine); + Py_CLEAR(clear_module_state->__pyx_n_s_isabs); + Py_CLEAR(clear_module_state->__pyx_n_s_iter); + Py_CLEAR(clear_module_state->__pyx_n_s_join); + 
Py_CLEAR(clear_module_state->__pyx_n_s_lexer); + Py_CLEAR(clear_module_state->__pyx_n_s_lexers); + Py_CLEAR(clear_module_state->__pyx_n_s_limit); + Py_CLEAR(clear_module_state->__pyx_n_s_line); + Py_CLEAR(clear_module_state->__pyx_n_s_line_start); + Py_CLEAR(clear_module_state->__pyx_n_s_location); + Py_CLEAR(clear_module_state->__pyx_n_s_location_2); + Py_CLEAR(clear_module_state->__pyx_n_s_main); + Py_CLEAR(clear_module_state->__pyx_n_s_make_lexer); + Py_CLEAR(clear_module_state->__pyx_n_s_match); + Py_CLEAR(clear_module_state->__pyx_n_s_maxsplit); + Py_CLEAR(clear_module_state->__pyx_n_s_metaclass); + Py_CLEAR(clear_module_state->__pyx_n_s_mode); + Py_CLEAR(clear_module_state->__pyx_n_s_module); + Py_CLEAR(clear_module_state->__pyx_n_s_mro_entries); + Py_CLEAR(clear_module_state->__pyx_n_u_name); + Py_CLEAR(clear_module_state->__pyx_n_s_name_2); + Py_CLEAR(clear_module_state->__pyx_n_s_next); + Py_CLEAR(clear_module_state->__pyx_n_s_next_2); + Py_CLEAR(clear_module_state->__pyx_n_s_next_3); + Py_CLEAR(clear_module_state->__pyx_n_s_next_char); + Py_CLEAR(clear_module_state->__pyx_n_s_object); + Py_CLEAR(clear_module_state->__pyx_n_s_open); + Py_CLEAR(clear_module_state->__pyx_n_s_os); + Py_CLEAR(clear_module_state->__pyx_n_s_p); + Py_CLEAR(clear_module_state->__pyx_n_s_path); + Py_CLEAR(clear_module_state->__pyx_n_s_pop); + Py_CLEAR(clear_module_state->__pyx_n_s_pos); + Py_CLEAR(clear_module_state->__pyx_n_s_prepare); + Py_CLEAR(clear_module_state->__pyx_n_s_qualname); + Py_CLEAR(clear_module_state->__pyx_n_u_r); + Py_CLEAR(clear_module_state->__pyx_n_s_re); + Py_CLEAR(clear_module_state->__pyx_n_s_read); + Py_CLEAR(clear_module_state->__pyx_n_u_read); + Py_CLEAR(clear_module_state->__pyx_n_s_regexp); + Py_CLEAR(clear_module_state->__pyx_kp_u_s); + Py_CLEAR(clear_module_state->__pyx_kp_u_s_2); + Py_CLEAR(clear_module_state->__pyx_n_s_scan_anonymous_block); + Py_CLEAR(clear_module_state->__pyx_n_s_scan_over); + Py_CLEAR(clear_module_state->__pyx_n_s_scan_until); + Py_CLEAR(clear_module_state->__pyx_n_s_self); + Py_CLEAR(clear_module_state->__pyx_n_s_set_name); + Py_CLEAR(clear_module_state->__pyx_n_s_spec); + Py_CLEAR(clear_module_state->__pyx_n_s_split); + Py_CLEAR(clear_module_state->__pyx_n_s_start); + Py_CLEAR(clear_module_state->__pyx_n_s_staticmethod); + Py_CLEAR(clear_module_state->__pyx_n_s_stop_at); + Py_CLEAR(clear_module_state->__pyx_n_s_string); + Py_CLEAR(clear_module_state->__pyx_n_s_strip); + Py_CLEAR(clear_module_state->__pyx_n_s_sub); + Py_CLEAR(clear_module_state->__pyx_n_s_super); + Py_CLEAR(clear_module_state->__pyx_n_s_tag); + Py_CLEAR(clear_module_state->__pyx_n_s_test); + Py_CLEAR(clear_module_state->__pyx_n_s_text); + Py_CLEAR(clear_module_state->__pyx_n_s_text_2); + Py_CLEAR(clear_module_state->__pyx_n_s_text_length); + Py_CLEAR(clear_module_state->__pyx_n_s_token); + Py_CLEAR(clear_module_state->__pyx_n_s_token_type); + Py_CLEAR(clear_module_state->__pyx_kp_u_utf_8_sig); + Py_CLEAR(clear_module_state->__pyx_n_s_valid); + Py_CLEAR(clear_module_state->__pyx_n_u_xX); + Py_CLEAR(clear_module_state->__pyx_int_0); + Py_CLEAR(clear_module_state->__pyx_int_1); + Py_CLEAR(clear_module_state->__pyx_int_2); + Py_CLEAR(clear_module_state->__pyx_int_8); + Py_CLEAR(clear_module_state->__pyx_int_10); + Py_CLEAR(clear_module_state->__pyx_int_16); + Py_CLEAR(clear_module_state->__pyx_tuple__14); + Py_CLEAR(clear_module_state->__pyx_tuple__15); + Py_CLEAR(clear_module_state->__pyx_tuple__21); + Py_CLEAR(clear_module_state->__pyx_tuple__23); + 
Py_CLEAR(clear_module_state->__pyx_tuple__26); + Py_CLEAR(clear_module_state->__pyx_tuple__28); + Py_CLEAR(clear_module_state->__pyx_tuple__30); + Py_CLEAR(clear_module_state->__pyx_tuple__32); + Py_CLEAR(clear_module_state->__pyx_tuple__34); + Py_CLEAR(clear_module_state->__pyx_tuple__36); + Py_CLEAR(clear_module_state->__pyx_tuple__38); + Py_CLEAR(clear_module_state->__pyx_tuple__39); + Py_CLEAR(clear_module_state->__pyx_tuple__40); + Py_CLEAR(clear_module_state->__pyx_tuple__44); + Py_CLEAR(clear_module_state->__pyx_tuple__46); + Py_CLEAR(clear_module_state->__pyx_tuple__48); + Py_CLEAR(clear_module_state->__pyx_codeobj__22); + Py_CLEAR(clear_module_state->__pyx_codeobj__24); + Py_CLEAR(clear_module_state->__pyx_codeobj__25); + Py_CLEAR(clear_module_state->__pyx_codeobj__27); + Py_CLEAR(clear_module_state->__pyx_codeobj__29); + Py_CLEAR(clear_module_state->__pyx_codeobj__31); + Py_CLEAR(clear_module_state->__pyx_codeobj__33); + Py_CLEAR(clear_module_state->__pyx_codeobj__35); + Py_CLEAR(clear_module_state->__pyx_codeobj__37); + Py_CLEAR(clear_module_state->__pyx_codeobj__41); + Py_CLEAR(clear_module_state->__pyx_codeobj__42); + Py_CLEAR(clear_module_state->__pyx_codeobj__43); + Py_CLEAR(clear_module_state->__pyx_codeobj__45); + Py_CLEAR(clear_module_state->__pyx_codeobj__47); + Py_CLEAR(clear_module_state->__pyx_codeobj__49); + Py_CLEAR(clear_module_state->__pyx_codeobj__50); + return 0; +} +#endif +/* #### Code section: module_state_traverse ### */ +#if CYTHON_USE_MODULE_STATE +static int __pyx_m_traverse(PyObject *m, visitproc visit, void *arg) { + __pyx_mstate *traverse_module_state = __pyx_mstate(m); + if (!traverse_module_state) return 0; + Py_VISIT(traverse_module_state->__pyx_d); + Py_VISIT(traverse_module_state->__pyx_b); + Py_VISIT(traverse_module_state->__pyx_cython_runtime); + Py_VISIT(traverse_module_state->__pyx_empty_tuple); + Py_VISIT(traverse_module_state->__pyx_empty_bytes); + Py_VISIT(traverse_module_state->__pyx_empty_unicode); + #ifdef __Pyx_CyFunction_USED + Py_VISIT(traverse_module_state->__pyx_CyFunctionType); + #endif + #ifdef __Pyx_FusedFunction_USED + Py_VISIT(traverse_module_state->__pyx_FusedFunctionType); + #endif + Py_VISIT(traverse_module_state->__pyx_kp_u_); + Py_VISIT(traverse_module_state->__pyx_kp_u_0); + Py_VISIT(traverse_module_state->__pyx_kp_u_0123456789); + Py_VISIT(traverse_module_state->__pyx_kp_u_0123456789ABCDEFabcdef); + Py_VISIT(traverse_module_state->__pyx_n_u_ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef); + Py_VISIT(traverse_module_state->__pyx_n_s_ANONYMOUS_BLOCK); + Py_VISIT(traverse_module_state->__pyx_n_u_ANONYMOUS_BLOCK); + Py_VISIT(traverse_module_state->__pyx_kp_s_A_Lexer_that_follows_include_sta); + Py_VISIT(traverse_module_state->__pyx_kp_u_A_Za_z_0_9); + Py_VISIT(traverse_module_state->__pyx_n_s_CHAR_DIGIT); + Py_VISIT(traverse_module_state->__pyx_n_s_CHAR_HEXDIGIT); + Py_VISIT(traverse_module_state->__pyx_n_s_CHAR_LETTER); + Py_VISIT(traverse_module_state->__pyx_n_s_CHAR_NAME_CONTINUATION); + Py_VISIT(traverse_module_state->__pyx_n_s_CHAR_NAME_START); + Py_VISIT(traverse_module_state->__pyx_n_s_CHAR_NEWLINE); + Py_VISIT(traverse_module_state->__pyx_n_s_CHAR_SYMBOL); + Py_VISIT(traverse_module_state->__pyx_n_s_CHAR_WHITESPACE); + Py_VISIT(traverse_module_state->__pyx_n_s_CID); + Py_VISIT(traverse_module_state->__pyx_n_u_CID); + Py_VISIT(traverse_module_state->__pyx_n_s_COMMENT); + Py_VISIT(traverse_module_state->__pyx_n_u_COMMENT); + Py_VISIT(traverse_module_state->__pyx_kp_u_Expected_after_file_name); + 
Py_VISIT(traverse_module_state->__pyx_kp_u_Expected_before_file_name); + Py_VISIT(traverse_module_state->__pyx_kp_u_Expected_file_name); + Py_VISIT(traverse_module_state->__pyx_kp_u_Expected_glyph_class_name); + Py_VISIT(traverse_module_state->__pyx_kp_u_Expected_s_to_terminate_anonymou); + Py_VISIT(traverse_module_state->__pyx_kp_u_Expected_to_terminate_string); + Py_VISIT(traverse_module_state->__pyx_n_s_FILENAME); + Py_VISIT(traverse_module_state->__pyx_n_u_FILENAME); + Py_VISIT(traverse_module_state->__pyx_n_s_FLOAT); + Py_VISIT(traverse_module_state->__pyx_n_u_FLOAT); + Py_VISIT(traverse_module_state->__pyx_n_s_FeatureLibError); + Py_VISIT(traverse_module_state->__pyx_n_s_FeatureLibLocation); + Py_VISIT(traverse_module_state->__pyx_n_s_FileNotFoundError); + Py_VISIT(traverse_module_state->__pyx_n_s_GLYPHCLASS); + Py_VISIT(traverse_module_state->__pyx_n_u_GLYPHCLASS); + Py_VISIT(traverse_module_state->__pyx_kp_u_Glyph_class_names_must_consist_o); + Py_VISIT(traverse_module_state->__pyx_n_s_HEXADECIMAL); + Py_VISIT(traverse_module_state->__pyx_n_u_HEXADECIMAL); + Py_VISIT(traverse_module_state->__pyx_n_s_ImportError); + Py_VISIT(traverse_module_state->__pyx_n_s_IncludedFeaNotFound); + Py_VISIT(traverse_module_state->__pyx_n_s_IncludingLexer); + Py_VISIT(traverse_module_state->__pyx_n_s_IncludingLexer___init); + Py_VISIT(traverse_module_state->__pyx_n_s_IncludingLexer___iter); + Py_VISIT(traverse_module_state->__pyx_n_s_IncludingLexer___next); + Py_VISIT(traverse_module_state->__pyx_n_s_IncludingLexer_make_lexer); + Py_VISIT(traverse_module_state->__pyx_n_s_IncludingLexer_next); + Py_VISIT(traverse_module_state->__pyx_n_s_IncludingLexer_scan_anonymous_bl); + Py_VISIT(traverse_module_state->__pyx_n_s_Lexer); + Py_VISIT(traverse_module_state->__pyx_n_s_Lexer___init); + Py_VISIT(traverse_module_state->__pyx_n_s_Lexer___iter); + Py_VISIT(traverse_module_state->__pyx_n_s_Lexer___next); + Py_VISIT(traverse_module_state->__pyx_n_s_Lexer_location); + Py_VISIT(traverse_module_state->__pyx_n_s_Lexer_next); + Py_VISIT(traverse_module_state->__pyx_n_s_Lexer_next_2); + Py_VISIT(traverse_module_state->__pyx_n_s_Lexer_scan_anonymous_block); + Py_VISIT(traverse_module_state->__pyx_n_s_Lexer_scan_over); + Py_VISIT(traverse_module_state->__pyx_n_s_Lexer_scan_until); + Py_VISIT(traverse_module_state->__pyx_kp_s_Lexer_that_does_not_follow_inclu); + Py_VISIT(traverse_module_state->__pyx_kp_s_Lib_fontTools_feaLib_lexer_py); + Py_VISIT(traverse_module_state->__pyx_n_s_MODE_FILENAME); + Py_VISIT(traverse_module_state->__pyx_n_s_MODE_NORMAL); + Py_VISIT(traverse_module_state->__pyx_n_s_NAME); + Py_VISIT(traverse_module_state->__pyx_n_u_NAME); + Py_VISIT(traverse_module_state->__pyx_n_s_NEWLINE); + Py_VISIT(traverse_module_state->__pyx_n_u_NEWLINE); + Py_VISIT(traverse_module_state->__pyx_n_u_NORMAL); + Py_VISIT(traverse_module_state->__pyx_n_s_NUMBER); + Py_VISIT(traverse_module_state->__pyx_n_u_NUMBER); + Py_VISIT(traverse_module_state->__pyx_n_s_NUMBERS); + Py_VISIT(traverse_module_state->__pyx_n_s_NonIncludingLexer); + Py_VISIT(traverse_module_state->__pyx_n_s_NonIncludingLexer___next); + Py_VISIT(traverse_module_state->__pyx_n_s_OCTAL); + Py_VISIT(traverse_module_state->__pyx_n_u_OCTAL); + Py_VISIT(traverse_module_state->__pyx_n_s_RE_GLYPHCLASS); + Py_VISIT(traverse_module_state->__pyx_n_s_STRING); + Py_VISIT(traverse_module_state->__pyx_n_u_STRING); + Py_VISIT(traverse_module_state->__pyx_n_s_SYMBOL); + Py_VISIT(traverse_module_state->__pyx_n_u_SYMBOL); + 
Py_VISIT(traverse_module_state->__pyx_n_s_StopIteration); + Py_VISIT(traverse_module_state->__pyx_kp_u_Too_many_recursive_includes); + Py_VISIT(traverse_module_state->__pyx_kp_u_Unexpected_character_r); + Py_VISIT(traverse_module_state->__pyx_kp_u__10); + Py_VISIT(traverse_module_state->__pyx_kp_u__11); + Py_VISIT(traverse_module_state->__pyx_kp_u__12); + Py_VISIT(traverse_module_state->__pyx_n_s__13); + Py_VISIT(traverse_module_state->__pyx_kp_u__16); + Py_VISIT(traverse_module_state->__pyx_kp_u__17); + Py_VISIT(traverse_module_state->__pyx_kp_u__18); + Py_VISIT(traverse_module_state->__pyx_kp_u__19); + Py_VISIT(traverse_module_state->__pyx_kp_u__2); + Py_VISIT(traverse_module_state->__pyx_kp_u__20); + Py_VISIT(traverse_module_state->__pyx_kp_u__3); + Py_VISIT(traverse_module_state->__pyx_kp_u__4); + Py_VISIT(traverse_module_state->__pyx_kp_u__5); + Py_VISIT(traverse_module_state->__pyx_n_s__51); + Py_VISIT(traverse_module_state->__pyx_kp_u__6); + Py_VISIT(traverse_module_state->__pyx_kp_u__7); + Py_VISIT(traverse_module_state->__pyx_kp_u__8); + Py_VISIT(traverse_module_state->__pyx_kp_u__9); + Py_VISIT(traverse_module_state->__pyx_n_s_append); + Py_VISIT(traverse_module_state->__pyx_n_s_asyncio_coroutines); + Py_VISIT(traverse_module_state->__pyx_n_s_class_getitem); + Py_VISIT(traverse_module_state->__pyx_n_s_cline_in_traceback); + Py_VISIT(traverse_module_state->__pyx_n_s_close); + Py_VISIT(traverse_module_state->__pyx_n_s_closing); + Py_VISIT(traverse_module_state->__pyx_n_s_column); + Py_VISIT(traverse_module_state->__pyx_n_s_compile); + Py_VISIT(traverse_module_state->__pyx_n_s_cur_char); + Py_VISIT(traverse_module_state->__pyx_n_s_curpath); + Py_VISIT(traverse_module_state->__pyx_n_s_data); + Py_VISIT(traverse_module_state->__pyx_n_s_dict); + Py_VISIT(traverse_module_state->__pyx_n_s_dirname); + Py_VISIT(traverse_module_state->__pyx_n_s_doc); + Py_VISIT(traverse_module_state->__pyx_n_s_encoding); + Py_VISIT(traverse_module_state->__pyx_n_s_err); + Py_VISIT(traverse_module_state->__pyx_n_s_featurefile); + Py_VISIT(traverse_module_state->__pyx_n_s_featurefilepath); + Py_VISIT(traverse_module_state->__pyx_kp_u_features); + Py_VISIT(traverse_module_state->__pyx_n_s_file_or_path); + Py_VISIT(traverse_module_state->__pyx_n_s_filename); + Py_VISIT(traverse_module_state->__pyx_n_s_filename_2); + Py_VISIT(traverse_module_state->__pyx_n_s_fileobj); + Py_VISIT(traverse_module_state->__pyx_n_s_fname_location); + Py_VISIT(traverse_module_state->__pyx_n_s_fname_token); + Py_VISIT(traverse_module_state->__pyx_n_s_fname_type); + Py_VISIT(traverse_module_state->__pyx_n_s_fontTools_feaLib_error); + Py_VISIT(traverse_module_state->__pyx_n_s_fontTools_feaLib_lexer); + Py_VISIT(traverse_module_state->__pyx_n_s_fontTools_feaLib_location); + Py_VISIT(traverse_module_state->__pyx_n_s_getcwd); + Py_VISIT(traverse_module_state->__pyx_n_s_glyphclass); + Py_VISIT(traverse_module_state->__pyx_n_s_import); + Py_VISIT(traverse_module_state->__pyx_n_u_include); + Py_VISIT(traverse_module_state->__pyx_n_s_includeDir); + Py_VISIT(traverse_module_state->__pyx_n_s_init); + Py_VISIT(traverse_module_state->__pyx_n_s_init_subclass); + Py_VISIT(traverse_module_state->__pyx_n_s_initializing); + Py_VISIT(traverse_module_state->__pyx_n_s_is_coroutine); + Py_VISIT(traverse_module_state->__pyx_n_s_isabs); + Py_VISIT(traverse_module_state->__pyx_n_s_iter); + Py_VISIT(traverse_module_state->__pyx_n_s_join); + Py_VISIT(traverse_module_state->__pyx_n_s_lexer); + Py_VISIT(traverse_module_state->__pyx_n_s_lexers); + 
Py_VISIT(traverse_module_state->__pyx_n_s_limit); + Py_VISIT(traverse_module_state->__pyx_n_s_line); + Py_VISIT(traverse_module_state->__pyx_n_s_line_start); + Py_VISIT(traverse_module_state->__pyx_n_s_location); + Py_VISIT(traverse_module_state->__pyx_n_s_location_2); + Py_VISIT(traverse_module_state->__pyx_n_s_main); + Py_VISIT(traverse_module_state->__pyx_n_s_make_lexer); + Py_VISIT(traverse_module_state->__pyx_n_s_match); + Py_VISIT(traverse_module_state->__pyx_n_s_maxsplit); + Py_VISIT(traverse_module_state->__pyx_n_s_metaclass); + Py_VISIT(traverse_module_state->__pyx_n_s_mode); + Py_VISIT(traverse_module_state->__pyx_n_s_module); + Py_VISIT(traverse_module_state->__pyx_n_s_mro_entries); + Py_VISIT(traverse_module_state->__pyx_n_u_name); + Py_VISIT(traverse_module_state->__pyx_n_s_name_2); + Py_VISIT(traverse_module_state->__pyx_n_s_next); + Py_VISIT(traverse_module_state->__pyx_n_s_next_2); + Py_VISIT(traverse_module_state->__pyx_n_s_next_3); + Py_VISIT(traverse_module_state->__pyx_n_s_next_char); + Py_VISIT(traverse_module_state->__pyx_n_s_object); + Py_VISIT(traverse_module_state->__pyx_n_s_open); + Py_VISIT(traverse_module_state->__pyx_n_s_os); + Py_VISIT(traverse_module_state->__pyx_n_s_p); + Py_VISIT(traverse_module_state->__pyx_n_s_path); + Py_VISIT(traverse_module_state->__pyx_n_s_pop); + Py_VISIT(traverse_module_state->__pyx_n_s_pos); + Py_VISIT(traverse_module_state->__pyx_n_s_prepare); + Py_VISIT(traverse_module_state->__pyx_n_s_qualname); + Py_VISIT(traverse_module_state->__pyx_n_u_r); + Py_VISIT(traverse_module_state->__pyx_n_s_re); + Py_VISIT(traverse_module_state->__pyx_n_s_read); + Py_VISIT(traverse_module_state->__pyx_n_u_read); + Py_VISIT(traverse_module_state->__pyx_n_s_regexp); + Py_VISIT(traverse_module_state->__pyx_kp_u_s); + Py_VISIT(traverse_module_state->__pyx_kp_u_s_2); + Py_VISIT(traverse_module_state->__pyx_n_s_scan_anonymous_block); + Py_VISIT(traverse_module_state->__pyx_n_s_scan_over); + Py_VISIT(traverse_module_state->__pyx_n_s_scan_until); + Py_VISIT(traverse_module_state->__pyx_n_s_self); + Py_VISIT(traverse_module_state->__pyx_n_s_set_name); + Py_VISIT(traverse_module_state->__pyx_n_s_spec); + Py_VISIT(traverse_module_state->__pyx_n_s_split); + Py_VISIT(traverse_module_state->__pyx_n_s_start); + Py_VISIT(traverse_module_state->__pyx_n_s_staticmethod); + Py_VISIT(traverse_module_state->__pyx_n_s_stop_at); + Py_VISIT(traverse_module_state->__pyx_n_s_string); + Py_VISIT(traverse_module_state->__pyx_n_s_strip); + Py_VISIT(traverse_module_state->__pyx_n_s_sub); + Py_VISIT(traverse_module_state->__pyx_n_s_super); + Py_VISIT(traverse_module_state->__pyx_n_s_tag); + Py_VISIT(traverse_module_state->__pyx_n_s_test); + Py_VISIT(traverse_module_state->__pyx_n_s_text); + Py_VISIT(traverse_module_state->__pyx_n_s_text_2); + Py_VISIT(traverse_module_state->__pyx_n_s_text_length); + Py_VISIT(traverse_module_state->__pyx_n_s_token); + Py_VISIT(traverse_module_state->__pyx_n_s_token_type); + Py_VISIT(traverse_module_state->__pyx_kp_u_utf_8_sig); + Py_VISIT(traverse_module_state->__pyx_n_s_valid); + Py_VISIT(traverse_module_state->__pyx_n_u_xX); + Py_VISIT(traverse_module_state->__pyx_int_0); + Py_VISIT(traverse_module_state->__pyx_int_1); + Py_VISIT(traverse_module_state->__pyx_int_2); + Py_VISIT(traverse_module_state->__pyx_int_8); + Py_VISIT(traverse_module_state->__pyx_int_10); + Py_VISIT(traverse_module_state->__pyx_int_16); + Py_VISIT(traverse_module_state->__pyx_tuple__14); + Py_VISIT(traverse_module_state->__pyx_tuple__15); + 
Py_VISIT(traverse_module_state->__pyx_tuple__21); + Py_VISIT(traverse_module_state->__pyx_tuple__23); + Py_VISIT(traverse_module_state->__pyx_tuple__26); + Py_VISIT(traverse_module_state->__pyx_tuple__28); + Py_VISIT(traverse_module_state->__pyx_tuple__30); + Py_VISIT(traverse_module_state->__pyx_tuple__32); + Py_VISIT(traverse_module_state->__pyx_tuple__34); + Py_VISIT(traverse_module_state->__pyx_tuple__36); + Py_VISIT(traverse_module_state->__pyx_tuple__38); + Py_VISIT(traverse_module_state->__pyx_tuple__39); + Py_VISIT(traverse_module_state->__pyx_tuple__40); + Py_VISIT(traverse_module_state->__pyx_tuple__44); + Py_VISIT(traverse_module_state->__pyx_tuple__46); + Py_VISIT(traverse_module_state->__pyx_tuple__48); + Py_VISIT(traverse_module_state->__pyx_codeobj__22); + Py_VISIT(traverse_module_state->__pyx_codeobj__24); + Py_VISIT(traverse_module_state->__pyx_codeobj__25); + Py_VISIT(traverse_module_state->__pyx_codeobj__27); + Py_VISIT(traverse_module_state->__pyx_codeobj__29); + Py_VISIT(traverse_module_state->__pyx_codeobj__31); + Py_VISIT(traverse_module_state->__pyx_codeobj__33); + Py_VISIT(traverse_module_state->__pyx_codeobj__35); + Py_VISIT(traverse_module_state->__pyx_codeobj__37); + Py_VISIT(traverse_module_state->__pyx_codeobj__41); + Py_VISIT(traverse_module_state->__pyx_codeobj__42); + Py_VISIT(traverse_module_state->__pyx_codeobj__43); + Py_VISIT(traverse_module_state->__pyx_codeobj__45); + Py_VISIT(traverse_module_state->__pyx_codeobj__47); + Py_VISIT(traverse_module_state->__pyx_codeobj__49); + Py_VISIT(traverse_module_state->__pyx_codeobj__50); + return 0; +} +#endif +/* #### Code section: module_state_defines ### */ +#define __pyx_d __pyx_mstate_global->__pyx_d +#define __pyx_b __pyx_mstate_global->__pyx_b +#define __pyx_cython_runtime __pyx_mstate_global->__pyx_cython_runtime +#define __pyx_empty_tuple __pyx_mstate_global->__pyx_empty_tuple +#define __pyx_empty_bytes __pyx_mstate_global->__pyx_empty_bytes +#define __pyx_empty_unicode __pyx_mstate_global->__pyx_empty_unicode +#ifdef __Pyx_CyFunction_USED +#define __pyx_CyFunctionType __pyx_mstate_global->__pyx_CyFunctionType +#endif +#ifdef __Pyx_FusedFunction_USED +#define __pyx_FusedFunctionType __pyx_mstate_global->__pyx_FusedFunctionType +#endif +#ifdef __Pyx_Generator_USED +#define __pyx_GeneratorType __pyx_mstate_global->__pyx_GeneratorType +#endif +#ifdef __Pyx_IterableCoroutine_USED +#define __pyx_IterableCoroutineType __pyx_mstate_global->__pyx_IterableCoroutineType +#endif +#ifdef __Pyx_Coroutine_USED +#define __pyx_CoroutineAwaitType __pyx_mstate_global->__pyx_CoroutineAwaitType +#endif +#ifdef __Pyx_Coroutine_USED +#define __pyx_CoroutineType __pyx_mstate_global->__pyx_CoroutineType +#endif +#if CYTHON_USE_MODULE_STATE +#endif +#if CYTHON_USE_MODULE_STATE +#endif +#define __pyx_kp_u_ __pyx_mstate_global->__pyx_kp_u_ +#define __pyx_kp_u_0 __pyx_mstate_global->__pyx_kp_u_0 +#define __pyx_kp_u_0123456789 __pyx_mstate_global->__pyx_kp_u_0123456789 +#define __pyx_kp_u_0123456789ABCDEFabcdef __pyx_mstate_global->__pyx_kp_u_0123456789ABCDEFabcdef +#define __pyx_n_u_ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef __pyx_mstate_global->__pyx_n_u_ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef +#define __pyx_n_s_ANONYMOUS_BLOCK __pyx_mstate_global->__pyx_n_s_ANONYMOUS_BLOCK +#define __pyx_n_u_ANONYMOUS_BLOCK __pyx_mstate_global->__pyx_n_u_ANONYMOUS_BLOCK +#define __pyx_kp_s_A_Lexer_that_follows_include_sta __pyx_mstate_global->__pyx_kp_s_A_Lexer_that_follows_include_sta +#define __pyx_kp_u_A_Za_z_0_9 __pyx_mstate_global->__pyx_kp_u_A_Za_z_0_9 
+#define __pyx_n_s_CHAR_DIGIT __pyx_mstate_global->__pyx_n_s_CHAR_DIGIT +#define __pyx_n_s_CHAR_HEXDIGIT __pyx_mstate_global->__pyx_n_s_CHAR_HEXDIGIT +#define __pyx_n_s_CHAR_LETTER __pyx_mstate_global->__pyx_n_s_CHAR_LETTER +#define __pyx_n_s_CHAR_NAME_CONTINUATION __pyx_mstate_global->__pyx_n_s_CHAR_NAME_CONTINUATION +#define __pyx_n_s_CHAR_NAME_START __pyx_mstate_global->__pyx_n_s_CHAR_NAME_START +#define __pyx_n_s_CHAR_NEWLINE __pyx_mstate_global->__pyx_n_s_CHAR_NEWLINE +#define __pyx_n_s_CHAR_SYMBOL __pyx_mstate_global->__pyx_n_s_CHAR_SYMBOL +#define __pyx_n_s_CHAR_WHITESPACE __pyx_mstate_global->__pyx_n_s_CHAR_WHITESPACE +#define __pyx_n_s_CID __pyx_mstate_global->__pyx_n_s_CID +#define __pyx_n_u_CID __pyx_mstate_global->__pyx_n_u_CID +#define __pyx_n_s_COMMENT __pyx_mstate_global->__pyx_n_s_COMMENT +#define __pyx_n_u_COMMENT __pyx_mstate_global->__pyx_n_u_COMMENT +#define __pyx_kp_u_Expected_after_file_name __pyx_mstate_global->__pyx_kp_u_Expected_after_file_name +#define __pyx_kp_u_Expected_before_file_name __pyx_mstate_global->__pyx_kp_u_Expected_before_file_name +#define __pyx_kp_u_Expected_file_name __pyx_mstate_global->__pyx_kp_u_Expected_file_name +#define __pyx_kp_u_Expected_glyph_class_name __pyx_mstate_global->__pyx_kp_u_Expected_glyph_class_name +#define __pyx_kp_u_Expected_s_to_terminate_anonymou __pyx_mstate_global->__pyx_kp_u_Expected_s_to_terminate_anonymou +#define __pyx_kp_u_Expected_to_terminate_string __pyx_mstate_global->__pyx_kp_u_Expected_to_terminate_string +#define __pyx_n_s_FILENAME __pyx_mstate_global->__pyx_n_s_FILENAME +#define __pyx_n_u_FILENAME __pyx_mstate_global->__pyx_n_u_FILENAME +#define __pyx_n_s_FLOAT __pyx_mstate_global->__pyx_n_s_FLOAT +#define __pyx_n_u_FLOAT __pyx_mstate_global->__pyx_n_u_FLOAT +#define __pyx_n_s_FeatureLibError __pyx_mstate_global->__pyx_n_s_FeatureLibError +#define __pyx_n_s_FeatureLibLocation __pyx_mstate_global->__pyx_n_s_FeatureLibLocation +#define __pyx_n_s_FileNotFoundError __pyx_mstate_global->__pyx_n_s_FileNotFoundError +#define __pyx_n_s_GLYPHCLASS __pyx_mstate_global->__pyx_n_s_GLYPHCLASS +#define __pyx_n_u_GLYPHCLASS __pyx_mstate_global->__pyx_n_u_GLYPHCLASS +#define __pyx_kp_u_Glyph_class_names_must_consist_o __pyx_mstate_global->__pyx_kp_u_Glyph_class_names_must_consist_o +#define __pyx_n_s_HEXADECIMAL __pyx_mstate_global->__pyx_n_s_HEXADECIMAL +#define __pyx_n_u_HEXADECIMAL __pyx_mstate_global->__pyx_n_u_HEXADECIMAL +#define __pyx_n_s_ImportError __pyx_mstate_global->__pyx_n_s_ImportError +#define __pyx_n_s_IncludedFeaNotFound __pyx_mstate_global->__pyx_n_s_IncludedFeaNotFound +#define __pyx_n_s_IncludingLexer __pyx_mstate_global->__pyx_n_s_IncludingLexer +#define __pyx_n_s_IncludingLexer___init __pyx_mstate_global->__pyx_n_s_IncludingLexer___init +#define __pyx_n_s_IncludingLexer___iter __pyx_mstate_global->__pyx_n_s_IncludingLexer___iter +#define __pyx_n_s_IncludingLexer___next __pyx_mstate_global->__pyx_n_s_IncludingLexer___next +#define __pyx_n_s_IncludingLexer_make_lexer __pyx_mstate_global->__pyx_n_s_IncludingLexer_make_lexer +#define __pyx_n_s_IncludingLexer_next __pyx_mstate_global->__pyx_n_s_IncludingLexer_next +#define __pyx_n_s_IncludingLexer_scan_anonymous_bl __pyx_mstate_global->__pyx_n_s_IncludingLexer_scan_anonymous_bl +#define __pyx_n_s_Lexer __pyx_mstate_global->__pyx_n_s_Lexer +#define __pyx_n_s_Lexer___init __pyx_mstate_global->__pyx_n_s_Lexer___init +#define __pyx_n_s_Lexer___iter __pyx_mstate_global->__pyx_n_s_Lexer___iter +#define __pyx_n_s_Lexer___next 
__pyx_mstate_global->__pyx_n_s_Lexer___next +#define __pyx_n_s_Lexer_location __pyx_mstate_global->__pyx_n_s_Lexer_location +#define __pyx_n_s_Lexer_next __pyx_mstate_global->__pyx_n_s_Lexer_next +#define __pyx_n_s_Lexer_next_2 __pyx_mstate_global->__pyx_n_s_Lexer_next_2 +#define __pyx_n_s_Lexer_scan_anonymous_block __pyx_mstate_global->__pyx_n_s_Lexer_scan_anonymous_block +#define __pyx_n_s_Lexer_scan_over __pyx_mstate_global->__pyx_n_s_Lexer_scan_over +#define __pyx_n_s_Lexer_scan_until __pyx_mstate_global->__pyx_n_s_Lexer_scan_until +#define __pyx_kp_s_Lexer_that_does_not_follow_inclu __pyx_mstate_global->__pyx_kp_s_Lexer_that_does_not_follow_inclu +#define __pyx_kp_s_Lib_fontTools_feaLib_lexer_py __pyx_mstate_global->__pyx_kp_s_Lib_fontTools_feaLib_lexer_py +#define __pyx_n_s_MODE_FILENAME __pyx_mstate_global->__pyx_n_s_MODE_FILENAME +#define __pyx_n_s_MODE_NORMAL __pyx_mstate_global->__pyx_n_s_MODE_NORMAL +#define __pyx_n_s_NAME __pyx_mstate_global->__pyx_n_s_NAME +#define __pyx_n_u_NAME __pyx_mstate_global->__pyx_n_u_NAME +#define __pyx_n_s_NEWLINE __pyx_mstate_global->__pyx_n_s_NEWLINE +#define __pyx_n_u_NEWLINE __pyx_mstate_global->__pyx_n_u_NEWLINE +#define __pyx_n_u_NORMAL __pyx_mstate_global->__pyx_n_u_NORMAL +#define __pyx_n_s_NUMBER __pyx_mstate_global->__pyx_n_s_NUMBER +#define __pyx_n_u_NUMBER __pyx_mstate_global->__pyx_n_u_NUMBER +#define __pyx_n_s_NUMBERS __pyx_mstate_global->__pyx_n_s_NUMBERS +#define __pyx_n_s_NonIncludingLexer __pyx_mstate_global->__pyx_n_s_NonIncludingLexer +#define __pyx_n_s_NonIncludingLexer___next __pyx_mstate_global->__pyx_n_s_NonIncludingLexer___next +#define __pyx_n_s_OCTAL __pyx_mstate_global->__pyx_n_s_OCTAL +#define __pyx_n_u_OCTAL __pyx_mstate_global->__pyx_n_u_OCTAL +#define __pyx_n_s_RE_GLYPHCLASS __pyx_mstate_global->__pyx_n_s_RE_GLYPHCLASS +#define __pyx_n_s_STRING __pyx_mstate_global->__pyx_n_s_STRING +#define __pyx_n_u_STRING __pyx_mstate_global->__pyx_n_u_STRING +#define __pyx_n_s_SYMBOL __pyx_mstate_global->__pyx_n_s_SYMBOL +#define __pyx_n_u_SYMBOL __pyx_mstate_global->__pyx_n_u_SYMBOL +#define __pyx_n_s_StopIteration __pyx_mstate_global->__pyx_n_s_StopIteration +#define __pyx_kp_u_Too_many_recursive_includes __pyx_mstate_global->__pyx_kp_u_Too_many_recursive_includes +#define __pyx_kp_u_Unexpected_character_r __pyx_mstate_global->__pyx_kp_u_Unexpected_character_r +#define __pyx_kp_u__10 __pyx_mstate_global->__pyx_kp_u__10 +#define __pyx_kp_u__11 __pyx_mstate_global->__pyx_kp_u__11 +#define __pyx_kp_u__12 __pyx_mstate_global->__pyx_kp_u__12 +#define __pyx_n_s__13 __pyx_mstate_global->__pyx_n_s__13 +#define __pyx_kp_u__16 __pyx_mstate_global->__pyx_kp_u__16 +#define __pyx_kp_u__17 __pyx_mstate_global->__pyx_kp_u__17 +#define __pyx_kp_u__18 __pyx_mstate_global->__pyx_kp_u__18 +#define __pyx_kp_u__19 __pyx_mstate_global->__pyx_kp_u__19 +#define __pyx_kp_u__2 __pyx_mstate_global->__pyx_kp_u__2 +#define __pyx_kp_u__20 __pyx_mstate_global->__pyx_kp_u__20 +#define __pyx_kp_u__3 __pyx_mstate_global->__pyx_kp_u__3 +#define __pyx_kp_u__4 __pyx_mstate_global->__pyx_kp_u__4 +#define __pyx_kp_u__5 __pyx_mstate_global->__pyx_kp_u__5 +#define __pyx_n_s__51 __pyx_mstate_global->__pyx_n_s__51 +#define __pyx_kp_u__6 __pyx_mstate_global->__pyx_kp_u__6 +#define __pyx_kp_u__7 __pyx_mstate_global->__pyx_kp_u__7 +#define __pyx_kp_u__8 __pyx_mstate_global->__pyx_kp_u__8 +#define __pyx_kp_u__9 __pyx_mstate_global->__pyx_kp_u__9 +#define __pyx_n_s_append __pyx_mstate_global->__pyx_n_s_append +#define __pyx_n_s_asyncio_coroutines 
__pyx_mstate_global->__pyx_n_s_asyncio_coroutines +#define __pyx_n_s_class_getitem __pyx_mstate_global->__pyx_n_s_class_getitem +#define __pyx_n_s_cline_in_traceback __pyx_mstate_global->__pyx_n_s_cline_in_traceback +#define __pyx_n_s_close __pyx_mstate_global->__pyx_n_s_close +#define __pyx_n_s_closing __pyx_mstate_global->__pyx_n_s_closing +#define __pyx_n_s_column __pyx_mstate_global->__pyx_n_s_column +#define __pyx_n_s_compile __pyx_mstate_global->__pyx_n_s_compile +#define __pyx_n_s_cur_char __pyx_mstate_global->__pyx_n_s_cur_char +#define __pyx_n_s_curpath __pyx_mstate_global->__pyx_n_s_curpath +#define __pyx_n_s_data __pyx_mstate_global->__pyx_n_s_data +#define __pyx_n_s_dict __pyx_mstate_global->__pyx_n_s_dict +#define __pyx_n_s_dirname __pyx_mstate_global->__pyx_n_s_dirname +#define __pyx_n_s_doc __pyx_mstate_global->__pyx_n_s_doc +#define __pyx_n_s_encoding __pyx_mstate_global->__pyx_n_s_encoding +#define __pyx_n_s_err __pyx_mstate_global->__pyx_n_s_err +#define __pyx_n_s_featurefile __pyx_mstate_global->__pyx_n_s_featurefile +#define __pyx_n_s_featurefilepath __pyx_mstate_global->__pyx_n_s_featurefilepath +#define __pyx_kp_u_features __pyx_mstate_global->__pyx_kp_u_features +#define __pyx_n_s_file_or_path __pyx_mstate_global->__pyx_n_s_file_or_path +#define __pyx_n_s_filename __pyx_mstate_global->__pyx_n_s_filename +#define __pyx_n_s_filename_2 __pyx_mstate_global->__pyx_n_s_filename_2 +#define __pyx_n_s_fileobj __pyx_mstate_global->__pyx_n_s_fileobj +#define __pyx_n_s_fname_location __pyx_mstate_global->__pyx_n_s_fname_location +#define __pyx_n_s_fname_token __pyx_mstate_global->__pyx_n_s_fname_token +#define __pyx_n_s_fname_type __pyx_mstate_global->__pyx_n_s_fname_type +#define __pyx_n_s_fontTools_feaLib_error __pyx_mstate_global->__pyx_n_s_fontTools_feaLib_error +#define __pyx_n_s_fontTools_feaLib_lexer __pyx_mstate_global->__pyx_n_s_fontTools_feaLib_lexer +#define __pyx_n_s_fontTools_feaLib_location __pyx_mstate_global->__pyx_n_s_fontTools_feaLib_location +#define __pyx_n_s_getcwd __pyx_mstate_global->__pyx_n_s_getcwd +#define __pyx_n_s_glyphclass __pyx_mstate_global->__pyx_n_s_glyphclass +#define __pyx_n_s_import __pyx_mstate_global->__pyx_n_s_import +#define __pyx_n_u_include __pyx_mstate_global->__pyx_n_u_include +#define __pyx_n_s_includeDir __pyx_mstate_global->__pyx_n_s_includeDir +#define __pyx_n_s_init __pyx_mstate_global->__pyx_n_s_init +#define __pyx_n_s_init_subclass __pyx_mstate_global->__pyx_n_s_init_subclass +#define __pyx_n_s_initializing __pyx_mstate_global->__pyx_n_s_initializing +#define __pyx_n_s_is_coroutine __pyx_mstate_global->__pyx_n_s_is_coroutine +#define __pyx_n_s_isabs __pyx_mstate_global->__pyx_n_s_isabs +#define __pyx_n_s_iter __pyx_mstate_global->__pyx_n_s_iter +#define __pyx_n_s_join __pyx_mstate_global->__pyx_n_s_join +#define __pyx_n_s_lexer __pyx_mstate_global->__pyx_n_s_lexer +#define __pyx_n_s_lexers __pyx_mstate_global->__pyx_n_s_lexers +#define __pyx_n_s_limit __pyx_mstate_global->__pyx_n_s_limit +#define __pyx_n_s_line __pyx_mstate_global->__pyx_n_s_line +#define __pyx_n_s_line_start __pyx_mstate_global->__pyx_n_s_line_start +#define __pyx_n_s_location __pyx_mstate_global->__pyx_n_s_location +#define __pyx_n_s_location_2 __pyx_mstate_global->__pyx_n_s_location_2 +#define __pyx_n_s_main __pyx_mstate_global->__pyx_n_s_main +#define __pyx_n_s_make_lexer __pyx_mstate_global->__pyx_n_s_make_lexer +#define __pyx_n_s_match __pyx_mstate_global->__pyx_n_s_match +#define __pyx_n_s_maxsplit __pyx_mstate_global->__pyx_n_s_maxsplit +#define 
__pyx_n_s_metaclass __pyx_mstate_global->__pyx_n_s_metaclass +#define __pyx_n_s_mode __pyx_mstate_global->__pyx_n_s_mode +#define __pyx_n_s_module __pyx_mstate_global->__pyx_n_s_module +#define __pyx_n_s_mro_entries __pyx_mstate_global->__pyx_n_s_mro_entries +#define __pyx_n_u_name __pyx_mstate_global->__pyx_n_u_name +#define __pyx_n_s_name_2 __pyx_mstate_global->__pyx_n_s_name_2 +#define __pyx_n_s_next __pyx_mstate_global->__pyx_n_s_next +#define __pyx_n_s_next_2 __pyx_mstate_global->__pyx_n_s_next_2 +#define __pyx_n_s_next_3 __pyx_mstate_global->__pyx_n_s_next_3 +#define __pyx_n_s_next_char __pyx_mstate_global->__pyx_n_s_next_char +#define __pyx_n_s_object __pyx_mstate_global->__pyx_n_s_object +#define __pyx_n_s_open __pyx_mstate_global->__pyx_n_s_open +#define __pyx_n_s_os __pyx_mstate_global->__pyx_n_s_os +#define __pyx_n_s_p __pyx_mstate_global->__pyx_n_s_p +#define __pyx_n_s_path __pyx_mstate_global->__pyx_n_s_path +#define __pyx_n_s_pop __pyx_mstate_global->__pyx_n_s_pop +#define __pyx_n_s_pos __pyx_mstate_global->__pyx_n_s_pos +#define __pyx_n_s_prepare __pyx_mstate_global->__pyx_n_s_prepare +#define __pyx_n_s_qualname __pyx_mstate_global->__pyx_n_s_qualname +#define __pyx_n_u_r __pyx_mstate_global->__pyx_n_u_r +#define __pyx_n_s_re __pyx_mstate_global->__pyx_n_s_re +#define __pyx_n_s_read __pyx_mstate_global->__pyx_n_s_read +#define __pyx_n_u_read __pyx_mstate_global->__pyx_n_u_read +#define __pyx_n_s_regexp __pyx_mstate_global->__pyx_n_s_regexp +#define __pyx_kp_u_s __pyx_mstate_global->__pyx_kp_u_s +#define __pyx_kp_u_s_2 __pyx_mstate_global->__pyx_kp_u_s_2 +#define __pyx_n_s_scan_anonymous_block __pyx_mstate_global->__pyx_n_s_scan_anonymous_block +#define __pyx_n_s_scan_over __pyx_mstate_global->__pyx_n_s_scan_over +#define __pyx_n_s_scan_until __pyx_mstate_global->__pyx_n_s_scan_until +#define __pyx_n_s_self __pyx_mstate_global->__pyx_n_s_self +#define __pyx_n_s_set_name __pyx_mstate_global->__pyx_n_s_set_name +#define __pyx_n_s_spec __pyx_mstate_global->__pyx_n_s_spec +#define __pyx_n_s_split __pyx_mstate_global->__pyx_n_s_split +#define __pyx_n_s_start __pyx_mstate_global->__pyx_n_s_start +#define __pyx_n_s_staticmethod __pyx_mstate_global->__pyx_n_s_staticmethod +#define __pyx_n_s_stop_at __pyx_mstate_global->__pyx_n_s_stop_at +#define __pyx_n_s_string __pyx_mstate_global->__pyx_n_s_string +#define __pyx_n_s_strip __pyx_mstate_global->__pyx_n_s_strip +#define __pyx_n_s_sub __pyx_mstate_global->__pyx_n_s_sub +#define __pyx_n_s_super __pyx_mstate_global->__pyx_n_s_super +#define __pyx_n_s_tag __pyx_mstate_global->__pyx_n_s_tag +#define __pyx_n_s_test __pyx_mstate_global->__pyx_n_s_test +#define __pyx_n_s_text __pyx_mstate_global->__pyx_n_s_text +#define __pyx_n_s_text_2 __pyx_mstate_global->__pyx_n_s_text_2 +#define __pyx_n_s_text_length __pyx_mstate_global->__pyx_n_s_text_length +#define __pyx_n_s_token __pyx_mstate_global->__pyx_n_s_token +#define __pyx_n_s_token_type __pyx_mstate_global->__pyx_n_s_token_type +#define __pyx_kp_u_utf_8_sig __pyx_mstate_global->__pyx_kp_u_utf_8_sig +#define __pyx_n_s_valid __pyx_mstate_global->__pyx_n_s_valid +#define __pyx_n_u_xX __pyx_mstate_global->__pyx_n_u_xX +#define __pyx_int_0 __pyx_mstate_global->__pyx_int_0 +#define __pyx_int_1 __pyx_mstate_global->__pyx_int_1 +#define __pyx_int_2 __pyx_mstate_global->__pyx_int_2 +#define __pyx_int_8 __pyx_mstate_global->__pyx_int_8 +#define __pyx_int_10 __pyx_mstate_global->__pyx_int_10 +#define __pyx_int_16 __pyx_mstate_global->__pyx_int_16 +#define __pyx_tuple__14 
__pyx_mstate_global->__pyx_tuple__14 +#define __pyx_tuple__15 __pyx_mstate_global->__pyx_tuple__15 +#define __pyx_tuple__21 __pyx_mstate_global->__pyx_tuple__21 +#define __pyx_tuple__23 __pyx_mstate_global->__pyx_tuple__23 +#define __pyx_tuple__26 __pyx_mstate_global->__pyx_tuple__26 +#define __pyx_tuple__28 __pyx_mstate_global->__pyx_tuple__28 +#define __pyx_tuple__30 __pyx_mstate_global->__pyx_tuple__30 +#define __pyx_tuple__32 __pyx_mstate_global->__pyx_tuple__32 +#define __pyx_tuple__34 __pyx_mstate_global->__pyx_tuple__34 +#define __pyx_tuple__36 __pyx_mstate_global->__pyx_tuple__36 +#define __pyx_tuple__38 __pyx_mstate_global->__pyx_tuple__38 +#define __pyx_tuple__39 __pyx_mstate_global->__pyx_tuple__39 +#define __pyx_tuple__40 __pyx_mstate_global->__pyx_tuple__40 +#define __pyx_tuple__44 __pyx_mstate_global->__pyx_tuple__44 +#define __pyx_tuple__46 __pyx_mstate_global->__pyx_tuple__46 +#define __pyx_tuple__48 __pyx_mstate_global->__pyx_tuple__48 +#define __pyx_codeobj__22 __pyx_mstate_global->__pyx_codeobj__22 +#define __pyx_codeobj__24 __pyx_mstate_global->__pyx_codeobj__24 +#define __pyx_codeobj__25 __pyx_mstate_global->__pyx_codeobj__25 +#define __pyx_codeobj__27 __pyx_mstate_global->__pyx_codeobj__27 +#define __pyx_codeobj__29 __pyx_mstate_global->__pyx_codeobj__29 +#define __pyx_codeobj__31 __pyx_mstate_global->__pyx_codeobj__31 +#define __pyx_codeobj__33 __pyx_mstate_global->__pyx_codeobj__33 +#define __pyx_codeobj__35 __pyx_mstate_global->__pyx_codeobj__35 +#define __pyx_codeobj__37 __pyx_mstate_global->__pyx_codeobj__37 +#define __pyx_codeobj__41 __pyx_mstate_global->__pyx_codeobj__41 +#define __pyx_codeobj__42 __pyx_mstate_global->__pyx_codeobj__42 +#define __pyx_codeobj__43 __pyx_mstate_global->__pyx_codeobj__43 +#define __pyx_codeobj__45 __pyx_mstate_global->__pyx_codeobj__45 +#define __pyx_codeobj__47 __pyx_mstate_global->__pyx_codeobj__47 +#define __pyx_codeobj__49 __pyx_mstate_global->__pyx_codeobj__49 +#define __pyx_codeobj__50 __pyx_mstate_global->__pyx_codeobj__50 +/* #### Code section: module_code ### */ + +/* "fontTools/feaLib/lexer.py":43 + * MODE_FILENAME_ = "FILENAME" + * + * def __init__(self, text, filename): # <<<<<<<<<<<<<< + * self.filename_ = filename + * self.line_ = 1 + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_1__init__(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +); /*proto*/ +PyDoc_STRVAR(__pyx_doc_9fontTools_6feaLib_5lexer_5Lexer___init__, "Lexer.__init__(self, text, filename)"); +static PyMethodDef __pyx_mdef_9fontTools_6feaLib_5lexer_5Lexer_1__init__ = {"__init__", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_1__init__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_9fontTools_6feaLib_5lexer_5Lexer___init__}; +static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_1__init__(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +) { + PyObject *__pyx_v_self = 0; + PyObject *__pyx_v_text = 0; + PyObject *__pyx_v_filename = 0; + #if !CYTHON_METH_FASTCALL + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + #endif + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject* values[3] = {0,0,0}; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r 
= 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); + #if !CYTHON_METH_FASTCALL + #if CYTHON_ASSUME_SAFE_MACROS + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; + #endif + #endif + __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); + { + PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_text,&__pyx_n_s_filename,0}; + if (__pyx_kwds) { + Py_ssize_t kw_args; + switch (__pyx_nargs) { + case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); + switch (__pyx_nargs) { + case 0: + if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_self)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[0]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 43, __pyx_L3_error) + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_text)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[1]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 43, __pyx_L3_error) + else { + __Pyx_RaiseArgtupleInvalid("__init__", 1, 3, 3, 1); __PYX_ERR(0, 43, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_filename)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[2]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 43, __pyx_L3_error) + else { + __Pyx_RaiseArgtupleInvalid("__init__", 1, 3, 3, 2); __PYX_ERR(0, 43, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + const Py_ssize_t kwd_pos_args = __pyx_nargs; + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__init__") < 0)) __PYX_ERR(0, 43, __pyx_L3_error) + } + } else if (unlikely(__pyx_nargs != 3)) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); + values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); + } + __pyx_v_self = values[0]; + __pyx_v_text = values[1]; + __pyx_v_filename = values[2]; + } + goto __pyx_L6_skip; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__init__", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 43, __pyx_L3_error) + __pyx_L6_skip:; + goto __pyx_L4_argument_unpacking_done; + __pyx_L3_error:; + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_AddTraceback("fontTools.feaLib.lexer.Lexer.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_9fontTools_6feaLib_5lexer_5Lexer___init__(__pyx_self, __pyx_v_self, __pyx_v_text, __pyx_v_filename); + + /* function exit code */ + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject 
*__pyx_pf_9fontTools_6feaLib_5lexer_5Lexer___init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_text, PyObject *__pyx_v_filename) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + Py_ssize_t __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__init__", 1); + + /* "fontTools/feaLib/lexer.py":44 + * + * def __init__(self, text, filename): + * self.filename_ = filename # <<<<<<<<<<<<<< + * self.line_ = 1 + * self.pos_ = 0 + */ + if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_filename_2, __pyx_v_filename) < 0) __PYX_ERR(0, 44, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":45 + * def __init__(self, text, filename): + * self.filename_ = filename + * self.line_ = 1 # <<<<<<<<<<<<<< + * self.pos_ = 0 + * self.line_start_ = 0 + */ + if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_line, __pyx_int_1) < 0) __PYX_ERR(0, 45, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":46 + * self.filename_ = filename + * self.line_ = 1 + * self.pos_ = 0 # <<<<<<<<<<<<<< + * self.line_start_ = 0 + * self.text_ = text + */ + if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_pos, __pyx_int_0) < 0) __PYX_ERR(0, 46, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":47 + * self.line_ = 1 + * self.pos_ = 0 + * self.line_start_ = 0 # <<<<<<<<<<<<<< + * self.text_ = text + * self.text_length_ = len(text) + */ + if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_line_start, __pyx_int_0) < 0) __PYX_ERR(0, 47, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":48 + * self.pos_ = 0 + * self.line_start_ = 0 + * self.text_ = text # <<<<<<<<<<<<<< + * self.text_length_ = len(text) + * self.mode_ = Lexer.MODE_NORMAL_ + */ + if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_text_2, __pyx_v_text) < 0) __PYX_ERR(0, 48, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":49 + * self.line_start_ = 0 + * self.text_ = text + * self.text_length_ = len(text) # <<<<<<<<<<<<<< + * self.mode_ = Lexer.MODE_NORMAL_ + * + */ + __pyx_t_1 = PyObject_Length(__pyx_v_text); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 49, __pyx_L1_error) + __pyx_t_2 = PyInt_FromSsize_t(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 49, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_text_length, __pyx_t_2) < 0) __PYX_ERR(0, 49, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "fontTools/feaLib/lexer.py":50 + * self.text_ = text + * self.text_length_ = len(text) + * self.mode_ = Lexer.MODE_NORMAL_ # <<<<<<<<<<<<<< + * + * def __iter__(self): + */ + __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 50, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_MODE_NORMAL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 50, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_mode, __pyx_t_3) < 0) __PYX_ERR(0, 50, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "fontTools/feaLib/lexer.py":43 + * MODE_FILENAME_ = "FILENAME" + * + * def __init__(self, text, filename): # <<<<<<<<<<<<<< + * self.filename_ = filename + * self.line_ = 1 + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); 
+ __Pyx_AddTraceback("fontTools.feaLib.lexer.Lexer.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "fontTools/feaLib/lexer.py":52 + * self.mode_ = Lexer.MODE_NORMAL_ + * + * def __iter__(self): # <<<<<<<<<<<<<< + * return self + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_3__iter__(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +); /*proto*/ +PyDoc_STRVAR(__pyx_doc_9fontTools_6feaLib_5lexer_5Lexer_2__iter__, "Lexer.__iter__(self)"); +static PyMethodDef __pyx_mdef_9fontTools_6feaLib_5lexer_5Lexer_3__iter__ = {"__iter__", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_3__iter__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_9fontTools_6feaLib_5lexer_5Lexer_2__iter__}; +static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_3__iter__(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +) { + PyObject *__pyx_v_self = 0; + #if !CYTHON_METH_FASTCALL + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + #endif + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject* values[1] = {0}; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__iter__ (wrapper)", 0); + #if !CYTHON_METH_FASTCALL + #if CYTHON_ASSUME_SAFE_MACROS + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; + #endif + #endif + __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); + { + PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,0}; + if (__pyx_kwds) { + Py_ssize_t kw_args; + switch (__pyx_nargs) { + case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); + switch (__pyx_nargs) { + case 0: + if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_self)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[0]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 52, __pyx_L3_error) + else goto __pyx_L5_argtuple_error; + } + if (unlikely(kw_args > 0)) { + const Py_ssize_t kwd_pos_args = __pyx_nargs; + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__iter__") < 0)) __PYX_ERR(0, 52, __pyx_L3_error) + } + } else if (unlikely(__pyx_nargs != 1)) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + } + __pyx_v_self = values[0]; + } + goto __pyx_L6_skip; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__iter__", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 52, __pyx_L3_error) + __pyx_L6_skip:; + goto __pyx_L4_argument_unpacking_done; + __pyx_L3_error:; + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_AddTraceback("fontTools.feaLib.lexer.Lexer.__iter__", __pyx_clineno, __pyx_lineno, __pyx_filename); + 
__Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_2__iter__(__pyx_self, __pyx_v_self); + + /* function exit code */ + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_2__iter__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__iter__", 1); + + /* "fontTools/feaLib/lexer.py":53 + * + * def __iter__(self): + * return self # <<<<<<<<<<<<<< + * + * def next(self): # Python 2 + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_self); + __pyx_r = __pyx_v_self; + goto __pyx_L0; + + /* "fontTools/feaLib/lexer.py":52 + * self.mode_ = Lexer.MODE_NORMAL_ + * + * def __iter__(self): # <<<<<<<<<<<<<< + * return self + * + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "fontTools/feaLib/lexer.py":55 + * return self + * + * def next(self): # Python 2 # <<<<<<<<<<<<<< + * return self.__next__() + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_5next(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +); /*proto*/ +PyDoc_STRVAR(__pyx_doc_9fontTools_6feaLib_5lexer_5Lexer_4next, "Lexer.next(self)"); +static PyMethodDef __pyx_mdef_9fontTools_6feaLib_5lexer_5Lexer_5next = {"next", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_5next, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_9fontTools_6feaLib_5lexer_5Lexer_4next}; +static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_5next(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +) { + PyObject *__pyx_v_self = 0; + #if !CYTHON_METH_FASTCALL + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + #endif + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject* values[1] = {0}; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("next (wrapper)", 0); + #if !CYTHON_METH_FASTCALL + #if CYTHON_ASSUME_SAFE_MACROS + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; + #endif + #endif + __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); + { + PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,0}; + if (__pyx_kwds) { + Py_ssize_t kw_args; + switch (__pyx_nargs) { + case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); + switch (__pyx_nargs) { + case 0: + if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_self)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[0]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 55, __pyx_L3_error) + else goto __pyx_L5_argtuple_error; + } + if (unlikely(kw_args > 0)) { + 
const Py_ssize_t kwd_pos_args = __pyx_nargs; + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "next") < 0)) __PYX_ERR(0, 55, __pyx_L3_error) + } + } else if (unlikely(__pyx_nargs != 1)) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + } + __pyx_v_self = values[0]; + } + goto __pyx_L6_skip; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("next", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 55, __pyx_L3_error) + __pyx_L6_skip:; + goto __pyx_L4_argument_unpacking_done; + __pyx_L3_error:; + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_AddTraceback("fontTools.feaLib.lexer.Lexer.next", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_4next(__pyx_self, __pyx_v_self); + + /* function exit code */ + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_4next(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + unsigned int __pyx_t_4; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("next", 1); + + /* "fontTools/feaLib/lexer.py":56 + * + * def next(self): # Python 2 + * return self.__next__() # <<<<<<<<<<<<<< + * + * def __next__(self): # Python 3 + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_next); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 56, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = NULL; + __pyx_t_4 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_2))) { + __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); + if (likely(__pyx_t_3)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_2, function); + __pyx_t_4 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_3, NULL}; + __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_4, 0+__pyx_t_4); + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 56, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "fontTools/feaLib/lexer.py":55 + * return self + * + * def next(self): # Python 2 # <<<<<<<<<<<<<< + * return self.__next__() + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("fontTools.feaLib.lexer.Lexer.next", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "fontTools/feaLib/lexer.py":58 + * return self.__next__() + * + * def __next__(self): # Python 3 # <<<<<<<<<<<<<< + * while True: + * token_type, token, location = self.next_() + */ + +/* Python wrapper */ 
+static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_7__next__(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +); /*proto*/ +PyDoc_STRVAR(__pyx_doc_9fontTools_6feaLib_5lexer_5Lexer_6__next__, "Lexer.__next__(self)"); +static PyMethodDef __pyx_mdef_9fontTools_6feaLib_5lexer_5Lexer_7__next__ = {"__next__", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_7__next__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_9fontTools_6feaLib_5lexer_5Lexer_6__next__}; +static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_7__next__(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +) { + PyObject *__pyx_v_self = 0; + #if !CYTHON_METH_FASTCALL + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + #endif + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject* values[1] = {0}; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__next__ (wrapper)", 0); + #if !CYTHON_METH_FASTCALL + #if CYTHON_ASSUME_SAFE_MACROS + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; + #endif + #endif + __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); + { + PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,0}; + if (__pyx_kwds) { + Py_ssize_t kw_args; + switch (__pyx_nargs) { + case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); + switch (__pyx_nargs) { + case 0: + if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_self)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[0]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 58, __pyx_L3_error) + else goto __pyx_L5_argtuple_error; + } + if (unlikely(kw_args > 0)) { + const Py_ssize_t kwd_pos_args = __pyx_nargs; + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__next__") < 0)) __PYX_ERR(0, 58, __pyx_L3_error) + } + } else if (unlikely(__pyx_nargs != 1)) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + } + __pyx_v_self = values[0]; + } + goto __pyx_L6_skip; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__next__", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 58, __pyx_L3_error) + __pyx_L6_skip:; + goto __pyx_L4_argument_unpacking_done; + __pyx_L3_error:; + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_AddTraceback("fontTools.feaLib.lexer.Lexer.__next__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_6__next__(__pyx_self, __pyx_v_self); + + /* function exit code */ + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + 
__Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_6__next__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self) { + PyObject *__pyx_v_token_type = NULL; + PyObject *__pyx_v_token = NULL; + PyObject *__pyx_v_location = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + unsigned int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *(*__pyx_t_7)(PyObject *); + int __pyx_t_8; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__next__", 1); + + /* "fontTools/feaLib/lexer.py":59 + * + * def __next__(self): # Python 3 + * while True: # <<<<<<<<<<<<<< + * token_type, token, location = self.next_() + * if token_type != Lexer.NEWLINE: + */ + while (1) { + + /* "fontTools/feaLib/lexer.py":60 + * def __next__(self): # Python 3 + * while True: + * token_type, token, location = self.next_() # <<<<<<<<<<<<<< + * if token_type != Lexer.NEWLINE: + * return (token_type, token, location) + */ + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_next_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 60, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = NULL; + __pyx_t_4 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_2))) { + __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); + if (likely(__pyx_t_3)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_2, function); + __pyx_t_4 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_3, NULL}; + __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_4, 0+__pyx_t_4); + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 60, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } + if ((likely(PyTuple_CheckExact(__pyx_t_1))) || (PyList_CheckExact(__pyx_t_1))) { + PyObject* sequence = __pyx_t_1; + Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); + if (unlikely(size != 3)) { + if (size > 3) __Pyx_RaiseTooManyValuesError(3); + else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); + __PYX_ERR(0, 60, __pyx_L1_error) + } + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + if (likely(PyTuple_CheckExact(sequence))) { + __pyx_t_2 = PyTuple_GET_ITEM(sequence, 0); + __pyx_t_3 = PyTuple_GET_ITEM(sequence, 1); + __pyx_t_5 = PyTuple_GET_ITEM(sequence, 2); + } else { + __pyx_t_2 = PyList_GET_ITEM(sequence, 0); + __pyx_t_3 = PyList_GET_ITEM(sequence, 1); + __pyx_t_5 = PyList_GET_ITEM(sequence, 2); + } + __Pyx_INCREF(__pyx_t_2); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(__pyx_t_5); + #else + __pyx_t_2 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 60, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 60, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_5 = PySequence_ITEM(sequence, 2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 60, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + #endif + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + } else { + Py_ssize_t index = -1; + __pyx_t_6 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 60, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_7 = __Pyx_PyObject_GetIterNextFunc(__pyx_t_6); + index = 0; 
__pyx_t_2 = __pyx_t_7(__pyx_t_6); if (unlikely(!__pyx_t_2)) goto __pyx_L5_unpacking_failed; + __Pyx_GOTREF(__pyx_t_2); + index = 1; __pyx_t_3 = __pyx_t_7(__pyx_t_6); if (unlikely(!__pyx_t_3)) goto __pyx_L5_unpacking_failed; + __Pyx_GOTREF(__pyx_t_3); + index = 2; __pyx_t_5 = __pyx_t_7(__pyx_t_6); if (unlikely(!__pyx_t_5)) goto __pyx_L5_unpacking_failed; + __Pyx_GOTREF(__pyx_t_5); + if (__Pyx_IternextUnpackEndCheck(__pyx_t_7(__pyx_t_6), 3) < 0) __PYX_ERR(0, 60, __pyx_L1_error) + __pyx_t_7 = NULL; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + goto __pyx_L6_unpacking_done; + __pyx_L5_unpacking_failed:; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __pyx_t_7 = NULL; + if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); + __PYX_ERR(0, 60, __pyx_L1_error) + __pyx_L6_unpacking_done:; + } + __Pyx_XDECREF_SET(__pyx_v_token_type, __pyx_t_2); + __pyx_t_2 = 0; + __Pyx_XDECREF_SET(__pyx_v_token, __pyx_t_3); + __pyx_t_3 = 0; + __Pyx_XDECREF_SET(__pyx_v_location, __pyx_t_5); + __pyx_t_5 = 0; + + /* "fontTools/feaLib/lexer.py":61 + * while True: + * token_type, token, location = self.next_() + * if token_type != Lexer.NEWLINE: # <<<<<<<<<<<<<< + * return (token_type, token, location) + * + */ + __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 61, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_NEWLINE); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 61, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = PyObject_RichCompare(__pyx_v_token_type, __pyx_t_5, Py_NE); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 61, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __pyx_t_8 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely((__pyx_t_8 < 0))) __PYX_ERR(0, 61, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + if (__pyx_t_8) { + + /* "fontTools/feaLib/lexer.py":62 + * token_type, token, location = self.next_() + * if token_type != Lexer.NEWLINE: + * return (token_type, token, location) # <<<<<<<<<<<<<< + * + * def location_(self): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 62, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_INCREF(__pyx_v_token_type); + __Pyx_GIVEREF(__pyx_v_token_type); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_token_type)) __PYX_ERR(0, 62, __pyx_L1_error); + __Pyx_INCREF(__pyx_v_token); + __Pyx_GIVEREF(__pyx_v_token); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_token)) __PYX_ERR(0, 62, __pyx_L1_error); + __Pyx_INCREF(__pyx_v_location); + __Pyx_GIVEREF(__pyx_v_location); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_location)) __PYX_ERR(0, 62, __pyx_L1_error); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "fontTools/feaLib/lexer.py":61 + * while True: + * token_type, token, location = self.next_() + * if token_type != Lexer.NEWLINE: # <<<<<<<<<<<<<< + * return (token_type, token, location) + * + */ + } + } + + /* "fontTools/feaLib/lexer.py":58 + * return self.__next__() + * + * def __next__(self): # Python 3 # <<<<<<<<<<<<<< + * while True: + * token_type, token, location = self.next_() + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_AddTraceback("fontTools.feaLib.lexer.Lexer.__next__", __pyx_clineno, 
__pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_token_type); + __Pyx_XDECREF(__pyx_v_token); + __Pyx_XDECREF(__pyx_v_location); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "fontTools/feaLib/lexer.py":64 + * return (token_type, token, location) + * + * def location_(self): # <<<<<<<<<<<<<< + * column = self.pos_ - self.line_start_ + 1 + * return FeatureLibLocation(self.filename_ or "", self.line_, column) + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_9location_(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +); /*proto*/ +PyDoc_STRVAR(__pyx_doc_9fontTools_6feaLib_5lexer_5Lexer_8location_, "Lexer.location_(self)"); +static PyMethodDef __pyx_mdef_9fontTools_6feaLib_5lexer_5Lexer_9location_ = {"location_", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_9location_, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_9fontTools_6feaLib_5lexer_5Lexer_8location_}; +static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_9location_(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +) { + PyObject *__pyx_v_self = 0; + #if !CYTHON_METH_FASTCALL + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + #endif + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject* values[1] = {0}; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("location_ (wrapper)", 0); + #if !CYTHON_METH_FASTCALL + #if CYTHON_ASSUME_SAFE_MACROS + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; + #endif + #endif + __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); + { + PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,0}; + if (__pyx_kwds) { + Py_ssize_t kw_args; + switch (__pyx_nargs) { + case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); + switch (__pyx_nargs) { + case 0: + if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_self)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[0]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 64, __pyx_L3_error) + else goto __pyx_L5_argtuple_error; + } + if (unlikely(kw_args > 0)) { + const Py_ssize_t kwd_pos_args = __pyx_nargs; + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "location_") < 0)) __PYX_ERR(0, 64, __pyx_L3_error) + } + } else if (unlikely(__pyx_nargs != 1)) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + } + __pyx_v_self = values[0]; + } + goto __pyx_L6_skip; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("location_", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 64, __pyx_L3_error) + __pyx_L6_skip:; + goto __pyx_L4_argument_unpacking_done; + __pyx_L3_error:; + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + 
__Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_AddTraceback("fontTools.feaLib.lexer.Lexer.location_", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_8location_(__pyx_self, __pyx_v_self); + + /* function exit code */ + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_8location_(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self) { + PyObject *__pyx_v_column = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_t_5; + PyObject *__pyx_t_6 = NULL; + unsigned int __pyx_t_7; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("location_", 1); + + /* "fontTools/feaLib/lexer.py":65 + * + * def location_(self): + * column = self.pos_ - self.line_start_ + 1 # <<<<<<<<<<<<<< + * return FeatureLibLocation(self.filename_ or "", self.line_, column) + * + */ + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 65, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_line_start); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 65, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyNumber_Subtract(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 65, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 65, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v_column = __pyx_t_2; + __pyx_t_2 = 0; + + /* "fontTools/feaLib/lexer.py":66 + * def location_(self): + * column = self.pos_ - self.line_start_ + 1 + * return FeatureLibLocation(self.filename_ or "", self.line_, column) # <<<<<<<<<<<<<< + * + * def next_(self): + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_FeatureLibLocation); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 66, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_filename_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 66, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_5 < 0))) __PYX_ERR(0, 66, __pyx_L1_error) + if (!__pyx_t_5) { + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + } else { + __Pyx_INCREF(__pyx_t_4); + __pyx_t_1 = __pyx_t_4; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + goto __pyx_L3_bool_binop_done; + } + __Pyx_INCREF(__pyx_kp_u_features); + __pyx_t_1 = __pyx_kp_u_features; + __pyx_L3_bool_binop_done:; + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_line); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 66, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_6 = NULL; + __pyx_t_7 = 0; + #if CYTHON_UNPACK_METHODS + if (unlikely(PyMethod_Check(__pyx_t_3))) { + __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_3); + if (likely(__pyx_t_6)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); + 
__Pyx_INCREF(__pyx_t_6); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_3, function); + __pyx_t_7 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[4] = {__pyx_t_6, __pyx_t_1, __pyx_t_4, __pyx_v_column}; + __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_7, 3+__pyx_t_7); + __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 66, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "fontTools/feaLib/lexer.py":64 + * return (token_type, token, location) + * + * def location_(self): # <<<<<<<<<<<<<< + * column = self.pos_ - self.line_start_ + 1 + * return FeatureLibLocation(self.filename_ or "", self.line_, column) + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_AddTraceback("fontTools.feaLib.lexer.Lexer.location_", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_column); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "fontTools/feaLib/lexer.py":68 + * return FeatureLibLocation(self.filename_ or "", self.line_, column) + * + * def next_(self): # <<<<<<<<<<<<<< + * self.scan_over_(Lexer.CHAR_WHITESPACE_) + * location = self.location_() + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_11next_(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +); /*proto*/ +PyDoc_STRVAR(__pyx_doc_9fontTools_6feaLib_5lexer_5Lexer_10next_, "Lexer.next_(self)"); +static PyMethodDef __pyx_mdef_9fontTools_6feaLib_5lexer_5Lexer_11next_ = {"next_", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_11next_, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_9fontTools_6feaLib_5lexer_5Lexer_10next_}; +static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_11next_(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +) { + PyObject *__pyx_v_self = 0; + #if !CYTHON_METH_FASTCALL + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + #endif + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject* values[1] = {0}; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("next_ (wrapper)", 0); + #if !CYTHON_METH_FASTCALL + #if CYTHON_ASSUME_SAFE_MACROS + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; + #endif + #endif + __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); + { + PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,0}; + if (__pyx_kwds) { + Py_ssize_t kw_args; + switch (__pyx_nargs) { + case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); + switch (__pyx_nargs) { + case 0: + if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, 
__pyx_n_s_self)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[0]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 68, __pyx_L3_error) + else goto __pyx_L5_argtuple_error; + } + if (unlikely(kw_args > 0)) { + const Py_ssize_t kwd_pos_args = __pyx_nargs; + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "next_") < 0)) __PYX_ERR(0, 68, __pyx_L3_error) + } + } else if (unlikely(__pyx_nargs != 1)) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + } + __pyx_v_self = values[0]; + } + goto __pyx_L6_skip; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("next_", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 68, __pyx_L3_error) + __pyx_L6_skip:; + goto __pyx_L4_argument_unpacking_done; + __pyx_L3_error:; + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_AddTraceback("fontTools.feaLib.lexer.Lexer.next_", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_10next_(__pyx_self, __pyx_v_self); + + /* function exit code */ + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_10next_(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self) { + PyObject *__pyx_v_location = NULL; + PyObject *__pyx_v_start = NULL; + PyObject *__pyx_v_text = NULL; + Py_ssize_t __pyx_v_limit; + PyObject *__pyx_v_cur_char = NULL; + PyObject *__pyx_v_next_char = NULL; + PyObject *__pyx_v_glyphclass = NULL; + PyObject *__pyx_v_token = NULL; + PyObject *__pyx_v_string = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + unsigned int __pyx_t_5; + Py_ssize_t __pyx_t_6; + int __pyx_t_7; + int __pyx_t_8; + PyObject *__pyx_t_9 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("next_", 1); + + /* "fontTools/feaLib/lexer.py":69 + * + * def next_(self): + * self.scan_over_(Lexer.CHAR_WHITESPACE_) # <<<<<<<<<<<<<< + * location = self.location_() + * start = self.pos_ + */ + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_scan_over); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 69, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 69, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_CHAR_WHITESPACE); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 69, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = NULL; + __pyx_t_5 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_2))) { + __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); + if (likely(__pyx_t_3)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_2, function); + __pyx_t_5 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_3, 
__pyx_t_4}; + __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 69, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "fontTools/feaLib/lexer.py":70 + * def next_(self): + * self.scan_over_(Lexer.CHAR_WHITESPACE_) + * location = self.location_() # <<<<<<<<<<<<<< + * start = self.pos_ + * text = self.text_ + */ + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_location); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 70, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = NULL; + __pyx_t_5 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_2))) { + __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); + if (likely(__pyx_t_4)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); + __Pyx_INCREF(__pyx_t_4); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_2, function); + __pyx_t_5 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_4, NULL}; + __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_5, 0+__pyx_t_5); + __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 70, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } + __pyx_v_location = __pyx_t_1; + __pyx_t_1 = 0; + + /* "fontTools/feaLib/lexer.py":71 + * self.scan_over_(Lexer.CHAR_WHITESPACE_) + * location = self.location_() + * start = self.pos_ # <<<<<<<<<<<<<< + * text = self.text_ + * limit = len(text) + */ + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 71, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_v_start = __pyx_t_1; + __pyx_t_1 = 0; + + /* "fontTools/feaLib/lexer.py":72 + * location = self.location_() + * start = self.pos_ + * text = self.text_ # <<<<<<<<<<<<<< + * limit = len(text) + * if start >= limit: + */ + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_text_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 72, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_v_text = __pyx_t_1; + __pyx_t_1 = 0; + + /* "fontTools/feaLib/lexer.py":73 + * start = self.pos_ + * text = self.text_ + * limit = len(text) # <<<<<<<<<<<<<< + * if start >= limit: + * raise StopIteration() + */ + __pyx_t_6 = PyObject_Length(__pyx_v_text); if (unlikely(__pyx_t_6 == ((Py_ssize_t)-1))) __PYX_ERR(0, 73, __pyx_L1_error) + __pyx_v_limit = __pyx_t_6; + + /* "fontTools/feaLib/lexer.py":74 + * text = self.text_ + * limit = len(text) + * if start >= limit: # <<<<<<<<<<<<<< + * raise StopIteration() + * cur_char = text[start] + */ + __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_limit); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 74, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = PyObject_RichCompare(__pyx_v_start, __pyx_t_1, Py_GE); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 74, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 74, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + if (unlikely(__pyx_t_7)) { + + /* "fontTools/feaLib/lexer.py":75 + * limit = len(text) + * if start >= limit: + * raise StopIteration() # <<<<<<<<<<<<<< + * cur_char = text[start] + * next_char = text[start + 1] if start + 1 < limit else None + */ + __pyx_t_2 = 
__Pyx_PyObject_CallNoArg(__pyx_builtin_StopIteration); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 75, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_Raise(__pyx_t_2, 0, 0, 0); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __PYX_ERR(0, 75, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":74 + * text = self.text_ + * limit = len(text) + * if start >= limit: # <<<<<<<<<<<<<< + * raise StopIteration() + * cur_char = text[start] + */ + } + + /* "fontTools/feaLib/lexer.py":76 + * if start >= limit: + * raise StopIteration() + * cur_char = text[start] # <<<<<<<<<<<<<< + * next_char = text[start + 1] if start + 1 < limit else None + * + */ + __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_v_text, __pyx_v_start); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 76, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_v_cur_char = __pyx_t_2; + __pyx_t_2 = 0; + + /* "fontTools/feaLib/lexer.py":77 + * raise StopIteration() + * cur_char = text[start] + * next_char = text[start + 1] if start + 1 < limit else None # <<<<<<<<<<<<<< + * + * if cur_char == "\n": + */ + __pyx_t_1 = __Pyx_PyInt_AddObjC(__pyx_v_start, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 77, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_limit); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 77, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_4, Py_LT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 77, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 77, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_7) { + __pyx_t_3 = __Pyx_PyInt_AddObjC(__pyx_v_start, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 77, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_v_text, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 77, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_2 = __pyx_t_4; + __pyx_t_4 = 0; + } else { + __Pyx_INCREF(Py_None); + __pyx_t_2 = Py_None; + } + __pyx_v_next_char = __pyx_t_2; + __pyx_t_2 = 0; + + /* "fontTools/feaLib/lexer.py":79 + * next_char = text[start + 1] if start + 1 < limit else None + * + * if cur_char == "\n": # <<<<<<<<<<<<<< + * self.pos_ += 1 + * self.line_ += 1 + */ + __pyx_t_7 = (__Pyx_PyUnicode_Equals(__pyx_v_cur_char, __pyx_kp_u_, Py_EQ)); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 79, __pyx_L1_error) + if (__pyx_t_7) { + + /* "fontTools/feaLib/lexer.py":80 + * + * if cur_char == "\n": + * self.pos_ += 1 # <<<<<<<<<<<<<< + * self.line_ += 1 + * self.line_start_ = self.pos_ + */ + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 80, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = __Pyx_PyInt_AddObjC(__pyx_t_2, __pyx_int_1, 1, 1, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 80, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_pos, __pyx_t_4) < 0) __PYX_ERR(0, 80, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "fontTools/feaLib/lexer.py":81 + * if cur_char == "\n": + * self.pos_ += 1 + * self.line_ += 1 # <<<<<<<<<<<<<< + * self.line_start_ = self.pos_ + * return (Lexer.NEWLINE, None, location) + */ + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_line); if 
(unlikely(!__pyx_t_4)) __PYX_ERR(0, 81, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_2 = __Pyx_PyInt_AddObjC(__pyx_t_4, __pyx_int_1, 1, 1, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 81, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_line, __pyx_t_2) < 0) __PYX_ERR(0, 81, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "fontTools/feaLib/lexer.py":82 + * self.pos_ += 1 + * self.line_ += 1 + * self.line_start_ = self.pos_ # <<<<<<<<<<<<<< + * return (Lexer.NEWLINE, None, location) + * if cur_char == "\r": + */ + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 82, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_line_start, __pyx_t_2) < 0) __PYX_ERR(0, 82, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "fontTools/feaLib/lexer.py":83 + * self.line_ += 1 + * self.line_start_ = self.pos_ + * return (Lexer.NEWLINE, None, location) # <<<<<<<<<<<<<< + * if cur_char == "\r": + * self.pos_ += 2 if next_char == "\n" else 1 + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 83, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_NEWLINE); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 83, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 83, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_GIVEREF(__pyx_t_4); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4)) __PYX_ERR(0, 83, __pyx_L1_error); + __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(Py_None); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, Py_None)) __PYX_ERR(0, 83, __pyx_L1_error); + __Pyx_INCREF(__pyx_v_location); + __Pyx_GIVEREF(__pyx_v_location); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_v_location)) __PYX_ERR(0, 83, __pyx_L1_error); + __pyx_t_4 = 0; + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "fontTools/feaLib/lexer.py":79 + * next_char = text[start + 1] if start + 1 < limit else None + * + * if cur_char == "\n": # <<<<<<<<<<<<<< + * self.pos_ += 1 + * self.line_ += 1 + */ + } + + /* "fontTools/feaLib/lexer.py":84 + * self.line_start_ = self.pos_ + * return (Lexer.NEWLINE, None, location) + * if cur_char == "\r": # <<<<<<<<<<<<<< + * self.pos_ += 2 if next_char == "\n" else 1 + * self.line_ += 1 + */ + __pyx_t_7 = (__Pyx_PyUnicode_Equals(__pyx_v_cur_char, __pyx_kp_u__2, Py_EQ)); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 84, __pyx_L1_error) + if (__pyx_t_7) { + + /* "fontTools/feaLib/lexer.py":85 + * return (Lexer.NEWLINE, None, location) + * if cur_char == "\r": + * self.pos_ += 2 if next_char == "\n" else 1 # <<<<<<<<<<<<<< + * self.line_ += 1 + * self.line_start_ = self.pos_ + */ + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 85, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_7 = (__Pyx_PyUnicode_Equals(__pyx_v_next_char, __pyx_kp_u_, Py_EQ)); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 85, __pyx_L1_error) + if (__pyx_t_7) { + __Pyx_INCREF(__pyx_int_2); + __pyx_t_4 = __pyx_int_2; + } else { + __Pyx_INCREF(__pyx_int_1); + __pyx_t_4 = __pyx_int_1; + } + __pyx_t_3 = PyNumber_InPlaceAdd(__pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 85, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); 
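+ /* Editorial recap (not Cython output): the surrounding code expands the
+  * newline handling of Lexer.next_ quoted in the banner comments
+  * (lexer.py lines 79-88). A rough Python sketch taken from those banners:
+  *
+  *     if cur_char == "\n":
+  *         self.pos_ += 1
+  *         self.line_ += 1
+  *         self.line_start_ = self.pos_
+  *         return (Lexer.NEWLINE, None, location)
+  *     if cur_char == "\r":
+  *         self.pos_ += 2 if next_char == "\n" else 1
+  *         self.line_ += 1
+  *         self.line_start_ = self.pos_
+  *         return (Lexer.NEWLINE, None, location)
+  */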
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_pos, __pyx_t_3) < 0) __PYX_ERR(0, 85, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "fontTools/feaLib/lexer.py":86 + * if cur_char == "\r": + * self.pos_ += 2 if next_char == "\n" else 1 + * self.line_ += 1 # <<<<<<<<<<<<<< + * self.line_start_ = self.pos_ + * return (Lexer.NEWLINE, None, location) + */ + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_line); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 86, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 1, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 86, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_line, __pyx_t_4) < 0) __PYX_ERR(0, 86, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "fontTools/feaLib/lexer.py":87 + * self.pos_ += 2 if next_char == "\n" else 1 + * self.line_ += 1 + * self.line_start_ = self.pos_ # <<<<<<<<<<<<<< + * return (Lexer.NEWLINE, None, location) + * if cur_char == "#": + */ + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 87, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_line_start, __pyx_t_4) < 0) __PYX_ERR(0, 87, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "fontTools/feaLib/lexer.py":88 + * self.line_ += 1 + * self.line_start_ = self.pos_ + * return (Lexer.NEWLINE, None, location) # <<<<<<<<<<<<<< + * if cur_char == "#": + * self.scan_until_(Lexer.CHAR_NEWLINE_) + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 88, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_NEWLINE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 88, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 88, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_GIVEREF(__pyx_t_3); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3)) __PYX_ERR(0, 88, __pyx_L1_error); + __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(Py_None); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, Py_None)) __PYX_ERR(0, 88, __pyx_L1_error); + __Pyx_INCREF(__pyx_v_location); + __Pyx_GIVEREF(__pyx_v_location); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_v_location)) __PYX_ERR(0, 88, __pyx_L1_error); + __pyx_t_3 = 0; + __pyx_r = __pyx_t_4; + __pyx_t_4 = 0; + goto __pyx_L0; + + /* "fontTools/feaLib/lexer.py":84 + * self.line_start_ = self.pos_ + * return (Lexer.NEWLINE, None, location) + * if cur_char == "\r": # <<<<<<<<<<<<<< + * self.pos_ += 2 if next_char == "\n" else 1 + * self.line_ += 1 + */ + } + + /* "fontTools/feaLib/lexer.py":89 + * self.line_start_ = self.pos_ + * return (Lexer.NEWLINE, None, location) + * if cur_char == "#": # <<<<<<<<<<<<<< + * self.scan_until_(Lexer.CHAR_NEWLINE_) + * return (Lexer.COMMENT, text[start : self.pos_], location) + */ + __pyx_t_7 = (__Pyx_PyUnicode_Equals(__pyx_v_cur_char, __pyx_kp_u__3, Py_EQ)); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 89, __pyx_L1_error) + if (__pyx_t_7) { + + /* "fontTools/feaLib/lexer.py":90 + * return (Lexer.NEWLINE, None, location) + * if cur_char == "#": + * self.scan_until_(Lexer.CHAR_NEWLINE_) # <<<<<<<<<<<<<< + * return 
(Lexer.COMMENT, text[start : self.pos_], location) + * + */ + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_scan_until); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 90, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 90, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_CHAR_NEWLINE); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 90, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = NULL; + __pyx_t_5 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_3))) { + __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); + if (likely(__pyx_t_2)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); + __Pyx_INCREF(__pyx_t_2); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_3, function); + __pyx_t_5 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_t_1}; + __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 90, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "fontTools/feaLib/lexer.py":91 + * if cur_char == "#": + * self.scan_until_(Lexer.CHAR_NEWLINE_) + * return (Lexer.COMMENT, text[start : self.pos_], location) # <<<<<<<<<<<<<< + * + * if self.mode_ is Lexer.MODE_FILENAME_: + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 91, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_COMMENT); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 91, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 91, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_1 = __Pyx_PyObject_GetSlice(__pyx_v_text, 0, 0, &__pyx_v_start, &__pyx_t_4, NULL, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 91, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 91, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_GIVEREF(__pyx_t_3); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3)) __PYX_ERR(0, 91, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_1); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1)) __PYX_ERR(0, 91, __pyx_L1_error); + __Pyx_INCREF(__pyx_v_location); + __Pyx_GIVEREF(__pyx_v_location); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_v_location)) __PYX_ERR(0, 91, __pyx_L1_error); + __pyx_t_3 = 0; + __pyx_t_1 = 0; + __pyx_r = __pyx_t_4; + __pyx_t_4 = 0; + goto __pyx_L0; + + /* "fontTools/feaLib/lexer.py":89 + * self.line_start_ = self.pos_ + * return (Lexer.NEWLINE, None, location) + * if cur_char == "#": # <<<<<<<<<<<<<< + * self.scan_until_(Lexer.CHAR_NEWLINE_) + * return (Lexer.COMMENT, text[start : self.pos_], location) + */ + } + + /* "fontTools/feaLib/lexer.py":93 + * return (Lexer.COMMENT, text[start : self.pos_], location) + * + * if self.mode_ is Lexer.MODE_FILENAME_: # <<<<<<<<<<<<<< + * if cur_char != "(": + * raise FeatureLibError("Expected '(' before file name", location) + */ + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_mode); if 
(unlikely(!__pyx_t_4)) __PYX_ERR(0, 93, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 93, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_MODE_FILENAME); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 93, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_7 = (__pyx_t_4 == __pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_7) { + + /* "fontTools/feaLib/lexer.py":94 + * + * if self.mode_ is Lexer.MODE_FILENAME_: + * if cur_char != "(": # <<<<<<<<<<<<<< + * raise FeatureLibError("Expected '(' before file name", location) + * self.scan_until_(")") + */ + __pyx_t_7 = (__Pyx_PyUnicode_Equals(__pyx_v_cur_char, __pyx_kp_u__4, Py_NE)); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 94, __pyx_L1_error) + if (unlikely(__pyx_t_7)) { + + /* "fontTools/feaLib/lexer.py":95 + * if self.mode_ is Lexer.MODE_FILENAME_: + * if cur_char != "(": + * raise FeatureLibError("Expected '(' before file name", location) # <<<<<<<<<<<<<< + * self.scan_until_(")") + * cur_char = text[self.pos_] if self.pos_ < limit else None + */ + __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_FeatureLibError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 95, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_1 = NULL; + __pyx_t_5 = 0; + #if CYTHON_UNPACK_METHODS + if (unlikely(PyMethod_Check(__pyx_t_4))) { + __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_4); + if (likely(__pyx_t_1)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); + __Pyx_INCREF(__pyx_t_1); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_4, function); + __pyx_t_5 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[3] = {__pyx_t_1, __pyx_kp_u_Expected_before_file_name, __pyx_v_location}; + __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+1-__pyx_t_5, 2+__pyx_t_5); + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 95, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + } + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(0, 95, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":94 + * + * if self.mode_ is Lexer.MODE_FILENAME_: + * if cur_char != "(": # <<<<<<<<<<<<<< + * raise FeatureLibError("Expected '(' before file name", location) + * self.scan_until_(")") + */ + } + + /* "fontTools/feaLib/lexer.py":96 + * if cur_char != "(": + * raise FeatureLibError("Expected '(' before file name", location) + * self.scan_until_(")") # <<<<<<<<<<<<<< + * cur_char = text[self.pos_] if self.pos_ < limit else None + * if cur_char != ")": + */ + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_scan_until); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 96, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_1 = NULL; + __pyx_t_5 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_4))) { + __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_4); + if (likely(__pyx_t_1)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); + __Pyx_INCREF(__pyx_t_1); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_4, function); + __pyx_t_5 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_kp_u__5}; + __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 96, __pyx_L1_error) + 
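+ /* Editorial recap (not Cython output): the surrounding branch expands the
+  * MODE_FILENAME_ handling quoted in the banner comments (lexer.py lines
+  * 93-102): the lexer expects "(", scans up to ")", and returns a FILENAME
+  * token holding the text between the parentheses. Sketch from the banners:
+  *
+  *     if self.mode_ is Lexer.MODE_FILENAME_:
+  *         if cur_char != "(":
+  *             raise FeatureLibError("Expected '(' before file name", location)
+  *         self.scan_until_(")")
+  *         cur_char = text[self.pos_] if self.pos_ < limit else None
+  *         if cur_char != ")":
+  *             raise FeatureLibError("Expected ')' after file name", location)
+  *         self.pos_ += 1
+  *         self.mode_ = Lexer.MODE_NORMAL_
+  *         return (Lexer.FILENAME, text[start + 1 : self.pos_ - 1], location)
+  */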
__Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + } + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "fontTools/feaLib/lexer.py":97 + * raise FeatureLibError("Expected '(' before file name", location) + * self.scan_until_(")") + * cur_char = text[self.pos_] if self.pos_ < limit else None # <<<<<<<<<<<<<< + * if cur_char != ")": + * raise FeatureLibError("Expected ')' after file name", location) + */ + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 97, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_limit); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 97, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = PyObject_RichCompare(__pyx_t_4, __pyx_t_1, Py_LT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 97, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 97, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + if (__pyx_t_7) { + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 97, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_text, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 97, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_3 = __pyx_t_1; + __pyx_t_1 = 0; + } else { + __Pyx_INCREF(Py_None); + __pyx_t_3 = Py_None; + } + __Pyx_DECREF_SET(__pyx_v_cur_char, __pyx_t_3); + __pyx_t_3 = 0; + + /* "fontTools/feaLib/lexer.py":98 + * self.scan_until_(")") + * cur_char = text[self.pos_] if self.pos_ < limit else None + * if cur_char != ")": # <<<<<<<<<<<<<< + * raise FeatureLibError("Expected ')' after file name", location) + * self.pos_ += 1 + */ + __pyx_t_7 = (__Pyx_PyUnicode_Equals(__pyx_v_cur_char, __pyx_kp_u__5, Py_NE)); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 98, __pyx_L1_error) + if (unlikely(__pyx_t_7)) { + + /* "fontTools/feaLib/lexer.py":99 + * cur_char = text[self.pos_] if self.pos_ < limit else None + * if cur_char != ")": + * raise FeatureLibError("Expected ')' after file name", location) # <<<<<<<<<<<<<< + * self.pos_ += 1 + * self.mode_ = Lexer.MODE_NORMAL_ + */ + __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_FeatureLibError); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 99, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = NULL; + __pyx_t_5 = 0; + #if CYTHON_UNPACK_METHODS + if (unlikely(PyMethod_Check(__pyx_t_1))) { + __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_1); + if (likely(__pyx_t_2)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); + __Pyx_INCREF(__pyx_t_2); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_1, function); + __pyx_t_5 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[3] = {__pyx_t_2, __pyx_kp_u_Expected_after_file_name, __pyx_v_location}; + __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+1-__pyx_t_5, 2+__pyx_t_5); + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 99, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + } + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(0, 99, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":98 + * self.scan_until_(")") + * cur_char = text[self.pos_] if self.pos_ < limit else None + * if cur_char != ")": # <<<<<<<<<<<<<< + * raise FeatureLibError("Expected ')' after 
file name", location) + * self.pos_ += 1 + */ + } + + /* "fontTools/feaLib/lexer.py":100 + * if cur_char != ")": + * raise FeatureLibError("Expected ')' after file name", location) + * self.pos_ += 1 # <<<<<<<<<<<<<< + * self.mode_ = Lexer.MODE_NORMAL_ + * return (Lexer.FILENAME, text[start + 1 : self.pos_ - 1], location) + */ + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 100, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_1 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 1, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 100, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_pos, __pyx_t_1) < 0) __PYX_ERR(0, 100, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "fontTools/feaLib/lexer.py":101 + * raise FeatureLibError("Expected ')' after file name", location) + * self.pos_ += 1 + * self.mode_ = Lexer.MODE_NORMAL_ # <<<<<<<<<<<<<< + * return (Lexer.FILENAME, text[start + 1 : self.pos_ - 1], location) + * + */ + __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 101, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_MODE_NORMAL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 101, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_mode, __pyx_t_3) < 0) __PYX_ERR(0, 101, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "fontTools/feaLib/lexer.py":102 + * self.pos_ += 1 + * self.mode_ = Lexer.MODE_NORMAL_ + * return (Lexer.FILENAME, text[start + 1 : self.pos_ - 1], location) # <<<<<<<<<<<<<< + * + * if cur_char == "\\" and next_char in Lexer.CHAR_DIGIT_: + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 102, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_FILENAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 102, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyInt_AddObjC(__pyx_v_start, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 102, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 102, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = __Pyx_PyInt_SubtractObjC(__pyx_t_2, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 102, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_PyObject_GetSlice(__pyx_v_text, 0, 0, &__pyx_t_3, &__pyx_t_4, NULL, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 102, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 102, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_GIVEREF(__pyx_t_1); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1)) __PYX_ERR(0, 102, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_2); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2)) __PYX_ERR(0, 102, __pyx_L1_error); + __Pyx_INCREF(__pyx_v_location); + __Pyx_GIVEREF(__pyx_v_location); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_v_location)) __PYX_ERR(0, 102, __pyx_L1_error); + __pyx_t_1 = 0; + __pyx_t_2 
= 0; + __pyx_r = __pyx_t_4; + __pyx_t_4 = 0; + goto __pyx_L0; + + /* "fontTools/feaLib/lexer.py":93 + * return (Lexer.COMMENT, text[start : self.pos_], location) + * + * if self.mode_ is Lexer.MODE_FILENAME_: # <<<<<<<<<<<<<< + * if cur_char != "(": + * raise FeatureLibError("Expected '(' before file name", location) + */ + } + + /* "fontTools/feaLib/lexer.py":104 + * return (Lexer.FILENAME, text[start + 1 : self.pos_ - 1], location) + * + * if cur_char == "\\" and next_char in Lexer.CHAR_DIGIT_: # <<<<<<<<<<<<<< + * self.pos_ += 1 + * self.scan_over_(Lexer.CHAR_DIGIT_) + */ + __pyx_t_8 = (__Pyx_PyUnicode_Equals(__pyx_v_cur_char, __pyx_kp_u__6, Py_EQ)); if (unlikely((__pyx_t_8 < 0))) __PYX_ERR(0, 104, __pyx_L1_error) + if (__pyx_t_8) { + } else { + __pyx_t_7 = __pyx_t_8; + goto __pyx_L11_bool_binop_done; + } + __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 104, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_CHAR_DIGIT); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 104, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_8 = (__Pyx_PySequence_ContainsTF(__pyx_v_next_char, __pyx_t_2, Py_EQ)); if (unlikely((__pyx_t_8 < 0))) __PYX_ERR(0, 104, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_7 = __pyx_t_8; + __pyx_L11_bool_binop_done:; + if (__pyx_t_7) { + + /* "fontTools/feaLib/lexer.py":105 + * + * if cur_char == "\\" and next_char in Lexer.CHAR_DIGIT_: + * self.pos_ += 1 # <<<<<<<<<<<<<< + * self.scan_over_(Lexer.CHAR_DIGIT_) + * return (Lexer.CID, int(text[start + 1 : self.pos_], 10), location) + */ + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 105, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = __Pyx_PyInt_AddObjC(__pyx_t_2, __pyx_int_1, 1, 1, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 105, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_pos, __pyx_t_4) < 0) __PYX_ERR(0, 105, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "fontTools/feaLib/lexer.py":106 + * if cur_char == "\\" and next_char in Lexer.CHAR_DIGIT_: + * self.pos_ += 1 + * self.scan_over_(Lexer.CHAR_DIGIT_) # <<<<<<<<<<<<<< + * return (Lexer.CID, int(text[start + 1 : self.pos_], 10), location) + * if cur_char == "@": + */ + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_scan_over); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 106, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 106, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_CHAR_DIGIT); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 106, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = NULL; + __pyx_t_5 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_2))) { + __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_2); + if (likely(__pyx_t_1)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); + __Pyx_INCREF(__pyx_t_1); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_2, function); + __pyx_t_5 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_t_3}; + __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + 
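+ /* Editorial recap (not Cython output): the surrounding block expands the CID
+  * branch quoted in the banner comments (lexer.py lines 104-107): a backslash
+  * followed by digits becomes a base-10 Lexer.CID token. Sketch from the banners:
+  *
+  *     if cur_char == "\\" and next_char in Lexer.CHAR_DIGIT_:
+  *         self.pos_ += 1
+  *         self.scan_over_(Lexer.CHAR_DIGIT_)
+  *         return (Lexer.CID, int(text[start + 1 : self.pos_], 10), location)
+  */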
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 106, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "fontTools/feaLib/lexer.py":107 + * self.pos_ += 1 + * self.scan_over_(Lexer.CHAR_DIGIT_) + * return (Lexer.CID, int(text[start + 1 : self.pos_], 10), location) # <<<<<<<<<<<<<< + * if cur_char == "@": + * self.pos_ += 1 + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 107, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_CID); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 107, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_PyInt_AddObjC(__pyx_v_start, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 107, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 107, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_1 = __Pyx_PyObject_GetSlice(__pyx_v_text, 0, 0, &__pyx_t_4, &__pyx_t_3, NULL, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 107, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 107, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_GIVEREF(__pyx_t_1); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1)) __PYX_ERR(0, 107, __pyx_L1_error); + __Pyx_INCREF(__pyx_int_10); + __Pyx_GIVEREF(__pyx_int_10); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_10)) __PYX_ERR(0, 107, __pyx_L1_error); + __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)(&PyInt_Type)), __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 107, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 107, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_GIVEREF(__pyx_t_2); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2)) __PYX_ERR(0, 107, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_1); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1)) __PYX_ERR(0, 107, __pyx_L1_error); + __Pyx_INCREF(__pyx_v_location); + __Pyx_GIVEREF(__pyx_v_location); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_v_location)) __PYX_ERR(0, 107, __pyx_L1_error); + __pyx_t_2 = 0; + __pyx_t_1 = 0; + __pyx_r = __pyx_t_3; + __pyx_t_3 = 0; + goto __pyx_L0; + + /* "fontTools/feaLib/lexer.py":104 + * return (Lexer.FILENAME, text[start + 1 : self.pos_ - 1], location) + * + * if cur_char == "\\" and next_char in Lexer.CHAR_DIGIT_: # <<<<<<<<<<<<<< + * self.pos_ += 1 + * self.scan_over_(Lexer.CHAR_DIGIT_) + */ + } + + /* "fontTools/feaLib/lexer.py":108 + * self.scan_over_(Lexer.CHAR_DIGIT_) + * return (Lexer.CID, int(text[start + 1 : self.pos_], 10), location) + * if cur_char == "@": # <<<<<<<<<<<<<< + * self.pos_ += 1 + * self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) + */ + __pyx_t_7 = (__Pyx_PyUnicode_Equals(__pyx_v_cur_char, __pyx_kp_u__7, Py_EQ)); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 108, __pyx_L1_error) + if (__pyx_t_7) { + + /* "fontTools/feaLib/lexer.py":109 + * return (Lexer.CID, int(text[start + 1 : self.pos_], 10), location) + * if cur_char == "@": + * self.pos_ += 1 # <<<<<<<<<<<<<< + * self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) + * glyphclass = 
text[start + 1 : self.pos_] + */ + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 109, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_1 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 1, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 109, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_pos, __pyx_t_1) < 0) __PYX_ERR(0, 109, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "fontTools/feaLib/lexer.py":110 + * if cur_char == "@": + * self.pos_ += 1 + * self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) # <<<<<<<<<<<<<< + * glyphclass = text[start + 1 : self.pos_] + * if len(glyphclass) < 1: + */ + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_scan_over); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 110, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 110, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_CHAR_NAME_CONTINUATION); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 110, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = NULL; + __pyx_t_5 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_3))) { + __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); + if (likely(__pyx_t_2)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); + __Pyx_INCREF(__pyx_t_2); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_3, function); + __pyx_t_5 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_t_4}; + __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 110, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "fontTools/feaLib/lexer.py":111 + * self.pos_ += 1 + * self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) + * glyphclass = text[start + 1 : self.pos_] # <<<<<<<<<<<<<< + * if len(glyphclass) < 1: + * raise FeatureLibError("Expected glyph class name", location) + */ + __pyx_t_1 = __Pyx_PyInt_AddObjC(__pyx_v_start, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 111, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 111, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyObject_GetSlice(__pyx_v_text, 0, 0, &__pyx_t_1, &__pyx_t_3, NULL, 0, 0, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 111, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v_glyphclass = __pyx_t_4; + __pyx_t_4 = 0; + + /* "fontTools/feaLib/lexer.py":112 + * self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) + * glyphclass = text[start + 1 : self.pos_] + * if len(glyphclass) < 1: # <<<<<<<<<<<<<< + * raise FeatureLibError("Expected glyph class name", location) + * if not Lexer.RE_GLYPHCLASS.match(glyphclass): + */ + __pyx_t_6 = PyObject_Length(__pyx_v_glyphclass); if (unlikely(__pyx_t_6 == ((Py_ssize_t)-1))) __PYX_ERR(0, 112, __pyx_L1_error) + __pyx_t_7 = (__pyx_t_6 < 1); + if (unlikely(__pyx_t_7)) { + + /* "fontTools/feaLib/lexer.py":113 + * glyphclass = text[start + 1 : self.pos_] + * if 
len(glyphclass) < 1: + * raise FeatureLibError("Expected glyph class name", location) # <<<<<<<<<<<<<< + * if not Lexer.RE_GLYPHCLASS.match(glyphclass): + * raise FeatureLibError( + */ + __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_FeatureLibError); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 113, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_1 = NULL; + __pyx_t_5 = 0; + #if CYTHON_UNPACK_METHODS + if (unlikely(PyMethod_Check(__pyx_t_3))) { + __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_3); + if (likely(__pyx_t_1)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); + __Pyx_INCREF(__pyx_t_1); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_3, function); + __pyx_t_5 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[3] = {__pyx_t_1, __pyx_kp_u_Expected_glyph_class_name, __pyx_v_location}; + __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_5, 2+__pyx_t_5); + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 113, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } + __Pyx_Raise(__pyx_t_4, 0, 0, 0); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __PYX_ERR(0, 113, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":112 + * self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) + * glyphclass = text[start + 1 : self.pos_] + * if len(glyphclass) < 1: # <<<<<<<<<<<<<< + * raise FeatureLibError("Expected glyph class name", location) + * if not Lexer.RE_GLYPHCLASS.match(glyphclass): + */ + } + + /* "fontTools/feaLib/lexer.py":114 + * if len(glyphclass) < 1: + * raise FeatureLibError("Expected glyph class name", location) + * if not Lexer.RE_GLYPHCLASS.match(glyphclass): # <<<<<<<<<<<<<< + * raise FeatureLibError( + * "Glyph class names must consist of letters, digits, " + */ + __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 114, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_RE_GLYPHCLASS); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 114, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_match); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 114, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = NULL; + __pyx_t_5 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_3))) { + __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_3); + if (likely(__pyx_t_1)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); + __Pyx_INCREF(__pyx_t_1); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_3, function); + __pyx_t_5 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_v_glyphclass}; + __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 114, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } + __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 114, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_8 = (!__pyx_t_7); + if (unlikely(__pyx_t_8)) { + + /* "fontTools/feaLib/lexer.py":115 + * raise FeatureLibError("Expected glyph class name", location) + * if not Lexer.RE_GLYPHCLASS.match(glyphclass): + * raise FeatureLibError( # <<<<<<<<<<<<<< + * "Glyph class names must consist of letters, digits, " + * "underscore, period or hyphen", + */ + 
__Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_FeatureLibError); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 115, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + + /* "fontTools/feaLib/lexer.py":118 + * "Glyph class names must consist of letters, digits, " + * "underscore, period or hyphen", + * location, # <<<<<<<<<<<<<< + * ) + * return (Lexer.GLYPHCLASS, glyphclass, location) + */ + __pyx_t_1 = NULL; + __pyx_t_5 = 0; + #if CYTHON_UNPACK_METHODS + if (unlikely(PyMethod_Check(__pyx_t_3))) { + __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_3); + if (likely(__pyx_t_1)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); + __Pyx_INCREF(__pyx_t_1); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_3, function); + __pyx_t_5 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[3] = {__pyx_t_1, __pyx_kp_u_Glyph_class_names_must_consist_o, __pyx_v_location}; + __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_5, 2+__pyx_t_5); + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 115, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } + __Pyx_Raise(__pyx_t_4, 0, 0, 0); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __PYX_ERR(0, 115, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":114 + * if len(glyphclass) < 1: + * raise FeatureLibError("Expected glyph class name", location) + * if not Lexer.RE_GLYPHCLASS.match(glyphclass): # <<<<<<<<<<<<<< + * raise FeatureLibError( + * "Glyph class names must consist of letters, digits, " + */ + } + + /* "fontTools/feaLib/lexer.py":120 + * location, + * ) + * return (Lexer.GLYPHCLASS, glyphclass, location) # <<<<<<<<<<<<<< + * if cur_char in Lexer.CHAR_NAME_START_: + * self.pos_ += 1 + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 120, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_GLYPHCLASS); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 120, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 120, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_GIVEREF(__pyx_t_3); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3)) __PYX_ERR(0, 120, __pyx_L1_error); + __Pyx_INCREF(__pyx_v_glyphclass); + __Pyx_GIVEREF(__pyx_v_glyphclass); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_v_glyphclass)) __PYX_ERR(0, 120, __pyx_L1_error); + __Pyx_INCREF(__pyx_v_location); + __Pyx_GIVEREF(__pyx_v_location); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_v_location)) __PYX_ERR(0, 120, __pyx_L1_error); + __pyx_t_3 = 0; + __pyx_r = __pyx_t_4; + __pyx_t_4 = 0; + goto __pyx_L0; + + /* "fontTools/feaLib/lexer.py":108 + * self.scan_over_(Lexer.CHAR_DIGIT_) + * return (Lexer.CID, int(text[start + 1 : self.pos_], 10), location) + * if cur_char == "@": # <<<<<<<<<<<<<< + * self.pos_ += 1 + * self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) + */ + } + + /* "fontTools/feaLib/lexer.py":121 + * ) + * return (Lexer.GLYPHCLASS, glyphclass, location) + * if cur_char in Lexer.CHAR_NAME_START_: # <<<<<<<<<<<<<< + * self.pos_ += 1 + * self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) + */ + __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 121, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_CHAR_NAME_START); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 121, __pyx_L1_error) + 
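+ /* Editorial recap (not Cython output): the code above expands the "@" glyph
+  * class branch quoted in the banner comments (lexer.py lines 108-120): scan
+  * name-continuation characters, require a non-empty name matching
+  * Lexer.RE_GLYPHCLASS, and return a GLYPHCLASS token. Sketch from the banners:
+  *
+  *     if cur_char == "@":
+  *         self.pos_ += 1
+  *         self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_)
+  *         glyphclass = text[start + 1 : self.pos_]
+  *         if len(glyphclass) < 1:
+  *             raise FeatureLibError("Expected glyph class name", location)
+  *         if not Lexer.RE_GLYPHCLASS.match(glyphclass):
+  *             raise FeatureLibError(
+  *                 "Glyph class names must consist of letters, digits, "
+  *                 "underscore, period or hyphen",
+  *                 location,
+  *             )
+  *         return (Lexer.GLYPHCLASS, glyphclass, location)
+  */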
__Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_8 = (__Pyx_PySequence_ContainsTF(__pyx_v_cur_char, __pyx_t_3, Py_EQ)); if (unlikely((__pyx_t_8 < 0))) __PYX_ERR(0, 121, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_8) { + + /* "fontTools/feaLib/lexer.py":122 + * return (Lexer.GLYPHCLASS, glyphclass, location) + * if cur_char in Lexer.CHAR_NAME_START_: + * self.pos_ += 1 # <<<<<<<<<<<<<< + * self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) + * token = text[start : self.pos_] + */ + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 122, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 1, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 122, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_pos, __pyx_t_4) < 0) __PYX_ERR(0, 122, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "fontTools/feaLib/lexer.py":123 + * if cur_char in Lexer.CHAR_NAME_START_: + * self.pos_ += 1 + * self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) # <<<<<<<<<<<<<< + * token = text[start : self.pos_] + * if token == "include": + */ + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_scan_over); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 123, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 123, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_CHAR_NAME_CONTINUATION); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 123, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = NULL; + __pyx_t_5 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_3))) { + __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_3); + if (likely(__pyx_t_1)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); + __Pyx_INCREF(__pyx_t_1); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_3, function); + __pyx_t_5 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_t_2}; + __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 123, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "fontTools/feaLib/lexer.py":124 + * self.pos_ += 1 + * self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) + * token = text[start : self.pos_] # <<<<<<<<<<<<<< + * if token == "include": + * self.mode_ = Lexer.MODE_FILENAME_ + */ + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 124, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = __Pyx_PyObject_GetSlice(__pyx_v_text, 0, 0, &__pyx_v_start, &__pyx_t_4, NULL, 0, 0, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 124, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_v_token = __pyx_t_3; + __pyx_t_3 = 0; + + /* "fontTools/feaLib/lexer.py":125 + * self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) + * token = text[start : self.pos_] + * if token == "include": # <<<<<<<<<<<<<< + * self.mode_ = Lexer.MODE_FILENAME_ + * return (Lexer.NAME, token, location) + */ + __pyx_t_8 = (__Pyx_PyUnicode_Equals(__pyx_v_token, 
__pyx_n_u_include, Py_EQ)); if (unlikely((__pyx_t_8 < 0))) __PYX_ERR(0, 125, __pyx_L1_error) + if (__pyx_t_8) { + + /* "fontTools/feaLib/lexer.py":126 + * token = text[start : self.pos_] + * if token == "include": + * self.mode_ = Lexer.MODE_FILENAME_ # <<<<<<<<<<<<<< + * return (Lexer.NAME, token, location) + * if cur_char == "0" and next_char in "xX": + */ + __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 126, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_MODE_FILENAME); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 126, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_mode, __pyx_t_4) < 0) __PYX_ERR(0, 126, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "fontTools/feaLib/lexer.py":125 + * self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) + * token = text[start : self.pos_] + * if token == "include": # <<<<<<<<<<<<<< + * self.mode_ = Lexer.MODE_FILENAME_ + * return (Lexer.NAME, token, location) + */ + } + + /* "fontTools/feaLib/lexer.py":127 + * if token == "include": + * self.mode_ = Lexer.MODE_FILENAME_ + * return (Lexer.NAME, token, location) # <<<<<<<<<<<<<< + * if cur_char == "0" and next_char in "xX": + * self.pos_ += 2 + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 127, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_NAME); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 127, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 127, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_GIVEREF(__pyx_t_3); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3)) __PYX_ERR(0, 127, __pyx_L1_error); + __Pyx_INCREF(__pyx_v_token); + __Pyx_GIVEREF(__pyx_v_token); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_v_token)) __PYX_ERR(0, 127, __pyx_L1_error); + __Pyx_INCREF(__pyx_v_location); + __Pyx_GIVEREF(__pyx_v_location); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_v_location)) __PYX_ERR(0, 127, __pyx_L1_error); + __pyx_t_3 = 0; + __pyx_r = __pyx_t_4; + __pyx_t_4 = 0; + goto __pyx_L0; + + /* "fontTools/feaLib/lexer.py":121 + * ) + * return (Lexer.GLYPHCLASS, glyphclass, location) + * if cur_char in Lexer.CHAR_NAME_START_: # <<<<<<<<<<<<<< + * self.pos_ += 1 + * self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) + */ + } + + /* "fontTools/feaLib/lexer.py":128 + * self.mode_ = Lexer.MODE_FILENAME_ + * return (Lexer.NAME, token, location) + * if cur_char == "0" and next_char in "xX": # <<<<<<<<<<<<<< + * self.pos_ += 2 + * self.scan_over_(Lexer.CHAR_HEXDIGIT_) + */ + __pyx_t_7 = (__Pyx_PyUnicode_Equals(__pyx_v_cur_char, __pyx_kp_u_0, Py_EQ)); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 128, __pyx_L1_error) + if (__pyx_t_7) { + } else { + __pyx_t_8 = __pyx_t_7; + goto __pyx_L19_bool_binop_done; + } + __pyx_t_7 = (__Pyx_PyUnicode_ContainsTF(__pyx_v_next_char, __pyx_n_u_xX, Py_EQ)); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 128, __pyx_L1_error) + __pyx_t_8 = __pyx_t_7; + __pyx_L19_bool_binop_done:; + if (__pyx_t_8) { + + /* "fontTools/feaLib/lexer.py":129 + * return (Lexer.NAME, token, location) + * if cur_char == "0" and next_char in "xX": + * self.pos_ += 2 # <<<<<<<<<<<<<< + * self.scan_over_(Lexer.CHAR_HEXDIGIT_) + * return (Lexer.HEXADECIMAL, int(text[start : 
self.pos_], 16), location) + */ + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 129, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = __Pyx_PyInt_AddObjC(__pyx_t_4, __pyx_int_2, 2, 1, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 129, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_pos, __pyx_t_3) < 0) __PYX_ERR(0, 129, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "fontTools/feaLib/lexer.py":130 + * if cur_char == "0" and next_char in "xX": + * self.pos_ += 2 + * self.scan_over_(Lexer.CHAR_HEXDIGIT_) # <<<<<<<<<<<<<< + * return (Lexer.HEXADECIMAL, int(text[start : self.pos_], 16), location) + * if cur_char == "0" and next_char in Lexer.CHAR_DIGIT_: + */ + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_scan_over); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 130, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 130, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_CHAR_HEXDIGIT); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 130, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = NULL; + __pyx_t_5 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_4))) { + __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_4); + if (likely(__pyx_t_2)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); + __Pyx_INCREF(__pyx_t_2); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_4, function); + __pyx_t_5 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_t_1}; + __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 130, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + } + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "fontTools/feaLib/lexer.py":131 + * self.pos_ += 2 + * self.scan_over_(Lexer.CHAR_HEXDIGIT_) + * return (Lexer.HEXADECIMAL, int(text[start : self.pos_], 16), location) # <<<<<<<<<<<<<< + * if cur_char == "0" and next_char in Lexer.CHAR_DIGIT_: + * self.scan_over_(Lexer.CHAR_DIGIT_) + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 131, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_HEXADECIMAL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 131, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 131, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_1 = __Pyx_PyObject_GetSlice(__pyx_v_text, 0, 0, &__pyx_v_start, &__pyx_t_3, NULL, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 131, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 131, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_GIVEREF(__pyx_t_1); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1)) __PYX_ERR(0, 131, __pyx_L1_error); + __Pyx_INCREF(__pyx_int_16); + __Pyx_GIVEREF(__pyx_int_16); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_16)) __PYX_ERR(0, 
131, __pyx_L1_error); + __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)(&PyInt_Type)), __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 131, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 131, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_GIVEREF(__pyx_t_4); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4)) __PYX_ERR(0, 131, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_1); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1)) __PYX_ERR(0, 131, __pyx_L1_error); + __Pyx_INCREF(__pyx_v_location); + __Pyx_GIVEREF(__pyx_v_location); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_v_location)) __PYX_ERR(0, 131, __pyx_L1_error); + __pyx_t_4 = 0; + __pyx_t_1 = 0; + __pyx_r = __pyx_t_3; + __pyx_t_3 = 0; + goto __pyx_L0; + + /* "fontTools/feaLib/lexer.py":128 + * self.mode_ = Lexer.MODE_FILENAME_ + * return (Lexer.NAME, token, location) + * if cur_char == "0" and next_char in "xX": # <<<<<<<<<<<<<< + * self.pos_ += 2 + * self.scan_over_(Lexer.CHAR_HEXDIGIT_) + */ + } + + /* "fontTools/feaLib/lexer.py":132 + * self.scan_over_(Lexer.CHAR_HEXDIGIT_) + * return (Lexer.HEXADECIMAL, int(text[start : self.pos_], 16), location) + * if cur_char == "0" and next_char in Lexer.CHAR_DIGIT_: # <<<<<<<<<<<<<< + * self.scan_over_(Lexer.CHAR_DIGIT_) + * return (Lexer.OCTAL, int(text[start : self.pos_], 8), location) + */ + __pyx_t_7 = (__Pyx_PyUnicode_Equals(__pyx_v_cur_char, __pyx_kp_u_0, Py_EQ)); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 132, __pyx_L1_error) + if (__pyx_t_7) { + } else { + __pyx_t_8 = __pyx_t_7; + goto __pyx_L22_bool_binop_done; + } + __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 132, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_CHAR_DIGIT); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 132, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_7 = (__Pyx_PySequence_ContainsTF(__pyx_v_next_char, __pyx_t_1, Py_EQ)); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 132, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_8 = __pyx_t_7; + __pyx_L22_bool_binop_done:; + if (__pyx_t_8) { + + /* "fontTools/feaLib/lexer.py":133 + * return (Lexer.HEXADECIMAL, int(text[start : self.pos_], 16), location) + * if cur_char == "0" and next_char in Lexer.CHAR_DIGIT_: + * self.scan_over_(Lexer.CHAR_DIGIT_) # <<<<<<<<<<<<<< + * return (Lexer.OCTAL, int(text[start : self.pos_], 8), location) + * if cur_char in Lexer.CHAR_DIGIT_: + */ + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_scan_over); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 133, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 133, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_CHAR_DIGIT); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 133, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = NULL; + __pyx_t_5 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_3))) { + __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); + if (likely(__pyx_t_4)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); + __Pyx_INCREF(__pyx_t_4); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_3, function); + __pyx_t_5 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = 
{__pyx_t_4, __pyx_t_2}; + __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); + __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 133, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "fontTools/feaLib/lexer.py":134 + * if cur_char == "0" and next_char in Lexer.CHAR_DIGIT_: + * self.scan_over_(Lexer.CHAR_DIGIT_) + * return (Lexer.OCTAL, int(text[start : self.pos_], 8), location) # <<<<<<<<<<<<<< + * if cur_char in Lexer.CHAR_DIGIT_: + * self.scan_over_(Lexer.CHAR_DIGIT_) + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 134, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_OCTAL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 134, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 134, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyObject_GetSlice(__pyx_v_text, 0, 0, &__pyx_v_start, &__pyx_t_1, NULL, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 134, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 134, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GIVEREF(__pyx_t_2); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2)) __PYX_ERR(0, 134, __pyx_L1_error); + __Pyx_INCREF(__pyx_int_8); + __Pyx_GIVEREF(__pyx_int_8); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_8)) __PYX_ERR(0, 134, __pyx_L1_error); + __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)(&PyInt_Type)), __pyx_t_1, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 134, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 134, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GIVEREF(__pyx_t_3); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3)) __PYX_ERR(0, 134, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_2); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_2)) __PYX_ERR(0, 134, __pyx_L1_error); + __Pyx_INCREF(__pyx_v_location); + __Pyx_GIVEREF(__pyx_v_location); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_location)) __PYX_ERR(0, 134, __pyx_L1_error); + __pyx_t_3 = 0; + __pyx_t_2 = 0; + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "fontTools/feaLib/lexer.py":132 + * self.scan_over_(Lexer.CHAR_HEXDIGIT_) + * return (Lexer.HEXADECIMAL, int(text[start : self.pos_], 16), location) + * if cur_char == "0" and next_char in Lexer.CHAR_DIGIT_: # <<<<<<<<<<<<<< + * self.scan_over_(Lexer.CHAR_DIGIT_) + * return (Lexer.OCTAL, int(text[start : self.pos_], 8), location) + */ + } + + /* "fontTools/feaLib/lexer.py":135 + * self.scan_over_(Lexer.CHAR_DIGIT_) + * return (Lexer.OCTAL, int(text[start : self.pos_], 8), location) + * if cur_char in Lexer.CHAR_DIGIT_: # <<<<<<<<<<<<<< + * self.scan_over_(Lexer.CHAR_DIGIT_) + * if self.pos_ >= limit or text[self.pos_] != ".": + */ + __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 135, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_CHAR_DIGIT); if (unlikely(!__pyx_t_2)) 
__PYX_ERR(0, 135, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_8 = (__Pyx_PySequence_ContainsTF(__pyx_v_cur_char, __pyx_t_2, Py_EQ)); if (unlikely((__pyx_t_8 < 0))) __PYX_ERR(0, 135, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + if (__pyx_t_8) { + + /* "fontTools/feaLib/lexer.py":136 + * return (Lexer.OCTAL, int(text[start : self.pos_], 8), location) + * if cur_char in Lexer.CHAR_DIGIT_: + * self.scan_over_(Lexer.CHAR_DIGIT_) # <<<<<<<<<<<<<< + * if self.pos_ >= limit or text[self.pos_] != ".": + * return (Lexer.NUMBER, int(text[start : self.pos_], 10), location) + */ + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_scan_over); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 136, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 136, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_CHAR_DIGIT); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 136, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = NULL; + __pyx_t_5 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_1))) { + __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_1); + if (likely(__pyx_t_3)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_1, function); + __pyx_t_5 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4}; + __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 136, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + } + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "fontTools/feaLib/lexer.py":137 + * if cur_char in Lexer.CHAR_DIGIT_: + * self.scan_over_(Lexer.CHAR_DIGIT_) + * if self.pos_ >= limit or text[self.pos_] != ".": # <<<<<<<<<<<<<< + * return (Lexer.NUMBER, int(text[start : self.pos_], 10), location) + * self.scan_over_(".") + */ + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 137, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_limit); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 137, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_4 = PyObject_RichCompare(__pyx_t_2, __pyx_t_1, Py_GE); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 137, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 137, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (!__pyx_t_7) { + } else { + __pyx_t_8 = __pyx_t_7; + goto __pyx_L26_bool_binop_done; + } + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 137, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_text, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 137, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_7 = (__Pyx_PyUnicode_Equals(__pyx_t_1, __pyx_kp_u__8, Py_NE)); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 137, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_8 = __pyx_t_7; + 
__pyx_L26_bool_binop_done:; + if (__pyx_t_8) { + + /* "fontTools/feaLib/lexer.py":138 + * self.scan_over_(Lexer.CHAR_DIGIT_) + * if self.pos_ >= limit or text[self.pos_] != ".": + * return (Lexer.NUMBER, int(text[start : self.pos_], 10), location) # <<<<<<<<<<<<<< + * self.scan_over_(".") + * self.scan_over_(Lexer.CHAR_DIGIT_) + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 138, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_NUMBER); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 138, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 138, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyObject_GetSlice(__pyx_v_text, 0, 0, &__pyx_v_start, &__pyx_t_1, NULL, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 138, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 138, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GIVEREF(__pyx_t_2); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2)) __PYX_ERR(0, 138, __pyx_L1_error); + __Pyx_INCREF(__pyx_int_10); + __Pyx_GIVEREF(__pyx_int_10); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_10)) __PYX_ERR(0, 138, __pyx_L1_error); + __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)(&PyInt_Type)), __pyx_t_1, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 138, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 138, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GIVEREF(__pyx_t_4); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_4)) __PYX_ERR(0, 138, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_2); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_2)) __PYX_ERR(0, 138, __pyx_L1_error); + __Pyx_INCREF(__pyx_v_location); + __Pyx_GIVEREF(__pyx_v_location); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_location)) __PYX_ERR(0, 138, __pyx_L1_error); + __pyx_t_4 = 0; + __pyx_t_2 = 0; + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "fontTools/feaLib/lexer.py":137 + * if cur_char in Lexer.CHAR_DIGIT_: + * self.scan_over_(Lexer.CHAR_DIGIT_) + * if self.pos_ >= limit or text[self.pos_] != ".": # <<<<<<<<<<<<<< + * return (Lexer.NUMBER, int(text[start : self.pos_], 10), location) + * self.scan_over_(".") + */ + } + + /* "fontTools/feaLib/lexer.py":139 + * if self.pos_ >= limit or text[self.pos_] != ".": + * return (Lexer.NUMBER, int(text[start : self.pos_], 10), location) + * self.scan_over_(".") # <<<<<<<<<<<<<< + * self.scan_over_(Lexer.CHAR_DIGIT_) + * return (Lexer.FLOAT, float(text[start : self.pos_]), location) + */ + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_scan_over); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 139, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = NULL; + __pyx_t_5 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_2))) { + __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); + if (likely(__pyx_t_4)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); + __Pyx_INCREF(__pyx_t_4); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_2, function); + __pyx_t_5 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_kp_u__8}; + __pyx_t_1 = 
__Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); + __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 139, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "fontTools/feaLib/lexer.py":140 + * return (Lexer.NUMBER, int(text[start : self.pos_], 10), location) + * self.scan_over_(".") + * self.scan_over_(Lexer.CHAR_DIGIT_) # <<<<<<<<<<<<<< + * return (Lexer.FLOAT, float(text[start : self.pos_]), location) + * if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_: + */ + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_scan_over); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 140, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 140, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_CHAR_DIGIT); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 140, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = NULL; + __pyx_t_5 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_2))) { + __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); + if (likely(__pyx_t_4)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); + __Pyx_INCREF(__pyx_t_4); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_2, function); + __pyx_t_5 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_3}; + __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); + __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 140, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "fontTools/feaLib/lexer.py":141 + * self.scan_over_(".") + * self.scan_over_(Lexer.CHAR_DIGIT_) + * return (Lexer.FLOAT, float(text[start : self.pos_]), location) # <<<<<<<<<<<<<< + * if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_: + * self.pos_ += 1 + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 141, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_FLOAT); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 141, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 141, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_3 = __Pyx_PyObject_GetSlice(__pyx_v_text, 0, 0, &__pyx_v_start, &__pyx_t_1, NULL, 0, 0, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 141, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_PyNumber_Float(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 141, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 141, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_GIVEREF(__pyx_t_2); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2)) __PYX_ERR(0, 141, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_1); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1)) __PYX_ERR(0, 141, __pyx_L1_error); + __Pyx_INCREF(__pyx_v_location); + __Pyx_GIVEREF(__pyx_v_location); + if 
(__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_v_location)) __PYX_ERR(0, 141, __pyx_L1_error); + __pyx_t_2 = 0; + __pyx_t_1 = 0; + __pyx_r = __pyx_t_3; + __pyx_t_3 = 0; + goto __pyx_L0; + + /* "fontTools/feaLib/lexer.py":135 + * self.scan_over_(Lexer.CHAR_DIGIT_) + * return (Lexer.OCTAL, int(text[start : self.pos_], 8), location) + * if cur_char in Lexer.CHAR_DIGIT_: # <<<<<<<<<<<<<< + * self.scan_over_(Lexer.CHAR_DIGIT_) + * if self.pos_ >= limit or text[self.pos_] != ".": + */ + } + + /* "fontTools/feaLib/lexer.py":142 + * self.scan_over_(Lexer.CHAR_DIGIT_) + * return (Lexer.FLOAT, float(text[start : self.pos_]), location) + * if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_: # <<<<<<<<<<<<<< + * self.pos_ += 1 + * self.scan_over_(Lexer.CHAR_DIGIT_) + */ + __pyx_t_7 = (__Pyx_PyUnicode_Equals(__pyx_v_cur_char, __pyx_kp_u__9, Py_EQ)); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 142, __pyx_L1_error) + if (__pyx_t_7) { + } else { + __pyx_t_8 = __pyx_t_7; + goto __pyx_L29_bool_binop_done; + } + __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 142, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_CHAR_DIGIT); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 142, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_7 = (__Pyx_PySequence_ContainsTF(__pyx_v_next_char, __pyx_t_1, Py_EQ)); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 142, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_8 = __pyx_t_7; + __pyx_L29_bool_binop_done:; + if (__pyx_t_8) { + + /* "fontTools/feaLib/lexer.py":143 + * return (Lexer.FLOAT, float(text[start : self.pos_]), location) + * if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_: + * self.pos_ += 1 # <<<<<<<<<<<<<< + * self.scan_over_(Lexer.CHAR_DIGIT_) + * if self.pos_ >= limit or text[self.pos_] != ".": + */ + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 143, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_3 = __Pyx_PyInt_AddObjC(__pyx_t_1, __pyx_int_1, 1, 1, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 143, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_pos, __pyx_t_3) < 0) __PYX_ERR(0, 143, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "fontTools/feaLib/lexer.py":144 + * if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_: + * self.pos_ += 1 + * self.scan_over_(Lexer.CHAR_DIGIT_) # <<<<<<<<<<<<<< + * if self.pos_ >= limit or text[self.pos_] != ".": + * return (Lexer.NUMBER, int(text[start : self.pos_], 10), location) + */ + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_scan_over); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 144, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 144, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_CHAR_DIGIT); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 144, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = NULL; + __pyx_t_5 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_1))) { + __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_1); + if (likely(__pyx_t_2)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); + __Pyx_INCREF(__pyx_t_2); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_1, 
function); + __pyx_t_5 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_t_4}; + __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 144, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + } + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "fontTools/feaLib/lexer.py":145 + * self.pos_ += 1 + * self.scan_over_(Lexer.CHAR_DIGIT_) + * if self.pos_ >= limit or text[self.pos_] != ".": # <<<<<<<<<<<<<< + * return (Lexer.NUMBER, int(text[start : self.pos_], 10), location) + * self.scan_over_(".") + */ + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 145, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_limit); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 145, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_t_1, Py_GE); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 145, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 145, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (!__pyx_t_7) { + } else { + __pyx_t_8 = __pyx_t_7; + goto __pyx_L32_bool_binop_done; + } + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 145, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_text, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 145, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_7 = (__Pyx_PyUnicode_Equals(__pyx_t_1, __pyx_kp_u__8, Py_NE)); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 145, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_8 = __pyx_t_7; + __pyx_L32_bool_binop_done:; + if (__pyx_t_8) { + + /* "fontTools/feaLib/lexer.py":146 + * self.scan_over_(Lexer.CHAR_DIGIT_) + * if self.pos_ >= limit or text[self.pos_] != ".": + * return (Lexer.NUMBER, int(text[start : self.pos_], 10), location) # <<<<<<<<<<<<<< + * self.scan_over_(".") + * self.scan_over_(Lexer.CHAR_DIGIT_) + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 146, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_NUMBER); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 146, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 146, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_3 = __Pyx_PyObject_GetSlice(__pyx_v_text, 0, 0, &__pyx_v_start, &__pyx_t_1, NULL, 0, 0, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 146, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 146, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GIVEREF(__pyx_t_3); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3)) __PYX_ERR(0, 146, __pyx_L1_error); + __Pyx_INCREF(__pyx_int_10); + __Pyx_GIVEREF(__pyx_int_10); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_10)) __PYX_ERR(0, 146, __pyx_L1_error); + __pyx_t_3 = 0; 
+ __pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)(&PyInt_Type)), __pyx_t_1, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 146, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 146, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GIVEREF(__pyx_t_4); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_4)) __PYX_ERR(0, 146, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_3); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_3)) __PYX_ERR(0, 146, __pyx_L1_error); + __Pyx_INCREF(__pyx_v_location); + __Pyx_GIVEREF(__pyx_v_location); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_location)) __PYX_ERR(0, 146, __pyx_L1_error); + __pyx_t_4 = 0; + __pyx_t_3 = 0; + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "fontTools/feaLib/lexer.py":145 + * self.pos_ += 1 + * self.scan_over_(Lexer.CHAR_DIGIT_) + * if self.pos_ >= limit or text[self.pos_] != ".": # <<<<<<<<<<<<<< + * return (Lexer.NUMBER, int(text[start : self.pos_], 10), location) + * self.scan_over_(".") + */ + } + + /* "fontTools/feaLib/lexer.py":147 + * if self.pos_ >= limit or text[self.pos_] != ".": + * return (Lexer.NUMBER, int(text[start : self.pos_], 10), location) + * self.scan_over_(".") # <<<<<<<<<<<<<< + * self.scan_over_(Lexer.CHAR_DIGIT_) + * return (Lexer.FLOAT, float(text[start : self.pos_]), location) + */ + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_scan_over); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 147, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = NULL; + __pyx_t_5 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_3))) { + __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); + if (likely(__pyx_t_4)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); + __Pyx_INCREF(__pyx_t_4); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_3, function); + __pyx_t_5 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_kp_u__8}; + __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); + __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 147, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "fontTools/feaLib/lexer.py":148 + * return (Lexer.NUMBER, int(text[start : self.pos_], 10), location) + * self.scan_over_(".") + * self.scan_over_(Lexer.CHAR_DIGIT_) # <<<<<<<<<<<<<< + * return (Lexer.FLOAT, float(text[start : self.pos_]), location) + * if cur_char in Lexer.CHAR_SYMBOL_: + */ + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_scan_over); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 148, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 148, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_CHAR_DIGIT); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 148, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = NULL; + __pyx_t_5 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_3))) { + __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); + if (likely(__pyx_t_4)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); + __Pyx_INCREF(__pyx_t_4); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_3, function); + __pyx_t_5 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_2}; + __pyx_t_1 
= __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); + __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 148, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "fontTools/feaLib/lexer.py":149 + * self.scan_over_(".") + * self.scan_over_(Lexer.CHAR_DIGIT_) + * return (Lexer.FLOAT, float(text[start : self.pos_]), location) # <<<<<<<<<<<<<< + * if cur_char in Lexer.CHAR_SYMBOL_: + * self.pos_ += 1 + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 149, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_FLOAT); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 149, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 149, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyObject_GetSlice(__pyx_v_text, 0, 0, &__pyx_v_start, &__pyx_t_1, NULL, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 149, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_PyNumber_Float(__pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 149, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 149, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_GIVEREF(__pyx_t_3); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3)) __PYX_ERR(0, 149, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_1); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_1)) __PYX_ERR(0, 149, __pyx_L1_error); + __Pyx_INCREF(__pyx_v_location); + __Pyx_GIVEREF(__pyx_v_location); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_v_location)) __PYX_ERR(0, 149, __pyx_L1_error); + __pyx_t_3 = 0; + __pyx_t_1 = 0; + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "fontTools/feaLib/lexer.py":142 + * self.scan_over_(Lexer.CHAR_DIGIT_) + * return (Lexer.FLOAT, float(text[start : self.pos_]), location) + * if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_: # <<<<<<<<<<<<<< + * self.pos_ += 1 + * self.scan_over_(Lexer.CHAR_DIGIT_) + */ + } + + /* "fontTools/feaLib/lexer.py":150 + * self.scan_over_(Lexer.CHAR_DIGIT_) + * return (Lexer.FLOAT, float(text[start : self.pos_]), location) + * if cur_char in Lexer.CHAR_SYMBOL_: # <<<<<<<<<<<<<< + * self.pos_ += 1 + * return (Lexer.SYMBOL, cur_char, location) + */ + __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 150, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_CHAR_SYMBOL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 150, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_8 = (__Pyx_PySequence_ContainsTF(__pyx_v_cur_char, __pyx_t_1, Py_EQ)); if (unlikely((__pyx_t_8 < 0))) __PYX_ERR(0, 150, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + if (__pyx_t_8) { + + /* "fontTools/feaLib/lexer.py":151 + * return (Lexer.FLOAT, float(text[start : self.pos_]), location) + * if cur_char in Lexer.CHAR_SYMBOL_: + * self.pos_ += 1 # <<<<<<<<<<<<<< + * return (Lexer.SYMBOL, cur_char, location) + * if cur_char == '"': + */ + __pyx_t_1 = 
__Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 151, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyInt_AddObjC(__pyx_t_1, __pyx_int_1, 1, 1, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 151, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_pos, __pyx_t_2) < 0) __PYX_ERR(0, 151, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "fontTools/feaLib/lexer.py":152 + * if cur_char in Lexer.CHAR_SYMBOL_: + * self.pos_ += 1 + * return (Lexer.SYMBOL, cur_char, location) # <<<<<<<<<<<<<< + * if cur_char == '"': + * self.pos_ += 1 + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 152, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_SYMBOL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 152, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 152, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_GIVEREF(__pyx_t_1); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1)) __PYX_ERR(0, 152, __pyx_L1_error); + __Pyx_INCREF(__pyx_v_cur_char); + __Pyx_GIVEREF(__pyx_v_cur_char); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_cur_char)) __PYX_ERR(0, 152, __pyx_L1_error); + __Pyx_INCREF(__pyx_v_location); + __Pyx_GIVEREF(__pyx_v_location); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_v_location)) __PYX_ERR(0, 152, __pyx_L1_error); + __pyx_t_1 = 0; + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "fontTools/feaLib/lexer.py":150 + * self.scan_over_(Lexer.CHAR_DIGIT_) + * return (Lexer.FLOAT, float(text[start : self.pos_]), location) + * if cur_char in Lexer.CHAR_SYMBOL_: # <<<<<<<<<<<<<< + * self.pos_ += 1 + * return (Lexer.SYMBOL, cur_char, location) + */ + } + + /* "fontTools/feaLib/lexer.py":153 + * self.pos_ += 1 + * return (Lexer.SYMBOL, cur_char, location) + * if cur_char == '"': # <<<<<<<<<<<<<< + * self.pos_ += 1 + * self.scan_until_('"') + */ + __pyx_t_8 = (__Pyx_PyUnicode_Equals(__pyx_v_cur_char, __pyx_kp_u__10, Py_EQ)); if (unlikely((__pyx_t_8 < 0))) __PYX_ERR(0, 153, __pyx_L1_error) + if (__pyx_t_8) { + + /* "fontTools/feaLib/lexer.py":154 + * return (Lexer.SYMBOL, cur_char, location) + * if cur_char == '"': + * self.pos_ += 1 # <<<<<<<<<<<<<< + * self.scan_until_('"') + * if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"': + */ + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 154, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_1 = __Pyx_PyInt_AddObjC(__pyx_t_2, __pyx_int_1, 1, 1, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 154, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_pos, __pyx_t_1) < 0) __PYX_ERR(0, 154, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "fontTools/feaLib/lexer.py":155 + * if cur_char == '"': + * self.pos_ += 1 + * self.scan_until_('"') # <<<<<<<<<<<<<< + * if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"': + * self.pos_ += 1 + */ + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_scan_until); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 155, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = NULL; + __pyx_t_5 = 0; + #if CYTHON_UNPACK_METHODS + if 
(likely(PyMethod_Check(__pyx_t_2))) { + __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); + if (likely(__pyx_t_3)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_2, function); + __pyx_t_5 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_kp_u__10}; + __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 155, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "fontTools/feaLib/lexer.py":156 + * self.pos_ += 1 + * self.scan_until_('"') + * if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"': # <<<<<<<<<<<<<< + * self.pos_ += 1 + * # strip newlines embedded within a string + */ + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 156, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_text_length); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 156, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_LT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 156, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 156, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_7) { + } else { + __pyx_t_8 = __pyx_t_7; + goto __pyx_L37_bool_binop_done; + } + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_text_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 156, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 156, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 156, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_7 = (__Pyx_PyUnicode_Equals(__pyx_t_1, __pyx_kp_u__10, Py_EQ)); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 156, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_8 = __pyx_t_7; + __pyx_L37_bool_binop_done:; + if (likely(__pyx_t_8)) { + + /* "fontTools/feaLib/lexer.py":157 + * self.scan_until_('"') + * if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"': + * self.pos_ += 1 # <<<<<<<<<<<<<< + * # strip newlines embedded within a string + * string = re.sub("[\r\n]", "", text[start + 1 : self.pos_ - 1]) + */ + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 157, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyInt_AddObjC(__pyx_t_1, __pyx_int_1, 1, 1, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 157, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_pos, __pyx_t_2) < 0) __PYX_ERR(0, 157, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "fontTools/feaLib/lexer.py":159 + * self.pos_ += 1 + * # strip newlines embedded within a string + * string = re.sub("[\r\n]", "", text[start + 1 : self.pos_ - 1]) # <<<<<<<<<<<<<< + * return (Lexer.STRING, string, 
location) + * else: + */ + __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_re); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 159, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_sub); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 159, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_PyInt_AddObjC(__pyx_v_start, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 159, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 159, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_9 = __Pyx_PyInt_SubtractObjC(__pyx_t_4, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 159, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_PyObject_GetSlice(__pyx_v_text, 0, 0, &__pyx_t_1, &__pyx_t_9, NULL, 0, 0, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 159, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + __pyx_t_9 = NULL; + __pyx_t_5 = 0; + #if CYTHON_UNPACK_METHODS + if (unlikely(PyMethod_Check(__pyx_t_3))) { + __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_3); + if (likely(__pyx_t_9)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); + __Pyx_INCREF(__pyx_t_9); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_3, function); + __pyx_t_5 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[4] = {__pyx_t_9, __pyx_kp_u__11, __pyx_kp_u__12, __pyx_t_4}; + __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_5, 3+__pyx_t_5); + __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 159, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } + __pyx_v_string = __pyx_t_2; + __pyx_t_2 = 0; + + /* "fontTools/feaLib/lexer.py":160 + * # strip newlines embedded within a string + * string = re.sub("[\r\n]", "", text[start + 1 : self.pos_ - 1]) + * return (Lexer.STRING, string, location) # <<<<<<<<<<<<<< + * else: + * raise FeatureLibError("Expected '\"' to terminate string", location) + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 160, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_STRING); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 160, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 160, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_GIVEREF(__pyx_t_3); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3)) __PYX_ERR(0, 160, __pyx_L1_error); + __Pyx_INCREF(__pyx_v_string); + __Pyx_GIVEREF(__pyx_v_string); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_string)) __PYX_ERR(0, 160, __pyx_L1_error); + __Pyx_INCREF(__pyx_v_location); + __Pyx_GIVEREF(__pyx_v_location); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_v_location)) __PYX_ERR(0, 160, __pyx_L1_error); + __pyx_t_3 = 0; + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "fontTools/feaLib/lexer.py":156 + * self.pos_ += 1 + * self.scan_until_('"') + * if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"': # <<<<<<<<<<<<<< + * self.pos_ += 1 + * # strip newlines embedded within a string + */ + } + + /* "fontTools/feaLib/lexer.py":162 
+ * return (Lexer.STRING, string, location) + * else: + * raise FeatureLibError("Expected '\"' to terminate string", location) # <<<<<<<<<<<<<< + * raise FeatureLibError("Unexpected character: %r" % cur_char, location) + * + */ + /*else*/ { + __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_FeatureLibError); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 162, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = NULL; + __pyx_t_5 = 0; + #if CYTHON_UNPACK_METHODS + if (unlikely(PyMethod_Check(__pyx_t_3))) { + __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); + if (likely(__pyx_t_4)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); + __Pyx_INCREF(__pyx_t_4); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_3, function); + __pyx_t_5 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[3] = {__pyx_t_4, __pyx_kp_u_Expected_to_terminate_string, __pyx_v_location}; + __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_5, 2+__pyx_t_5); + __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; + if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 162, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } + __Pyx_Raise(__pyx_t_2, 0, 0, 0); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __PYX_ERR(0, 162, __pyx_L1_error) + } + + /* "fontTools/feaLib/lexer.py":153 + * self.pos_ += 1 + * return (Lexer.SYMBOL, cur_char, location) + * if cur_char == '"': # <<<<<<<<<<<<<< + * self.pos_ += 1 + * self.scan_until_('"') + */ + } + + /* "fontTools/feaLib/lexer.py":163 + * else: + * raise FeatureLibError("Expected '\"' to terminate string", location) + * raise FeatureLibError("Unexpected character: %r" % cur_char, location) # <<<<<<<<<<<<<< + * + * def scan_over_(self, valid): + */ + __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_FeatureLibError); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 163, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyUnicode_FormatSafe(__pyx_kp_u_Unexpected_character_r, __pyx_v_cur_char); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 163, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_9 = NULL; + __pyx_t_5 = 0; + #if CYTHON_UNPACK_METHODS + if (unlikely(PyMethod_Check(__pyx_t_3))) { + __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_3); + if (likely(__pyx_t_9)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); + __Pyx_INCREF(__pyx_t_9); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_3, function); + __pyx_t_5 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[3] = {__pyx_t_9, __pyx_t_4, __pyx_v_location}; + __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_5, 2+__pyx_t_5); + __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 163, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } + __Pyx_Raise(__pyx_t_2, 0, 0, 0); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __PYX_ERR(0, 163, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":68 + * return FeatureLibLocation(self.filename_ or "", self.line_, column) + * + * def next_(self): # <<<<<<<<<<<<<< + * self.scan_over_(Lexer.CHAR_WHITESPACE_) + * location = self.location_() + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_9); + __Pyx_AddTraceback("fontTools.feaLib.lexer.Lexer.next_", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_location); + __Pyx_XDECREF(__pyx_v_start); + 
__Pyx_XDECREF(__pyx_v_text); + __Pyx_XDECREF(__pyx_v_cur_char); + __Pyx_XDECREF(__pyx_v_next_char); + __Pyx_XDECREF(__pyx_v_glyphclass); + __Pyx_XDECREF(__pyx_v_token); + __Pyx_XDECREF(__pyx_v_string); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "fontTools/feaLib/lexer.py":165 + * raise FeatureLibError("Unexpected character: %r" % cur_char, location) + * + * def scan_over_(self, valid): # <<<<<<<<<<<<<< + * p = self.pos_ + * while p < self.text_length_ and self.text_[p] in valid: + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_13scan_over_(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +); /*proto*/ +PyDoc_STRVAR(__pyx_doc_9fontTools_6feaLib_5lexer_5Lexer_12scan_over_, "Lexer.scan_over_(self, valid)"); +static PyMethodDef __pyx_mdef_9fontTools_6feaLib_5lexer_5Lexer_13scan_over_ = {"scan_over_", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_13scan_over_, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_9fontTools_6feaLib_5lexer_5Lexer_12scan_over_}; +static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_13scan_over_(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +) { + PyObject *__pyx_v_self = 0; + PyObject *__pyx_v_valid = 0; + #if !CYTHON_METH_FASTCALL + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + #endif + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject* values[2] = {0,0}; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("scan_over_ (wrapper)", 0); + #if !CYTHON_METH_FASTCALL + #if CYTHON_ASSUME_SAFE_MACROS + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; + #endif + #endif + __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); + { + PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_valid,0}; + if (__pyx_kwds) { + Py_ssize_t kw_args; + switch (__pyx_nargs) { + case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); + switch (__pyx_nargs) { + case 0: + if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_self)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[0]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 165, __pyx_L3_error) + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_valid)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[1]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 165, __pyx_L3_error) + else { + __Pyx_RaiseArgtupleInvalid("scan_over_", 1, 2, 2, 1); __PYX_ERR(0, 165, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + const Py_ssize_t kwd_pos_args = __pyx_nargs; + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "scan_over_") < 0)) __PYX_ERR(0, 165, __pyx_L3_error) 
+ } + } else if (unlikely(__pyx_nargs != 2)) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); + } + __pyx_v_self = values[0]; + __pyx_v_valid = values[1]; + } + goto __pyx_L6_skip; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("scan_over_", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 165, __pyx_L3_error) + __pyx_L6_skip:; + goto __pyx_L4_argument_unpacking_done; + __pyx_L3_error:; + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_AddTraceback("fontTools.feaLib.lexer.Lexer.scan_over_", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_12scan_over_(__pyx_self, __pyx_v_self, __pyx_v_valid); + + /* function exit code */ + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_12scan_over_(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_valid) { + PyObject *__pyx_v_p = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("scan_over_", 1); + + /* "fontTools/feaLib/lexer.py":166 + * + * def scan_over_(self, valid): + * p = self.pos_ # <<<<<<<<<<<<<< + * while p < self.text_length_ and self.text_[p] in valid: + * p += 1 + */ + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 166, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_v_p = __pyx_t_1; + __pyx_t_1 = 0; + + /* "fontTools/feaLib/lexer.py":167 + * def scan_over_(self, valid): + * p = self.pos_ + * while p < self.text_length_ and self.text_[p] in valid: # <<<<<<<<<<<<<< + * p += 1 + * self.pos_ = p + */ + while (1) { + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_text_length); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 167, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_p, __pyx_t_1, Py_LT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 167, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 167, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_4) { + } else { + __pyx_t_2 = __pyx_t_4; + goto __pyx_L5_bool_binop_done; + } + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_text_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 167, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_v_p); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 167, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_4 = (__Pyx_PySequence_ContainsTF(__pyx_t_1, __pyx_v_valid, Py_EQ)); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 167, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_2 = __pyx_t_4; + __pyx_L5_bool_binop_done:; + if (!__pyx_t_2) break; + + /* 
"fontTools/feaLib/lexer.py":168 + * p = self.pos_ + * while p < self.text_length_ and self.text_[p] in valid: + * p += 1 # <<<<<<<<<<<<<< + * self.pos_ = p + * + */ + __pyx_t_1 = __Pyx_PyInt_AddObjC(__pyx_v_p, __pyx_int_1, 1, 1, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 168, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF_SET(__pyx_v_p, __pyx_t_1); + __pyx_t_1 = 0; + } + + /* "fontTools/feaLib/lexer.py":169 + * while p < self.text_length_ and self.text_[p] in valid: + * p += 1 + * self.pos_ = p # <<<<<<<<<<<<<< + * + * def scan_until_(self, stop_at): + */ + if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_pos, __pyx_v_p) < 0) __PYX_ERR(0, 169, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":165 + * raise FeatureLibError("Unexpected character: %r" % cur_char, location) + * + * def scan_over_(self, valid): # <<<<<<<<<<<<<< + * p = self.pos_ + * while p < self.text_length_ and self.text_[p] in valid: + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("fontTools.feaLib.lexer.Lexer.scan_over_", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_p); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "fontTools/feaLib/lexer.py":171 + * self.pos_ = p + * + * def scan_until_(self, stop_at): # <<<<<<<<<<<<<< + * p = self.pos_ + * while p < self.text_length_ and self.text_[p] not in stop_at: + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_15scan_until_(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +); /*proto*/ +PyDoc_STRVAR(__pyx_doc_9fontTools_6feaLib_5lexer_5Lexer_14scan_until_, "Lexer.scan_until_(self, stop_at)"); +static PyMethodDef __pyx_mdef_9fontTools_6feaLib_5lexer_5Lexer_15scan_until_ = {"scan_until_", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_15scan_until_, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_9fontTools_6feaLib_5lexer_5Lexer_14scan_until_}; +static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_15scan_until_(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +) { + PyObject *__pyx_v_self = 0; + PyObject *__pyx_v_stop_at = 0; + #if !CYTHON_METH_FASTCALL + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + #endif + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject* values[2] = {0,0}; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("scan_until_ (wrapper)", 0); + #if !CYTHON_METH_FASTCALL + #if CYTHON_ASSUME_SAFE_MACROS + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; + #endif + #endif + __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); + { + PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_stop_at,0}; + if (__pyx_kwds) { + Py_ssize_t kw_args; + switch (__pyx_nargs) { + case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + 
default: goto __pyx_L5_argtuple_error; + } + kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); + switch (__pyx_nargs) { + case 0: + if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_self)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[0]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 171, __pyx_L3_error) + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_stop_at)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[1]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 171, __pyx_L3_error) + else { + __Pyx_RaiseArgtupleInvalid("scan_until_", 1, 2, 2, 1); __PYX_ERR(0, 171, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + const Py_ssize_t kwd_pos_args = __pyx_nargs; + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "scan_until_") < 0)) __PYX_ERR(0, 171, __pyx_L3_error) + } + } else if (unlikely(__pyx_nargs != 2)) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); + } + __pyx_v_self = values[0]; + __pyx_v_stop_at = values[1]; + } + goto __pyx_L6_skip; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("scan_until_", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 171, __pyx_L3_error) + __pyx_L6_skip:; + goto __pyx_L4_argument_unpacking_done; + __pyx_L3_error:; + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_AddTraceback("fontTools.feaLib.lexer.Lexer.scan_until_", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_14scan_until_(__pyx_self, __pyx_v_self, __pyx_v_stop_at); + + /* function exit code */ + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_14scan_until_(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_stop_at) { + PyObject *__pyx_v_p = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("scan_until_", 1); + + /* "fontTools/feaLib/lexer.py":172 + * + * def scan_until_(self, stop_at): + * p = self.pos_ # <<<<<<<<<<<<<< + * while p < self.text_length_ and self.text_[p] not in stop_at: + * p += 1 + */ + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 172, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_v_p = __pyx_t_1; + __pyx_t_1 = 0; + + /* "fontTools/feaLib/lexer.py":173 + * def scan_until_(self, stop_at): + * p = self.pos_ + * while p < self.text_length_ and self.text_[p] not in stop_at: # <<<<<<<<<<<<<< + * p += 1 + * self.pos_ = p + */ + while (1) { + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_text_length); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 173, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + 
__pyx_t_3 = PyObject_RichCompare(__pyx_v_p, __pyx_t_1, Py_LT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 173, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 173, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_4) { + } else { + __pyx_t_2 = __pyx_t_4; + goto __pyx_L5_bool_binop_done; + } + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_text_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 173, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_v_p); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 173, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_4 = (__Pyx_PySequence_ContainsTF(__pyx_t_1, __pyx_v_stop_at, Py_NE)); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 173, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_2 = __pyx_t_4; + __pyx_L5_bool_binop_done:; + if (!__pyx_t_2) break; + + /* "fontTools/feaLib/lexer.py":174 + * p = self.pos_ + * while p < self.text_length_ and self.text_[p] not in stop_at: + * p += 1 # <<<<<<<<<<<<<< + * self.pos_ = p + * + */ + __pyx_t_1 = __Pyx_PyInt_AddObjC(__pyx_v_p, __pyx_int_1, 1, 1, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 174, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF_SET(__pyx_v_p, __pyx_t_1); + __pyx_t_1 = 0; + } + + /* "fontTools/feaLib/lexer.py":175 + * while p < self.text_length_ and self.text_[p] not in stop_at: + * p += 1 + * self.pos_ = p # <<<<<<<<<<<<<< + * + * def scan_anonymous_block(self, tag): + */ + if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_pos, __pyx_v_p) < 0) __PYX_ERR(0, 175, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":171 + * self.pos_ = p + * + * def scan_until_(self, stop_at): # <<<<<<<<<<<<<< + * p = self.pos_ + * while p < self.text_length_ and self.text_[p] not in stop_at: + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("fontTools.feaLib.lexer.Lexer.scan_until_", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_p); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "fontTools/feaLib/lexer.py":177 + * self.pos_ = p + * + * def scan_anonymous_block(self, tag): # <<<<<<<<<<<<<< + * location = self.location_() + * tag = tag.strip() + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_17scan_anonymous_block(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +); /*proto*/ +PyDoc_STRVAR(__pyx_doc_9fontTools_6feaLib_5lexer_5Lexer_16scan_anonymous_block, "Lexer.scan_anonymous_block(self, tag)"); +static PyMethodDef __pyx_mdef_9fontTools_6feaLib_5lexer_5Lexer_17scan_anonymous_block = {"scan_anonymous_block", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_17scan_anonymous_block, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_9fontTools_6feaLib_5lexer_5Lexer_16scan_anonymous_block}; +static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_17scan_anonymous_block(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject 
*__pyx_args, PyObject *__pyx_kwds +#endif +) { + PyObject *__pyx_v_self = 0; + PyObject *__pyx_v_tag = 0; + #if !CYTHON_METH_FASTCALL + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + #endif + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject* values[2] = {0,0}; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("scan_anonymous_block (wrapper)", 0); + #if !CYTHON_METH_FASTCALL + #if CYTHON_ASSUME_SAFE_MACROS + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; + #endif + #endif + __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); + { + PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_tag,0}; + if (__pyx_kwds) { + Py_ssize_t kw_args; + switch (__pyx_nargs) { + case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); + switch (__pyx_nargs) { + case 0: + if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_self)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[0]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 177, __pyx_L3_error) + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_tag)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[1]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 177, __pyx_L3_error) + else { + __Pyx_RaiseArgtupleInvalid("scan_anonymous_block", 1, 2, 2, 1); __PYX_ERR(0, 177, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + const Py_ssize_t kwd_pos_args = __pyx_nargs; + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "scan_anonymous_block") < 0)) __PYX_ERR(0, 177, __pyx_L3_error) + } + } else if (unlikely(__pyx_nargs != 2)) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); + } + __pyx_v_self = values[0]; + __pyx_v_tag = values[1]; + } + goto __pyx_L6_skip; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("scan_anonymous_block", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 177, __pyx_L3_error) + __pyx_L6_skip:; + goto __pyx_L4_argument_unpacking_done; + __pyx_L3_error:; + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_AddTraceback("fontTools.feaLib.lexer.Lexer.scan_anonymous_block", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_16scan_anonymous_block(__pyx_self, __pyx_v_self, __pyx_v_tag); + + /* function exit code */ + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_16scan_anonymous_block(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject 
*__pyx_v_tag) { + PyObject *__pyx_v_location = NULL; + PyObject *__pyx_v_regexp = NULL; + PyObject *__pyx_v_split = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + unsigned int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + Py_ssize_t __pyx_t_6; + int __pyx_t_7; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("scan_anonymous_block", 0); + __Pyx_INCREF(__pyx_v_tag); + + /* "fontTools/feaLib/lexer.py":178 + * + * def scan_anonymous_block(self, tag): + * location = self.location_() # <<<<<<<<<<<<<< + * tag = tag.strip() + * self.scan_until_(Lexer.CHAR_NEWLINE_) + */ + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_location); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 178, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = NULL; + __pyx_t_4 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_2))) { + __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); + if (likely(__pyx_t_3)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_2, function); + __pyx_t_4 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_3, NULL}; + __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_4, 0+__pyx_t_4); + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 178, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } + __pyx_v_location = __pyx_t_1; + __pyx_t_1 = 0; + + /* "fontTools/feaLib/lexer.py":179 + * def scan_anonymous_block(self, tag): + * location = self.location_() + * tag = tag.strip() # <<<<<<<<<<<<<< + * self.scan_until_(Lexer.CHAR_NEWLINE_) + * self.scan_over_(Lexer.CHAR_NEWLINE_) + */ + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_tag, __pyx_n_s_strip); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 179, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = NULL; + __pyx_t_4 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_2))) { + __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); + if (likely(__pyx_t_3)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_2, function); + __pyx_t_4 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_3, NULL}; + __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_4, 0+__pyx_t_4); + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 179, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } + __Pyx_DECREF_SET(__pyx_v_tag, __pyx_t_1); + __pyx_t_1 = 0; + + /* "fontTools/feaLib/lexer.py":180 + * location = self.location_() + * tag = tag.strip() + * self.scan_until_(Lexer.CHAR_NEWLINE_) # <<<<<<<<<<<<<< + * self.scan_over_(Lexer.CHAR_NEWLINE_) + * regexp = r"}\s*" + tag + r"\s*;" + */ + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_scan_until); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 180, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 180, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_CHAR_NEWLINE); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 180, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = NULL; + 
__pyx_t_4 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_2))) { + __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); + if (likely(__pyx_t_3)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_2, function); + __pyx_t_4 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_5}; + __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_4, 1+__pyx_t_4); + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 180, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "fontTools/feaLib/lexer.py":181 + * tag = tag.strip() + * self.scan_until_(Lexer.CHAR_NEWLINE_) + * self.scan_over_(Lexer.CHAR_NEWLINE_) # <<<<<<<<<<<<<< + * regexp = r"}\s*" + tag + r"\s*;" + * split = re.split(regexp, self.text_[self.pos_ :], maxsplit=1) + */ + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_scan_over); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 181, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 181, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_CHAR_NEWLINE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 181, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __pyx_t_5 = NULL; + __pyx_t_4 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_2))) { + __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); + if (likely(__pyx_t_5)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); + __Pyx_INCREF(__pyx_t_5); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_2, function); + __pyx_t_4 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_3}; + __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_4, 1+__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 181, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "fontTools/feaLib/lexer.py":182 + * self.scan_until_(Lexer.CHAR_NEWLINE_) + * self.scan_over_(Lexer.CHAR_NEWLINE_) + * regexp = r"}\s*" + tag + r"\s*;" # <<<<<<<<<<<<<< + * split = re.split(regexp, self.text_[self.pos_ :], maxsplit=1) + * if len(split) != 2: + */ + __pyx_t_1 = PyNumber_Add(__pyx_kp_u_s, __pyx_v_tag); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 182, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = PyNumber_Add(__pyx_t_1, __pyx_kp_u_s_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 182, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_v_regexp = __pyx_t_2; + __pyx_t_2 = 0; + + /* "fontTools/feaLib/lexer.py":183 + * self.scan_over_(Lexer.CHAR_NEWLINE_) + * regexp = r"}\s*" + tag + r"\s*;" + * split = re.split(regexp, self.text_[self.pos_ :], maxsplit=1) # <<<<<<<<<<<<<< + * if len(split) != 2: + * raise FeatureLibError( + */ + __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_re); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 183, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_split); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 183, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + 
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_text_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 183, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 183, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_5 = __Pyx_PyObject_GetSlice(__pyx_t_2, 0, 0, &__pyx_t_3, NULL, NULL, 0, 0, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 183, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 183, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_INCREF(__pyx_v_regexp); + __Pyx_GIVEREF(__pyx_v_regexp); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_regexp)) __PYX_ERR(0, 183, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_5); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_5)) __PYX_ERR(0, 183, __pyx_L1_error); + __pyx_t_5 = 0; + __pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 183, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_maxsplit, __pyx_int_1) < 0) __PYX_ERR(0, 183, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_3, __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 183, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __pyx_v_split = __pyx_t_2; + __pyx_t_2 = 0; + + /* "fontTools/feaLib/lexer.py":184 + * regexp = r"}\s*" + tag + r"\s*;" + * split = re.split(regexp, self.text_[self.pos_ :], maxsplit=1) + * if len(split) != 2: # <<<<<<<<<<<<<< + * raise FeatureLibError( + * "Expected '} %s;' to terminate anonymous block" % tag, location + */ + __pyx_t_6 = PyObject_Length(__pyx_v_split); if (unlikely(__pyx_t_6 == ((Py_ssize_t)-1))) __PYX_ERR(0, 184, __pyx_L1_error) + __pyx_t_7 = (__pyx_t_6 != 2); + if (unlikely(__pyx_t_7)) { + + /* "fontTools/feaLib/lexer.py":185 + * split = re.split(regexp, self.text_[self.pos_ :], maxsplit=1) + * if len(split) != 2: + * raise FeatureLibError( # <<<<<<<<<<<<<< + * "Expected '} %s;' to terminate anonymous block" % tag, location + * ) + */ + __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_FeatureLibError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 185, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + + /* "fontTools/feaLib/lexer.py":186 + * if len(split) != 2: + * raise FeatureLibError( + * "Expected '} %s;' to terminate anonymous block" % tag, location # <<<<<<<<<<<<<< + * ) + * self.pos_ += len(split[0]) + */ + __pyx_t_3 = __Pyx_PyUnicode_FormatSafe(__pyx_kp_u_Expected_s_to_terminate_anonymou, __pyx_v_tag); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 186, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_1 = NULL; + __pyx_t_4 = 0; + #if CYTHON_UNPACK_METHODS + if (unlikely(PyMethod_Check(__pyx_t_5))) { + __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_5); + if (likely(__pyx_t_1)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); + __Pyx_INCREF(__pyx_t_1); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_5, function); + __pyx_t_4 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[3] = {__pyx_t_1, __pyx_t_3, __pyx_v_location}; + __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+1-__pyx_t_4, 2+__pyx_t_4); + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 185, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + 
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + } + __Pyx_Raise(__pyx_t_2, 0, 0, 0); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __PYX_ERR(0, 185, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":184 + * regexp = r"}\s*" + tag + r"\s*;" + * split = re.split(regexp, self.text_[self.pos_ :], maxsplit=1) + * if len(split) != 2: # <<<<<<<<<<<<<< + * raise FeatureLibError( + * "Expected '} %s;' to terminate anonymous block" % tag, location + */ + } + + /* "fontTools/feaLib/lexer.py":188 + * "Expected '} %s;' to terminate anonymous block" % tag, location + * ) + * self.pos_ += len(split[0]) # <<<<<<<<<<<<<< + * return (Lexer.ANONYMOUS_BLOCK, split[0], location) + * + */ + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 188, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_5 = __Pyx_GetItemInt(__pyx_v_split, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 188, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_6 = PyObject_Length(__pyx_t_5); if (unlikely(__pyx_t_6 == ((Py_ssize_t)-1))) __PYX_ERR(0, 188, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __pyx_t_5 = PyInt_FromSsize_t(__pyx_t_6); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 188, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_3 = PyNumber_InPlaceAdd(__pyx_t_2, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 188, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_pos, __pyx_t_3) < 0) __PYX_ERR(0, 188, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "fontTools/feaLib/lexer.py":189 + * ) + * self.pos_ += len(split[0]) + * return (Lexer.ANONYMOUS_BLOCK, split[0], location) # <<<<<<<<<<<<<< + * + * + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 189, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_ANONYMOUS_BLOCK); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 189, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_split, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 189, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 189, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_GIVEREF(__pyx_t_5); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_5)) __PYX_ERR(0, 189, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_3); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3)) __PYX_ERR(0, 189, __pyx_L1_error); + __Pyx_INCREF(__pyx_v_location); + __Pyx_GIVEREF(__pyx_v_location); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_v_location)) __PYX_ERR(0, 189, __pyx_L1_error); + __pyx_t_5 = 0; + __pyx_t_3 = 0; + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "fontTools/feaLib/lexer.py":177 + * self.pos_ = p + * + * def scan_anonymous_block(self, tag): # <<<<<<<<<<<<<< + * location = self.location_() + * tag = tag.strip() + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("fontTools.feaLib.lexer.Lexer.scan_anonymous_block", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_location); + 
__Pyx_XDECREF(__pyx_v_regexp); + __Pyx_XDECREF(__pyx_v_split); + __Pyx_XDECREF(__pyx_v_tag); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "fontTools/feaLib/lexer.py":207 + * """ + * + * def __init__(self, featurefile, *, includeDir=None): # <<<<<<<<<<<<<< + * """Initializes an IncludingLexer. + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_14IncludingLexer_1__init__(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +); /*proto*/ +PyDoc_STRVAR(__pyx_doc_9fontTools_6feaLib_5lexer_14IncludingLexer___init__, "IncludingLexer.__init__(self, featurefile, *, includeDir=None)\nInitializes an IncludingLexer.\n\n Behavior:\n If includeDir is passed, it will be used to determine the top-level\n include directory to use for all encountered include statements. If it is\n not passed, ``os.path.dirname(featurefile)`` will be considered the\n include directory.\n "); +static PyMethodDef __pyx_mdef_9fontTools_6feaLib_5lexer_14IncludingLexer_1__init__ = {"__init__", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_9fontTools_6feaLib_5lexer_14IncludingLexer_1__init__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_9fontTools_6feaLib_5lexer_14IncludingLexer___init__}; +static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_14IncludingLexer_1__init__(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +) { + PyObject *__pyx_v_self = 0; + PyObject *__pyx_v_featurefile = 0; + PyObject *__pyx_v_includeDir = 0; + #if !CYTHON_METH_FASTCALL + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + #endif + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject* values[3] = {0,0,0}; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); + #if !CYTHON_METH_FASTCALL + #if CYTHON_ASSUME_SAFE_MACROS + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; + #endif + #endif + __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); + { + PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_featurefile,&__pyx_n_s_includeDir,0}; + values[2] = __Pyx_Arg_NewRef_FASTCALL(((PyObject *)Py_None)); + if (__pyx_kwds) { + Py_ssize_t kw_args; + switch (__pyx_nargs) { + case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); + switch (__pyx_nargs) { + case 0: + if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_self)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[0]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 207, __pyx_L3_error) + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_featurefile)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[1]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 207, __pyx_L3_error) + else { + 
__Pyx_RaiseArgtupleInvalid("__init__", 1, 2, 2, 1); __PYX_ERR(0, 207, __pyx_L3_error) + } + } + if (kw_args == 1) { + const Py_ssize_t index = 2; + PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, *__pyx_pyargnames[index]); + if (value) { values[index] = __Pyx_Arg_NewRef_FASTCALL(value); kw_args--; } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 207, __pyx_L3_error) + } + if (unlikely(kw_args > 0)) { + const Py_ssize_t kwd_pos_args = __pyx_nargs; + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__init__") < 0)) __PYX_ERR(0, 207, __pyx_L3_error) + } + } else if (unlikely(__pyx_nargs != 2)) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); + } + __pyx_v_self = values[0]; + __pyx_v_featurefile = values[1]; + __pyx_v_includeDir = values[2]; + } + goto __pyx_L6_skip; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__init__", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 207, __pyx_L3_error) + __pyx_L6_skip:; + goto __pyx_L4_argument_unpacking_done; + __pyx_L3_error:; + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_AddTraceback("fontTools.feaLib.lexer.IncludingLexer.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_9fontTools_6feaLib_5lexer_14IncludingLexer___init__(__pyx_self, __pyx_v_self, __pyx_v_featurefile, __pyx_v_includeDir); + + /* function exit code */ + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_14IncludingLexer___init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_featurefile, PyObject *__pyx_v_includeDir) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + unsigned int __pyx_t_4; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__init__", 1); + + /* "fontTools/feaLib/lexer.py":217 + * """ + * + * self.lexers_ = [self.make_lexer_(featurefile)] # <<<<<<<<<<<<<< + * self.featurefilepath = self.lexers_[0].filename_ + * self.includeDir = includeDir + */ + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_make_lexer); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 217, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = NULL; + __pyx_t_4 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_2))) { + __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); + if (likely(__pyx_t_3)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_2, function); + __pyx_t_4 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_featurefile}; + __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_4, 1+__pyx_t_4); + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 217, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } + 
__pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 217, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_GIVEREF(__pyx_t_1); + if (__Pyx_PyList_SET_ITEM(__pyx_t_2, 0, __pyx_t_1)) __PYX_ERR(0, 217, __pyx_L1_error); + __pyx_t_1 = 0; + if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_lexers, __pyx_t_2) < 0) __PYX_ERR(0, 217, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "fontTools/feaLib/lexer.py":218 + * + * self.lexers_ = [self.make_lexer_(featurefile)] + * self.featurefilepath = self.lexers_[0].filename_ # <<<<<<<<<<<<<< + * self.includeDir = includeDir + * + */ + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_lexers); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 218, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_2, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 218, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_filename_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 218, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_featurefilepath, __pyx_t_2) < 0) __PYX_ERR(0, 218, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "fontTools/feaLib/lexer.py":219 + * self.lexers_ = [self.make_lexer_(featurefile)] + * self.featurefilepath = self.lexers_[0].filename_ + * self.includeDir = includeDir # <<<<<<<<<<<<<< + * + * def __iter__(self): + */ + if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_includeDir, __pyx_v_includeDir) < 0) __PYX_ERR(0, 219, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":207 + * """ + * + * def __init__(self, featurefile, *, includeDir=None): # <<<<<<<<<<<<<< + * """Initializes an IncludingLexer. 
+ * + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("fontTools.feaLib.lexer.IncludingLexer.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "fontTools/feaLib/lexer.py":221 + * self.includeDir = includeDir + * + * def __iter__(self): # <<<<<<<<<<<<<< + * return self + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_14IncludingLexer_3__iter__(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +); /*proto*/ +PyDoc_STRVAR(__pyx_doc_9fontTools_6feaLib_5lexer_14IncludingLexer_2__iter__, "IncludingLexer.__iter__(self)"); +static PyMethodDef __pyx_mdef_9fontTools_6feaLib_5lexer_14IncludingLexer_3__iter__ = {"__iter__", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_9fontTools_6feaLib_5lexer_14IncludingLexer_3__iter__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_9fontTools_6feaLib_5lexer_14IncludingLexer_2__iter__}; +static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_14IncludingLexer_3__iter__(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +) { + PyObject *__pyx_v_self = 0; + #if !CYTHON_METH_FASTCALL + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + #endif + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject* values[1] = {0}; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__iter__ (wrapper)", 0); + #if !CYTHON_METH_FASTCALL + #if CYTHON_ASSUME_SAFE_MACROS + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; + #endif + #endif + __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); + { + PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,0}; + if (__pyx_kwds) { + Py_ssize_t kw_args; + switch (__pyx_nargs) { + case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); + switch (__pyx_nargs) { + case 0: + if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_self)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[0]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 221, __pyx_L3_error) + else goto __pyx_L5_argtuple_error; + } + if (unlikely(kw_args > 0)) { + const Py_ssize_t kwd_pos_args = __pyx_nargs; + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__iter__") < 0)) __PYX_ERR(0, 221, __pyx_L3_error) + } + } else if (unlikely(__pyx_nargs != 1)) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + } + __pyx_v_self = values[0]; + } + goto __pyx_L6_skip; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__iter__", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 221, __pyx_L3_error) + __pyx_L6_skip:; + goto __pyx_L4_argument_unpacking_done; + __pyx_L3_error:; + { + Py_ssize_t __pyx_temp; + 
for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_AddTraceback("fontTools.feaLib.lexer.IncludingLexer.__iter__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_9fontTools_6feaLib_5lexer_14IncludingLexer_2__iter__(__pyx_self, __pyx_v_self); + + /* function exit code */ + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_14IncludingLexer_2__iter__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__iter__", 1); + + /* "fontTools/feaLib/lexer.py":222 + * + * def __iter__(self): + * return self # <<<<<<<<<<<<<< + * + * def next(self): # Python 2 + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_self); + __pyx_r = __pyx_v_self; + goto __pyx_L0; + + /* "fontTools/feaLib/lexer.py":221 + * self.includeDir = includeDir + * + * def __iter__(self): # <<<<<<<<<<<<<< + * return self + * + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "fontTools/feaLib/lexer.py":224 + * return self + * + * def next(self): # Python 2 # <<<<<<<<<<<<<< + * return self.__next__() + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_14IncludingLexer_5next(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +); /*proto*/ +PyDoc_STRVAR(__pyx_doc_9fontTools_6feaLib_5lexer_14IncludingLexer_4next, "IncludingLexer.next(self)"); +static PyMethodDef __pyx_mdef_9fontTools_6feaLib_5lexer_14IncludingLexer_5next = {"next", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_9fontTools_6feaLib_5lexer_14IncludingLexer_5next, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_9fontTools_6feaLib_5lexer_14IncludingLexer_4next}; +static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_14IncludingLexer_5next(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +) { + PyObject *__pyx_v_self = 0; + #if !CYTHON_METH_FASTCALL + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + #endif + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject* values[1] = {0}; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("next (wrapper)", 0); + #if !CYTHON_METH_FASTCALL + #if CYTHON_ASSUME_SAFE_MACROS + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; + #endif + #endif + __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); + { + PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,0}; + if (__pyx_kwds) { + Py_ssize_t kw_args; + switch (__pyx_nargs) { + case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds); + switch (__pyx_nargs) { + case 0: + if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_self)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[0]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 224, __pyx_L3_error) + else goto __pyx_L5_argtuple_error; + } + if (unlikely(kw_args > 0)) { + const Py_ssize_t kwd_pos_args = __pyx_nargs; + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "next") < 0)) __PYX_ERR(0, 224, __pyx_L3_error) + } + } else if (unlikely(__pyx_nargs != 1)) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + } + __pyx_v_self = values[0]; + } + goto __pyx_L6_skip; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("next", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 224, __pyx_L3_error) + __pyx_L6_skip:; + goto __pyx_L4_argument_unpacking_done; + __pyx_L3_error:; + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_AddTraceback("fontTools.feaLib.lexer.IncludingLexer.next", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_9fontTools_6feaLib_5lexer_14IncludingLexer_4next(__pyx_self, __pyx_v_self); + + /* function exit code */ + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_14IncludingLexer_4next(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + unsigned int __pyx_t_4; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("next", 1); + + /* "fontTools/feaLib/lexer.py":225 + * + * def next(self): # Python 2 + * return self.__next__() # <<<<<<<<<<<<<< + * + * def __next__(self): # Python 3 + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_next); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 225, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = NULL; + __pyx_t_4 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_2))) { + __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); + if (likely(__pyx_t_3)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_2, function); + __pyx_t_4 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_3, NULL}; + __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_4, 0+__pyx_t_4); + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 225, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "fontTools/feaLib/lexer.py":224 + * return self + * + * def next(self): # Python 2 # <<<<<<<<<<<<<< + * return self.__next__() + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + 
__Pyx_AddTraceback("fontTools.feaLib.lexer.IncludingLexer.next", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "fontTools/feaLib/lexer.py":227 + * return self.__next__() + * + * def __next__(self): # Python 3 # <<<<<<<<<<<<<< + * while self.lexers_: + * lexer = self.lexers_[-1] + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_14IncludingLexer_7__next__(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +); /*proto*/ +PyDoc_STRVAR(__pyx_doc_9fontTools_6feaLib_5lexer_14IncludingLexer_6__next__, "IncludingLexer.__next__(self)"); +static PyMethodDef __pyx_mdef_9fontTools_6feaLib_5lexer_14IncludingLexer_7__next__ = {"__next__", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_9fontTools_6feaLib_5lexer_14IncludingLexer_7__next__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_9fontTools_6feaLib_5lexer_14IncludingLexer_6__next__}; +static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_14IncludingLexer_7__next__(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +) { + PyObject *__pyx_v_self = 0; + #if !CYTHON_METH_FASTCALL + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + #endif + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject* values[1] = {0}; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__next__ (wrapper)", 0); + #if !CYTHON_METH_FASTCALL + #if CYTHON_ASSUME_SAFE_MACROS + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; + #endif + #endif + __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); + { + PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,0}; + if (__pyx_kwds) { + Py_ssize_t kw_args; + switch (__pyx_nargs) { + case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); + switch (__pyx_nargs) { + case 0: + if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_self)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[0]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 227, __pyx_L3_error) + else goto __pyx_L5_argtuple_error; + } + if (unlikely(kw_args > 0)) { + const Py_ssize_t kwd_pos_args = __pyx_nargs; + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__next__") < 0)) __PYX_ERR(0, 227, __pyx_L3_error) + } + } else if (unlikely(__pyx_nargs != 1)) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + } + __pyx_v_self = values[0]; + } + goto __pyx_L6_skip; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__next__", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 227, __pyx_L3_error) + __pyx_L6_skip:; + goto __pyx_L4_argument_unpacking_done; + __pyx_L3_error:; + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + 
__Pyx_AddTraceback("fontTools.feaLib.lexer.IncludingLexer.__next__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_9fontTools_6feaLib_5lexer_14IncludingLexer_6__next__(__pyx_self, __pyx_v_self); + + /* function exit code */ + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_14IncludingLexer_6__next__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self) { + PyObject *__pyx_v_lexer = NULL; + PyObject *__pyx_v_token_type = NULL; + PyObject *__pyx_v_token = NULL; + PyObject *__pyx_v_location = NULL; + PyObject *__pyx_v_fname_type = NULL; + PyObject *__pyx_v_fname_token = NULL; + PyObject *__pyx_v_fname_location = NULL; + PyObject *__pyx_v_path = NULL; + PyObject *__pyx_v_curpath = NULL; + PyObject *__pyx_v_err = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + PyObject *__pyx_t_9 = NULL; + PyObject *(*__pyx_t_10)(PyObject *); + int __pyx_t_11; + int __pyx_t_12; + unsigned int __pyx_t_13; + Py_ssize_t __pyx_t_14; + int __pyx_t_15; + PyObject *__pyx_t_16 = NULL; + int __pyx_t_17; + char const *__pyx_t_18; + PyObject *__pyx_t_19 = NULL; + PyObject *__pyx_t_20 = NULL; + PyObject *__pyx_t_21 = NULL; + PyObject *__pyx_t_22 = NULL; + PyObject *__pyx_t_23 = NULL; + PyObject *__pyx_t_24 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__next__", 1); + + /* "fontTools/feaLib/lexer.py":228 + * + * def __next__(self): # Python 3 + * while self.lexers_: # <<<<<<<<<<<<<< + * lexer = self.lexers_[-1] + * try: + */ + while (1) { + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_lexers); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 228, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 228, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + if (!__pyx_t_2) break; + + /* "fontTools/feaLib/lexer.py":229 + * def __next__(self): # Python 3 + * while self.lexers_: + * lexer = self.lexers_[-1] # <<<<<<<<<<<<<< + * try: + * token_type, token, location = next(lexer) + */ + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_lexers); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 229, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, -1L, long, 1, __Pyx_PyInt_From_long, 0, 1, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 229, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF_SET(__pyx_v_lexer, __pyx_t_3); + __pyx_t_3 = 0; + + /* "fontTools/feaLib/lexer.py":230 + * while self.lexers_: + * lexer = self.lexers_[-1] + * try: # <<<<<<<<<<<<<< + * token_type, token, location = next(lexer) + * except StopIteration: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_4, &__pyx_t_5, &__pyx_t_6); + __Pyx_XGOTREF(__pyx_t_4); + __Pyx_XGOTREF(__pyx_t_5); + __Pyx_XGOTREF(__pyx_t_6); + /*try:*/ { + + /* "fontTools/feaLib/lexer.py":231 + * 
lexer = self.lexers_[-1] + * try: + * token_type, token, location = next(lexer) # <<<<<<<<<<<<<< + * except StopIteration: + * self.lexers_.pop() + */ + __pyx_t_3 = __Pyx_PyIter_Next(__pyx_v_lexer); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 231, __pyx_L5_error) + __Pyx_GOTREF(__pyx_t_3); + if ((likely(PyTuple_CheckExact(__pyx_t_3))) || (PyList_CheckExact(__pyx_t_3))) { + PyObject* sequence = __pyx_t_3; + Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); + if (unlikely(size != 3)) { + if (size > 3) __Pyx_RaiseTooManyValuesError(3); + else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); + __PYX_ERR(0, 231, __pyx_L5_error) + } + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + if (likely(PyTuple_CheckExact(sequence))) { + __pyx_t_1 = PyTuple_GET_ITEM(sequence, 0); + __pyx_t_7 = PyTuple_GET_ITEM(sequence, 1); + __pyx_t_8 = PyTuple_GET_ITEM(sequence, 2); + } else { + __pyx_t_1 = PyList_GET_ITEM(sequence, 0); + __pyx_t_7 = PyList_GET_ITEM(sequence, 1); + __pyx_t_8 = PyList_GET_ITEM(sequence, 2); + } + __Pyx_INCREF(__pyx_t_1); + __Pyx_INCREF(__pyx_t_7); + __Pyx_INCREF(__pyx_t_8); + #else + __pyx_t_1 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 231, __pyx_L5_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_7 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 231, __pyx_L5_error) + __Pyx_GOTREF(__pyx_t_7); + __pyx_t_8 = PySequence_ITEM(sequence, 2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 231, __pyx_L5_error) + __Pyx_GOTREF(__pyx_t_8); + #endif + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } else { + Py_ssize_t index = -1; + __pyx_t_9 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 231, __pyx_L5_error) + __Pyx_GOTREF(__pyx_t_9); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_10 = __Pyx_PyObject_GetIterNextFunc(__pyx_t_9); + index = 0; __pyx_t_1 = __pyx_t_10(__pyx_t_9); if (unlikely(!__pyx_t_1)) goto __pyx_L13_unpacking_failed; + __Pyx_GOTREF(__pyx_t_1); + index = 1; __pyx_t_7 = __pyx_t_10(__pyx_t_9); if (unlikely(!__pyx_t_7)) goto __pyx_L13_unpacking_failed; + __Pyx_GOTREF(__pyx_t_7); + index = 2; __pyx_t_8 = __pyx_t_10(__pyx_t_9); if (unlikely(!__pyx_t_8)) goto __pyx_L13_unpacking_failed; + __Pyx_GOTREF(__pyx_t_8); + if (__Pyx_IternextUnpackEndCheck(__pyx_t_10(__pyx_t_9), 3) < 0) __PYX_ERR(0, 231, __pyx_L5_error) + __pyx_t_10 = NULL; + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + goto __pyx_L14_unpacking_done; + __pyx_L13_unpacking_failed:; + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + __pyx_t_10 = NULL; + if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); + __PYX_ERR(0, 231, __pyx_L5_error) + __pyx_L14_unpacking_done:; + } + __Pyx_XDECREF_SET(__pyx_v_token_type, __pyx_t_1); + __pyx_t_1 = 0; + __Pyx_XDECREF_SET(__pyx_v_token, __pyx_t_7); + __pyx_t_7 = 0; + __Pyx_XDECREF_SET(__pyx_v_location, __pyx_t_8); + __pyx_t_8 = 0; + + /* "fontTools/feaLib/lexer.py":230 + * while self.lexers_: + * lexer = self.lexers_[-1] + * try: # <<<<<<<<<<<<<< + * token_type, token, location = next(lexer) + * except StopIteration: + */ + } + __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; + goto __pyx_L12_try_end; + __pyx_L5_error:; + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; + __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; + + /* "fontTools/feaLib/lexer.py":232 + * try: + * token_type, token, location = next(lexer) + * except 
StopIteration: # <<<<<<<<<<<<<< + * self.lexers_.pop() + * continue + */ + __pyx_t_11 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_StopIteration); + if (__pyx_t_11) { + __Pyx_AddTraceback("fontTools.feaLib.lexer.IncludingLexer.__next__", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_3, &__pyx_t_8, &__pyx_t_7) < 0) __PYX_ERR(0, 232, __pyx_L7_except_error) + __Pyx_XGOTREF(__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_8); + __Pyx_XGOTREF(__pyx_t_7); + + /* "fontTools/feaLib/lexer.py":233 + * token_type, token, location = next(lexer) + * except StopIteration: + * self.lexers_.pop() # <<<<<<<<<<<<<< + * continue + * if token_type is Lexer.NAME and token == "include": + */ + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_lexers); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 233, __pyx_L7_except_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_9 = __Pyx_PyObject_Pop(__pyx_t_1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 233, __pyx_L7_except_error) + __Pyx_GOTREF(__pyx_t_9); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + + /* "fontTools/feaLib/lexer.py":234 + * except StopIteration: + * self.lexers_.pop() + * continue # <<<<<<<<<<<<<< + * if token_type is Lexer.NAME and token == "include": + * fname_type, fname_token, fname_location = lexer.next() + */ + goto __pyx_L15_except_continue; + __pyx_L15_except_continue:; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + goto __pyx_L11_try_continue; + } + goto __pyx_L7_except_error; + + /* "fontTools/feaLib/lexer.py":230 + * while self.lexers_: + * lexer = self.lexers_[-1] + * try: # <<<<<<<<<<<<<< + * token_type, token, location = next(lexer) + * except StopIteration: + */ + __pyx_L7_except_error:; + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_XGIVEREF(__pyx_t_5); + __Pyx_XGIVEREF(__pyx_t_6); + __Pyx_ExceptionReset(__pyx_t_4, __pyx_t_5, __pyx_t_6); + goto __pyx_L1_error; + __pyx_L11_try_continue:; + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_XGIVEREF(__pyx_t_5); + __Pyx_XGIVEREF(__pyx_t_6); + __Pyx_ExceptionReset(__pyx_t_4, __pyx_t_5, __pyx_t_6); + goto __pyx_L3_continue; + __pyx_L12_try_end:; + } + + /* "fontTools/feaLib/lexer.py":235 + * self.lexers_.pop() + * continue + * if token_type is Lexer.NAME and token == "include": # <<<<<<<<<<<<<< + * fname_type, fname_token, fname_location = lexer.next() + * if fname_type is not Lexer.FILENAME: + */ + __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 235, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_NAME); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 235, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + __pyx_t_12 = (__pyx_v_token_type == __pyx_t_8); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + if (__pyx_t_12) { + } else { + __pyx_t_2 = __pyx_t_12; + goto __pyx_L18_bool_binop_done; + } + __pyx_t_12 = (__Pyx_PyUnicode_Equals(__pyx_v_token, __pyx_n_u_include, Py_EQ)); if (unlikely((__pyx_t_12 < 0))) __PYX_ERR(0, 235, __pyx_L1_error) + __pyx_t_2 = __pyx_t_12; + __pyx_L18_bool_binop_done:; + if (__pyx_t_2) { + + /* "fontTools/feaLib/lexer.py":236 + * continue + * if token_type is Lexer.NAME and token == "include": + * fname_type, fname_token, fname_location = lexer.next() # <<<<<<<<<<<<<< + * if fname_type is not Lexer.FILENAME: + * raise FeatureLibError("Expected file name", fname_location) + */ + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_lexer, 
__pyx_n_s_next_3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 236, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __pyx_t_3 = NULL; + __pyx_t_13 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_7))) { + __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_7); + if (likely(__pyx_t_3)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_7, function); + __pyx_t_13 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_3, NULL}; + __pyx_t_8 = __Pyx_PyObject_FastCall(__pyx_t_7, __pyx_callargs+1-__pyx_t_13, 0+__pyx_t_13); + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 236, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + } + if ((likely(PyTuple_CheckExact(__pyx_t_8))) || (PyList_CheckExact(__pyx_t_8))) { + PyObject* sequence = __pyx_t_8; + Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); + if (unlikely(size != 3)) { + if (size > 3) __Pyx_RaiseTooManyValuesError(3); + else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); + __PYX_ERR(0, 236, __pyx_L1_error) + } + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + if (likely(PyTuple_CheckExact(sequence))) { + __pyx_t_7 = PyTuple_GET_ITEM(sequence, 0); + __pyx_t_3 = PyTuple_GET_ITEM(sequence, 1); + __pyx_t_9 = PyTuple_GET_ITEM(sequence, 2); + } else { + __pyx_t_7 = PyList_GET_ITEM(sequence, 0); + __pyx_t_3 = PyList_GET_ITEM(sequence, 1); + __pyx_t_9 = PyList_GET_ITEM(sequence, 2); + } + __Pyx_INCREF(__pyx_t_7); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(__pyx_t_9); + #else + __pyx_t_7 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 236, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __pyx_t_3 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 236, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_9 = PySequence_ITEM(sequence, 2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 236, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + #endif + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + } else { + Py_ssize_t index = -1; + __pyx_t_1 = PyObject_GetIter(__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 236, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __pyx_t_10 = __Pyx_PyObject_GetIterNextFunc(__pyx_t_1); + index = 0; __pyx_t_7 = __pyx_t_10(__pyx_t_1); if (unlikely(!__pyx_t_7)) goto __pyx_L20_unpacking_failed; + __Pyx_GOTREF(__pyx_t_7); + index = 1; __pyx_t_3 = __pyx_t_10(__pyx_t_1); if (unlikely(!__pyx_t_3)) goto __pyx_L20_unpacking_failed; + __Pyx_GOTREF(__pyx_t_3); + index = 2; __pyx_t_9 = __pyx_t_10(__pyx_t_1); if (unlikely(!__pyx_t_9)) goto __pyx_L20_unpacking_failed; + __Pyx_GOTREF(__pyx_t_9); + if (__Pyx_IternextUnpackEndCheck(__pyx_t_10(__pyx_t_1), 3) < 0) __PYX_ERR(0, 236, __pyx_L1_error) + __pyx_t_10 = NULL; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + goto __pyx_L21_unpacking_done; + __pyx_L20_unpacking_failed:; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_10 = NULL; + if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); + __PYX_ERR(0, 236, __pyx_L1_error) + __pyx_L21_unpacking_done:; + } + __Pyx_XDECREF_SET(__pyx_v_fname_type, __pyx_t_7); + __pyx_t_7 = 0; + __Pyx_XDECREF_SET(__pyx_v_fname_token, __pyx_t_3); + __pyx_t_3 = 0; + __Pyx_XDECREF_SET(__pyx_v_fname_location, __pyx_t_9); + __pyx_t_9 = 0; + + /* "fontTools/feaLib/lexer.py":237 + * if token_type is Lexer.NAME and token == "include": + * fname_type, fname_token, fname_location = lexer.next() + * if fname_type is 
not Lexer.FILENAME: # <<<<<<<<<<<<<< + * raise FeatureLibError("Expected file name", fname_location) + * # semi_type, semi_token, semi_location = lexer.next() + */ + __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 237, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_FILENAME); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 237, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __pyx_t_2 = (__pyx_v_fname_type != __pyx_t_9); + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + if (unlikely(__pyx_t_2)) { + + /* "fontTools/feaLib/lexer.py":238 + * fname_type, fname_token, fname_location = lexer.next() + * if fname_type is not Lexer.FILENAME: + * raise FeatureLibError("Expected file name", fname_location) # <<<<<<<<<<<<<< + * # semi_type, semi_token, semi_location = lexer.next() + * # if semi_type is not Lexer.SYMBOL or semi_token != ";": + */ + __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_FeatureLibError); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 238, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __pyx_t_3 = NULL; + __pyx_t_13 = 0; + #if CYTHON_UNPACK_METHODS + if (unlikely(PyMethod_Check(__pyx_t_8))) { + __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_8); + if (likely(__pyx_t_3)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_8); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_8, function); + __pyx_t_13 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[3] = {__pyx_t_3, __pyx_kp_u_Expected_file_name, __pyx_v_fname_location}; + __pyx_t_9 = __Pyx_PyObject_FastCall(__pyx_t_8, __pyx_callargs+1-__pyx_t_13, 2+__pyx_t_13); + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 238, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + } + __Pyx_Raise(__pyx_t_9, 0, 0, 0); + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + __PYX_ERR(0, 238, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":237 + * if token_type is Lexer.NAME and token == "include": + * fname_type, fname_token, fname_location = lexer.next() + * if fname_type is not Lexer.FILENAME: # <<<<<<<<<<<<<< + * raise FeatureLibError("Expected file name", fname_location) + * # semi_type, semi_token, semi_location = lexer.next() + */ + } + + /* "fontTools/feaLib/lexer.py":242 + * # if semi_type is not Lexer.SYMBOL or semi_token != ";": + * # raise FeatureLibError("Expected ';'", semi_location) + * if os.path.isabs(fname_token): # <<<<<<<<<<<<<< + * path = fname_token + * else: + */ + __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_os); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 242, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_path); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 242, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_isabs); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 242, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = NULL; + __pyx_t_13 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_8))) { + __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_8); + if (likely(__pyx_t_3)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_8); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_8, function); + __pyx_t_13 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_fname_token}; + 
__pyx_t_9 = __Pyx_PyObject_FastCall(__pyx_t_8, __pyx_callargs+1-__pyx_t_13, 1+__pyx_t_13); + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 242, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + } + __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 242, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + if (__pyx_t_2) { + + /* "fontTools/feaLib/lexer.py":243 + * # raise FeatureLibError("Expected ';'", semi_location) + * if os.path.isabs(fname_token): + * path = fname_token # <<<<<<<<<<<<<< + * else: + * if self.includeDir is not None: + */ + __Pyx_INCREF(__pyx_v_fname_token); + __Pyx_XDECREF_SET(__pyx_v_path, __pyx_v_fname_token); + + /* "fontTools/feaLib/lexer.py":242 + * # if semi_type is not Lexer.SYMBOL or semi_token != ";": + * # raise FeatureLibError("Expected ';'", semi_location) + * if os.path.isabs(fname_token): # <<<<<<<<<<<<<< + * path = fname_token + * else: + */ + goto __pyx_L23; + } + + /* "fontTools/feaLib/lexer.py":245 + * path = fname_token + * else: + * if self.includeDir is not None: # <<<<<<<<<<<<<< + * curpath = self.includeDir + * elif self.featurefilepath is not None: + */ + /*else*/ { + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_includeDir); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 245, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_2 = (__pyx_t_9 != Py_None); + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + if (__pyx_t_2) { + + /* "fontTools/feaLib/lexer.py":246 + * else: + * if self.includeDir is not None: + * curpath = self.includeDir # <<<<<<<<<<<<<< + * elif self.featurefilepath is not None: + * curpath = os.path.dirname(self.featurefilepath) + */ + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_includeDir); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 246, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __Pyx_XDECREF_SET(__pyx_v_curpath, __pyx_t_9); + __pyx_t_9 = 0; + + /* "fontTools/feaLib/lexer.py":245 + * path = fname_token + * else: + * if self.includeDir is not None: # <<<<<<<<<<<<<< + * curpath = self.includeDir + * elif self.featurefilepath is not None: + */ + goto __pyx_L24; + } + + /* "fontTools/feaLib/lexer.py":247 + * if self.includeDir is not None: + * curpath = self.includeDir + * elif self.featurefilepath is not None: # <<<<<<<<<<<<<< + * curpath = os.path.dirname(self.featurefilepath) + * else: + */ + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_featurefilepath); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 247, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_2 = (__pyx_t_9 != Py_None); + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + if (__pyx_t_2) { + + /* "fontTools/feaLib/lexer.py":248 + * curpath = self.includeDir + * elif self.featurefilepath is not None: + * curpath = os.path.dirname(self.featurefilepath) # <<<<<<<<<<<<<< + * else: + * # if the IncludingLexer was initialized from an in-memory + */ + __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_os); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 248, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_path); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 248, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_dirname); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 248, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, 
__pyx_n_s_featurefilepath); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 248, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_7 = NULL; + __pyx_t_13 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_8))) { + __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_8); + if (likely(__pyx_t_7)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_8); + __Pyx_INCREF(__pyx_t_7); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_8, function); + __pyx_t_13 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_7, __pyx_t_3}; + __pyx_t_9 = __Pyx_PyObject_FastCall(__pyx_t_8, __pyx_callargs+1-__pyx_t_13, 1+__pyx_t_13); + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 248, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + } + __Pyx_XDECREF_SET(__pyx_v_curpath, __pyx_t_9); + __pyx_t_9 = 0; + + /* "fontTools/feaLib/lexer.py":247 + * if self.includeDir is not None: + * curpath = self.includeDir + * elif self.featurefilepath is not None: # <<<<<<<<<<<<<< + * curpath = os.path.dirname(self.featurefilepath) + * else: + */ + goto __pyx_L24; + } + + /* "fontTools/feaLib/lexer.py":254 + * # its filesystem path, therefore we fall back to using the + * # current working directory to resolve relative includes + * curpath = os.getcwd() # <<<<<<<<<<<<<< + * path = os.path.join(curpath, fname_token) + * if len(self.lexers_) >= 5: + */ + /*else*/ { + __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_os); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 254, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_getcwd); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 254, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __pyx_t_8 = NULL; + __pyx_t_13 = 0; + #if CYTHON_UNPACK_METHODS + if (unlikely(PyMethod_Check(__pyx_t_3))) { + __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_3); + if (likely(__pyx_t_8)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); + __Pyx_INCREF(__pyx_t_8); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_3, function); + __pyx_t_13 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_8, NULL}; + __pyx_t_9 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_13, 0+__pyx_t_13); + __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; + if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 254, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } + __Pyx_XDECREF_SET(__pyx_v_curpath, __pyx_t_9); + __pyx_t_9 = 0; + } + __pyx_L24:; + + /* "fontTools/feaLib/lexer.py":255 + * # current working directory to resolve relative includes + * curpath = os.getcwd() + * path = os.path.join(curpath, fname_token) # <<<<<<<<<<<<<< + * if len(self.lexers_) >= 5: + * raise FeatureLibError("Too many recursive includes", fname_location) + */ + __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_os); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 255, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_path); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 255, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_join); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 255, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __pyx_t_8 = NULL; + __pyx_t_13 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_3))) { + __pyx_t_8 = 
PyMethod_GET_SELF(__pyx_t_3); + if (likely(__pyx_t_8)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); + __Pyx_INCREF(__pyx_t_8); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_3, function); + __pyx_t_13 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[3] = {__pyx_t_8, __pyx_v_curpath, __pyx_v_fname_token}; + __pyx_t_9 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_13, 2+__pyx_t_13); + __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; + if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 255, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } + __Pyx_XDECREF_SET(__pyx_v_path, __pyx_t_9); + __pyx_t_9 = 0; + } + __pyx_L23:; + + /* "fontTools/feaLib/lexer.py":256 + * curpath = os.getcwd() + * path = os.path.join(curpath, fname_token) + * if len(self.lexers_) >= 5: # <<<<<<<<<<<<<< + * raise FeatureLibError("Too many recursive includes", fname_location) + * try: + */ + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_lexers); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 256, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_14 = PyObject_Length(__pyx_t_9); if (unlikely(__pyx_t_14 == ((Py_ssize_t)-1))) __PYX_ERR(0, 256, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + __pyx_t_2 = (__pyx_t_14 >= 5); + if (unlikely(__pyx_t_2)) { + + /* "fontTools/feaLib/lexer.py":257 + * path = os.path.join(curpath, fname_token) + * if len(self.lexers_) >= 5: + * raise FeatureLibError("Too many recursive includes", fname_location) # <<<<<<<<<<<<<< + * try: + * self.lexers_.append(self.make_lexer_(path)) + */ + __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_FeatureLibError); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 257, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_8 = NULL; + __pyx_t_13 = 0; + #if CYTHON_UNPACK_METHODS + if (unlikely(PyMethod_Check(__pyx_t_3))) { + __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_3); + if (likely(__pyx_t_8)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); + __Pyx_INCREF(__pyx_t_8); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_3, function); + __pyx_t_13 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[3] = {__pyx_t_8, __pyx_kp_u_Too_many_recursive_includes, __pyx_v_fname_location}; + __pyx_t_9 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_13, 2+__pyx_t_13); + __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; + if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 257, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } + __Pyx_Raise(__pyx_t_9, 0, 0, 0); + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + __PYX_ERR(0, 257, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":256 + * curpath = os.getcwd() + * path = os.path.join(curpath, fname_token) + * if len(self.lexers_) >= 5: # <<<<<<<<<<<<<< + * raise FeatureLibError("Too many recursive includes", fname_location) + * try: + */ + } + + /* "fontTools/feaLib/lexer.py":258 + * if len(self.lexers_) >= 5: + * raise FeatureLibError("Too many recursive includes", fname_location) + * try: # <<<<<<<<<<<<<< + * self.lexers_.append(self.make_lexer_(path)) + * except FileNotFoundError as err: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_6, &__pyx_t_5, &__pyx_t_4); + __Pyx_XGOTREF(__pyx_t_6); + __Pyx_XGOTREF(__pyx_t_5); + __Pyx_XGOTREF(__pyx_t_4); + /*try:*/ { + + /* "fontTools/feaLib/lexer.py":259 + * raise FeatureLibError("Too many recursive includes", fname_location) + * try: + * self.lexers_.append(self.make_lexer_(path)) # <<<<<<<<<<<<<< + * except FileNotFoundError as err: + 
* raise IncludedFeaNotFound(fname_token, fname_location) from err + */ + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_lexers); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 259, __pyx_L26_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_make_lexer); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 259, __pyx_L26_error) + __Pyx_GOTREF(__pyx_t_8); + __pyx_t_7 = NULL; + __pyx_t_13 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_8))) { + __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_8); + if (likely(__pyx_t_7)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_8); + __Pyx_INCREF(__pyx_t_7); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_8, function); + __pyx_t_13 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_7, __pyx_v_path}; + __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_8, __pyx_callargs+1-__pyx_t_13, 1+__pyx_t_13); + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 259, __pyx_L26_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + } + __pyx_t_15 = __Pyx_PyObject_Append(__pyx_t_9, __pyx_t_3); if (unlikely(__pyx_t_15 == ((int)-1))) __PYX_ERR(0, 259, __pyx_L26_error) + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "fontTools/feaLib/lexer.py":258 + * if len(self.lexers_) >= 5: + * raise FeatureLibError("Too many recursive includes", fname_location) + * try: # <<<<<<<<<<<<<< + * self.lexers_.append(self.make_lexer_(path)) + * except FileNotFoundError as err: + */ + } + __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; + goto __pyx_L33_try_end; + __pyx_L26_error:; + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; + __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; + + /* "fontTools/feaLib/lexer.py":260 + * try: + * self.lexers_.append(self.make_lexer_(path)) + * except FileNotFoundError as err: # <<<<<<<<<<<<<< + * raise IncludedFeaNotFound(fname_token, fname_location) from err + * else: + */ + __Pyx_ErrFetch(&__pyx_t_3, &__pyx_t_9, &__pyx_t_8); + __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_FileNotFoundError); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 260, __pyx_L28_except_error) + __Pyx_GOTREF(__pyx_t_7); + __pyx_t_11 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_3, __pyx_t_7); + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_ErrRestore(__pyx_t_3, __pyx_t_9, __pyx_t_8); + __pyx_t_3 = 0; __pyx_t_9 = 0; __pyx_t_8 = 0; + if (__pyx_t_11) { + __Pyx_AddTraceback("fontTools.feaLib.lexer.IncludingLexer.__next__", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_8, &__pyx_t_9, &__pyx_t_3) < 0) __PYX_ERR(0, 260, __pyx_L28_except_error) + __Pyx_XGOTREF(__pyx_t_8); + __Pyx_XGOTREF(__pyx_t_9); + __Pyx_XGOTREF(__pyx_t_3); + __Pyx_INCREF(__pyx_t_9); + __pyx_v_err = __pyx_t_9; + /*try:*/ { + + /* "fontTools/feaLib/lexer.py":261 + * self.lexers_.append(self.make_lexer_(path)) + * except FileNotFoundError as err: + * raise IncludedFeaNotFound(fname_token, fname_location) from err # <<<<<<<<<<<<<< + * else: + * return (token_type, token, location) + */ + __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_IncludedFeaNotFound); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 261, __pyx_L39_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_16 = NULL; + __pyx_t_13 = 0; + #if CYTHON_UNPACK_METHODS + if 
(unlikely(PyMethod_Check(__pyx_t_1))) { + __pyx_t_16 = PyMethod_GET_SELF(__pyx_t_1); + if (likely(__pyx_t_16)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); + __Pyx_INCREF(__pyx_t_16); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_1, function); + __pyx_t_13 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[3] = {__pyx_t_16, __pyx_v_fname_token, __pyx_v_fname_location}; + __pyx_t_7 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+1-__pyx_t_13, 2+__pyx_t_13); + __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0; + if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 261, __pyx_L39_error) + __Pyx_GOTREF(__pyx_t_7); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + } + __Pyx_Raise(__pyx_t_7, 0, 0, __pyx_v_err); + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + __PYX_ERR(0, 261, __pyx_L39_error) + } + + /* "fontTools/feaLib/lexer.py":260 + * try: + * self.lexers_.append(self.make_lexer_(path)) + * except FileNotFoundError as err: # <<<<<<<<<<<<<< + * raise IncludedFeaNotFound(fname_token, fname_location) from err + * else: + */ + /*finally:*/ { + __pyx_L39_error:; + /*exception exit:*/{ + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __pyx_t_19 = 0; __pyx_t_20 = 0; __pyx_t_21 = 0; __pyx_t_22 = 0; __pyx_t_23 = 0; __pyx_t_24 = 0; + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0; + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_22, &__pyx_t_23, &__pyx_t_24); + if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_19, &__pyx_t_20, &__pyx_t_21) < 0)) __Pyx_ErrFetch(&__pyx_t_19, &__pyx_t_20, &__pyx_t_21); + __Pyx_XGOTREF(__pyx_t_19); + __Pyx_XGOTREF(__pyx_t_20); + __Pyx_XGOTREF(__pyx_t_21); + __Pyx_XGOTREF(__pyx_t_22); + __Pyx_XGOTREF(__pyx_t_23); + __Pyx_XGOTREF(__pyx_t_24); + __pyx_t_11 = __pyx_lineno; __pyx_t_17 = __pyx_clineno; __pyx_t_18 = __pyx_filename; + { + __Pyx_DECREF(__pyx_v_err); __pyx_v_err = 0; + } + if (PY_MAJOR_VERSION >= 3) { + __Pyx_XGIVEREF(__pyx_t_22); + __Pyx_XGIVEREF(__pyx_t_23); + __Pyx_XGIVEREF(__pyx_t_24); + __Pyx_ExceptionReset(__pyx_t_22, __pyx_t_23, __pyx_t_24); + } + __Pyx_XGIVEREF(__pyx_t_19); + __Pyx_XGIVEREF(__pyx_t_20); + __Pyx_XGIVEREF(__pyx_t_21); + __Pyx_ErrRestore(__pyx_t_19, __pyx_t_20, __pyx_t_21); + __pyx_t_19 = 0; __pyx_t_20 = 0; __pyx_t_21 = 0; __pyx_t_22 = 0; __pyx_t_23 = 0; __pyx_t_24 = 0; + __pyx_lineno = __pyx_t_11; __pyx_clineno = __pyx_t_17; __pyx_filename = __pyx_t_18; + goto __pyx_L28_except_error; + } + } + } + goto __pyx_L28_except_error; + + /* "fontTools/feaLib/lexer.py":258 + * if len(self.lexers_) >= 5: + * raise FeatureLibError("Too many recursive includes", fname_location) + * try: # <<<<<<<<<<<<<< + * self.lexers_.append(self.make_lexer_(path)) + * except FileNotFoundError as err: + */ + __pyx_L28_except_error:; + __Pyx_XGIVEREF(__pyx_t_6); + __Pyx_XGIVEREF(__pyx_t_5); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_ExceptionReset(__pyx_t_6, __pyx_t_5, __pyx_t_4); + goto __pyx_L1_error; + __pyx_L33_try_end:; + } + + /* "fontTools/feaLib/lexer.py":235 + * self.lexers_.pop() + * continue + * if token_type is Lexer.NAME and token == "include": # <<<<<<<<<<<<<< + * fname_type, fname_token, fname_location = lexer.next() + * if fname_type is not Lexer.FILENAME: + */ + goto __pyx_L17; + } + + /* "fontTools/feaLib/lexer.py":263 + * raise IncludedFeaNotFound(fname_token, fname_location) from err + * else: + * return (token_type, token, location) # <<<<<<<<<<<<<< + * raise StopIteration() + * + */ + /*else*/ { + __Pyx_XDECREF(__pyx_r); + __pyx_t_3 = 
PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 263, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_INCREF(__pyx_v_token_type); + __Pyx_GIVEREF(__pyx_v_token_type); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_token_type)) __PYX_ERR(0, 263, __pyx_L1_error); + __Pyx_INCREF(__pyx_v_token); + __Pyx_GIVEREF(__pyx_v_token); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_token)) __PYX_ERR(0, 263, __pyx_L1_error); + __Pyx_INCREF(__pyx_v_location); + __Pyx_GIVEREF(__pyx_v_location); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_v_location)) __PYX_ERR(0, 263, __pyx_L1_error); + __pyx_r = __pyx_t_3; + __pyx_t_3 = 0; + goto __pyx_L0; + } + __pyx_L17:; + __pyx_L3_continue:; + } + + /* "fontTools/feaLib/lexer.py":264 + * else: + * return (token_type, token, location) + * raise StopIteration() # <<<<<<<<<<<<<< + * + * @staticmethod + */ + __pyx_t_3 = __Pyx_PyObject_CallNoArg(__pyx_builtin_StopIteration); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 264, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(0, 264, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":227 + * return self.__next__() + * + * def __next__(self): # Python 3 # <<<<<<<<<<<<<< + * while self.lexers_: + * lexer = self.lexers_[-1] + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_XDECREF(__pyx_t_9); + __Pyx_XDECREF(__pyx_t_16); + __Pyx_AddTraceback("fontTools.feaLib.lexer.IncludingLexer.__next__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_lexer); + __Pyx_XDECREF(__pyx_v_token_type); + __Pyx_XDECREF(__pyx_v_token); + __Pyx_XDECREF(__pyx_v_location); + __Pyx_XDECREF(__pyx_v_fname_type); + __Pyx_XDECREF(__pyx_v_fname_token); + __Pyx_XDECREF(__pyx_v_fname_location); + __Pyx_XDECREF(__pyx_v_path); + __Pyx_XDECREF(__pyx_v_curpath); + __Pyx_XDECREF(__pyx_v_err); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "fontTools/feaLib/lexer.py":266 + * raise StopIteration() + * + * @staticmethod # <<<<<<<<<<<<<< + * def make_lexer_(file_or_path): + * if hasattr(file_or_path, "read"): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_14IncludingLexer_9make_lexer_(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +); /*proto*/ +PyDoc_STRVAR(__pyx_doc_9fontTools_6feaLib_5lexer_14IncludingLexer_8make_lexer_, "IncludingLexer.make_lexer_(file_or_path)"); +static PyMethodDef __pyx_mdef_9fontTools_6feaLib_5lexer_14IncludingLexer_9make_lexer_ = {"make_lexer_", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_9fontTools_6feaLib_5lexer_14IncludingLexer_9make_lexer_, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_9fontTools_6feaLib_5lexer_14IncludingLexer_8make_lexer_}; +static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_14IncludingLexer_9make_lexer_(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +) { + PyObject *__pyx_v_file_or_path = 0; + #if !CYTHON_METH_FASTCALL + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + #endif + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject* values[1] = {0}; + int __pyx_lineno = 0; + const char 
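+/* For readability, an approximate consolidated sketch of the Python that the
+ * generated code above implements, assembled only from the quoted
+ * "fontTools/feaLib/lexer.py" fragments (lines 227-264) embedded above; the
+ * commented-out semicolon check from the original source is omitted here:
+ *
+ *     def __next__(self):  # Python 3
+ *         while self.lexers_:
+ *             lexer = self.lexers_[-1]
+ *             try:
+ *                 token_type, token, location = next(lexer)
+ *             except StopIteration:
+ *                 self.lexers_.pop()
+ *                 continue
+ *             if token_type is Lexer.NAME and token == "include":
+ *                 fname_type, fname_token, fname_location = lexer.next()
+ *                 if fname_type is not Lexer.FILENAME:
+ *                     raise FeatureLibError("Expected file name", fname_location)
+ *                 if os.path.isabs(fname_token):
+ *                     path = fname_token
+ *                 else:
+ *                     if self.includeDir is not None:
+ *                         curpath = self.includeDir
+ *                     elif self.featurefilepath is not None:
+ *                         curpath = os.path.dirname(self.featurefilepath)
+ *                     else:
+ *                         # in-memory feature file: resolve relative includes
+ *                         # against the current working directory
+ *                         curpath = os.getcwd()
+ *                     path = os.path.join(curpath, fname_token)
+ *                 if len(self.lexers_) >= 5:
+ *                     raise FeatureLibError("Too many recursive includes", fname_location)
+ *                 try:
+ *                     self.lexers_.append(self.make_lexer_(path))
+ *                 except FileNotFoundError as err:
+ *                     raise IncludedFeaNotFound(fname_token, fname_location) from err
+ *             else:
+ *                 return (token_type, token, location)
+ *         raise StopIteration()
+ */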
*__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("make_lexer_ (wrapper)", 0); + #if !CYTHON_METH_FASTCALL + #if CYTHON_ASSUME_SAFE_MACROS + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; + #endif + #endif + __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); + { + PyObject **__pyx_pyargnames[] = {&__pyx_n_s_file_or_path,0}; + if (__pyx_kwds) { + Py_ssize_t kw_args; + switch (__pyx_nargs) { + case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); + switch (__pyx_nargs) { + case 0: + if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_file_or_path)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[0]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 266, __pyx_L3_error) + else goto __pyx_L5_argtuple_error; + } + if (unlikely(kw_args > 0)) { + const Py_ssize_t kwd_pos_args = __pyx_nargs; + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "make_lexer_") < 0)) __PYX_ERR(0, 266, __pyx_L3_error) + } + } else if (unlikely(__pyx_nargs != 1)) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + } + __pyx_v_file_or_path = values[0]; + } + goto __pyx_L6_skip; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("make_lexer_", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 266, __pyx_L3_error) + __pyx_L6_skip:; + goto __pyx_L4_argument_unpacking_done; + __pyx_L3_error:; + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_AddTraceback("fontTools.feaLib.lexer.IncludingLexer.make_lexer_", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_9fontTools_6feaLib_5lexer_14IncludingLexer_8make_lexer_(__pyx_self, __pyx_v_file_or_path); + + /* function exit code */ + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_14IncludingLexer_8make_lexer_(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_file_or_path) { + PyObject *__pyx_v_fileobj = NULL; + int __pyx_v_closing; + PyObject *__pyx_v_filename = NULL; + PyObject *__pyx_v_data = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + unsigned int __pyx_t_5; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("make_lexer_", 1); + + /* "fontTools/feaLib/lexer.py":268 + * @staticmethod + * def make_lexer_(file_or_path): + * if hasattr(file_or_path, "read"): # <<<<<<<<<<<<<< + * fileobj, closing = file_or_path, False + * else: + */ + __pyx_t_1 = __Pyx_HasAttr(__pyx_v_file_or_path, __pyx_n_u_read); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(0, 268, __pyx_L1_error) + if (__pyx_t_1) { + + /* "fontTools/feaLib/lexer.py":269 + * def 
make_lexer_(file_or_path): + * if hasattr(file_or_path, "read"): + * fileobj, closing = file_or_path, False # <<<<<<<<<<<<<< + * else: + * filename, closing = file_or_path, True + */ + __pyx_t_2 = __pyx_v_file_or_path; + __Pyx_INCREF(__pyx_t_2); + __pyx_t_1 = 0; + __pyx_v_fileobj = __pyx_t_2; + __pyx_t_2 = 0; + __pyx_v_closing = __pyx_t_1; + + /* "fontTools/feaLib/lexer.py":268 + * @staticmethod + * def make_lexer_(file_or_path): + * if hasattr(file_or_path, "read"): # <<<<<<<<<<<<<< + * fileobj, closing = file_or_path, False + * else: + */ + goto __pyx_L3; + } + + /* "fontTools/feaLib/lexer.py":271 + * fileobj, closing = file_or_path, False + * else: + * filename, closing = file_or_path, True # <<<<<<<<<<<<<< + * fileobj = open(filename, "r", encoding="utf-8-sig") + * data = fileobj.read() + */ + /*else*/ { + __pyx_t_2 = __pyx_v_file_or_path; + __Pyx_INCREF(__pyx_t_2); + __pyx_t_1 = 1; + __pyx_v_filename = __pyx_t_2; + __pyx_t_2 = 0; + __pyx_v_closing = __pyx_t_1; + + /* "fontTools/feaLib/lexer.py":272 + * else: + * filename, closing = file_or_path, True + * fileobj = open(filename, "r", encoding="utf-8-sig") # <<<<<<<<<<<<<< + * data = fileobj.read() + * filename = getattr(fileobj, "name", None) + */ + __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 272, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_INCREF(__pyx_v_filename); + __Pyx_GIVEREF(__pyx_v_filename); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_filename)) __PYX_ERR(0, 272, __pyx_L1_error); + __Pyx_INCREF(__pyx_n_u_r); + __Pyx_GIVEREF(__pyx_n_u_r); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_n_u_r)) __PYX_ERR(0, 272, __pyx_L1_error); + __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 272, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_encoding, __pyx_kp_u_utf_8_sig) < 0) __PYX_ERR(0, 272, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_open, __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 272, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v_fileobj = __pyx_t_4; + __pyx_t_4 = 0; + } + __pyx_L3:; + + /* "fontTools/feaLib/lexer.py":273 + * filename, closing = file_or_path, True + * fileobj = open(filename, "r", encoding="utf-8-sig") + * data = fileobj.read() # <<<<<<<<<<<<<< + * filename = getattr(fileobj, "name", None) + * if closing: + */ + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_fileobj, __pyx_n_s_read); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 273, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_2 = NULL; + __pyx_t_5 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_3))) { + __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); + if (likely(__pyx_t_2)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); + __Pyx_INCREF(__pyx_t_2); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_3, function); + __pyx_t_5 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; + __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_5, 0+__pyx_t_5); + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 273, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } + __pyx_v_data = __pyx_t_4; + __pyx_t_4 = 0; + + /* "fontTools/feaLib/lexer.py":274 + * fileobj = open(filename, "r", encoding="utf-8-sig") + * data = fileobj.read() + * filename = getattr(fileobj, "name", None) # <<<<<<<<<<<<<< + * if closing: 
+ * fileobj.close() + */ + __pyx_t_4 = __Pyx_GetAttr3(__pyx_v_fileobj, __pyx_n_u_name, Py_None); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 274, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_XDECREF_SET(__pyx_v_filename, __pyx_t_4); + __pyx_t_4 = 0; + + /* "fontTools/feaLib/lexer.py":275 + * data = fileobj.read() + * filename = getattr(fileobj, "name", None) + * if closing: # <<<<<<<<<<<<<< + * fileobj.close() + * return Lexer(data, filename) + */ + if (__pyx_v_closing) { + + /* "fontTools/feaLib/lexer.py":276 + * filename = getattr(fileobj, "name", None) + * if closing: + * fileobj.close() # <<<<<<<<<<<<<< + * return Lexer(data, filename) + * + */ + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_fileobj, __pyx_n_s_close); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 276, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_2 = NULL; + __pyx_t_5 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_3))) { + __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); + if (likely(__pyx_t_2)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); + __Pyx_INCREF(__pyx_t_2); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_3, function); + __pyx_t_5 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; + __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_5, 0+__pyx_t_5); + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 276, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "fontTools/feaLib/lexer.py":275 + * data = fileobj.read() + * filename = getattr(fileobj, "name", None) + * if closing: # <<<<<<<<<<<<<< + * fileobj.close() + * return Lexer(data, filename) + */ + } + + /* "fontTools/feaLib/lexer.py":277 + * if closing: + * fileobj.close() + * return Lexer(data, filename) # <<<<<<<<<<<<<< + * + * def scan_anonymous_block(self, tag): + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 277, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_2 = NULL; + __pyx_t_5 = 0; + #if CYTHON_UNPACK_METHODS + if (unlikely(PyMethod_Check(__pyx_t_3))) { + __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); + if (likely(__pyx_t_2)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); + __Pyx_INCREF(__pyx_t_2); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_3, function); + __pyx_t_5 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[3] = {__pyx_t_2, __pyx_v_data, __pyx_v_filename}; + __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_5, 2+__pyx_t_5); + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 277, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } + __pyx_r = __pyx_t_4; + __pyx_t_4 = 0; + goto __pyx_L0; + + /* "fontTools/feaLib/lexer.py":266 + * raise StopIteration() + * + * @staticmethod # <<<<<<<<<<<<<< + * def make_lexer_(file_or_path): + * if hasattr(file_or_path, "read"): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("fontTools.feaLib.lexer.IncludingLexer.make_lexer_", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_fileobj); + __Pyx_XDECREF(__pyx_v_filename); + __Pyx_XDECREF(__pyx_v_data); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* 
"fontTools/feaLib/lexer.py":279 + * return Lexer(data, filename) + * + * def scan_anonymous_block(self, tag): # <<<<<<<<<<<<<< + * return self.lexers_[-1].scan_anonymous_block(tag) + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_14IncludingLexer_11scan_anonymous_block(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +); /*proto*/ +PyDoc_STRVAR(__pyx_doc_9fontTools_6feaLib_5lexer_14IncludingLexer_10scan_anonymous_block, "IncludingLexer.scan_anonymous_block(self, tag)"); +static PyMethodDef __pyx_mdef_9fontTools_6feaLib_5lexer_14IncludingLexer_11scan_anonymous_block = {"scan_anonymous_block", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_9fontTools_6feaLib_5lexer_14IncludingLexer_11scan_anonymous_block, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_9fontTools_6feaLib_5lexer_14IncludingLexer_10scan_anonymous_block}; +static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_14IncludingLexer_11scan_anonymous_block(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +) { + PyObject *__pyx_v_self = 0; + PyObject *__pyx_v_tag = 0; + #if !CYTHON_METH_FASTCALL + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + #endif + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject* values[2] = {0,0}; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("scan_anonymous_block (wrapper)", 0); + #if !CYTHON_METH_FASTCALL + #if CYTHON_ASSUME_SAFE_MACROS + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; + #endif + #endif + __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); + { + PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_tag,0}; + if (__pyx_kwds) { + Py_ssize_t kw_args; + switch (__pyx_nargs) { + case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); + switch (__pyx_nargs) { + case 0: + if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_self)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[0]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 279, __pyx_L3_error) + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_tag)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[1]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 279, __pyx_L3_error) + else { + __Pyx_RaiseArgtupleInvalid("scan_anonymous_block", 1, 2, 2, 1); __PYX_ERR(0, 279, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + const Py_ssize_t kwd_pos_args = __pyx_nargs; + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "scan_anonymous_block") < 0)) __PYX_ERR(0, 279, __pyx_L3_error) + } + } else if (unlikely(__pyx_nargs != 2)) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + values[1] = 
__Pyx_Arg_FASTCALL(__pyx_args, 1); + } + __pyx_v_self = values[0]; + __pyx_v_tag = values[1]; + } + goto __pyx_L6_skip; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("scan_anonymous_block", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 279, __pyx_L3_error) + __pyx_L6_skip:; + goto __pyx_L4_argument_unpacking_done; + __pyx_L3_error:; + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_AddTraceback("fontTools.feaLib.lexer.IncludingLexer.scan_anonymous_block", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_9fontTools_6feaLib_5lexer_14IncludingLexer_10scan_anonymous_block(__pyx_self, __pyx_v_self, __pyx_v_tag); + + /* function exit code */ + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_14IncludingLexer_10scan_anonymous_block(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_tag) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + unsigned int __pyx_t_4; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("scan_anonymous_block", 1); + + /* "fontTools/feaLib/lexer.py":280 + * + * def scan_anonymous_block(self, tag): + * return self.lexers_[-1].scan_anonymous_block(tag) # <<<<<<<<<<<<<< + * + * + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_lexers); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 280, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_2, -1L, long, 1, __Pyx_PyInt_From_long, 0, 1, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 280, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_scan_anonymous_block); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 280, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = NULL; + __pyx_t_4 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_2))) { + __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); + if (likely(__pyx_t_3)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_2, function); + __pyx_t_4 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_tag}; + __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_4, 1+__pyx_t_4); + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 280, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "fontTools/feaLib/lexer.py":279 + * return Lexer(data, filename) + * + * def scan_anonymous_block(self, tag): # <<<<<<<<<<<<<< + * return self.lexers_[-1].scan_anonymous_block(tag) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + 
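+/* Likewise, the Python behind make_lexer_ and scan_anonymous_block, assembled
+ * from the quoted "fontTools/feaLib/lexer.py" fragments above (lines 266-280),
+ * is approximately:
+ *
+ *     @staticmethod
+ *     def make_lexer_(file_or_path):
+ *         if hasattr(file_or_path, "read"):
+ *             fileobj, closing = file_or_path, False
+ *         else:
+ *             filename, closing = file_or_path, True
+ *             fileobj = open(filename, "r", encoding="utf-8-sig")
+ *         data = fileobj.read()
+ *         filename = getattr(fileobj, "name", None)
+ *         if closing:
+ *             fileobj.close()
+ *         return Lexer(data, filename)
+ *
+ *     def scan_anonymous_block(self, tag):
+ *         return self.lexers_[-1].scan_anonymous_block(tag)
+ */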
__Pyx_AddTraceback("fontTools.feaLib.lexer.IncludingLexer.scan_anonymous_block", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "fontTools/feaLib/lexer.py":286 + * """Lexer that does not follow `include` statements, emits them as-is.""" + * + * def __next__(self): # Python 3 # <<<<<<<<<<<<<< + * return next(self.lexers_[0]) + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_17NonIncludingLexer_1__next__(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +); /*proto*/ +PyDoc_STRVAR(__pyx_doc_9fontTools_6feaLib_5lexer_17NonIncludingLexer___next__, "NonIncludingLexer.__next__(self)"); +static PyMethodDef __pyx_mdef_9fontTools_6feaLib_5lexer_17NonIncludingLexer_1__next__ = {"__next__", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_9fontTools_6feaLib_5lexer_17NonIncludingLexer_1__next__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_9fontTools_6feaLib_5lexer_17NonIncludingLexer___next__}; +static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_17NonIncludingLexer_1__next__(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +) { + PyObject *__pyx_v_self = 0; + #if !CYTHON_METH_FASTCALL + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + #endif + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject* values[1] = {0}; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__next__ (wrapper)", 0); + #if !CYTHON_METH_FASTCALL + #if CYTHON_ASSUME_SAFE_MACROS + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; + #endif + #endif + __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); + { + PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,0}; + if (__pyx_kwds) { + Py_ssize_t kw_args; + switch (__pyx_nargs) { + case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); + switch (__pyx_nargs) { + case 0: + if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_self)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[0]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 286, __pyx_L3_error) + else goto __pyx_L5_argtuple_error; + } + if (unlikely(kw_args > 0)) { + const Py_ssize_t kwd_pos_args = __pyx_nargs; + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__next__") < 0)) __PYX_ERR(0, 286, __pyx_L3_error) + } + } else if (unlikely(__pyx_nargs != 1)) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + } + __pyx_v_self = values[0]; + } + goto __pyx_L6_skip; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__next__", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 286, __pyx_L3_error) + __pyx_L6_skip:; + goto __pyx_L4_argument_unpacking_done; + __pyx_L3_error:; + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + 
__Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_AddTraceback("fontTools.feaLib.lexer.NonIncludingLexer.__next__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_9fontTools_6feaLib_5lexer_17NonIncludingLexer___next__(__pyx_self, __pyx_v_self); + + /* function exit code */ + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_17NonIncludingLexer___next__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__next__", 1); + + /* "fontTools/feaLib/lexer.py":287 + * + * def __next__(self): # Python 3 + * return next(self.lexers_[0]) # <<<<<<<<<<<<<< + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_lexers); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 287, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 287, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_PyIter_Next(__pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 287, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "fontTools/feaLib/lexer.py":286 + * """Lexer that does not follow `include` statements, emits them as-is.""" + * + * def __next__(self): # Python 3 # <<<<<<<<<<<<<< + * return next(self.lexers_[0]) + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("fontTools.feaLib.lexer.NonIncludingLexer.__next__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyMethodDef __pyx_methods[] = { + {0, 0, 0, 0} +}; +#ifndef CYTHON_SMALL_CODE +#if defined(__clang__) + #define CYTHON_SMALL_CODE +#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) + #define CYTHON_SMALL_CODE __attribute__((cold)) +#else + #define CYTHON_SMALL_CODE +#endif +#endif +/* #### Code section: pystring_table ### */ + +static int __Pyx_CreateStringTabAndInitStrings(void) { + __Pyx_StringTabEntry __pyx_string_tab[] = { + {&__pyx_kp_u_, __pyx_k_, sizeof(__pyx_k_), 0, 1, 0, 0}, + {&__pyx_kp_u_0, __pyx_k_0, sizeof(__pyx_k_0), 0, 1, 0, 0}, + {&__pyx_kp_u_0123456789, __pyx_k_0123456789, sizeof(__pyx_k_0123456789), 0, 1, 0, 0}, + {&__pyx_kp_u_0123456789ABCDEFabcdef, __pyx_k_0123456789ABCDEFabcdef, sizeof(__pyx_k_0123456789ABCDEFabcdef), 0, 1, 0, 0}, + {&__pyx_n_u_ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef, __pyx_k_ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef, sizeof(__pyx_k_ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef), 0, 1, 0, 1}, + {&__pyx_n_s_ANONYMOUS_BLOCK, __pyx_k_ANONYMOUS_BLOCK, sizeof(__pyx_k_ANONYMOUS_BLOCK), 0, 0, 1, 1}, + {&__pyx_n_u_ANONYMOUS_BLOCK, __pyx_k_ANONYMOUS_BLOCK, sizeof(__pyx_k_ANONYMOUS_BLOCK), 0, 1, 0, 1}, + {&__pyx_kp_s_A_Lexer_that_follows_include_sta, 
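+/* The NonIncludingLexer.__next__ code generated above corresponds to roughly the
+ * following (lexer.py lines 286-287); only the docstring and the method body are
+ * quoted in the generated comments, so the surrounding class header is not shown:
+ *
+ *     # "Lexer that does not follow `include` statements, emits them as-is."
+ *     def __next__(self):  # Python 3
+ *         return next(self.lexers_[0])
+ */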
__pyx_k_A_Lexer_that_follows_include_sta, sizeof(__pyx_k_A_Lexer_that_follows_include_sta), 0, 0, 1, 0}, + {&__pyx_kp_u_A_Za_z_0_9, __pyx_k_A_Za_z_0_9, sizeof(__pyx_k_A_Za_z_0_9), 0, 1, 0, 0}, + {&__pyx_n_s_CHAR_DIGIT, __pyx_k_CHAR_DIGIT, sizeof(__pyx_k_CHAR_DIGIT), 0, 0, 1, 1}, + {&__pyx_n_s_CHAR_HEXDIGIT, __pyx_k_CHAR_HEXDIGIT, sizeof(__pyx_k_CHAR_HEXDIGIT), 0, 0, 1, 1}, + {&__pyx_n_s_CHAR_LETTER, __pyx_k_CHAR_LETTER, sizeof(__pyx_k_CHAR_LETTER), 0, 0, 1, 1}, + {&__pyx_n_s_CHAR_NAME_CONTINUATION, __pyx_k_CHAR_NAME_CONTINUATION, sizeof(__pyx_k_CHAR_NAME_CONTINUATION), 0, 0, 1, 1}, + {&__pyx_n_s_CHAR_NAME_START, __pyx_k_CHAR_NAME_START, sizeof(__pyx_k_CHAR_NAME_START), 0, 0, 1, 1}, + {&__pyx_n_s_CHAR_NEWLINE, __pyx_k_CHAR_NEWLINE, sizeof(__pyx_k_CHAR_NEWLINE), 0, 0, 1, 1}, + {&__pyx_n_s_CHAR_SYMBOL, __pyx_k_CHAR_SYMBOL, sizeof(__pyx_k_CHAR_SYMBOL), 0, 0, 1, 1}, + {&__pyx_n_s_CHAR_WHITESPACE, __pyx_k_CHAR_WHITESPACE, sizeof(__pyx_k_CHAR_WHITESPACE), 0, 0, 1, 1}, + {&__pyx_n_s_CID, __pyx_k_CID, sizeof(__pyx_k_CID), 0, 0, 1, 1}, + {&__pyx_n_u_CID, __pyx_k_CID, sizeof(__pyx_k_CID), 0, 1, 0, 1}, + {&__pyx_n_s_COMMENT, __pyx_k_COMMENT, sizeof(__pyx_k_COMMENT), 0, 0, 1, 1}, + {&__pyx_n_u_COMMENT, __pyx_k_COMMENT, sizeof(__pyx_k_COMMENT), 0, 1, 0, 1}, + {&__pyx_kp_u_Expected_after_file_name, __pyx_k_Expected_after_file_name, sizeof(__pyx_k_Expected_after_file_name), 0, 1, 0, 0}, + {&__pyx_kp_u_Expected_before_file_name, __pyx_k_Expected_before_file_name, sizeof(__pyx_k_Expected_before_file_name), 0, 1, 0, 0}, + {&__pyx_kp_u_Expected_file_name, __pyx_k_Expected_file_name, sizeof(__pyx_k_Expected_file_name), 0, 1, 0, 0}, + {&__pyx_kp_u_Expected_glyph_class_name, __pyx_k_Expected_glyph_class_name, sizeof(__pyx_k_Expected_glyph_class_name), 0, 1, 0, 0}, + {&__pyx_kp_u_Expected_s_to_terminate_anonymou, __pyx_k_Expected_s_to_terminate_anonymou, sizeof(__pyx_k_Expected_s_to_terminate_anonymou), 0, 1, 0, 0}, + {&__pyx_kp_u_Expected_to_terminate_string, __pyx_k_Expected_to_terminate_string, sizeof(__pyx_k_Expected_to_terminate_string), 0, 1, 0, 0}, + {&__pyx_n_s_FILENAME, __pyx_k_FILENAME, sizeof(__pyx_k_FILENAME), 0, 0, 1, 1}, + {&__pyx_n_u_FILENAME, __pyx_k_FILENAME, sizeof(__pyx_k_FILENAME), 0, 1, 0, 1}, + {&__pyx_n_s_FLOAT, __pyx_k_FLOAT, sizeof(__pyx_k_FLOAT), 0, 0, 1, 1}, + {&__pyx_n_u_FLOAT, __pyx_k_FLOAT, sizeof(__pyx_k_FLOAT), 0, 1, 0, 1}, + {&__pyx_n_s_FeatureLibError, __pyx_k_FeatureLibError, sizeof(__pyx_k_FeatureLibError), 0, 0, 1, 1}, + {&__pyx_n_s_FeatureLibLocation, __pyx_k_FeatureLibLocation, sizeof(__pyx_k_FeatureLibLocation), 0, 0, 1, 1}, + {&__pyx_n_s_FileNotFoundError, __pyx_k_FileNotFoundError, sizeof(__pyx_k_FileNotFoundError), 0, 0, 1, 1}, + {&__pyx_n_s_GLYPHCLASS, __pyx_k_GLYPHCLASS, sizeof(__pyx_k_GLYPHCLASS), 0, 0, 1, 1}, + {&__pyx_n_u_GLYPHCLASS, __pyx_k_GLYPHCLASS, sizeof(__pyx_k_GLYPHCLASS), 0, 1, 0, 1}, + {&__pyx_kp_u_Glyph_class_names_must_consist_o, __pyx_k_Glyph_class_names_must_consist_o, sizeof(__pyx_k_Glyph_class_names_must_consist_o), 0, 1, 0, 0}, + {&__pyx_n_s_HEXADECIMAL, __pyx_k_HEXADECIMAL, sizeof(__pyx_k_HEXADECIMAL), 0, 0, 1, 1}, + {&__pyx_n_u_HEXADECIMAL, __pyx_k_HEXADECIMAL, sizeof(__pyx_k_HEXADECIMAL), 0, 1, 0, 1}, + {&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1}, + {&__pyx_n_s_IncludedFeaNotFound, __pyx_k_IncludedFeaNotFound, sizeof(__pyx_k_IncludedFeaNotFound), 0, 0, 1, 1}, + {&__pyx_n_s_IncludingLexer, __pyx_k_IncludingLexer, sizeof(__pyx_k_IncludingLexer), 0, 0, 1, 1}, + {&__pyx_n_s_IncludingLexer___init, 
__pyx_k_IncludingLexer___init, sizeof(__pyx_k_IncludingLexer___init), 0, 0, 1, 1}, + {&__pyx_n_s_IncludingLexer___iter, __pyx_k_IncludingLexer___iter, sizeof(__pyx_k_IncludingLexer___iter), 0, 0, 1, 1}, + {&__pyx_n_s_IncludingLexer___next, __pyx_k_IncludingLexer___next, sizeof(__pyx_k_IncludingLexer___next), 0, 0, 1, 1}, + {&__pyx_n_s_IncludingLexer_make_lexer, __pyx_k_IncludingLexer_make_lexer, sizeof(__pyx_k_IncludingLexer_make_lexer), 0, 0, 1, 1}, + {&__pyx_n_s_IncludingLexer_next, __pyx_k_IncludingLexer_next, sizeof(__pyx_k_IncludingLexer_next), 0, 0, 1, 1}, + {&__pyx_n_s_IncludingLexer_scan_anonymous_bl, __pyx_k_IncludingLexer_scan_anonymous_bl, sizeof(__pyx_k_IncludingLexer_scan_anonymous_bl), 0, 0, 1, 1}, + {&__pyx_n_s_Lexer, __pyx_k_Lexer, sizeof(__pyx_k_Lexer), 0, 0, 1, 1}, + {&__pyx_n_s_Lexer___init, __pyx_k_Lexer___init, sizeof(__pyx_k_Lexer___init), 0, 0, 1, 1}, + {&__pyx_n_s_Lexer___iter, __pyx_k_Lexer___iter, sizeof(__pyx_k_Lexer___iter), 0, 0, 1, 1}, + {&__pyx_n_s_Lexer___next, __pyx_k_Lexer___next, sizeof(__pyx_k_Lexer___next), 0, 0, 1, 1}, + {&__pyx_n_s_Lexer_location, __pyx_k_Lexer_location, sizeof(__pyx_k_Lexer_location), 0, 0, 1, 1}, + {&__pyx_n_s_Lexer_next, __pyx_k_Lexer_next, sizeof(__pyx_k_Lexer_next), 0, 0, 1, 1}, + {&__pyx_n_s_Lexer_next_2, __pyx_k_Lexer_next_2, sizeof(__pyx_k_Lexer_next_2), 0, 0, 1, 1}, + {&__pyx_n_s_Lexer_scan_anonymous_block, __pyx_k_Lexer_scan_anonymous_block, sizeof(__pyx_k_Lexer_scan_anonymous_block), 0, 0, 1, 1}, + {&__pyx_n_s_Lexer_scan_over, __pyx_k_Lexer_scan_over, sizeof(__pyx_k_Lexer_scan_over), 0, 0, 1, 1}, + {&__pyx_n_s_Lexer_scan_until, __pyx_k_Lexer_scan_until, sizeof(__pyx_k_Lexer_scan_until), 0, 0, 1, 1}, + {&__pyx_kp_s_Lexer_that_does_not_follow_inclu, __pyx_k_Lexer_that_does_not_follow_inclu, sizeof(__pyx_k_Lexer_that_does_not_follow_inclu), 0, 0, 1, 0}, + {&__pyx_kp_s_Lib_fontTools_feaLib_lexer_py, __pyx_k_Lib_fontTools_feaLib_lexer_py, sizeof(__pyx_k_Lib_fontTools_feaLib_lexer_py), 0, 0, 1, 0}, + {&__pyx_n_s_MODE_FILENAME, __pyx_k_MODE_FILENAME, sizeof(__pyx_k_MODE_FILENAME), 0, 0, 1, 1}, + {&__pyx_n_s_MODE_NORMAL, __pyx_k_MODE_NORMAL, sizeof(__pyx_k_MODE_NORMAL), 0, 0, 1, 1}, + {&__pyx_n_s_NAME, __pyx_k_NAME, sizeof(__pyx_k_NAME), 0, 0, 1, 1}, + {&__pyx_n_u_NAME, __pyx_k_NAME, sizeof(__pyx_k_NAME), 0, 1, 0, 1}, + {&__pyx_n_s_NEWLINE, __pyx_k_NEWLINE, sizeof(__pyx_k_NEWLINE), 0, 0, 1, 1}, + {&__pyx_n_u_NEWLINE, __pyx_k_NEWLINE, sizeof(__pyx_k_NEWLINE), 0, 1, 0, 1}, + {&__pyx_n_u_NORMAL, __pyx_k_NORMAL, sizeof(__pyx_k_NORMAL), 0, 1, 0, 1}, + {&__pyx_n_s_NUMBER, __pyx_k_NUMBER, sizeof(__pyx_k_NUMBER), 0, 0, 1, 1}, + {&__pyx_n_u_NUMBER, __pyx_k_NUMBER, sizeof(__pyx_k_NUMBER), 0, 1, 0, 1}, + {&__pyx_n_s_NUMBERS, __pyx_k_NUMBERS, sizeof(__pyx_k_NUMBERS), 0, 0, 1, 1}, + {&__pyx_n_s_NonIncludingLexer, __pyx_k_NonIncludingLexer, sizeof(__pyx_k_NonIncludingLexer), 0, 0, 1, 1}, + {&__pyx_n_s_NonIncludingLexer___next, __pyx_k_NonIncludingLexer___next, sizeof(__pyx_k_NonIncludingLexer___next), 0, 0, 1, 1}, + {&__pyx_n_s_OCTAL, __pyx_k_OCTAL, sizeof(__pyx_k_OCTAL), 0, 0, 1, 1}, + {&__pyx_n_u_OCTAL, __pyx_k_OCTAL, sizeof(__pyx_k_OCTAL), 0, 1, 0, 1}, + {&__pyx_n_s_RE_GLYPHCLASS, __pyx_k_RE_GLYPHCLASS, sizeof(__pyx_k_RE_GLYPHCLASS), 0, 0, 1, 1}, + {&__pyx_n_s_STRING, __pyx_k_STRING, sizeof(__pyx_k_STRING), 0, 0, 1, 1}, + {&__pyx_n_u_STRING, __pyx_k_STRING, sizeof(__pyx_k_STRING), 0, 1, 0, 1}, + {&__pyx_n_s_SYMBOL, __pyx_k_SYMBOL, sizeof(__pyx_k_SYMBOL), 0, 0, 1, 1}, + {&__pyx_n_u_SYMBOL, __pyx_k_SYMBOL, sizeof(__pyx_k_SYMBOL), 0, 1, 0, 1}, + 
{&__pyx_n_s_StopIteration, __pyx_k_StopIteration, sizeof(__pyx_k_StopIteration), 0, 0, 1, 1}, + {&__pyx_kp_u_Too_many_recursive_includes, __pyx_k_Too_many_recursive_includes, sizeof(__pyx_k_Too_many_recursive_includes), 0, 1, 0, 0}, + {&__pyx_kp_u_Unexpected_character_r, __pyx_k_Unexpected_character_r, sizeof(__pyx_k_Unexpected_character_r), 0, 1, 0, 0}, + {&__pyx_kp_u__10, __pyx_k__10, sizeof(__pyx_k__10), 0, 1, 0, 0}, + {&__pyx_kp_u__11, __pyx_k__11, sizeof(__pyx_k__11), 0, 1, 0, 0}, + {&__pyx_kp_u__12, __pyx_k__12, sizeof(__pyx_k__12), 0, 1, 0, 0}, + {&__pyx_n_s__13, __pyx_k__13, sizeof(__pyx_k__13), 0, 0, 1, 1}, + {&__pyx_kp_u__16, __pyx_k__16, sizeof(__pyx_k__16), 0, 1, 0, 0}, + {&__pyx_kp_u__17, __pyx_k__17, sizeof(__pyx_k__17), 0, 1, 0, 0}, + {&__pyx_kp_u__18, __pyx_k__18, sizeof(__pyx_k__18), 0, 1, 0, 0}, + {&__pyx_kp_u__19, __pyx_k__19, sizeof(__pyx_k__19), 0, 1, 0, 0}, + {&__pyx_kp_u__2, __pyx_k__2, sizeof(__pyx_k__2), 0, 1, 0, 0}, + {&__pyx_kp_u__20, __pyx_k__20, sizeof(__pyx_k__20), 0, 1, 0, 0}, + {&__pyx_kp_u__3, __pyx_k__3, sizeof(__pyx_k__3), 0, 1, 0, 0}, + {&__pyx_kp_u__4, __pyx_k__4, sizeof(__pyx_k__4), 0, 1, 0, 0}, + {&__pyx_kp_u__5, __pyx_k__5, sizeof(__pyx_k__5), 0, 1, 0, 0}, + {&__pyx_n_s__51, __pyx_k__51, sizeof(__pyx_k__51), 0, 0, 1, 1}, + {&__pyx_kp_u__6, __pyx_k__6, sizeof(__pyx_k__6), 0, 1, 0, 0}, + {&__pyx_kp_u__7, __pyx_k__7, sizeof(__pyx_k__7), 0, 1, 0, 0}, + {&__pyx_kp_u__8, __pyx_k__8, sizeof(__pyx_k__8), 0, 1, 0, 0}, + {&__pyx_kp_u__9, __pyx_k__9, sizeof(__pyx_k__9), 0, 1, 0, 0}, + {&__pyx_n_s_append, __pyx_k_append, sizeof(__pyx_k_append), 0, 0, 1, 1}, + {&__pyx_n_s_asyncio_coroutines, __pyx_k_asyncio_coroutines, sizeof(__pyx_k_asyncio_coroutines), 0, 0, 1, 1}, + {&__pyx_n_s_class_getitem, __pyx_k_class_getitem, sizeof(__pyx_k_class_getitem), 0, 0, 1, 1}, + {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, + {&__pyx_n_s_close, __pyx_k_close, sizeof(__pyx_k_close), 0, 0, 1, 1}, + {&__pyx_n_s_closing, __pyx_k_closing, sizeof(__pyx_k_closing), 0, 0, 1, 1}, + {&__pyx_n_s_column, __pyx_k_column, sizeof(__pyx_k_column), 0, 0, 1, 1}, + {&__pyx_n_s_compile, __pyx_k_compile, sizeof(__pyx_k_compile), 0, 0, 1, 1}, + {&__pyx_n_s_cur_char, __pyx_k_cur_char, sizeof(__pyx_k_cur_char), 0, 0, 1, 1}, + {&__pyx_n_s_curpath, __pyx_k_curpath, sizeof(__pyx_k_curpath), 0, 0, 1, 1}, + {&__pyx_n_s_data, __pyx_k_data, sizeof(__pyx_k_data), 0, 0, 1, 1}, + {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1}, + {&__pyx_n_s_dirname, __pyx_k_dirname, sizeof(__pyx_k_dirname), 0, 0, 1, 1}, + {&__pyx_n_s_doc, __pyx_k_doc, sizeof(__pyx_k_doc), 0, 0, 1, 1}, + {&__pyx_n_s_encoding, __pyx_k_encoding, sizeof(__pyx_k_encoding), 0, 0, 1, 1}, + {&__pyx_n_s_err, __pyx_k_err, sizeof(__pyx_k_err), 0, 0, 1, 1}, + {&__pyx_n_s_featurefile, __pyx_k_featurefile, sizeof(__pyx_k_featurefile), 0, 0, 1, 1}, + {&__pyx_n_s_featurefilepath, __pyx_k_featurefilepath, sizeof(__pyx_k_featurefilepath), 0, 0, 1, 1}, + {&__pyx_kp_u_features, __pyx_k_features, sizeof(__pyx_k_features), 0, 1, 0, 0}, + {&__pyx_n_s_file_or_path, __pyx_k_file_or_path, sizeof(__pyx_k_file_or_path), 0, 0, 1, 1}, + {&__pyx_n_s_filename, __pyx_k_filename, sizeof(__pyx_k_filename), 0, 0, 1, 1}, + {&__pyx_n_s_filename_2, __pyx_k_filename_2, sizeof(__pyx_k_filename_2), 0, 0, 1, 1}, + {&__pyx_n_s_fileobj, __pyx_k_fileobj, sizeof(__pyx_k_fileobj), 0, 0, 1, 1}, + {&__pyx_n_s_fname_location, __pyx_k_fname_location, sizeof(__pyx_k_fname_location), 0, 0, 1, 1}, + 
{&__pyx_n_s_fname_token, __pyx_k_fname_token, sizeof(__pyx_k_fname_token), 0, 0, 1, 1}, + {&__pyx_n_s_fname_type, __pyx_k_fname_type, sizeof(__pyx_k_fname_type), 0, 0, 1, 1}, + {&__pyx_n_s_fontTools_feaLib_error, __pyx_k_fontTools_feaLib_error, sizeof(__pyx_k_fontTools_feaLib_error), 0, 0, 1, 1}, + {&__pyx_n_s_fontTools_feaLib_lexer, __pyx_k_fontTools_feaLib_lexer, sizeof(__pyx_k_fontTools_feaLib_lexer), 0, 0, 1, 1}, + {&__pyx_n_s_fontTools_feaLib_location, __pyx_k_fontTools_feaLib_location, sizeof(__pyx_k_fontTools_feaLib_location), 0, 0, 1, 1}, + {&__pyx_n_s_getcwd, __pyx_k_getcwd, sizeof(__pyx_k_getcwd), 0, 0, 1, 1}, + {&__pyx_n_s_glyphclass, __pyx_k_glyphclass, sizeof(__pyx_k_glyphclass), 0, 0, 1, 1}, + {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, + {&__pyx_n_u_include, __pyx_k_include, sizeof(__pyx_k_include), 0, 1, 0, 1}, + {&__pyx_n_s_includeDir, __pyx_k_includeDir, sizeof(__pyx_k_includeDir), 0, 0, 1, 1}, + {&__pyx_n_s_init, __pyx_k_init, sizeof(__pyx_k_init), 0, 0, 1, 1}, + {&__pyx_n_s_init_subclass, __pyx_k_init_subclass, sizeof(__pyx_k_init_subclass), 0, 0, 1, 1}, + {&__pyx_n_s_initializing, __pyx_k_initializing, sizeof(__pyx_k_initializing), 0, 0, 1, 1}, + {&__pyx_n_s_is_coroutine, __pyx_k_is_coroutine, sizeof(__pyx_k_is_coroutine), 0, 0, 1, 1}, + {&__pyx_n_s_isabs, __pyx_k_isabs, sizeof(__pyx_k_isabs), 0, 0, 1, 1}, + {&__pyx_n_s_iter, __pyx_k_iter, sizeof(__pyx_k_iter), 0, 0, 1, 1}, + {&__pyx_n_s_join, __pyx_k_join, sizeof(__pyx_k_join), 0, 0, 1, 1}, + {&__pyx_n_s_lexer, __pyx_k_lexer, sizeof(__pyx_k_lexer), 0, 0, 1, 1}, + {&__pyx_n_s_lexers, __pyx_k_lexers, sizeof(__pyx_k_lexers), 0, 0, 1, 1}, + {&__pyx_n_s_limit, __pyx_k_limit, sizeof(__pyx_k_limit), 0, 0, 1, 1}, + {&__pyx_n_s_line, __pyx_k_line, sizeof(__pyx_k_line), 0, 0, 1, 1}, + {&__pyx_n_s_line_start, __pyx_k_line_start, sizeof(__pyx_k_line_start), 0, 0, 1, 1}, + {&__pyx_n_s_location, __pyx_k_location, sizeof(__pyx_k_location), 0, 0, 1, 1}, + {&__pyx_n_s_location_2, __pyx_k_location_2, sizeof(__pyx_k_location_2), 0, 0, 1, 1}, + {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, + {&__pyx_n_s_make_lexer, __pyx_k_make_lexer, sizeof(__pyx_k_make_lexer), 0, 0, 1, 1}, + {&__pyx_n_s_match, __pyx_k_match, sizeof(__pyx_k_match), 0, 0, 1, 1}, + {&__pyx_n_s_maxsplit, __pyx_k_maxsplit, sizeof(__pyx_k_maxsplit), 0, 0, 1, 1}, + {&__pyx_n_s_metaclass, __pyx_k_metaclass, sizeof(__pyx_k_metaclass), 0, 0, 1, 1}, + {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, + {&__pyx_n_s_module, __pyx_k_module, sizeof(__pyx_k_module), 0, 0, 1, 1}, + {&__pyx_n_s_mro_entries, __pyx_k_mro_entries, sizeof(__pyx_k_mro_entries), 0, 0, 1, 1}, + {&__pyx_n_u_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 1, 0, 1}, + {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, + {&__pyx_n_s_next, __pyx_k_next, sizeof(__pyx_k_next), 0, 0, 1, 1}, + {&__pyx_n_s_next_2, __pyx_k_next_2, sizeof(__pyx_k_next_2), 0, 0, 1, 1}, + {&__pyx_n_s_next_3, __pyx_k_next_3, sizeof(__pyx_k_next_3), 0, 0, 1, 1}, + {&__pyx_n_s_next_char, __pyx_k_next_char, sizeof(__pyx_k_next_char), 0, 0, 1, 1}, + {&__pyx_n_s_object, __pyx_k_object, sizeof(__pyx_k_object), 0, 0, 1, 1}, + {&__pyx_n_s_open, __pyx_k_open, sizeof(__pyx_k_open), 0, 0, 1, 1}, + {&__pyx_n_s_os, __pyx_k_os, sizeof(__pyx_k_os), 0, 0, 1, 1}, + {&__pyx_n_s_p, __pyx_k_p, sizeof(__pyx_k_p), 0, 0, 1, 1}, + {&__pyx_n_s_path, __pyx_k_path, sizeof(__pyx_k_path), 0, 0, 1, 1}, + {&__pyx_n_s_pop, __pyx_k_pop, sizeof(__pyx_k_pop), 0, 0, 1, 1}, + {&__pyx_n_s_pos, 
__pyx_k_pos, sizeof(__pyx_k_pos), 0, 0, 1, 1}, + {&__pyx_n_s_prepare, __pyx_k_prepare, sizeof(__pyx_k_prepare), 0, 0, 1, 1}, + {&__pyx_n_s_qualname, __pyx_k_qualname, sizeof(__pyx_k_qualname), 0, 0, 1, 1}, + {&__pyx_n_u_r, __pyx_k_r, sizeof(__pyx_k_r), 0, 1, 0, 1}, + {&__pyx_n_s_re, __pyx_k_re, sizeof(__pyx_k_re), 0, 0, 1, 1}, + {&__pyx_n_s_read, __pyx_k_read, sizeof(__pyx_k_read), 0, 0, 1, 1}, + {&__pyx_n_u_read, __pyx_k_read, sizeof(__pyx_k_read), 0, 1, 0, 1}, + {&__pyx_n_s_regexp, __pyx_k_regexp, sizeof(__pyx_k_regexp), 0, 0, 1, 1}, + {&__pyx_kp_u_s, __pyx_k_s, sizeof(__pyx_k_s), 0, 1, 0, 0}, + {&__pyx_kp_u_s_2, __pyx_k_s_2, sizeof(__pyx_k_s_2), 0, 1, 0, 0}, + {&__pyx_n_s_scan_anonymous_block, __pyx_k_scan_anonymous_block, sizeof(__pyx_k_scan_anonymous_block), 0, 0, 1, 1}, + {&__pyx_n_s_scan_over, __pyx_k_scan_over, sizeof(__pyx_k_scan_over), 0, 0, 1, 1}, + {&__pyx_n_s_scan_until, __pyx_k_scan_until, sizeof(__pyx_k_scan_until), 0, 0, 1, 1}, + {&__pyx_n_s_self, __pyx_k_self, sizeof(__pyx_k_self), 0, 0, 1, 1}, + {&__pyx_n_s_set_name, __pyx_k_set_name, sizeof(__pyx_k_set_name), 0, 0, 1, 1}, + {&__pyx_n_s_spec, __pyx_k_spec, sizeof(__pyx_k_spec), 0, 0, 1, 1}, + {&__pyx_n_s_split, __pyx_k_split, sizeof(__pyx_k_split), 0, 0, 1, 1}, + {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, + {&__pyx_n_s_staticmethod, __pyx_k_staticmethod, sizeof(__pyx_k_staticmethod), 0, 0, 1, 1}, + {&__pyx_n_s_stop_at, __pyx_k_stop_at, sizeof(__pyx_k_stop_at), 0, 0, 1, 1}, + {&__pyx_n_s_string, __pyx_k_string, sizeof(__pyx_k_string), 0, 0, 1, 1}, + {&__pyx_n_s_strip, __pyx_k_strip, sizeof(__pyx_k_strip), 0, 0, 1, 1}, + {&__pyx_n_s_sub, __pyx_k_sub, sizeof(__pyx_k_sub), 0, 0, 1, 1}, + {&__pyx_n_s_super, __pyx_k_super, sizeof(__pyx_k_super), 0, 0, 1, 1}, + {&__pyx_n_s_tag, __pyx_k_tag, sizeof(__pyx_k_tag), 0, 0, 1, 1}, + {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, + {&__pyx_n_s_text, __pyx_k_text, sizeof(__pyx_k_text), 0, 0, 1, 1}, + {&__pyx_n_s_text_2, __pyx_k_text_2, sizeof(__pyx_k_text_2), 0, 0, 1, 1}, + {&__pyx_n_s_text_length, __pyx_k_text_length, sizeof(__pyx_k_text_length), 0, 0, 1, 1}, + {&__pyx_n_s_token, __pyx_k_token, sizeof(__pyx_k_token), 0, 0, 1, 1}, + {&__pyx_n_s_token_type, __pyx_k_token_type, sizeof(__pyx_k_token_type), 0, 0, 1, 1}, + {&__pyx_kp_u_utf_8_sig, __pyx_k_utf_8_sig, sizeof(__pyx_k_utf_8_sig), 0, 1, 0, 0}, + {&__pyx_n_s_valid, __pyx_k_valid, sizeof(__pyx_k_valid), 0, 0, 1, 1}, + {&__pyx_n_u_xX, __pyx_k_xX, sizeof(__pyx_k_xX), 0, 1, 0, 1}, + {0, 0, 0, 0, 0, 0, 0} + }; + return __Pyx_InitStrings(__pyx_string_tab); +} +/* #### Code section: cached_builtins ### */ +static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { + __pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(0, 8, __pyx_L1_error) + __pyx_builtin_object = __Pyx_GetBuiltinName(__pyx_n_s_object); if (!__pyx_builtin_object) __PYX_ERR(0, 13, __pyx_L1_error) + __pyx_builtin_staticmethod = __Pyx_GetBuiltinName(__pyx_n_s_staticmethod); if (!__pyx_builtin_staticmethod) __PYX_ERR(0, 266, __pyx_L1_error) + __pyx_builtin_StopIteration = __Pyx_GetBuiltinName(__pyx_n_s_StopIteration); if (!__pyx_builtin_StopIteration) __PYX_ERR(0, 75, __pyx_L1_error) + __pyx_builtin_open = __Pyx_GetBuiltinName(__pyx_n_s_open); if (!__pyx_builtin_open) __PYX_ERR(0, 272, __pyx_L1_error) + return 0; + __pyx_L1_error:; + return -1; +} +/* #### Code section: cached_constants ### */ + +static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { + 
__Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); + + /* "fontTools/feaLib/lexer.py":13 + * + * + * class Lexer(object): # <<<<<<<<<<<<<< + * NUMBER = "NUMBER" + * HEXADECIMAL = "HEXADECIMAL" + */ + __pyx_tuple__14 = PyTuple_Pack(1, __pyx_builtin_object); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(0, 13, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__14); + __Pyx_GIVEREF(__pyx_tuple__14); + __pyx_tuple__15 = PyTuple_Pack(1, __pyx_builtin_object); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(0, 13, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__15); + __Pyx_GIVEREF(__pyx_tuple__15); + + /* "fontTools/feaLib/lexer.py":43 + * MODE_FILENAME_ = "FILENAME" + * + * def __init__(self, text, filename): # <<<<<<<<<<<<<< + * self.filename_ = filename + * self.line_ = 1 + */ + __pyx_tuple__21 = PyTuple_Pack(3, __pyx_n_s_self, __pyx_n_s_text, __pyx_n_s_filename); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(0, 43, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__21); + __Pyx_GIVEREF(__pyx_tuple__21); + __pyx_codeobj__22 = (PyObject*)__Pyx_PyCode_New(3, 0, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__21, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Lib_fontTools_feaLib_lexer_py, __pyx_n_s_init, 43, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__22)) __PYX_ERR(0, 43, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":52 + * self.mode_ = Lexer.MODE_NORMAL_ + * + * def __iter__(self): # <<<<<<<<<<<<<< + * return self + * + */ + __pyx_tuple__23 = PyTuple_Pack(1, __pyx_n_s_self); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(0, 52, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__23); + __Pyx_GIVEREF(__pyx_tuple__23); + __pyx_codeobj__24 = (PyObject*)__Pyx_PyCode_New(1, 0, 0, 1, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__23, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Lib_fontTools_feaLib_lexer_py, __pyx_n_s_iter, 52, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__24)) __PYX_ERR(0, 52, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":55 + * return self + * + * def next(self): # Python 2 # <<<<<<<<<<<<<< + * return self.__next__() + * + */ + __pyx_codeobj__25 = (PyObject*)__Pyx_PyCode_New(1, 0, 0, 1, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__23, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Lib_fontTools_feaLib_lexer_py, __pyx_n_s_next_3, 55, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__25)) __PYX_ERR(0, 55, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":58 + * return self.__next__() + * + * def __next__(self): # Python 3 # <<<<<<<<<<<<<< + * while True: + * token_type, token, location = self.next_() + */ + __pyx_tuple__26 = PyTuple_Pack(4, __pyx_n_s_self, __pyx_n_s_token_type, __pyx_n_s_token, __pyx_n_s_location_2); if (unlikely(!__pyx_tuple__26)) __PYX_ERR(0, 58, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__26); + __Pyx_GIVEREF(__pyx_tuple__26); + __pyx_codeobj__27 = (PyObject*)__Pyx_PyCode_New(1, 0, 0, 4, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__26, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Lib_fontTools_feaLib_lexer_py, __pyx_n_s_next, 58, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__27)) __PYX_ERR(0, 58, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":64 + * return (token_type, token, location) + * + * def location_(self): # <<<<<<<<<<<<<< + * column = self.pos_ - self.line_start_ + 1 + * return 
FeatureLibLocation(self.filename_ or "", self.line_, column) + */ + __pyx_tuple__28 = PyTuple_Pack(2, __pyx_n_s_self, __pyx_n_s_column); if (unlikely(!__pyx_tuple__28)) __PYX_ERR(0, 64, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__28); + __Pyx_GIVEREF(__pyx_tuple__28); + __pyx_codeobj__29 = (PyObject*)__Pyx_PyCode_New(1, 0, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__28, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Lib_fontTools_feaLib_lexer_py, __pyx_n_s_location, 64, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__29)) __PYX_ERR(0, 64, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":68 + * return FeatureLibLocation(self.filename_ or "", self.line_, column) + * + * def next_(self): # <<<<<<<<<<<<<< + * self.scan_over_(Lexer.CHAR_WHITESPACE_) + * location = self.location_() + */ + __pyx_tuple__30 = PyTuple_Pack(10, __pyx_n_s_self, __pyx_n_s_location_2, __pyx_n_s_start, __pyx_n_s_text, __pyx_n_s_limit, __pyx_n_s_cur_char, __pyx_n_s_next_char, __pyx_n_s_glyphclass, __pyx_n_s_token, __pyx_n_s_string); if (unlikely(!__pyx_tuple__30)) __PYX_ERR(0, 68, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__30); + __Pyx_GIVEREF(__pyx_tuple__30); + __pyx_codeobj__31 = (PyObject*)__Pyx_PyCode_New(1, 0, 0, 10, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__30, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Lib_fontTools_feaLib_lexer_py, __pyx_n_s_next_2, 68, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__31)) __PYX_ERR(0, 68, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":165 + * raise FeatureLibError("Unexpected character: %r" % cur_char, location) + * + * def scan_over_(self, valid): # <<<<<<<<<<<<<< + * p = self.pos_ + * while p < self.text_length_ and self.text_[p] in valid: + */ + __pyx_tuple__32 = PyTuple_Pack(3, __pyx_n_s_self, __pyx_n_s_valid, __pyx_n_s_p); if (unlikely(!__pyx_tuple__32)) __PYX_ERR(0, 165, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__32); + __Pyx_GIVEREF(__pyx_tuple__32); + __pyx_codeobj__33 = (PyObject*)__Pyx_PyCode_New(2, 0, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__32, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Lib_fontTools_feaLib_lexer_py, __pyx_n_s_scan_over, 165, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__33)) __PYX_ERR(0, 165, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":171 + * self.pos_ = p + * + * def scan_until_(self, stop_at): # <<<<<<<<<<<<<< + * p = self.pos_ + * while p < self.text_length_ and self.text_[p] not in stop_at: + */ + __pyx_tuple__34 = PyTuple_Pack(3, __pyx_n_s_self, __pyx_n_s_stop_at, __pyx_n_s_p); if (unlikely(!__pyx_tuple__34)) __PYX_ERR(0, 171, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__34); + __Pyx_GIVEREF(__pyx_tuple__34); + __pyx_codeobj__35 = (PyObject*)__Pyx_PyCode_New(2, 0, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__34, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Lib_fontTools_feaLib_lexer_py, __pyx_n_s_scan_until, 171, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__35)) __PYX_ERR(0, 171, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":177 + * self.pos_ = p + * + * def scan_anonymous_block(self, tag): # <<<<<<<<<<<<<< + * location = self.location_() + * tag = tag.strip() + */ + __pyx_tuple__36 = PyTuple_Pack(5, __pyx_n_s_self, __pyx_n_s_tag, __pyx_n_s_location_2, __pyx_n_s_regexp, __pyx_n_s_split); if (unlikely(!__pyx_tuple__36)) __PYX_ERR(0, 177, __pyx_L1_error) + 
__Pyx_GOTREF(__pyx_tuple__36); + __Pyx_GIVEREF(__pyx_tuple__36); + __pyx_codeobj__37 = (PyObject*)__Pyx_PyCode_New(2, 0, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__36, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Lib_fontTools_feaLib_lexer_py, __pyx_n_s_scan_anonymous_block, 177, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__37)) __PYX_ERR(0, 177, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":192 + * + * + * class IncludingLexer(object): # <<<<<<<<<<<<<< + * """A Lexer that follows include statements. + * + */ + __pyx_tuple__38 = PyTuple_Pack(1, __pyx_builtin_object); if (unlikely(!__pyx_tuple__38)) __PYX_ERR(0, 192, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__38); + __Pyx_GIVEREF(__pyx_tuple__38); + __pyx_tuple__39 = PyTuple_Pack(1, __pyx_builtin_object); if (unlikely(!__pyx_tuple__39)) __PYX_ERR(0, 192, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__39); + __Pyx_GIVEREF(__pyx_tuple__39); + + /* "fontTools/feaLib/lexer.py":207 + * """ + * + * def __init__(self, featurefile, *, includeDir=None): # <<<<<<<<<<<<<< + * """Initializes an IncludingLexer. + * + */ + __pyx_tuple__40 = PyTuple_Pack(3, __pyx_n_s_self, __pyx_n_s_featurefile, __pyx_n_s_includeDir); if (unlikely(!__pyx_tuple__40)) __PYX_ERR(0, 207, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__40); + __Pyx_GIVEREF(__pyx_tuple__40); + __pyx_codeobj__41 = (PyObject*)__Pyx_PyCode_New(2, 0, 1, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__40, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Lib_fontTools_feaLib_lexer_py, __pyx_n_s_init, 207, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__41)) __PYX_ERR(0, 207, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":221 + * self.includeDir = includeDir + * + * def __iter__(self): # <<<<<<<<<<<<<< + * return self + * + */ + __pyx_codeobj__42 = (PyObject*)__Pyx_PyCode_New(1, 0, 0, 1, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__23, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Lib_fontTools_feaLib_lexer_py, __pyx_n_s_iter, 221, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__42)) __PYX_ERR(0, 221, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":224 + * return self + * + * def next(self): # Python 2 # <<<<<<<<<<<<<< + * return self.__next__() + * + */ + __pyx_codeobj__43 = (PyObject*)__Pyx_PyCode_New(1, 0, 0, 1, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__23, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Lib_fontTools_feaLib_lexer_py, __pyx_n_s_next_3, 224, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__43)) __PYX_ERR(0, 224, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":227 + * return self.__next__() + * + * def __next__(self): # Python 3 # <<<<<<<<<<<<<< + * while self.lexers_: + * lexer = self.lexers_[-1] + */ + __pyx_tuple__44 = PyTuple_Pack(11, __pyx_n_s_self, __pyx_n_s_lexer, __pyx_n_s_token_type, __pyx_n_s_token, __pyx_n_s_location_2, __pyx_n_s_fname_type, __pyx_n_s_fname_token, __pyx_n_s_fname_location, __pyx_n_s_path, __pyx_n_s_curpath, __pyx_n_s_err); if (unlikely(!__pyx_tuple__44)) __PYX_ERR(0, 227, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__44); + __Pyx_GIVEREF(__pyx_tuple__44); + __pyx_codeobj__45 = (PyObject*)__Pyx_PyCode_New(1, 0, 0, 11, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__44, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Lib_fontTools_feaLib_lexer_py, 
__pyx_n_s_next, 227, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__45)) __PYX_ERR(0, 227, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":266 + * raise StopIteration() + * + * @staticmethod # <<<<<<<<<<<<<< + * def make_lexer_(file_or_path): + * if hasattr(file_or_path, "read"): + */ + __pyx_tuple__46 = PyTuple_Pack(5, __pyx_n_s_file_or_path, __pyx_n_s_fileobj, __pyx_n_s_closing, __pyx_n_s_filename, __pyx_n_s_data); if (unlikely(!__pyx_tuple__46)) __PYX_ERR(0, 266, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__46); + __Pyx_GIVEREF(__pyx_tuple__46); + __pyx_codeobj__47 = (PyObject*)__Pyx_PyCode_New(1, 0, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__46, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Lib_fontTools_feaLib_lexer_py, __pyx_n_s_make_lexer, 266, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__47)) __PYX_ERR(0, 266, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":279 + * return Lexer(data, filename) + * + * def scan_anonymous_block(self, tag): # <<<<<<<<<<<<<< + * return self.lexers_[-1].scan_anonymous_block(tag) + * + */ + __pyx_tuple__48 = PyTuple_Pack(2, __pyx_n_s_self, __pyx_n_s_tag); if (unlikely(!__pyx_tuple__48)) __PYX_ERR(0, 279, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__48); + __Pyx_GIVEREF(__pyx_tuple__48); + __pyx_codeobj__49 = (PyObject*)__Pyx_PyCode_New(2, 0, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__48, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Lib_fontTools_feaLib_lexer_py, __pyx_n_s_scan_anonymous_block, 279, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__49)) __PYX_ERR(0, 279, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":286 + * """Lexer that does not follow `include` statements, emits them as-is.""" + * + * def __next__(self): # Python 3 # <<<<<<<<<<<<<< + * return next(self.lexers_[0]) + */ + __pyx_codeobj__50 = (PyObject*)__Pyx_PyCode_New(1, 0, 0, 1, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__23, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Lib_fontTools_feaLib_lexer_py, __pyx_n_s_next, 286, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__50)) __PYX_ERR(0, 286, __pyx_L1_error) + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_RefNannyFinishContext(); + return -1; +} +/* #### Code section: init_constants ### */ + +static CYTHON_SMALL_CODE int __Pyx_InitConstants(void) { + __pyx_umethod_PyList_Type_pop.type = (PyObject*)&PyList_Type; + __pyx_umethod_PyList_Type_pop.method_name = &__pyx_n_s_pop; + if (__Pyx_CreateStringTabAndInitStrings() < 0) __PYX_ERR(0, 1, __pyx_L1_error); + __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_int_2 = PyInt_FromLong(2); if (unlikely(!__pyx_int_2)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_int_8 = PyInt_FromLong(8); if (unlikely(!__pyx_int_8)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_int_10 = PyInt_FromLong(10); if (unlikely(!__pyx_int_10)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_int_16 = PyInt_FromLong(16); if (unlikely(!__pyx_int_16)) __PYX_ERR(0, 1, __pyx_L1_error) + return 0; + __pyx_L1_error:; + return -1; +} +/* #### Code section: init_globals ### */ + +static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { + return 0; +} +/* #### Code section: init_module ### */ + +static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ +static 
CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ + +static int __Pyx_modinit_global_init_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); + /*--- Global init code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_variable_export_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); + /*--- Variable export code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_function_export_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); + /*--- Function export code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_type_init_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); + /*--- Type init code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_type_import_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); + /*--- Type import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_variable_import_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); + /*--- Variable import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_function_import_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); + /*--- Function import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + + +#if PY_MAJOR_VERSION >= 3 +#if CYTHON_PEP489_MULTI_PHASE_INIT +static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ +static int __pyx_pymod_exec_lexer(PyObject* module); /*proto*/ +static PyModuleDef_Slot __pyx_moduledef_slots[] = { + {Py_mod_create, (void*)__pyx_pymod_create}, + {Py_mod_exec, (void*)__pyx_pymod_exec_lexer}, + {0, NULL} +}; +#endif + +#ifdef __cplusplus +namespace { + struct PyModuleDef __pyx_moduledef = + #else + static struct PyModuleDef __pyx_moduledef = + #endif + { + PyModuleDef_HEAD_INIT, + "lexer", + 0, /* m_doc */ + #if CYTHON_PEP489_MULTI_PHASE_INIT + 0, /* m_size */ + #elif CYTHON_USE_MODULE_STATE + sizeof(__pyx_mstate), /* m_size */ + #else + -1, /* m_size */ + #endif + __pyx_methods /* m_methods */, + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_moduledef_slots, /* m_slots */ + #else + NULL, /* m_reload */ + #endif + #if CYTHON_USE_MODULE_STATE + __pyx_m_traverse, /* m_traverse */ + __pyx_m_clear, /* m_clear */ + NULL /* m_free */ + #else + NULL, /* m_traverse */ + NULL, /* m_clear */ + NULL /* m_free */ + #endif + }; + #ifdef __cplusplus +} /* anonymous namespace */ +#endif +#endif + +#ifndef CYTHON_NO_PYINIT_EXPORT +#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC +#elif PY_MAJOR_VERSION < 3 +#ifdef __cplusplus +#define __Pyx_PyMODINIT_FUNC extern "C" void +#else +#define __Pyx_PyMODINIT_FUNC void +#endif +#else +#ifdef __cplusplus +#define __Pyx_PyMODINIT_FUNC 
extern "C" PyObject * +#else +#define __Pyx_PyMODINIT_FUNC PyObject * +#endif +#endif + + +#if PY_MAJOR_VERSION < 3 +__Pyx_PyMODINIT_FUNC initlexer(void) CYTHON_SMALL_CODE; /*proto*/ +__Pyx_PyMODINIT_FUNC initlexer(void) +#else +__Pyx_PyMODINIT_FUNC PyInit_lexer(void) CYTHON_SMALL_CODE; /*proto*/ +__Pyx_PyMODINIT_FUNC PyInit_lexer(void) +#if CYTHON_PEP489_MULTI_PHASE_INIT +{ + return PyModuleDef_Init(&__pyx_moduledef); +} +static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { + #if PY_VERSION_HEX >= 0x030700A1 + static PY_INT64_T main_interpreter_id = -1; + PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); + if (main_interpreter_id == -1) { + main_interpreter_id = current_id; + return (unlikely(current_id == -1)) ? -1 : 0; + } else if (unlikely(main_interpreter_id != current_id)) + #else + static PyInterpreterState *main_interpreter = NULL; + PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; + if (!main_interpreter) { + main_interpreter = current_interpreter; + } else if (unlikely(main_interpreter != current_interpreter)) + #endif + { + PyErr_SetString( + PyExc_ImportError, + "Interpreter change detected - this module can only be loaded into one interpreter per process."); + return -1; + } + return 0; +} +#if CYTHON_COMPILING_IN_LIMITED_API +static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *module, const char* from_name, const char* to_name, int allow_none) +#else +static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) +#endif +{ + PyObject *value = PyObject_GetAttrString(spec, from_name); + int result = 0; + if (likely(value)) { + if (allow_none || value != Py_None) { +#if CYTHON_COMPILING_IN_LIMITED_API + result = PyModule_AddObject(module, to_name, value); +#else + result = PyDict_SetItemString(moddict, to_name, value); +#endif + } + Py_DECREF(value); + } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { + PyErr_Clear(); + } else { + result = -1; + } + return result; +} +static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def) { + PyObject *module = NULL, *moddict, *modname; + CYTHON_UNUSED_VAR(def); + if (__Pyx_check_single_interpreter()) + return NULL; + if (__pyx_m) + return __Pyx_NewRef(__pyx_m); + modname = PyObject_GetAttrString(spec, "name"); + if (unlikely(!modname)) goto bad; + module = PyModule_NewObject(modname); + Py_DECREF(modname); + if (unlikely(!module)) goto bad; +#if CYTHON_COMPILING_IN_LIMITED_API + moddict = module; +#else + moddict = PyModule_GetDict(module); + if (unlikely(!moddict)) goto bad; +#endif + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; + return module; +bad: + Py_XDECREF(module); + return NULL; +} + + +static CYTHON_SMALL_CODE int __pyx_pymod_exec_lexer(PyObject *__pyx_pyinit_module) +#endif +#endif +{ + int stringtab_initialized = 0; + #if CYTHON_USE_MODULE_STATE + int pystate_addmodule_run = 0; + #endif + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject 
*__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + PyObject *__pyx_t_9 = NULL; + PyObject *__pyx_t_10 = NULL; + unsigned int __pyx_t_11; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannyDeclarations + #if CYTHON_PEP489_MULTI_PHASE_INIT + if (__pyx_m) { + if (__pyx_m == __pyx_pyinit_module) return 0; + PyErr_SetString(PyExc_RuntimeError, "Module 'lexer' has already been imported. Re-initialisation is not supported."); + return -1; + } + #elif PY_MAJOR_VERSION >= 3 + if (__pyx_m) return __Pyx_NewRef(__pyx_m); + #endif + /*--- Module creation code ---*/ + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_m = __pyx_pyinit_module; + Py_INCREF(__pyx_m); + #else + #if PY_MAJOR_VERSION < 3 + __pyx_m = Py_InitModule4("lexer", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); + if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) + #elif CYTHON_USE_MODULE_STATE + __pyx_t_1 = PyModule_Create(&__pyx_moduledef); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) + { + int add_module_result = PyState_AddModule(__pyx_t_1, &__pyx_moduledef); + __pyx_t_1 = 0; /* transfer ownership from __pyx_t_1 to "lexer" pseudovariable */ + if (unlikely((add_module_result < 0))) __PYX_ERR(0, 1, __pyx_L1_error) + pystate_addmodule_run = 1; + } + #else + __pyx_m = PyModule_Create(&__pyx_moduledef); + if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #endif + CYTHON_UNUSED_VAR(__pyx_t_1); + __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_d); + __pyx_b = __Pyx_PyImport_AddModuleRef(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_cython_runtime = __Pyx_PyImport_AddModuleRef((const char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) + if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #if CYTHON_REFNANNY +__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); +if (!__Pyx_RefNanny) { + PyErr_Clear(); + __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); + if (!__Pyx_RefNanny) + Py_FatalError("failed to import 'refnanny' module"); +} +#endif + __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_lexer(void)", 0); + if (__Pyx_check_binary_version(__PYX_LIMITED_VERSION_HEX, __Pyx_get_runtime_version(), CYTHON_COMPILING_IN_LIMITED_API) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #ifdef __Pxy_PyFrame_Initialize_Offsets + __Pxy_PyFrame_Initialize_Offsets(); + #endif + __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) + #ifdef __Pyx_CyFunction_USED + if (__pyx_CyFunction_init(__pyx_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_FusedFunction_USED + if (__pyx_FusedFunction_init(__pyx_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Coroutine_USED + if (__pyx_Coroutine_init(__pyx_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Generator_USED + if (__pyx_Generator_init(__pyx_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_AsyncGen_USED + if (__pyx_AsyncGen_init(__pyx_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_StopAsyncIteration_USED + 
if (__pyx_StopAsyncIteration_init(__pyx_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + /*--- Library function declarations ---*/ + /*--- Threads initialization code ---*/ + #if defined(WITH_THREAD) && PY_VERSION_HEX < 0x030700F0 && defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS + PyEval_InitThreads(); + #endif + /*--- Initialize various global constants etc. ---*/ + if (__Pyx_InitConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + stringtab_initialized = 1; + if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) + if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + if (__pyx_module_is_main_fontTools__feaLib__lexer) { + if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + } + #if PY_MAJOR_VERSION >= 3 + { + PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) + if (!PyDict_GetItemString(modules, "fontTools.feaLib.lexer")) { + if (unlikely((PyDict_SetItemString(modules, "fontTools.feaLib.lexer", __pyx_m) < 0))) __PYX_ERR(0, 1, __pyx_L1_error) + } + } + #endif + /*--- Builtin init code ---*/ + if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + /*--- Constants init code ---*/ + if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + /*--- Global type/function init code ---*/ + (void)__Pyx_modinit_global_init_code(); + (void)__Pyx_modinit_variable_export_code(); + (void)__Pyx_modinit_function_export_code(); + (void)__Pyx_modinit_type_init_code(); + (void)__Pyx_modinit_type_import_code(); + (void)__Pyx_modinit_variable_import_code(); + (void)__Pyx_modinit_function_import_code(); + /*--- Execution code ---*/ + #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) + if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + + /* "fontTools/feaLib/lexer.py":1 + * from fontTools.feaLib.error import FeatureLibError, IncludedFeaNotFound # <<<<<<<<<<<<<< + * from fontTools.feaLib.location import FeatureLibLocation + * import re + */ + __pyx_t_2 = PyList_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_INCREF(__pyx_n_s_FeatureLibError); + __Pyx_GIVEREF(__pyx_n_s_FeatureLibError); + if (__Pyx_PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_FeatureLibError)) __PYX_ERR(0, 1, __pyx_L1_error); + __Pyx_INCREF(__pyx_n_s_IncludedFeaNotFound); + __Pyx_GIVEREF(__pyx_n_s_IncludedFeaNotFound); + if (__Pyx_PyList_SET_ITEM(__pyx_t_2, 1, __pyx_n_s_IncludedFeaNotFound)) __PYX_ERR(0, 1, __pyx_L1_error); + __pyx_t_3 = __Pyx_Import(__pyx_n_s_fontTools_feaLib_error, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_FeatureLibError); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_FeatureLibError, __pyx_t_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_IncludedFeaNotFound); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_IncludedFeaNotFound, __pyx_t_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; 
+ + /* "fontTools/feaLib/lexer.py":2 + * from fontTools.feaLib.error import FeatureLibError, IncludedFeaNotFound + * from fontTools.feaLib.location import FeatureLibLocation # <<<<<<<<<<<<<< + * import re + * import os + */ + __pyx_t_3 = PyList_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_INCREF(__pyx_n_s_FeatureLibLocation); + __Pyx_GIVEREF(__pyx_n_s_FeatureLibLocation); + if (__Pyx_PyList_SET_ITEM(__pyx_t_3, 0, __pyx_n_s_FeatureLibLocation)) __PYX_ERR(0, 2, __pyx_L1_error); + __pyx_t_2 = __Pyx_Import(__pyx_n_s_fontTools_feaLib_location, __pyx_t_3, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_FeatureLibLocation); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_FeatureLibLocation, __pyx_t_3) < 0) __PYX_ERR(0, 2, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "fontTools/feaLib/lexer.py":3 + * from fontTools.feaLib.error import FeatureLibError, IncludedFeaNotFound + * from fontTools.feaLib.location import FeatureLibLocation + * import re # <<<<<<<<<<<<<< + * import os + * + */ + __pyx_t_2 = __Pyx_ImportDottedModule(__pyx_n_s_re, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_re, __pyx_t_2) < 0) __PYX_ERR(0, 3, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "fontTools/feaLib/lexer.py":4 + * from fontTools.feaLib.location import FeatureLibLocation + * import re + * import os # <<<<<<<<<<<<<< + * + * try: + */ + __pyx_t_2 = __Pyx_ImportDottedModule(__pyx_n_s_os, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_os, __pyx_t_2) < 0) __PYX_ERR(0, 4, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "fontTools/feaLib/lexer.py":6 + * import os + * + * try: # <<<<<<<<<<<<<< + * import cython + * except ImportError: + */ + { + (void)__pyx_t_1; (void)__pyx_t_4; (void)__pyx_t_5; /* mark used */ + /*try:*/ { + + /* "fontTools/feaLib/lexer.py":7 + * + * try: + * import cython # <<<<<<<<<<<<<< + * except ImportError: + * # if cython not installed, use mock module with no-op decorators and types + */ + } + } + + /* "fontTools/feaLib/lexer.py":13 + * + * + * class Lexer(object): # <<<<<<<<<<<<<< + * NUMBER = "NUMBER" + * HEXADECIMAL = "HEXADECIMAL" + */ + __pyx_t_2 = __Pyx_PEP560_update_bases(__pyx_tuple__15); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __Pyx_CalculateMetaclass(NULL, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_6 = __Pyx_Py3MetaclassPrepare(__pyx_t_3, __pyx_t_2, __pyx_n_s_Lexer, __pyx_n_s_Lexer, (PyObject *) NULL, __pyx_n_s_fontTools_feaLib_lexer, (PyObject *) NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + if (__pyx_t_2 != __pyx_tuple__15) { + if (unlikely((PyDict_SetItemString(__pyx_t_6, "__orig_bases__", __pyx_tuple__15) < 0))) __PYX_ERR(0, 13, __pyx_L1_error) + } + + /* "fontTools/feaLib/lexer.py":14 + * + * class Lexer(object): + * NUMBER = "NUMBER" # <<<<<<<<<<<<<< + * HEXADECIMAL = "HEXADECIMAL" + * OCTAL = "OCTAL" + */ + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_NUMBER, __pyx_n_u_NUMBER) < 0) 
__PYX_ERR(0, 14, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":15 + * class Lexer(object): + * NUMBER = "NUMBER" + * HEXADECIMAL = "HEXADECIMAL" # <<<<<<<<<<<<<< + * OCTAL = "OCTAL" + * NUMBERS = (NUMBER, HEXADECIMAL, OCTAL) + */ + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_HEXADECIMAL, __pyx_n_u_HEXADECIMAL) < 0) __PYX_ERR(0, 15, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":16 + * NUMBER = "NUMBER" + * HEXADECIMAL = "HEXADECIMAL" + * OCTAL = "OCTAL" # <<<<<<<<<<<<<< + * NUMBERS = (NUMBER, HEXADECIMAL, OCTAL) + * FLOAT = "FLOAT" + */ + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_OCTAL, __pyx_n_u_OCTAL) < 0) __PYX_ERR(0, 16, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":17 + * HEXADECIMAL = "HEXADECIMAL" + * OCTAL = "OCTAL" + * NUMBERS = (NUMBER, HEXADECIMAL, OCTAL) # <<<<<<<<<<<<<< + * FLOAT = "FLOAT" + * STRING = "STRING" + */ + __pyx_t_7 = PyObject_GetItem(__pyx_t_6, __pyx_n_s_NUMBER); + if (unlikely(!__pyx_t_7)) { + PyErr_Clear(); + __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_NUMBER); + } + if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 17, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __pyx_t_8 = PyObject_GetItem(__pyx_t_6, __pyx_n_s_HEXADECIMAL); + if (unlikely(!__pyx_t_8)) { + PyErr_Clear(); + __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_HEXADECIMAL); + } + if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 17, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __pyx_t_9 = PyObject_GetItem(__pyx_t_6, __pyx_n_s_OCTAL); + if (unlikely(!__pyx_t_9)) { + PyErr_Clear(); + __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_OCTAL); + } + if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_10 = PyTuple_New(3); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 17, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __Pyx_GIVEREF(__pyx_t_7); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_7)) __PYX_ERR(0, 17, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_8); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_8)) __PYX_ERR(0, 17, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_9); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_10, 2, __pyx_t_9)) __PYX_ERR(0, 17, __pyx_L1_error); + __pyx_t_7 = 0; + __pyx_t_8 = 0; + __pyx_t_9 = 0; + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_NUMBERS, __pyx_t_10) < 0) __PYX_ERR(0, 17, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + + /* "fontTools/feaLib/lexer.py":18 + * OCTAL = "OCTAL" + * NUMBERS = (NUMBER, HEXADECIMAL, OCTAL) + * FLOAT = "FLOAT" # <<<<<<<<<<<<<< + * STRING = "STRING" + * NAME = "NAME" + */ + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_FLOAT, __pyx_n_u_FLOAT) < 0) __PYX_ERR(0, 18, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":19 + * NUMBERS = (NUMBER, HEXADECIMAL, OCTAL) + * FLOAT = "FLOAT" + * STRING = "STRING" # <<<<<<<<<<<<<< + * NAME = "NAME" + * FILENAME = "FILENAME" + */ + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_STRING, __pyx_n_u_STRING) < 0) __PYX_ERR(0, 19, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":20 + * FLOAT = "FLOAT" + * STRING = "STRING" + * NAME = "NAME" # <<<<<<<<<<<<<< + * FILENAME = "FILENAME" + * GLYPHCLASS = "GLYPHCLASS" + */ + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_NAME, __pyx_n_u_NAME) < 0) __PYX_ERR(0, 20, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":21 + * STRING = "STRING" + * NAME = "NAME" + * FILENAME = "FILENAME" # <<<<<<<<<<<<<< + * GLYPHCLASS = "GLYPHCLASS" + * CID = "CID" + */ + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_FILENAME, __pyx_n_u_FILENAME) < 0) __PYX_ERR(0, 21, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":22 + * NAME = "NAME" + * FILENAME = 
"FILENAME" + * GLYPHCLASS = "GLYPHCLASS" # <<<<<<<<<<<<<< + * CID = "CID" + * SYMBOL = "SYMBOL" + */ + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_GLYPHCLASS, __pyx_n_u_GLYPHCLASS) < 0) __PYX_ERR(0, 22, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":23 + * FILENAME = "FILENAME" + * GLYPHCLASS = "GLYPHCLASS" + * CID = "CID" # <<<<<<<<<<<<<< + * SYMBOL = "SYMBOL" + * COMMENT = "COMMENT" + */ + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_CID, __pyx_n_u_CID) < 0) __PYX_ERR(0, 23, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":24 + * GLYPHCLASS = "GLYPHCLASS" + * CID = "CID" + * SYMBOL = "SYMBOL" # <<<<<<<<<<<<<< + * COMMENT = "COMMENT" + * NEWLINE = "NEWLINE" + */ + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_SYMBOL, __pyx_n_u_SYMBOL) < 0) __PYX_ERR(0, 24, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":25 + * CID = "CID" + * SYMBOL = "SYMBOL" + * COMMENT = "COMMENT" # <<<<<<<<<<<<<< + * NEWLINE = "NEWLINE" + * ANONYMOUS_BLOCK = "ANONYMOUS_BLOCK" + */ + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_COMMENT, __pyx_n_u_COMMENT) < 0) __PYX_ERR(0, 25, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":26 + * SYMBOL = "SYMBOL" + * COMMENT = "COMMENT" + * NEWLINE = "NEWLINE" # <<<<<<<<<<<<<< + * ANONYMOUS_BLOCK = "ANONYMOUS_BLOCK" + * + */ + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_NEWLINE, __pyx_n_u_NEWLINE) < 0) __PYX_ERR(0, 26, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":27 + * COMMENT = "COMMENT" + * NEWLINE = "NEWLINE" + * ANONYMOUS_BLOCK = "ANONYMOUS_BLOCK" # <<<<<<<<<<<<<< + * + * CHAR_WHITESPACE_ = " \t" + */ + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_ANONYMOUS_BLOCK, __pyx_n_u_ANONYMOUS_BLOCK) < 0) __PYX_ERR(0, 27, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":29 + * ANONYMOUS_BLOCK = "ANONYMOUS_BLOCK" + * + * CHAR_WHITESPACE_ = " \t" # <<<<<<<<<<<<<< + * CHAR_NEWLINE_ = "\r\n" + * CHAR_SYMBOL_ = ",;:-+'{}[]<>()=" + */ + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_CHAR_WHITESPACE, __pyx_kp_u__16) < 0) __PYX_ERR(0, 29, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":30 + * + * CHAR_WHITESPACE_ = " \t" + * CHAR_NEWLINE_ = "\r\n" # <<<<<<<<<<<<<< + * CHAR_SYMBOL_ = ",;:-+'{}[]<>()=" + * CHAR_DIGIT_ = "0123456789" + */ + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_CHAR_NEWLINE, __pyx_kp_u__17) < 0) __PYX_ERR(0, 30, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":31 + * CHAR_WHITESPACE_ = " \t" + * CHAR_NEWLINE_ = "\r\n" + * CHAR_SYMBOL_ = ",;:-+'{}[]<>()=" # <<<<<<<<<<<<<< + * CHAR_DIGIT_ = "0123456789" + * CHAR_HEXDIGIT_ = "0123456789ABCDEFabcdef" + */ + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_CHAR_SYMBOL, __pyx_kp_u__18) < 0) __PYX_ERR(0, 31, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":32 + * CHAR_NEWLINE_ = "\r\n" + * CHAR_SYMBOL_ = ",;:-+'{}[]<>()=" + * CHAR_DIGIT_ = "0123456789" # <<<<<<<<<<<<<< + * CHAR_HEXDIGIT_ = "0123456789ABCDEFabcdef" + * CHAR_LETTER_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + */ + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_CHAR_DIGIT, __pyx_kp_u_0123456789) < 0) __PYX_ERR(0, 32, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":33 + * CHAR_SYMBOL_ = ",;:-+'{}[]<>()=" + * CHAR_DIGIT_ = "0123456789" + * CHAR_HEXDIGIT_ = "0123456789ABCDEFabcdef" # <<<<<<<<<<<<<< + * CHAR_LETTER_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + * CHAR_NAME_START_ = CHAR_LETTER_ + "_+*:.^~!\\" + */ + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_CHAR_HEXDIGIT, __pyx_kp_u_0123456789ABCDEFabcdef) < 0) __PYX_ERR(0, 33, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":34 + * CHAR_DIGIT_ = 
"0123456789" + * CHAR_HEXDIGIT_ = "0123456789ABCDEFabcdef" + * CHAR_LETTER_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" # <<<<<<<<<<<<<< + * CHAR_NAME_START_ = CHAR_LETTER_ + "_+*:.^~!\\" + * CHAR_NAME_CONTINUATION_ = CHAR_LETTER_ + CHAR_DIGIT_ + "_.+*:^~!/-" + */ + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_CHAR_LETTER, __pyx_n_u_ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef) < 0) __PYX_ERR(0, 34, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":35 + * CHAR_HEXDIGIT_ = "0123456789ABCDEFabcdef" + * CHAR_LETTER_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + * CHAR_NAME_START_ = CHAR_LETTER_ + "_+*:.^~!\\" # <<<<<<<<<<<<<< + * CHAR_NAME_CONTINUATION_ = CHAR_LETTER_ + CHAR_DIGIT_ + "_.+*:^~!/-" + * + */ + __pyx_t_10 = PyObject_GetItem(__pyx_t_6, __pyx_n_s_CHAR_LETTER); + if (unlikely(!__pyx_t_10)) { + PyErr_Clear(); + __Pyx_GetModuleGlobalName(__pyx_t_10, __pyx_n_s_CHAR_LETTER); + } + if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 35, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __pyx_t_9 = PyNumber_Add(__pyx_t_10, __pyx_kp_u__19); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 35, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_CHAR_NAME_START, __pyx_t_9) < 0) __PYX_ERR(0, 35, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + + /* "fontTools/feaLib/lexer.py":36 + * CHAR_LETTER_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + * CHAR_NAME_START_ = CHAR_LETTER_ + "_+*:.^~!\\" + * CHAR_NAME_CONTINUATION_ = CHAR_LETTER_ + CHAR_DIGIT_ + "_.+*:^~!/-" # <<<<<<<<<<<<<< + * + * RE_GLYPHCLASS = re.compile(r"^[A-Za-z_0-9.\-]+$") + */ + __pyx_t_9 = PyObject_GetItem(__pyx_t_6, __pyx_n_s_CHAR_LETTER); + if (unlikely(!__pyx_t_9)) { + PyErr_Clear(); + __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_CHAR_LETTER); + } + if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 36, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_10 = PyObject_GetItem(__pyx_t_6, __pyx_n_s_CHAR_DIGIT); + if (unlikely(!__pyx_t_10)) { + PyErr_Clear(); + __Pyx_GetModuleGlobalName(__pyx_t_10, __pyx_n_s_CHAR_DIGIT); + } + if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 36, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __pyx_t_8 = PyNumber_Add(__pyx_t_9, __pyx_t_10); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 36, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + __pyx_t_10 = PyNumber_Add(__pyx_t_8, __pyx_kp_u__20); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 36, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_CHAR_NAME_CONTINUATION, __pyx_t_10) < 0) __PYX_ERR(0, 36, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + + /* "fontTools/feaLib/lexer.py":38 + * CHAR_NAME_CONTINUATION_ = CHAR_LETTER_ + CHAR_DIGIT_ + "_.+*:^~!/-" + * + * RE_GLYPHCLASS = re.compile(r"^[A-Za-z_0-9.\-]+$") # <<<<<<<<<<<<<< + * + * MODE_NORMAL_ = "NORMAL" + */ + __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_re); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 38, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_compile); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 38, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __pyx_t_8 = NULL; + __pyx_t_11 = 0; + #if CYTHON_UNPACK_METHODS + if (unlikely(PyMethod_Check(__pyx_t_9))) { + __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_9); + if (likely(__pyx_t_8)) { + PyObject* function = 
PyMethod_GET_FUNCTION(__pyx_t_9); + __Pyx_INCREF(__pyx_t_8); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_9, function); + __pyx_t_11 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_8, __pyx_kp_u_A_Za_z_0_9}; + __pyx_t_10 = __Pyx_PyObject_FastCall(__pyx_t_9, __pyx_callargs+1-__pyx_t_11, 1+__pyx_t_11); + __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; + if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 38, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + } + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_RE_GLYPHCLASS, __pyx_t_10) < 0) __PYX_ERR(0, 38, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + + /* "fontTools/feaLib/lexer.py":40 + * RE_GLYPHCLASS = re.compile(r"^[A-Za-z_0-9.\-]+$") + * + * MODE_NORMAL_ = "NORMAL" # <<<<<<<<<<<<<< + * MODE_FILENAME_ = "FILENAME" + * + */ + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_MODE_NORMAL, __pyx_n_u_NORMAL) < 0) __PYX_ERR(0, 40, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":41 + * + * MODE_NORMAL_ = "NORMAL" + * MODE_FILENAME_ = "FILENAME" # <<<<<<<<<<<<<< + * + * def __init__(self, text, filename): + */ + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_MODE_FILENAME, __pyx_n_u_FILENAME) < 0) __PYX_ERR(0, 41, __pyx_L1_error) + + /* "fontTools/feaLib/lexer.py":43 + * MODE_FILENAME_ = "FILENAME" + * + * def __init__(self, text, filename): # <<<<<<<<<<<<<< + * self.filename_ = filename + * self.line_ = 1 + */ + __pyx_t_10 = __Pyx_CyFunction_New(&__pyx_mdef_9fontTools_6feaLib_5lexer_5Lexer_1__init__, 0, __pyx_n_s_Lexer___init, NULL, __pyx_n_s_fontTools_feaLib_lexer, __pyx_d, ((PyObject *)__pyx_codeobj__22)); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 43, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_init, __pyx_t_10) < 0) __PYX_ERR(0, 43, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + + /* "fontTools/feaLib/lexer.py":52 + * self.mode_ = Lexer.MODE_NORMAL_ + * + * def __iter__(self): # <<<<<<<<<<<<<< + * return self + * + */ + __pyx_t_10 = __Pyx_CyFunction_New(&__pyx_mdef_9fontTools_6feaLib_5lexer_5Lexer_3__iter__, 0, __pyx_n_s_Lexer___iter, NULL, __pyx_n_s_fontTools_feaLib_lexer, __pyx_d, ((PyObject *)__pyx_codeobj__24)); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 52, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_iter, __pyx_t_10) < 0) __PYX_ERR(0, 52, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + + /* "fontTools/feaLib/lexer.py":55 + * return self + * + * def next(self): # Python 2 # <<<<<<<<<<<<<< + * return self.__next__() + * + */ + __pyx_t_10 = __Pyx_CyFunction_New(&__pyx_mdef_9fontTools_6feaLib_5lexer_5Lexer_5next, 0, __pyx_n_s_Lexer_next, NULL, __pyx_n_s_fontTools_feaLib_lexer, __pyx_d, ((PyObject *)__pyx_codeobj__25)); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 55, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_next_3, __pyx_t_10) < 0) __PYX_ERR(0, 55, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + + /* "fontTools/feaLib/lexer.py":58 + * return self.__next__() + * + * def __next__(self): # Python 3 # <<<<<<<<<<<<<< + * while True: + * token_type, token, location = self.next_() + */ + __pyx_t_10 = __Pyx_CyFunction_New(&__pyx_mdef_9fontTools_6feaLib_5lexer_5Lexer_7__next__, 0, __pyx_n_s_Lexer___next, NULL, __pyx_n_s_fontTools_feaLib_lexer, __pyx_d, ((PyObject *)__pyx_codeobj__27)); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 58, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + if (__Pyx_SetNameInClass(__pyx_t_6, 
__pyx_n_s_next, __pyx_t_10) < 0) __PYX_ERR(0, 58, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + + /* "fontTools/feaLib/lexer.py":64 + * return (token_type, token, location) + * + * def location_(self): # <<<<<<<<<<<<<< + * column = self.pos_ - self.line_start_ + 1 + * return FeatureLibLocation(self.filename_ or "", self.line_, column) + */ + __pyx_t_10 = __Pyx_CyFunction_New(&__pyx_mdef_9fontTools_6feaLib_5lexer_5Lexer_9location_, 0, __pyx_n_s_Lexer_location, NULL, __pyx_n_s_fontTools_feaLib_lexer, __pyx_d, ((PyObject *)__pyx_codeobj__29)); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 64, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_location, __pyx_t_10) < 0) __PYX_ERR(0, 64, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + + /* "fontTools/feaLib/lexer.py":68 + * return FeatureLibLocation(self.filename_ or "", self.line_, column) + * + * def next_(self): # <<<<<<<<<<<<<< + * self.scan_over_(Lexer.CHAR_WHITESPACE_) + * location = self.location_() + */ + __pyx_t_10 = __Pyx_CyFunction_New(&__pyx_mdef_9fontTools_6feaLib_5lexer_5Lexer_11next_, 0, __pyx_n_s_Lexer_next_2, NULL, __pyx_n_s_fontTools_feaLib_lexer, __pyx_d, ((PyObject *)__pyx_codeobj__31)); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 68, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_next_2, __pyx_t_10) < 0) __PYX_ERR(0, 68, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + + /* "fontTools/feaLib/lexer.py":165 + * raise FeatureLibError("Unexpected character: %r" % cur_char, location) + * + * def scan_over_(self, valid): # <<<<<<<<<<<<<< + * p = self.pos_ + * while p < self.text_length_ and self.text_[p] in valid: + */ + __pyx_t_10 = __Pyx_CyFunction_New(&__pyx_mdef_9fontTools_6feaLib_5lexer_5Lexer_13scan_over_, 0, __pyx_n_s_Lexer_scan_over, NULL, __pyx_n_s_fontTools_feaLib_lexer, __pyx_d, ((PyObject *)__pyx_codeobj__33)); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 165, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_scan_over, __pyx_t_10) < 0) __PYX_ERR(0, 165, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + + /* "fontTools/feaLib/lexer.py":171 + * self.pos_ = p + * + * def scan_until_(self, stop_at): # <<<<<<<<<<<<<< + * p = self.pos_ + * while p < self.text_length_ and self.text_[p] not in stop_at: + */ + __pyx_t_10 = __Pyx_CyFunction_New(&__pyx_mdef_9fontTools_6feaLib_5lexer_5Lexer_15scan_until_, 0, __pyx_n_s_Lexer_scan_until, NULL, __pyx_n_s_fontTools_feaLib_lexer, __pyx_d, ((PyObject *)__pyx_codeobj__35)); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 171, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_scan_until, __pyx_t_10) < 0) __PYX_ERR(0, 171, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + + /* "fontTools/feaLib/lexer.py":177 + * self.pos_ = p + * + * def scan_anonymous_block(self, tag): # <<<<<<<<<<<<<< + * location = self.location_() + * tag = tag.strip() + */ + __pyx_t_10 = __Pyx_CyFunction_New(&__pyx_mdef_9fontTools_6feaLib_5lexer_5Lexer_17scan_anonymous_block, 0, __pyx_n_s_Lexer_scan_anonymous_block, NULL, __pyx_n_s_fontTools_feaLib_lexer, __pyx_d, ((PyObject *)__pyx_codeobj__37)); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 177, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_scan_anonymous_block, __pyx_t_10) < 0) __PYX_ERR(0, 177, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + + /* "fontTools/feaLib/lexer.py":13 + * + * + * class 
Lexer(object): # <<<<<<<<<<<<<< + * NUMBER = "NUMBER" + * HEXADECIMAL = "HEXADECIMAL" + */ + __pyx_t_10 = __Pyx_Py3ClassCreate(__pyx_t_3, __pyx_n_s_Lexer, __pyx_t_2, __pyx_t_6, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_Lexer, __pyx_t_10) < 0) __PYX_ERR(0, 13, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "fontTools/feaLib/lexer.py":192 + * + * + * class IncludingLexer(object): # <<<<<<<<<<<<<< + * """A Lexer that follows include statements. + * + */ + __pyx_t_2 = __Pyx_PEP560_update_bases(__pyx_tuple__39); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 192, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __Pyx_CalculateMetaclass(NULL, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 192, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_6 = __Pyx_Py3MetaclassPrepare(__pyx_t_3, __pyx_t_2, __pyx_n_s_IncludingLexer, __pyx_n_s_IncludingLexer, (PyObject *) NULL, __pyx_n_s_fontTools_feaLib_lexer, __pyx_kp_s_A_Lexer_that_follows_include_sta); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 192, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + if (__pyx_t_2 != __pyx_tuple__39) { + if (unlikely((PyDict_SetItemString(__pyx_t_6, "__orig_bases__", __pyx_tuple__39) < 0))) __PYX_ERR(0, 192, __pyx_L1_error) + } + + /* "fontTools/feaLib/lexer.py":207 + * """ + * + * def __init__(self, featurefile, *, includeDir=None): # <<<<<<<<<<<<<< + * """Initializes an IncludingLexer. + * + */ + __pyx_t_10 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 207, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + if (PyDict_SetItem(__pyx_t_10, __pyx_n_s_includeDir, Py_None) < 0) __PYX_ERR(0, 207, __pyx_L1_error) + __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_9fontTools_6feaLib_5lexer_14IncludingLexer_1__init__, 0, __pyx_n_s_IncludingLexer___init, NULL, __pyx_n_s_fontTools_feaLib_lexer, __pyx_d, ((PyObject *)__pyx_codeobj__41)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 207, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __Pyx_CyFunction_SetDefaultsKwDict(__pyx_t_9, __pyx_t_10); + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_init, __pyx_t_9) < 0) __PYX_ERR(0, 207, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + + /* "fontTools/feaLib/lexer.py":221 + * self.includeDir = includeDir + * + * def __iter__(self): # <<<<<<<<<<<<<< + * return self + * + */ + __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_9fontTools_6feaLib_5lexer_14IncludingLexer_3__iter__, 0, __pyx_n_s_IncludingLexer___iter, NULL, __pyx_n_s_fontTools_feaLib_lexer, __pyx_d, ((PyObject *)__pyx_codeobj__42)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 221, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_iter, __pyx_t_9) < 0) __PYX_ERR(0, 221, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + + /* "fontTools/feaLib/lexer.py":224 + * return self + * + * def next(self): # Python 2 # <<<<<<<<<<<<<< + * return self.__next__() + * + */ + __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_9fontTools_6feaLib_5lexer_14IncludingLexer_5next, 0, __pyx_n_s_IncludingLexer_next, NULL, __pyx_n_s_fontTools_feaLib_lexer, __pyx_d, ((PyObject *)__pyx_codeobj__43)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 224, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_next_3, __pyx_t_9) < 0) __PYX_ERR(0, 224, 
__pyx_L1_error) + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + + /* "fontTools/feaLib/lexer.py":227 + * return self.__next__() + * + * def __next__(self): # Python 3 # <<<<<<<<<<<<<< + * while self.lexers_: + * lexer = self.lexers_[-1] + */ + __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_9fontTools_6feaLib_5lexer_14IncludingLexer_7__next__, 0, __pyx_n_s_IncludingLexer___next, NULL, __pyx_n_s_fontTools_feaLib_lexer, __pyx_d, ((PyObject *)__pyx_codeobj__45)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 227, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_next, __pyx_t_9) < 0) __PYX_ERR(0, 227, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + + /* "fontTools/feaLib/lexer.py":266 + * raise StopIteration() + * + * @staticmethod # <<<<<<<<<<<<<< + * def make_lexer_(file_or_path): + * if hasattr(file_or_path, "read"): + */ + __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_9fontTools_6feaLib_5lexer_14IncludingLexer_9make_lexer_, __Pyx_CYFUNCTION_STATICMETHOD, __pyx_n_s_IncludingLexer_make_lexer, NULL, __pyx_n_s_fontTools_feaLib_lexer, __pyx_d, ((PyObject *)__pyx_codeobj__47)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 266, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_staticmethod, __pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 266, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_make_lexer, __pyx_t_10) < 0) __PYX_ERR(0, 266, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + + /* "fontTools/feaLib/lexer.py":279 + * return Lexer(data, filename) + * + * def scan_anonymous_block(self, tag): # <<<<<<<<<<<<<< + * return self.lexers_[-1].scan_anonymous_block(tag) + * + */ + __pyx_t_10 = __Pyx_CyFunction_New(&__pyx_mdef_9fontTools_6feaLib_5lexer_14IncludingLexer_11scan_anonymous_block, 0, __pyx_n_s_IncludingLexer_scan_anonymous_bl, NULL, __pyx_n_s_fontTools_feaLib_lexer, __pyx_d, ((PyObject *)__pyx_codeobj__49)); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 279, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_scan_anonymous_block, __pyx_t_10) < 0) __PYX_ERR(0, 279, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + + /* "fontTools/feaLib/lexer.py":192 + * + * + * class IncludingLexer(object): # <<<<<<<<<<<<<< + * """A Lexer that follows include statements. 
+ * + */ + __pyx_t_10 = __Pyx_Py3ClassCreate(__pyx_t_3, __pyx_n_s_IncludingLexer, __pyx_t_2, __pyx_t_6, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 192, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_IncludingLexer, __pyx_t_10) < 0) __PYX_ERR(0, 192, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "fontTools/feaLib/lexer.py":283 + * + * + * class NonIncludingLexer(IncludingLexer): # <<<<<<<<<<<<<< + * """Lexer that does not follow `include` statements, emits them as-is.""" + * + */ + __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_IncludingLexer); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 283, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 283, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_GIVEREF(__pyx_t_2); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2)) __PYX_ERR(0, 283, __pyx_L1_error); + __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_PEP560_update_bases(__pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 283, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_6 = __Pyx_CalculateMetaclass(NULL, __pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 283, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_10 = __Pyx_Py3MetaclassPrepare(__pyx_t_6, __pyx_t_2, __pyx_n_s_NonIncludingLexer, __pyx_n_s_NonIncludingLexer, (PyObject *) NULL, __pyx_n_s_fontTools_feaLib_lexer, __pyx_kp_s_Lexer_that_does_not_follow_inclu); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 283, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + if (__pyx_t_2 != __pyx_t_3) { + if (unlikely((PyDict_SetItemString(__pyx_t_10, "__orig_bases__", __pyx_t_3) < 0))) __PYX_ERR(0, 283, __pyx_L1_error) + } + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "fontTools/feaLib/lexer.py":286 + * """Lexer that does not follow `include` statements, emits them as-is.""" + * + * def __next__(self): # Python 3 # <<<<<<<<<<<<<< + * return next(self.lexers_[0]) + */ + __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_9fontTools_6feaLib_5lexer_17NonIncludingLexer_1__next__, 0, __pyx_n_s_NonIncludingLexer___next, NULL, __pyx_n_s_fontTools_feaLib_lexer, __pyx_d, ((PyObject *)__pyx_codeobj__50)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 286, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_n_s_next, __pyx_t_3) < 0) __PYX_ERR(0, 286, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "fontTools/feaLib/lexer.py":283 + * + * + * class NonIncludingLexer(IncludingLexer): # <<<<<<<<<<<<<< + * """Lexer that does not follow `include` statements, emits them as-is.""" + * + */ + __pyx_t_3 = __Pyx_Py3ClassCreate(__pyx_t_6, __pyx_n_s_NonIncludingLexer, __pyx_t_2, __pyx_t_10, NULL, 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 283, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_NonIncludingLexer, __pyx_t_3) < 0) __PYX_ERR(0, 283, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "fontTools/feaLib/lexer.py":1 + * from fontTools.feaLib.error import FeatureLibError, IncludedFeaNotFound # <<<<<<<<<<<<<< + * from fontTools.feaLib.location import FeatureLibLocation + * import re + */ + __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + 
if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /*--- Wrapped vars code ---*/ + + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_XDECREF(__pyx_t_9); + __Pyx_XDECREF(__pyx_t_10); + if (__pyx_m) { + if (__pyx_d && stringtab_initialized) { + __Pyx_AddTraceback("init fontTools.feaLib.lexer", __pyx_clineno, __pyx_lineno, __pyx_filename); + } + #if !CYTHON_USE_MODULE_STATE + Py_CLEAR(__pyx_m); + #else + Py_DECREF(__pyx_m); + if (pystate_addmodule_run) { + PyObject *tp, *value, *tb; + PyErr_Fetch(&tp, &value, &tb); + PyState_RemoveModule(&__pyx_moduledef); + PyErr_Restore(tp, value, tb); + } + #endif + } else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_ImportError, "init fontTools.feaLib.lexer"); + } + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + #if CYTHON_PEP489_MULTI_PHASE_INIT + return (__pyx_m != NULL) ? 0 : -1; + #elif PY_MAJOR_VERSION >= 3 + return __pyx_m; + #else + return; + #endif +} +/* #### Code section: cleanup_globals ### */ +/* #### Code section: cleanup_module ### */ +/* #### Code section: main_method ### */ +/* #### Code section: utility_code_pragmas ### */ +#ifdef _MSC_VER +#pragma warning( push ) +/* Warning 4127: conditional expression is constant + * Cython uses constant conditional expressions to allow in inline functions to be optimized at + * compile-time, so this warning is not useful + */ +#pragma warning( disable : 4127 ) +#endif + + + +/* #### Code section: utility_code_def ### */ + +/* --- Runtime support code --- */ +/* Refnanny */ +#if CYTHON_REFNANNY +static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { + PyObject *m = NULL, *p = NULL; + void *r = NULL; + m = PyImport_ImportModule(modname); + if (!m) goto end; + p = PyObject_GetAttrString(m, "RefNannyAPI"); + if (!p) goto end; + r = PyLong_AsVoidPtr(p); +end: + Py_XDECREF(p); + Py_XDECREF(m); + return (__Pyx_RefNannyAPIStruct *)r; +} +#endif + +/* PyErrExceptionMatches */ +#if CYTHON_FAST_THREAD_STATE +static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { + Py_ssize_t i, n; + n = PyTuple_GET_SIZE(tuple); +#if PY_MAJOR_VERSION >= 3 + for (i=0; i<n; i++) { + if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; + } +#endif + for (i=0; i<n; i++) { + if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; + } + return 0; +} +static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { + int result; + PyObject *exc_type; +#if PY_VERSION_HEX >= 0x030C00A6 + PyObject *current_exception = tstate->current_exception; + if (unlikely(!current_exception)) return 0; + exc_type = (PyObject*) Py_TYPE(current_exception); + if (exc_type == err) return 1; +#else + exc_type = tstate->curexc_type; + if (exc_type == err) return 1; + if (unlikely(!exc_type)) return 0; +#endif + #if CYTHON_AVOID_BORROWED_REFS + Py_INCREF(exc_type); + #endif + if (unlikely(PyTuple_Check(err))) { + result = __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); + } else { + result = __Pyx_PyErr_GivenExceptionMatches(exc_type, err); + } + #if CYTHON_AVOID_BORROWED_REFS + Py_DECREF(exc_type); + #endif + return result; +} +#endif + +/* PyErrFetchRestore */ +#if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { +#if PY_VERSION_HEX >= 0x030C00A6 + PyObject *tmp_value; + assert(type == NULL || (value != NULL && type == (PyObject*) Py_TYPE(value))); + if (value) { + #if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(((PyBaseExceptionObject*) value)->traceback != tb)) + #endif + PyException_SetTraceback(value, tb); + } + tmp_value = tstate->current_exception; + tstate->current_exception =
value; + Py_XDECREF(tmp_value); + Py_XDECREF(type); + Py_XDECREF(tb); +#else + PyObject *tmp_type, *tmp_value, *tmp_tb; + tmp_type = tstate->curexc_type; + tmp_value = tstate->curexc_value; + tmp_tb = tstate->curexc_traceback; + tstate->curexc_type = type; + tstate->curexc_value = value; + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +#endif +} +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { +#if PY_VERSION_HEX >= 0x030C00A6 + PyObject* exc_value; + exc_value = tstate->current_exception; + tstate->current_exception = 0; + *value = exc_value; + *type = NULL; + *tb = NULL; + if (exc_value) { + *type = (PyObject*) Py_TYPE(exc_value); + Py_INCREF(*type); + #if CYTHON_COMPILING_IN_CPYTHON + *tb = ((PyBaseExceptionObject*) exc_value)->traceback; + Py_XINCREF(*tb); + #else + *tb = PyException_GetTraceback(exc_value); + #endif + } +#else + *type = tstate->curexc_type; + *value = tstate->curexc_value; + *tb = tstate->curexc_traceback; + tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; +#endif +} +#endif + +/* PyObjectGetAttrStr */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_getattro)) + return tp->tp_getattro(obj, attr_name); +#if PY_MAJOR_VERSION < 3 + if (likely(tp->tp_getattr)) + return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); +#endif + return PyObject_GetAttr(obj, attr_name); +} +#endif + +/* PyObjectGetAttrStrNoError */ +#if __PYX_LIMITED_VERSION_HEX < 0x030d00A1 +static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) + __Pyx_PyErr_Clear(); +} +#endif +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) { + PyObject *result; +#if __PYX_LIMITED_VERSION_HEX >= 0x030d00A1 + (void) PyObject_GetOptionalAttr(obj, attr_name, &result); + return result; +#else +#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1 + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) { + return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1); + } +#endif + result = __Pyx_PyObject_GetAttrStr(obj, attr_name); + if (unlikely(!result)) { + __Pyx_PyObject_GetAttrStr_ClearAttributeError(); + } + return result; +#endif +} + +/* GetBuiltinName */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name) { + PyObject* result = __Pyx_PyObject_GetAttrStrNoError(__pyx_b, name); + if (unlikely(!result) && !PyErr_Occurred()) { + PyErr_Format(PyExc_NameError, +#if PY_MAJOR_VERSION >= 3 + "name '%U' is not defined", name); +#else + "name '%.200s' is not defined", PyString_AS_STRING(name)); +#endif + } + return result; +} + +/* TupleAndListFromArray */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE void __Pyx_copy_object_array(PyObject *const *CYTHON_RESTRICT src, PyObject** CYTHON_RESTRICT dest, Py_ssize_t length) { + PyObject *v; + Py_ssize_t i; + for (i = 0; i < length; i++) { + v = dest[i] = src[i]; + Py_INCREF(v); + } +} +static CYTHON_INLINE PyObject * +__Pyx_PyTuple_FromArray(PyObject *const *src, Py_ssize_t n) +{ + PyObject *res; + if (n <= 0) { + Py_INCREF(__pyx_empty_tuple); + return __pyx_empty_tuple; + } + res = PyTuple_New(n); + if (unlikely(res 
== NULL)) return NULL; + __Pyx_copy_object_array(src, ((PyTupleObject*)res)->ob_item, n); + return res; +} +static CYTHON_INLINE PyObject * +__Pyx_PyList_FromArray(PyObject *const *src, Py_ssize_t n) +{ + PyObject *res; + if (n <= 0) { + return PyList_New(0); + } + res = PyList_New(n); + if (unlikely(res == NULL)) return NULL; + __Pyx_copy_object_array(src, ((PyListObject*)res)->ob_item, n); + return res; +} +#endif + +/* BytesEquals */ +static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { +#if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API + return PyObject_RichCompareBool(s1, s2, equals); +#else + if (s1 == s2) { + return (equals == Py_EQ); + } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { + const char *ps1, *ps2; + Py_ssize_t length = PyBytes_GET_SIZE(s1); + if (length != PyBytes_GET_SIZE(s2)) + return (equals == Py_NE); + ps1 = PyBytes_AS_STRING(s1); + ps2 = PyBytes_AS_STRING(s2); + if (ps1[0] != ps2[0]) { + return (equals == Py_NE); + } else if (length == 1) { + return (equals == Py_EQ); + } else { + int result; +#if CYTHON_USE_UNICODE_INTERNALS && (PY_VERSION_HEX < 0x030B0000) + Py_hash_t hash1, hash2; + hash1 = ((PyBytesObject*)s1)->ob_shash; + hash2 = ((PyBytesObject*)s2)->ob_shash; + if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { + return (equals == Py_NE); + } +#endif + result = memcmp(ps1, ps2, (size_t)length); + return (equals == Py_EQ) ? (result == 0) : (result != 0); + } + } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { + return (equals == Py_NE); + } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { + return (equals == Py_NE); + } else { + int result; + PyObject* py_result = PyObject_RichCompare(s1, s2, equals); + if (!py_result) + return -1; + result = __Pyx_PyObject_IsTrue(py_result); + Py_DECREF(py_result); + return result; + } +#endif +} + +/* UnicodeEquals */ +static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { +#if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API + return PyObject_RichCompareBool(s1, s2, equals); +#else +#if PY_MAJOR_VERSION < 3 + PyObject* owned_ref = NULL; +#endif + int s1_is_unicode, s2_is_unicode; + if (s1 == s2) { + goto return_eq; + } + s1_is_unicode = PyUnicode_CheckExact(s1); + s2_is_unicode = PyUnicode_CheckExact(s2); +#if PY_MAJOR_VERSION < 3 + if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { + owned_ref = PyUnicode_FromObject(s2); + if (unlikely(!owned_ref)) + return -1; + s2 = owned_ref; + s2_is_unicode = 1; + } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { + owned_ref = PyUnicode_FromObject(s1); + if (unlikely(!owned_ref)) + return -1; + s1 = owned_ref; + s1_is_unicode = 1; + } else if (((!s2_is_unicode) & (!s1_is_unicode))) { + return __Pyx_PyBytes_Equals(s1, s2, equals); + } +#endif + if (s1_is_unicode & s2_is_unicode) { + Py_ssize_t length; + int kind; + void *data1, *data2; + if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) + return -1; + length = __Pyx_PyUnicode_GET_LENGTH(s1); + if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { + goto return_ne; + } +#if CYTHON_USE_UNICODE_INTERNALS + { + Py_hash_t hash1, hash2; + #if CYTHON_PEP393_ENABLED + hash1 = ((PyASCIIObject*)s1)->hash; + hash2 = ((PyASCIIObject*)s2)->hash; + #else + hash1 = ((PyUnicodeObject*)s1)->hash; + hash2 = ((PyUnicodeObject*)s2)->hash; + #endif + if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { + goto return_ne; + } + } +#endif + kind = 
__Pyx_PyUnicode_KIND(s1); + if (kind != __Pyx_PyUnicode_KIND(s2)) { + goto return_ne; + } + data1 = __Pyx_PyUnicode_DATA(s1); + data2 = __Pyx_PyUnicode_DATA(s2); + if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { + goto return_ne; + } else if (length == 1) { + goto return_eq; + } else { + int result = memcmp(data1, data2, (size_t)(length * kind)); + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(owned_ref); + #endif + return (equals == Py_EQ) ? (result == 0) : (result != 0); + } + } else if ((s1 == Py_None) & s2_is_unicode) { + goto return_ne; + } else if ((s2 == Py_None) & s1_is_unicode) { + goto return_ne; + } else { + int result; + PyObject* py_result = PyObject_RichCompare(s1, s2, equals); + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(owned_ref); + #endif + if (!py_result) + return -1; + result = __Pyx_PyObject_IsTrue(py_result); + Py_DECREF(py_result); + return result; + } +return_eq: + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(owned_ref); + #endif + return (equals == Py_EQ); +return_ne: + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(owned_ref); + #endif + return (equals == Py_NE); +#endif +} + +/* fastcall */ +#if CYTHON_METH_FASTCALL +static CYTHON_INLINE PyObject * __Pyx_GetKwValue_FASTCALL(PyObject *kwnames, PyObject *const *kwvalues, PyObject *s) +{ + Py_ssize_t i, n = PyTuple_GET_SIZE(kwnames); + for (i = 0; i < n; i++) + { + if (s == PyTuple_GET_ITEM(kwnames, i)) return kwvalues[i]; + } + for (i = 0; i < n; i++) + { + int eq = __Pyx_PyUnicode_Equals(s, PyTuple_GET_ITEM(kwnames, i), Py_EQ); + if (unlikely(eq != 0)) { + if (unlikely(eq < 0)) return NULL; + return kwvalues[i]; + } + } + return NULL; +} +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030d0000 +CYTHON_UNUSED static PyObject *__Pyx_KwargsAsDict_FASTCALL(PyObject *kwnames, PyObject *const *kwvalues) { + Py_ssize_t i, nkwargs = PyTuple_GET_SIZE(kwnames); + PyObject *dict; + dict = PyDict_New(); + if (unlikely(!dict)) + return NULL; + for (i=0; i= 3 + "%s() got multiple values for keyword argument '%U'", func_name, kw_name); + #else + "%s() got multiple values for keyword argument '%s'", func_name, + PyString_AsString(kw_name)); + #endif +} + +/* ParseKeywords */ +static int __Pyx_ParseOptionalKeywords( + PyObject *kwds, + PyObject *const *kwvalues, + PyObject **argnames[], + PyObject *kwds2, + PyObject *values[], + Py_ssize_t num_pos_args, + const char* function_name) +{ + PyObject *key = 0, *value = 0; + Py_ssize_t pos = 0; + PyObject*** name; + PyObject*** first_kw_arg = argnames + num_pos_args; + int kwds_is_tuple = CYTHON_METH_FASTCALL && likely(PyTuple_Check(kwds)); + while (1) { + Py_XDECREF(key); key = NULL; + Py_XDECREF(value); value = NULL; + if (kwds_is_tuple) { + Py_ssize_t size; +#if CYTHON_ASSUME_SAFE_MACROS + size = PyTuple_GET_SIZE(kwds); +#else + size = PyTuple_Size(kwds); + if (size < 0) goto bad; +#endif + if (pos >= size) break; +#if CYTHON_AVOID_BORROWED_REFS + key = __Pyx_PySequence_ITEM(kwds, pos); + if (!key) goto bad; +#elif CYTHON_ASSUME_SAFE_MACROS + key = PyTuple_GET_ITEM(kwds, pos); +#else + key = PyTuple_GetItem(kwds, pos); + if (!key) goto bad; +#endif + value = kwvalues[pos]; + pos++; + } + else + { + if (!PyDict_Next(kwds, &pos, &key, &value)) break; +#if CYTHON_AVOID_BORROWED_REFS + Py_INCREF(key); +#endif + } + name = first_kw_arg; + while (*name && (**name != key)) name++; + if (*name) { + values[name-argnames] = value; +#if CYTHON_AVOID_BORROWED_REFS + Py_INCREF(value); + Py_DECREF(key); +#endif + key = NULL; + value = NULL; + continue; + } +#if 
!CYTHON_AVOID_BORROWED_REFS + Py_INCREF(key); +#endif + Py_INCREF(value); + name = first_kw_arg; + #if PY_MAJOR_VERSION < 3 + if (likely(PyString_Check(key))) { + while (*name) { + if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) + && _PyString_Eq(**name, key)) { + values[name-argnames] = value; +#if CYTHON_AVOID_BORROWED_REFS + value = NULL; +#endif + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + if ((**argname == key) || ( + (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) + && _PyString_Eq(**argname, key))) { + goto arg_passed_twice; + } + argname++; + } + } + } else + #endif + if (likely(PyUnicode_Check(key))) { + while (*name) { + int cmp = ( + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : + #endif + PyUnicode_Compare(**name, key) + ); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) { + values[name-argnames] = value; +#if CYTHON_AVOID_BORROWED_REFS + value = NULL; +#endif + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + int cmp = (**argname == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : + #endif + PyUnicode_Compare(**argname, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) goto arg_passed_twice; + argname++; + } + } + } else + goto invalid_keyword_type; + if (kwds2) { + if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; + } else { + goto invalid_keyword; + } + } + Py_XDECREF(key); + Py_XDECREF(value); + return 0; +arg_passed_twice: + __Pyx_RaiseDoubleKeywordsError(function_name, key); + goto bad; +invalid_keyword_type: + PyErr_Format(PyExc_TypeError, + "%.200s() keywords must be strings", function_name); + goto bad; +invalid_keyword: + #if PY_MAJOR_VERSION < 3 + PyErr_Format(PyExc_TypeError, + "%.200s() got an unexpected keyword argument '%.200s'", + function_name, PyString_AsString(key)); + #else + PyErr_Format(PyExc_TypeError, + "%s() got an unexpected keyword argument '%U'", + function_name, key); + #endif +bad: + Py_XDECREF(key); + Py_XDECREF(value); + return -1; +} + +/* PyObjectSetAttrStr */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value) { + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_setattro)) + return tp->tp_setattro(obj, attr_name, value); +#if PY_MAJOR_VERSION < 3 + if (likely(tp->tp_setattr)) + return tp->tp_setattr(obj, PyString_AS_STRING(attr_name), value); +#endif + return PyObject_SetAttr(obj, attr_name, value); +} +#endif + +/* PyDictVersioning */ +#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { + PyObject *dict = Py_TYPE(obj)->tp_dict; + return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; +} +static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { + PyObject **dictptr = NULL; + Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; + if (offset) { +#if CYTHON_COMPILING_IN_CPYTHON + dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); +#else + dictptr = _PyObject_GetDictPtr(obj); +#endif + } + return (dictptr && *dictptr) ? 
__PYX_GET_DICT_VERSION(*dictptr) : 0; +} +static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { + PyObject *dict = Py_TYPE(obj)->tp_dict; + if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) + return 0; + return obj_dict_version == __Pyx_get_object_dict_version(obj); +} +#endif + +/* GetModuleGlobalName */ +#if CYTHON_USE_DICT_VERSIONS +static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) +#else +static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) +#endif +{ + PyObject *result; +#if !CYTHON_AVOID_BORROWED_REFS +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && PY_VERSION_HEX < 0x030d0000 + result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); + __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } else if (unlikely(PyErr_Occurred())) { + return NULL; + } +#elif CYTHON_COMPILING_IN_LIMITED_API + if (unlikely(!__pyx_m)) { + return NULL; + } + result = PyObject_GetAttr(__pyx_m, name); + if (likely(result)) { + return result; + } +#else + result = PyDict_GetItem(__pyx_d, name); + __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } +#endif +#else + result = PyObject_GetItem(__pyx_d, name); + __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } + PyErr_Clear(); +#endif + return __Pyx_GetBuiltinName(name); +} + +/* PyFunctionFastCall */ +#if CYTHON_FAST_PYCALL && !CYTHON_VECTORCALL +static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, + PyObject *globals) { + PyFrameObject *f; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject **fastlocals; + Py_ssize_t i; + PyObject *result; + assert(globals != NULL); + /* XXX Perhaps we should create a specialized + PyFrame_New() that doesn't take locals, but does + take builtins without sanity checking them. + */ + assert(tstate != NULL); + f = PyFrame_New(tstate, co, globals, NULL); + if (f == NULL) { + return NULL; + } + fastlocals = __Pyx_PyFrame_GetLocalsplus(f); + for (i = 0; i < na; i++) { + Py_INCREF(*args); + fastlocals[i] = *args++; + } + result = PyEval_EvalFrameEx(f,0); + ++tstate->recursion_depth; + Py_DECREF(f); + --tstate->recursion_depth; + return result; +} +static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { + PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); + PyObject *globals = PyFunction_GET_GLOBALS(func); + PyObject *argdefs = PyFunction_GET_DEFAULTS(func); + PyObject *closure; +#if PY_MAJOR_VERSION >= 3 + PyObject *kwdefs; +#endif + PyObject *kwtuple, **k; + PyObject **d; + Py_ssize_t nd; + Py_ssize_t nk; + PyObject *result; + assert(kwargs == NULL || PyDict_Check(kwargs)); + nk = kwargs ? 
PyDict_Size(kwargs) : 0; + #if PY_MAJOR_VERSION < 3 + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) { + return NULL; + } + #else + if (unlikely(Py_EnterRecursiveCall(" while calling a Python object"))) { + return NULL; + } + #endif + if ( +#if PY_MAJOR_VERSION >= 3 + co->co_kwonlyargcount == 0 && +#endif + likely(kwargs == NULL || nk == 0) && + co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { + if (argdefs == NULL && co->co_argcount == nargs) { + result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); + goto done; + } + else if (nargs == 0 && argdefs != NULL + && co->co_argcount == Py_SIZE(argdefs)) { + /* function called with no arguments, but all parameters have + a default value: use default values as arguments .*/ + args = &PyTuple_GET_ITEM(argdefs, 0); + result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); + goto done; + } + } + if (kwargs != NULL) { + Py_ssize_t pos, i; + kwtuple = PyTuple_New(2 * nk); + if (kwtuple == NULL) { + result = NULL; + goto done; + } + k = &PyTuple_GET_ITEM(kwtuple, 0); + pos = i = 0; + while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { + Py_INCREF(k[i]); + Py_INCREF(k[i+1]); + i += 2; + } + nk = i / 2; + } + else { + kwtuple = NULL; + k = NULL; + } + closure = PyFunction_GET_CLOSURE(func); +#if PY_MAJOR_VERSION >= 3 + kwdefs = PyFunction_GET_KW_DEFAULTS(func); +#endif + if (argdefs != NULL) { + d = &PyTuple_GET_ITEM(argdefs, 0); + nd = Py_SIZE(argdefs); + } + else { + d = NULL; + nd = 0; + } +#if PY_MAJOR_VERSION >= 3 + result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, + args, (int)nargs, + k, (int)nk, + d, (int)nd, kwdefs, closure); +#else + result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, + args, (int)nargs, + k, (int)nk, + d, (int)nd, closure); +#endif + Py_XDECREF(kwtuple); +done: + Py_LeaveRecursiveCall(); + return result; +} +#endif + +/* PyObjectCall */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { + PyObject *result; + ternaryfunc call = Py_TYPE(func)->tp_call; + if (unlikely(!call)) + return PyObject_Call(func, arg, kw); + #if PY_MAJOR_VERSION < 3 + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) + return NULL; + #else + if (unlikely(Py_EnterRecursiveCall(" while calling a Python object"))) + return NULL; + #endif + result = (*call)(func, arg, kw); + Py_LeaveRecursiveCall(); + if (unlikely(!result) && unlikely(!PyErr_Occurred())) { + PyErr_SetString( + PyExc_SystemError, + "NULL result without error in PyObject_Call"); + } + return result; +} +#endif + +/* PyObjectCallMethO */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { + PyObject *self, *result; + PyCFunction cfunc; + cfunc = __Pyx_CyOrPyCFunction_GET_FUNCTION(func); + self = __Pyx_CyOrPyCFunction_GET_SELF(func); + #if PY_MAJOR_VERSION < 3 + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) + return NULL; + #else + if (unlikely(Py_EnterRecursiveCall(" while calling a Python object"))) + return NULL; + #endif + result = cfunc(self, arg); + Py_LeaveRecursiveCall(); + if (unlikely(!result) && unlikely(!PyErr_Occurred())) { + PyErr_SetString( + PyExc_SystemError, + "NULL result without error in PyObject_Call"); + } + return result; +} +#endif + +/* PyObjectFastCall */ +#if PY_VERSION_HEX < 0x03090000 || CYTHON_COMPILING_IN_LIMITED_API +static PyObject* 
__Pyx_PyObject_FastCall_fallback(PyObject *func, PyObject **args, size_t nargs, PyObject *kwargs) { + PyObject *argstuple; + PyObject *result = 0; + size_t i; + argstuple = PyTuple_New((Py_ssize_t)nargs); + if (unlikely(!argstuple)) return NULL; + for (i = 0; i < nargs; i++) { + Py_INCREF(args[i]); + if (__Pyx_PyTuple_SET_ITEM(argstuple, (Py_ssize_t)i, args[i]) < 0) goto bad; + } + result = __Pyx_PyObject_Call(func, argstuple, kwargs); + bad: + Py_DECREF(argstuple); + return result; +} +#endif +static CYTHON_INLINE PyObject* __Pyx_PyObject_FastCallDict(PyObject *func, PyObject **args, size_t _nargs, PyObject *kwargs) { + Py_ssize_t nargs = __Pyx_PyVectorcall_NARGS(_nargs); +#if CYTHON_COMPILING_IN_CPYTHON + if (nargs == 0 && kwargs == NULL) { + if (__Pyx_CyOrPyCFunction_Check(func) && likely( __Pyx_CyOrPyCFunction_GET_FLAGS(func) & METH_NOARGS)) + return __Pyx_PyObject_CallMethO(func, NULL); + } + else if (nargs == 1 && kwargs == NULL) { + if (__Pyx_CyOrPyCFunction_Check(func) && likely( __Pyx_CyOrPyCFunction_GET_FLAGS(func) & METH_O)) + return __Pyx_PyObject_CallMethO(func, args[0]); + } +#endif + #if PY_VERSION_HEX < 0x030800B1 + #if CYTHON_FAST_PYCCALL + if (PyCFunction_Check(func)) { + if (kwargs) { + return _PyCFunction_FastCallDict(func, args, nargs, kwargs); + } else { + return _PyCFunction_FastCallKeywords(func, args, nargs, NULL); + } + } + #if PY_VERSION_HEX >= 0x030700A1 + if (!kwargs && __Pyx_IS_TYPE(func, &PyMethodDescr_Type)) { + return _PyMethodDescr_FastCallKeywords(func, args, nargs, NULL); + } + #endif + #endif + #if CYTHON_FAST_PYCALL + if (PyFunction_Check(func)) { + return __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs); + } + #endif + #endif + if (kwargs == NULL) { + #if CYTHON_VECTORCALL + #if PY_VERSION_HEX < 0x03090000 + vectorcallfunc f = _PyVectorcall_Function(func); + #else + vectorcallfunc f = PyVectorcall_Function(func); + #endif + if (f) { + return f(func, args, (size_t)nargs, NULL); + } + #elif defined(__Pyx_CyFunction_USED) && CYTHON_BACKPORT_VECTORCALL + if (__Pyx_CyFunction_CheckExact(func)) { + __pyx_vectorcallfunc f = __Pyx_CyFunction_func_vectorcall(func); + if (f) return f(func, args, (size_t)nargs, NULL); + } + #endif + } + if (nargs == 0) { + return __Pyx_PyObject_Call(func, __pyx_empty_tuple, kwargs); + } + #if PY_VERSION_HEX >= 0x03090000 && !CYTHON_COMPILING_IN_LIMITED_API + return PyObject_VectorcallDict(func, args, (size_t)nargs, kwargs); + #else + return __Pyx_PyObject_FastCall_fallback(func, args, (size_t)nargs, kwargs); + #endif +} + +/* RaiseTooManyValuesToUnpack */ +static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { + PyErr_Format(PyExc_ValueError, + "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); +} + +/* RaiseNeedMoreValuesToUnpack */ +static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { + PyErr_Format(PyExc_ValueError, + "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", + index, (index == 1) ? 
"" : "s"); +} + +/* IterFinish */ +static CYTHON_INLINE int __Pyx_IterFinish(void) { + PyObject* exc_type; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + exc_type = __Pyx_PyErr_CurrentExceptionType(); + if (unlikely(exc_type)) { + if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) + return -1; + __Pyx_PyErr_Clear(); + return 0; + } + return 0; +} + +/* UnpackItemEndCheck */ +static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { + if (unlikely(retval)) { + Py_DECREF(retval); + __Pyx_RaiseTooManyValuesError(expected); + return -1; + } + return __Pyx_IterFinish(); +} + +/* PyIntBinop */ +#if !CYTHON_COMPILING_IN_PYPY +static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check) { + CYTHON_MAYBE_UNUSED_VAR(intval); + CYTHON_MAYBE_UNUSED_VAR(inplace); + CYTHON_UNUSED_VAR(zerodivision_check); + #if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact(op1))) { + const long b = intval; + long x; + long a = PyInt_AS_LONG(op1); + + x = (long)((unsigned long)a + (unsigned long)b); + if (likely((x^a) >= 0 || (x^b) >= 0)) + return PyInt_FromLong(x); + return PyLong_Type.tp_as_number->nb_add(op1, op2); + } + #endif + #if CYTHON_USE_PYLONG_INTERNALS + if (likely(PyLong_CheckExact(op1))) { + const long b = intval; + long a, x; +#ifdef HAVE_LONG_LONG + const PY_LONG_LONG llb = intval; + PY_LONG_LONG lla, llx; +#endif + if (unlikely(__Pyx_PyLong_IsZero(op1))) { + return __Pyx_NewRef(op2); + } + if (likely(__Pyx_PyLong_IsCompact(op1))) { + a = __Pyx_PyLong_CompactValue(op1); + } else { + const digit* digits = __Pyx_PyLong_Digits(op1); + const Py_ssize_t size = __Pyx_PyLong_SignedDigitCount(op1); + switch (size) { + case -2: + if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; + #ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { + lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; + #endif + } + CYTHON_FALLTHROUGH; + case 2: + if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; + #ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { + lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; + #endif + } + CYTHON_FALLTHROUGH; + case -3: + if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; + #ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { + lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; + #endif + } + CYTHON_FALLTHROUGH; + case 3: + if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; + #ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { + lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned 
PY_LONG_LONG)digits[0])); + goto long_long; + #endif + } + CYTHON_FALLTHROUGH; + case -4: + if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; + #ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { + lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; + #endif + } + CYTHON_FALLTHROUGH; + case 4: + if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; + #ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { + lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; + #endif + } + CYTHON_FALLTHROUGH; + default: return PyLong_Type.tp_as_number->nb_add(op1, op2); + } + } + x = a + b; + return PyLong_FromLong(x); +#ifdef HAVE_LONG_LONG + long_long: + llx = lla + llb; + return PyLong_FromLongLong(llx); +#endif + + + } + #endif + if (PyFloat_CheckExact(op1)) { + const long b = intval; +#if CYTHON_COMPILING_IN_LIMITED_API + double a = __pyx_PyFloat_AsDouble(op1); +#else + double a = PyFloat_AS_DOUBLE(op1); +#endif + double result; + + PyFPE_START_PROTECT("add", return NULL) + result = ((double)a) + (double)b; + PyFPE_END_PROTECT(result) + return PyFloat_FromDouble(result); + } + return (inplace ? 
PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); +} +#endif + +/* PyObjectCallNoArg */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) { + PyObject *arg[2] = {NULL, NULL}; + return __Pyx_PyObject_FastCall(func, arg + 1, 0 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET); +} + +/* RaiseException */ +#if PY_MAJOR_VERSION < 3 +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { + __Pyx_PyThreadState_declare + CYTHON_UNUSED_VAR(cause); + Py_XINCREF(type); + if (!value || value == Py_None) + value = NULL; + else + Py_INCREF(value); + if (!tb || tb == Py_None) + tb = NULL; + else { + Py_INCREF(tb); + if (!PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto raise_error; + } + } + if (PyType_Check(type)) { +#if CYTHON_COMPILING_IN_PYPY + if (!value) { + Py_INCREF(Py_None); + value = Py_None; + } +#endif + PyErr_NormalizeException(&type, &value, &tb); + } else { + if (value) { + PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto raise_error; + } + value = type; + type = (PyObject*) Py_TYPE(type); + Py_INCREF(type); + if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto raise_error; + } + } + __Pyx_PyThreadState_assign + __Pyx_ErrRestore(type, value, tb); + return; +raise_error: + Py_XDECREF(value); + Py_XDECREF(type); + Py_XDECREF(tb); + return; +} +#else +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { + PyObject* owned_instance = NULL; + if (tb == Py_None) { + tb = 0; + } else if (tb && !PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto bad; + } + if (value == Py_None) + value = 0; + if (PyExceptionInstance_Check(type)) { + if (value) { + PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto bad; + } + value = type; + type = (PyObject*) Py_TYPE(value); + } else if (PyExceptionClass_Check(type)) { + PyObject *instance_class = NULL; + if (value && PyExceptionInstance_Check(value)) { + instance_class = (PyObject*) Py_TYPE(value); + if (instance_class != type) { + int is_subclass = PyObject_IsSubclass(instance_class, type); + if (!is_subclass) { + instance_class = NULL; + } else if (unlikely(is_subclass == -1)) { + goto bad; + } else { + type = instance_class; + } + } + } + if (!instance_class) { + PyObject *args; + if (!value) + args = PyTuple_New(0); + else if (PyTuple_Check(value)) { + Py_INCREF(value); + args = value; + } else + args = PyTuple_Pack(1, value); + if (!args) + goto bad; + owned_instance = PyObject_Call(type, args, NULL); + Py_DECREF(args); + if (!owned_instance) + goto bad; + value = owned_instance; + if (!PyExceptionInstance_Check(value)) { + PyErr_Format(PyExc_TypeError, + "calling %R should have returned an instance of " + "BaseException, not %R", + type, Py_TYPE(value)); + goto bad; + } + } + } else { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto bad; + } + if (cause) { + PyObject *fixed_cause; + if (cause == Py_None) { + fixed_cause = NULL; + } else if (PyExceptionClass_Check(cause)) { + fixed_cause = PyObject_CallObject(cause, NULL); + if (fixed_cause == NULL) + goto bad; + } else if (PyExceptionInstance_Check(cause)) { + fixed_cause = cause; + Py_INCREF(fixed_cause); + } else { + 
PyErr_SetString(PyExc_TypeError, + "exception causes must derive from " + "BaseException"); + goto bad; + } + PyException_SetCause(value, fixed_cause); + } + PyErr_SetObject(type, value); + if (tb) { + #if PY_VERSION_HEX >= 0x030C00A6 + PyException_SetTraceback(value, tb); + #elif CYTHON_FAST_THREAD_STATE + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject* tmp_tb = tstate->curexc_traceback; + if (tb != tmp_tb) { + Py_INCREF(tb); + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_tb); + } +#else + PyObject *tmp_type, *tmp_value, *tmp_tb; + PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); + Py_INCREF(tb); + PyErr_Restore(tmp_type, tmp_value, tb); + Py_XDECREF(tmp_tb); +#endif + } +bad: + Py_XDECREF(owned_instance); + return; +} +#endif + +/* GetItemInt */ +static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { + PyObject *r; + if (unlikely(!j)) return NULL; + r = PyObject_GetItem(o, j); + Py_DECREF(j); + return r; +} +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, + CYTHON_NCP_UNUSED int wraparound, + CYTHON_NCP_UNUSED int boundscheck) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + Py_ssize_t wrapped_i = i; + if (wraparound & unlikely(i < 0)) { + wrapped_i += PyList_GET_SIZE(o); + } + if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) { + PyObject *r = PyList_GET_ITEM(o, wrapped_i); + Py_INCREF(r); + return r; + } + return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); +#else + return PySequence_GetItem(o, i); +#endif +} +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, + CYTHON_NCP_UNUSED int wraparound, + CYTHON_NCP_UNUSED int boundscheck) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + Py_ssize_t wrapped_i = i; + if (wraparound & unlikely(i < 0)) { + wrapped_i += PyTuple_GET_SIZE(o); + } + if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { + PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); + Py_INCREF(r); + return r; + } + return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); +#else + return PySequence_GetItem(o, i); +#endif +} +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, + CYTHON_NCP_UNUSED int wraparound, + CYTHON_NCP_UNUSED int boundscheck) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS + if (is_list || PyList_CheckExact(o)) { + Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); + if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { + PyObject *r = PyList_GET_ITEM(o, n); + Py_INCREF(r); + return r; + } + } + else if (PyTuple_CheckExact(o)) { + Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
i : i + PyTuple_GET_SIZE(o); + if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { + PyObject *r = PyTuple_GET_ITEM(o, n); + Py_INCREF(r); + return r; + } + } else { + PyMappingMethods *mm = Py_TYPE(o)->tp_as_mapping; + PySequenceMethods *sm = Py_TYPE(o)->tp_as_sequence; + if (mm && mm->mp_subscript) { + PyObject *r, *key = PyInt_FromSsize_t(i); + if (unlikely(!key)) return NULL; + r = mm->mp_subscript(o, key); + Py_DECREF(key); + return r; + } + if (likely(sm && sm->sq_item)) { + if (wraparound && unlikely(i < 0) && likely(sm->sq_length)) { + Py_ssize_t l = sm->sq_length(o); + if (likely(l >= 0)) { + i += l; + } else { + if (!PyErr_ExceptionMatches(PyExc_OverflowError)) + return NULL; + PyErr_Clear(); + } + } + return sm->sq_item(o, i); + } + } +#else + if (is_list || !PyMapping_Check(o)) { + return PySequence_GetItem(o, i); + } +#endif + return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); +} + +/* PyObjectCallOneArg */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { + PyObject *args[2] = {NULL, arg}; + return __Pyx_PyObject_FastCall(func, args+1, 1 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET); +} + +/* ObjectGetItem */ +#if CYTHON_USE_TYPE_SLOTS +static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject *index) { + PyObject *runerr = NULL; + Py_ssize_t key_value; + key_value = __Pyx_PyIndex_AsSsize_t(index); + if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { + return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); + } + if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { + __Pyx_TypeName index_type_name = __Pyx_PyType_GetName(Py_TYPE(index)); + PyErr_Clear(); + PyErr_Format(PyExc_IndexError, + "cannot fit '" __Pyx_FMT_TYPENAME "' into an index-sized integer", index_type_name); + __Pyx_DECREF_TypeName(index_type_name); + } + return NULL; +} +static PyObject *__Pyx_PyObject_GetItem_Slow(PyObject *obj, PyObject *key) { + __Pyx_TypeName obj_type_name; + if (likely(PyType_Check(obj))) { + PyObject *meth = __Pyx_PyObject_GetAttrStrNoError(obj, __pyx_n_s_class_getitem); + if (!meth) { + PyErr_Clear(); + } else { + PyObject *result = __Pyx_PyObject_CallOneArg(meth, key); + Py_DECREF(meth); + return result; + } + } + obj_type_name = __Pyx_PyType_GetName(Py_TYPE(obj)); + PyErr_Format(PyExc_TypeError, + "'" __Pyx_FMT_TYPENAME "' object is not subscriptable", obj_type_name); + __Pyx_DECREF_TypeName(obj_type_name); + return NULL; +} +static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject *key) { + PyTypeObject *tp = Py_TYPE(obj); + PyMappingMethods *mm = tp->tp_as_mapping; + PySequenceMethods *sm = tp->tp_as_sequence; + if (likely(mm && mm->mp_subscript)) { + return mm->mp_subscript(obj, key); + } + if (likely(sm && sm->sq_item)) { + return __Pyx_PyObject_GetIndex(obj, key); + } + return __Pyx_PyObject_GetItem_Slow(obj, key); +} +#endif + +/* SliceObject */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(PyObject* obj, + Py_ssize_t cstart, Py_ssize_t cstop, + PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice, + int has_cstart, int has_cstop, int wraparound) { + __Pyx_TypeName obj_type_name; +#if CYTHON_USE_TYPE_SLOTS + PyMappingMethods* mp; +#if PY_MAJOR_VERSION < 3 + PySequenceMethods* ms = Py_TYPE(obj)->tp_as_sequence; + if (likely(ms && ms->sq_slice)) { + if (!has_cstart) { + if (_py_start && (*_py_start != Py_None)) { + cstart = __Pyx_PyIndex_AsSsize_t(*_py_start); + if ((cstart == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; + } else + cstart = 0; + } + if 
(!has_cstop) { + if (_py_stop && (*_py_stop != Py_None)) { + cstop = __Pyx_PyIndex_AsSsize_t(*_py_stop); + if ((cstop == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; + } else + cstop = PY_SSIZE_T_MAX; + } + if (wraparound && unlikely((cstart < 0) | (cstop < 0)) && likely(ms->sq_length)) { + Py_ssize_t l = ms->sq_length(obj); + if (likely(l >= 0)) { + if (cstop < 0) { + cstop += l; + if (cstop < 0) cstop = 0; + } + if (cstart < 0) { + cstart += l; + if (cstart < 0) cstart = 0; + } + } else { + if (!PyErr_ExceptionMatches(PyExc_OverflowError)) + goto bad; + PyErr_Clear(); + } + } + return ms->sq_slice(obj, cstart, cstop); + } +#else + CYTHON_UNUSED_VAR(wraparound); +#endif + mp = Py_TYPE(obj)->tp_as_mapping; + if (likely(mp && mp->mp_subscript)) +#else + CYTHON_UNUSED_VAR(wraparound); +#endif + { + PyObject* result; + PyObject *py_slice, *py_start, *py_stop; + if (_py_slice) { + py_slice = *_py_slice; + } else { + PyObject* owned_start = NULL; + PyObject* owned_stop = NULL; + if (_py_start) { + py_start = *_py_start; + } else { + if (has_cstart) { + owned_start = py_start = PyInt_FromSsize_t(cstart); + if (unlikely(!py_start)) goto bad; + } else + py_start = Py_None; + } + if (_py_stop) { + py_stop = *_py_stop; + } else { + if (has_cstop) { + owned_stop = py_stop = PyInt_FromSsize_t(cstop); + if (unlikely(!py_stop)) { + Py_XDECREF(owned_start); + goto bad; + } + } else + py_stop = Py_None; + } + py_slice = PySlice_New(py_start, py_stop, Py_None); + Py_XDECREF(owned_start); + Py_XDECREF(owned_stop); + if (unlikely(!py_slice)) goto bad; + } +#if CYTHON_USE_TYPE_SLOTS + result = mp->mp_subscript(obj, py_slice); +#else + result = PyObject_GetItem(obj, py_slice); +#endif + if (!_py_slice) { + Py_DECREF(py_slice); + } + return result; + } + obj_type_name = __Pyx_PyType_GetName(Py_TYPE(obj)); + PyErr_Format(PyExc_TypeError, + "'" __Pyx_FMT_TYPENAME "' object is unsliceable", obj_type_name); + __Pyx_DECREF_TypeName(obj_type_name); +bad: + return NULL; +} + +/* PyIntBinop */ +#if !CYTHON_COMPILING_IN_PYPY +static PyObject* __Pyx_PyInt_SubtractObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check) { + CYTHON_MAYBE_UNUSED_VAR(intval); + CYTHON_MAYBE_UNUSED_VAR(inplace); + CYTHON_UNUSED_VAR(zerodivision_check); + #if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact(op1))) { + const long b = intval; + long x; + long a = PyInt_AS_LONG(op1); + + x = (long)((unsigned long)a - (unsigned long)b); + if (likely((x^a) >= 0 || (x^~b) >= 0)) + return PyInt_FromLong(x); + return PyLong_Type.tp_as_number->nb_subtract(op1, op2); + } + #endif + #if CYTHON_USE_PYLONG_INTERNALS + if (likely(PyLong_CheckExact(op1))) { + const long b = intval; + long a, x; +#ifdef HAVE_LONG_LONG + const PY_LONG_LONG llb = intval; + PY_LONG_LONG lla, llx; +#endif + if (unlikely(__Pyx_PyLong_IsZero(op1))) { + return PyLong_FromLong(-intval); + } + if (likely(__Pyx_PyLong_IsCompact(op1))) { + a = __Pyx_PyLong_CompactValue(op1); + } else { + const digit* digits = __Pyx_PyLong_Digits(op1); + const Py_ssize_t size = __Pyx_PyLong_SignedDigitCount(op1); + switch (size) { + case -2: + if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; + #ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { + lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; + #endif + } + CYTHON_FALLTHROUGH; + case 2: + if (8 * sizeof(long) 
- 1 > 2 * PyLong_SHIFT) { + a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; + #ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { + lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; + #endif + } + CYTHON_FALLTHROUGH; + case -3: + if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; + #ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { + lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; + #endif + } + CYTHON_FALLTHROUGH; + case 3: + if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; + #ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { + lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; + #endif + } + CYTHON_FALLTHROUGH; + case -4: + if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; + #ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { + lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; + #endif + } + CYTHON_FALLTHROUGH; + case 4: + if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; + #ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { + lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; + #endif + } + CYTHON_FALLTHROUGH; + default: return PyLong_Type.tp_as_number->nb_subtract(op1, op2); + } + } + x = a - b; + return PyLong_FromLong(x); +#ifdef HAVE_LONG_LONG + long_long: + llx = lla - llb; + return PyLong_FromLongLong(llx); +#endif + + + } + #endif + if (PyFloat_CheckExact(op1)) { + const long b = intval; +#if CYTHON_COMPILING_IN_LIMITED_API + double a = __pyx_PyFloat_AsDouble(op1); +#else + double a = PyFloat_AS_DOUBLE(op1); +#endif + double result; + + PyFPE_START_PROTECT("subtract", return NULL) + result = ((double)a) - (double)b; + PyFPE_END_PROTECT(result) + return PyFloat_FromDouble(result); + } + return (inplace ? 
PyNumber_InPlaceSubtract : PyNumber_Subtract)(op1, op2); +} +#endif + +/* pybytes_as_double */ +static double __Pyx_SlowPyString_AsDouble(PyObject *obj) { + PyObject *float_value; +#if PY_MAJOR_VERSION >= 3 + float_value = PyFloat_FromString(obj); +#else + float_value = PyFloat_FromString(obj, 0); +#endif + if (likely(float_value)) { +#if CYTHON_ASSUME_SAFE_MACROS + double value = PyFloat_AS_DOUBLE(float_value); +#else + double value = PyFloat_AsDouble(float_value); +#endif + Py_DECREF(float_value); + return value; + } + return (double)-1; +} +static const char* __Pyx__PyBytes_AsDouble_Copy(const char* start, char* buffer, Py_ssize_t length) { + int last_was_punctuation = 1; + Py_ssize_t i; + for (i=0; i < length; i++) { + char chr = start[i]; + int is_punctuation = (chr == '_') | (chr == '.') | (chr == 'e') | (chr == 'E'); + *buffer = chr; + buffer += (chr != '_'); + if (unlikely(last_was_punctuation & is_punctuation)) goto parse_failure; + last_was_punctuation = is_punctuation; + } + if (unlikely(last_was_punctuation)) goto parse_failure; + *buffer = '\0'; + return buffer; +parse_failure: + return NULL; +} +static double __Pyx__PyBytes_AsDouble_inf_nan(const char* start, Py_ssize_t length) { + int matches = 1; + char sign = start[0]; + int is_signed = (sign == '+') | (sign == '-'); + start += is_signed; + length -= is_signed; + switch (start[0]) { + #ifdef Py_NAN + case 'n': + case 'N': + if (unlikely(length != 3)) goto parse_failure; + matches &= (start[1] == 'a' || start[1] == 'A'); + matches &= (start[2] == 'n' || start[2] == 'N'); + if (unlikely(!matches)) goto parse_failure; + return (sign == '-') ? -Py_NAN : Py_NAN; + #endif + case 'i': + case 'I': + if (unlikely(length < 3)) goto parse_failure; + matches &= (start[1] == 'n' || start[1] == 'N'); + matches &= (start[2] == 'f' || start[2] == 'F'); + if (likely(length == 3 && matches)) + return (sign == '-') ? -Py_HUGE_VAL : Py_HUGE_VAL; + if (unlikely(length != 8)) goto parse_failure; + matches &= (start[3] == 'i' || start[3] == 'I'); + matches &= (start[4] == 'n' || start[4] == 'N'); + matches &= (start[5] == 'i' || start[5] == 'I'); + matches &= (start[6] == 't' || start[6] == 'T'); + matches &= (start[7] == 'y' || start[7] == 'Y'); + if (unlikely(!matches)) goto parse_failure; + return (sign == '-') ? 
-Py_HUGE_VAL : Py_HUGE_VAL; + case '.': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': + break; + default: + goto parse_failure; + } + return 0.0; +parse_failure: + return -1.0; +} +static CYTHON_INLINE int __Pyx__PyBytes_AsDouble_IsSpace(char ch) { + return (ch == 0x20) | !((ch < 0x9) | (ch > 0xd)); +} +CYTHON_UNUSED static double __Pyx__PyBytes_AsDouble(PyObject *obj, const char* start, Py_ssize_t length) { + double value; + Py_ssize_t i, digits; + const char *last = start + length; + char *end; + while (__Pyx__PyBytes_AsDouble_IsSpace(*start)) + start++; + while (start < last - 1 && __Pyx__PyBytes_AsDouble_IsSpace(last[-1])) + last--; + length = last - start; + if (unlikely(length <= 0)) goto fallback; + value = __Pyx__PyBytes_AsDouble_inf_nan(start, length); + if (unlikely(value == -1.0)) goto fallback; + if (value != 0.0) return value; + digits = 0; + for (i=0; i < length; digits += start[i++] != '_'); + if (likely(digits == length)) { + value = PyOS_string_to_double(start, &end, NULL); + } else if (digits < 40) { + char number[40]; + last = __Pyx__PyBytes_AsDouble_Copy(start, number, length); + if (unlikely(!last)) goto fallback; + value = PyOS_string_to_double(number, &end, NULL); + } else { + char *number = (char*) PyMem_Malloc((digits + 1) * sizeof(char)); + if (unlikely(!number)) goto fallback; + last = __Pyx__PyBytes_AsDouble_Copy(start, number, length); + if (unlikely(!last)) { + PyMem_Free(number); + goto fallback; + } + value = PyOS_string_to_double(number, &end, NULL); + PyMem_Free(number); + } + if (likely(end == last) || (value == (double)-1 && PyErr_Occurred())) { + return value; + } +fallback: + return __Pyx_SlowPyString_AsDouble(obj); +} + +/* pynumber_float */ +static CYTHON_INLINE PyObject* __Pyx__PyNumber_Float(PyObject* obj) { + double val; + if (PyLong_CheckExact(obj)) { +#if CYTHON_USE_PYLONG_INTERNALS + if (likely(__Pyx_PyLong_IsCompact(obj))) { + val = (double) __Pyx_PyLong_CompactValue(obj); + goto no_error; + } +#endif + val = PyLong_AsDouble(obj); + } else if (PyUnicode_CheckExact(obj)) { + val = __Pyx_PyUnicode_AsDouble(obj); + } else if (PyBytes_CheckExact(obj)) { + val = __Pyx_PyBytes_AsDouble(obj); + } else if (PyByteArray_CheckExact(obj)) { + val = __Pyx_PyByteArray_AsDouble(obj); + } else { + return PyNumber_Float(obj); + } + if (unlikely(val == -1 && PyErr_Occurred())) { + return NULL; + } +#if CYTHON_USE_PYLONG_INTERNALS +no_error: +#endif + return PyFloat_FromDouble(val); +} + +/* IterNext */ +static PyObject *__Pyx_PyIter_Next2Default(PyObject* defval) { + PyObject* exc_type; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + exc_type = __Pyx_PyErr_CurrentExceptionType(); + if (unlikely(exc_type)) { + if (!defval || unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) + return NULL; + __Pyx_PyErr_Clear(); + Py_INCREF(defval); + return defval; + } + if (defval) { + Py_INCREF(defval); + return defval; + } + __Pyx_PyErr_SetNone(PyExc_StopIteration); + return NULL; +} +static void __Pyx_PyIter_Next_ErrorNoIterator(PyObject *iterator) { + __Pyx_TypeName iterator_type_name = __Pyx_PyType_GetName(Py_TYPE(iterator)); + PyErr_Format(PyExc_TypeError, + __Pyx_FMT_TYPENAME " object is not an iterator", iterator_type_name); + __Pyx_DECREF_TypeName(iterator_type_name); +} +static CYTHON_INLINE PyObject *__Pyx_PyIter_Next2(PyObject* iterator, PyObject* defval) { + PyObject* next; + iternextfunc iternext = Py_TYPE(iterator)->tp_iternext; + if (likely(iternext)) { +#if 
CYTHON_USE_TYPE_SLOTS || CYTHON_COMPILING_IN_PYPY + next = iternext(iterator); + if (likely(next)) + return next; +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030d0000 + if (unlikely(iternext == &_PyObject_NextNotImplemented)) + return NULL; +#endif +#else + next = PyIter_Next(iterator); + if (likely(next)) + return next; +#endif + } else if (CYTHON_USE_TYPE_SLOTS || unlikely(!PyIter_Check(iterator))) { + __Pyx_PyIter_Next_ErrorNoIterator(iterator); + return NULL; + } +#if !CYTHON_USE_TYPE_SLOTS + else { + next = PyIter_Next(iterator); + if (likely(next)) + return next; + } +#endif + return __Pyx_PyIter_Next2Default(defval); +} + +/* GetTopmostException */ +#if CYTHON_USE_EXC_INFO_STACK && CYTHON_FAST_THREAD_STATE +static _PyErr_StackItem * +__Pyx_PyErr_GetTopmostException(PyThreadState *tstate) +{ + _PyErr_StackItem *exc_info = tstate->exc_info; + while ((exc_info->exc_value == NULL || exc_info->exc_value == Py_None) && + exc_info->previous_item != NULL) + { + exc_info = exc_info->previous_item; + } + return exc_info; +} +#endif + +/* SaveResetException */ +#if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + #if CYTHON_USE_EXC_INFO_STACK && PY_VERSION_HEX >= 0x030B00a4 + _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); + PyObject *exc_value = exc_info->exc_value; + if (exc_value == NULL || exc_value == Py_None) { + *value = NULL; + *type = NULL; + *tb = NULL; + } else { + *value = exc_value; + Py_INCREF(*value); + *type = (PyObject*) Py_TYPE(exc_value); + Py_INCREF(*type); + *tb = PyException_GetTraceback(exc_value); + } + #elif CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); + *type = exc_info->exc_type; + *value = exc_info->exc_value; + *tb = exc_info->exc_traceback; + Py_XINCREF(*type); + Py_XINCREF(*value); + Py_XINCREF(*tb); + #else + *type = tstate->exc_type; + *value = tstate->exc_value; + *tb = tstate->exc_traceback; + Py_XINCREF(*type); + Py_XINCREF(*value); + Py_XINCREF(*tb); + #endif +} +static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { + #if CYTHON_USE_EXC_INFO_STACK && PY_VERSION_HEX >= 0x030B00a4 + _PyErr_StackItem *exc_info = tstate->exc_info; + PyObject *tmp_value = exc_info->exc_value; + exc_info->exc_value = value; + Py_XDECREF(tmp_value); + Py_XDECREF(type); + Py_XDECREF(tb); + #else + PyObject *tmp_type, *tmp_value, *tmp_tb; + #if CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = tstate->exc_info; + tmp_type = exc_info->exc_type; + tmp_value = exc_info->exc_value; + tmp_tb = exc_info->exc_traceback; + exc_info->exc_type = type; + exc_info->exc_value = value; + exc_info->exc_traceback = tb; + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = type; + tstate->exc_value = value; + tstate->exc_traceback = tb; + #endif + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); + #endif +} +#endif + +/* GetException */ +#if CYTHON_FAST_THREAD_STATE +static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) +#else +static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) +#endif +{ + PyObject *local_type = NULL, *local_value, *local_tb = NULL; +#if CYTHON_FAST_THREAD_STATE + PyObject *tmp_type, *tmp_value, *tmp_tb; + #if PY_VERSION_HEX >= 0x030C00A6 + local_value = 
tstate->current_exception; + tstate->current_exception = 0; + if (likely(local_value)) { + local_type = (PyObject*) Py_TYPE(local_value); + Py_INCREF(local_type); + local_tb = PyException_GetTraceback(local_value); + } + #else + local_type = tstate->curexc_type; + local_value = tstate->curexc_value; + local_tb = tstate->curexc_traceback; + tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; + #endif +#else + PyErr_Fetch(&local_type, &local_value, &local_tb); +#endif + PyErr_NormalizeException(&local_type, &local_value, &local_tb); +#if CYTHON_FAST_THREAD_STATE && PY_VERSION_HEX >= 0x030C00A6 + if (unlikely(tstate->current_exception)) +#elif CYTHON_FAST_THREAD_STATE + if (unlikely(tstate->curexc_type)) +#else + if (unlikely(PyErr_Occurred())) +#endif + goto bad; + #if PY_MAJOR_VERSION >= 3 + if (local_tb) { + if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) + goto bad; + } + #endif + Py_XINCREF(local_tb); + Py_XINCREF(local_type); + Py_XINCREF(local_value); + *type = local_type; + *value = local_value; + *tb = local_tb; +#if CYTHON_FAST_THREAD_STATE + #if CYTHON_USE_EXC_INFO_STACK + { + _PyErr_StackItem *exc_info = tstate->exc_info; + #if PY_VERSION_HEX >= 0x030B00a4 + tmp_value = exc_info->exc_value; + exc_info->exc_value = local_value; + tmp_type = NULL; + tmp_tb = NULL; + Py_XDECREF(local_type); + Py_XDECREF(local_tb); + #else + tmp_type = exc_info->exc_type; + tmp_value = exc_info->exc_value; + tmp_tb = exc_info->exc_traceback; + exc_info->exc_type = local_type; + exc_info->exc_value = local_value; + exc_info->exc_traceback = local_tb; + #endif + } + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = local_type; + tstate->exc_value = local_value; + tstate->exc_traceback = local_tb; + #endif + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +#else + PyErr_SetExcInfo(local_type, local_value, local_tb); +#endif + return 0; +bad: + *type = 0; + *value = 0; + *tb = 0; + Py_XDECREF(local_type); + Py_XDECREF(local_value); + Py_XDECREF(local_tb); + return -1; +} + +/* PyObjectGetMethod */ +static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method) { + PyObject *attr; +#if CYTHON_UNPACK_METHODS && CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_PYTYPE_LOOKUP + __Pyx_TypeName type_name; + PyTypeObject *tp = Py_TYPE(obj); + PyObject *descr; + descrgetfunc f = NULL; + PyObject **dictptr, *dict; + int meth_found = 0; + assert (*method == NULL); + if (unlikely(tp->tp_getattro != PyObject_GenericGetAttr)) { + attr = __Pyx_PyObject_GetAttrStr(obj, name); + goto try_unpack; + } + if (unlikely(tp->tp_dict == NULL) && unlikely(PyType_Ready(tp) < 0)) { + return 0; + } + descr = _PyType_Lookup(tp, name); + if (likely(descr != NULL)) { + Py_INCREF(descr); +#if defined(Py_TPFLAGS_METHOD_DESCRIPTOR) && Py_TPFLAGS_METHOD_DESCRIPTOR + if (__Pyx_PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_METHOD_DESCRIPTOR)) +#elif PY_MAJOR_VERSION >= 3 + #ifdef __Pyx_CyFunction_USED + if (likely(PyFunction_Check(descr) || __Pyx_IS_TYPE(descr, &PyMethodDescr_Type) || __Pyx_CyFunction_Check(descr))) + #else + if (likely(PyFunction_Check(descr) || __Pyx_IS_TYPE(descr, &PyMethodDescr_Type))) + #endif +#else + #ifdef __Pyx_CyFunction_USED + if (likely(PyFunction_Check(descr) || __Pyx_CyFunction_Check(descr))) + #else + if (likely(PyFunction_Check(descr))) + #endif +#endif + { + meth_found = 1; + } else { + f = Py_TYPE(descr)->tp_descr_get; + if (f != NULL && 
PyDescr_IsData(descr)) { + attr = f(descr, obj, (PyObject *)Py_TYPE(obj)); + Py_DECREF(descr); + goto try_unpack; + } + } + } + dictptr = _PyObject_GetDictPtr(obj); + if (dictptr != NULL && (dict = *dictptr) != NULL) { + Py_INCREF(dict); + attr = __Pyx_PyDict_GetItemStr(dict, name); + if (attr != NULL) { + Py_INCREF(attr); + Py_DECREF(dict); + Py_XDECREF(descr); + goto try_unpack; + } + Py_DECREF(dict); + } + if (meth_found) { + *method = descr; + return 1; + } + if (f != NULL) { + attr = f(descr, obj, (PyObject *)Py_TYPE(obj)); + Py_DECREF(descr); + goto try_unpack; + } + if (likely(descr != NULL)) { + *method = descr; + return 0; + } + type_name = __Pyx_PyType_GetName(tp); + PyErr_Format(PyExc_AttributeError, +#if PY_MAJOR_VERSION >= 3 + "'" __Pyx_FMT_TYPENAME "' object has no attribute '%U'", + type_name, name); +#else + "'" __Pyx_FMT_TYPENAME "' object has no attribute '%.400s'", + type_name, PyString_AS_STRING(name)); +#endif + __Pyx_DECREF_TypeName(type_name); + return 0; +#else + attr = __Pyx_PyObject_GetAttrStr(obj, name); + goto try_unpack; +#endif +try_unpack: +#if CYTHON_UNPACK_METHODS + if (likely(attr) && PyMethod_Check(attr) && likely(PyMethod_GET_SELF(attr) == obj)) { + PyObject *function = PyMethod_GET_FUNCTION(attr); + Py_INCREF(function); + Py_DECREF(attr); + *method = function; + return 1; + } +#endif + *method = attr; + return 0; +} + +/* PyObjectCallMethod0 */ +static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name) { + PyObject *method = NULL, *result = NULL; + int is_method = __Pyx_PyObject_GetMethod(obj, method_name, &method); + if (likely(is_method)) { + result = __Pyx_PyObject_CallOneArg(method, obj); + Py_DECREF(method); + return result; + } + if (unlikely(!method)) goto bad; + result = __Pyx_PyObject_CallNoArg(method); + Py_DECREF(method); +bad: + return result; +} + +/* UnpackUnboundCMethod */ +static PyObject *__Pyx_SelflessCall(PyObject *method, PyObject *args, PyObject *kwargs) { + PyObject *result; + PyObject *selfless_args = PyTuple_GetSlice(args, 1, PyTuple_Size(args)); + if (unlikely(!selfless_args)) return NULL; + result = PyObject_Call(method, selfless_args, kwargs); + Py_DECREF(selfless_args); + return result; +} +static PyMethodDef __Pyx_UnboundCMethod_Def = { + "CythonUnboundCMethod", + __PYX_REINTERPRET_FUNCION(PyCFunction, __Pyx_SelflessCall), + METH_VARARGS | METH_KEYWORDS, + NULL +}; +static int __Pyx_TryUnpackUnboundCMethod(__Pyx_CachedCFunction* target) { + PyObject *method; + method = __Pyx_PyObject_GetAttrStr(target->type, *target->method_name); + if (unlikely(!method)) + return -1; + target->method = method; +#if CYTHON_COMPILING_IN_CPYTHON + #if PY_MAJOR_VERSION >= 3 + if (likely(__Pyx_TypeCheck(method, &PyMethodDescr_Type))) + #else + if (likely(!__Pyx_CyOrPyCFunction_Check(method))) + #endif + { + PyMethodDescrObject *descr = (PyMethodDescrObject*) method; + target->func = descr->d_method->ml_meth; + target->flag = descr->d_method->ml_flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_STACKLESS); + } else +#endif +#if CYTHON_COMPILING_IN_PYPY +#else + if (PyCFunction_Check(method)) +#endif + { + PyObject *self; + int self_found; +#if CYTHON_COMPILING_IN_LIMITED_API || CYTHON_COMPILING_IN_PYPY + self = PyObject_GetAttrString(method, "__self__"); + if (!self) { + PyErr_Clear(); + } +#else + self = PyCFunction_GET_SELF(method); +#endif + self_found = (self && self != Py_None); +#if CYTHON_COMPILING_IN_LIMITED_API || CYTHON_COMPILING_IN_PYPY + Py_XDECREF(self); +#endif + if (self_found) { + PyObject 
*unbound_method = PyCFunction_New(&__Pyx_UnboundCMethod_Def, method); + if (unlikely(!unbound_method)) return -1; + Py_DECREF(method); + target->method = unbound_method; + } + } + return 0; +} + +/* CallUnboundCMethod0 */ +static PyObject* __Pyx__CallUnboundCMethod0(__Pyx_CachedCFunction* cfunc, PyObject* self) { + PyObject *args, *result = NULL; + if (unlikely(!cfunc->method) && unlikely(__Pyx_TryUnpackUnboundCMethod(cfunc) < 0)) return NULL; +#if CYTHON_ASSUME_SAFE_MACROS + args = PyTuple_New(1); + if (unlikely(!args)) goto bad; + Py_INCREF(self); + PyTuple_SET_ITEM(args, 0, self); +#else + args = PyTuple_Pack(1, self); + if (unlikely(!args)) goto bad; +#endif + result = __Pyx_PyObject_Call(cfunc->method, args, NULL); + Py_DECREF(args); +bad: + return result; +} + +/* pop */ +static CYTHON_INLINE PyObject* __Pyx__PyObject_Pop(PyObject* L) { + if (__Pyx_IS_TYPE(L, &PySet_Type)) { + return PySet_Pop(L); + } + return __Pyx_PyObject_CallMethod0(L, __pyx_n_s_pop); +} +#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS +static CYTHON_INLINE PyObject* __Pyx_PyList_Pop(PyObject* L) { + if (likely(PyList_GET_SIZE(L) > (((PyListObject*)L)->allocated >> 1))) { + __Pyx_SET_SIZE(L, Py_SIZE(L) - 1); + return PyList_GET_ITEM(L, PyList_GET_SIZE(L)); + } + return __Pyx_CallUnboundCMethod0(&__pyx_umethod_PyList_Type_pop, L); +} +#endif + +/* PyObjectCall2Args */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { + PyObject *args[3] = {NULL, arg1, arg2}; + return __Pyx_PyObject_FastCall(function, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET); +} + +/* PyObjectCallMethod1 */ +#if !(CYTHON_VECTORCALL && __PYX_LIMITED_VERSION_HEX >= 0x030C00A2) +static PyObject* __Pyx__PyObject_CallMethod1(PyObject* method, PyObject* arg) { + PyObject *result = __Pyx_PyObject_CallOneArg(method, arg); + Py_DECREF(method); + return result; +} +#endif +static PyObject* __Pyx_PyObject_CallMethod1(PyObject* obj, PyObject* method_name, PyObject* arg) { +#if CYTHON_VECTORCALL && __PYX_LIMITED_VERSION_HEX >= 0x030C00A2 + PyObject *args[2] = {obj, arg}; + (void) __Pyx_PyObject_GetMethod; + (void) __Pyx_PyObject_CallOneArg; + (void) __Pyx_PyObject_Call2Args; + return PyObject_VectorcallMethod(method_name, args, 2 | PY_VECTORCALL_ARGUMENTS_OFFSET, NULL); +#else + PyObject *method = NULL, *result; + int is_method = __Pyx_PyObject_GetMethod(obj, method_name, &method); + if (likely(is_method)) { + result = __Pyx_PyObject_Call2Args(method, obj, arg); + Py_DECREF(method); + return result; + } + if (unlikely(!method)) return NULL; + return __Pyx__PyObject_CallMethod1(method, arg); +#endif +} + +/* append */ +static CYTHON_INLINE int __Pyx_PyObject_Append(PyObject* L, PyObject* x) { + if (likely(PyList_CheckExact(L))) { + if (unlikely(__Pyx_PyList_Append(L, x) < 0)) return -1; + } else { + PyObject* retval = __Pyx_PyObject_CallMethod1(L, __pyx_n_s_append, x); + if (unlikely(!retval)) + return -1; + Py_DECREF(retval); + } + return 0; +} + +/* FastTypeChecks */ +#if CYTHON_COMPILING_IN_CPYTHON +static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { + while (a) { + a = __Pyx_PyType_GetSlot(a, tp_base, PyTypeObject*); + if (a == b) + return 1; + } + return b == &PyBaseObject_Type; +} +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { + PyObject *mro; + if (a == b) return 1; + mro = a->tp_mro; + if (likely(mro)) { + Py_ssize_t i, n; + n = PyTuple_GET_SIZE(mro); + for (i = 0; i < n; i++) { + if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) + 
return 1; + } + return 0; + } + return __Pyx_InBases(a, b); +} +static CYTHON_INLINE int __Pyx_IsAnySubtype2(PyTypeObject *cls, PyTypeObject *a, PyTypeObject *b) { + PyObject *mro; + if (cls == a || cls == b) return 1; + mro = cls->tp_mro; + if (likely(mro)) { + Py_ssize_t i, n; + n = PyTuple_GET_SIZE(mro); + for (i = 0; i < n; i++) { + PyObject *base = PyTuple_GET_ITEM(mro, i); + if (base == (PyObject *)a || base == (PyObject *)b) + return 1; + } + return 0; + } + return __Pyx_InBases(cls, a) || __Pyx_InBases(cls, b); +} +#if PY_MAJOR_VERSION == 2 +static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { + PyObject *exception, *value, *tb; + int res; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&exception, &value, &tb); + res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + if (!res) { + res = PyObject_IsSubclass(err, exc_type2); + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + } + __Pyx_ErrRestore(exception, value, tb); + return res; +} +#else +static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { + if (exc_type1) { + return __Pyx_IsAnySubtype2((PyTypeObject*)err, (PyTypeObject*)exc_type1, (PyTypeObject*)exc_type2); + } else { + return __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); + } +} +#endif +static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { + Py_ssize_t i, n; + assert(PyExceptionClass_Check(exc_type)); + n = PyTuple_GET_SIZE(tuple); +#if PY_MAJOR_VERSION >= 3 + for (i=0; i= 0x030B00a4 + _PyErr_StackItem *exc_info = tstate->exc_info; + tmp_value = exc_info->exc_value; + exc_info->exc_value = *value; + if (tmp_value == NULL || tmp_value == Py_None) { + Py_XDECREF(tmp_value); + tmp_value = NULL; + tmp_type = NULL; + tmp_tb = NULL; + } else { + tmp_type = (PyObject*) Py_TYPE(tmp_value); + Py_INCREF(tmp_type); + #if CYTHON_COMPILING_IN_CPYTHON + tmp_tb = ((PyBaseExceptionObject*) tmp_value)->traceback; + Py_XINCREF(tmp_tb); + #else + tmp_tb = PyException_GetTraceback(tmp_value); + #endif + } + #elif CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = tstate->exc_info; + tmp_type = exc_info->exc_type; + tmp_value = exc_info->exc_value; + tmp_tb = exc_info->exc_traceback; + exc_info->exc_type = *type; + exc_info->exc_value = *value; + exc_info->exc_traceback = *tb; + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = *type; + tstate->exc_value = *value; + tstate->exc_traceback = *tb; + #endif + *type = tmp_type; + *value = tmp_value; + *tb = tmp_tb; +} +#else +static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); + PyErr_SetExcInfo(*type, *value, *tb); + *type = tmp_type; + *value = tmp_value; + *tb = tmp_tb; +} +#endif + +/* GetAttr */ +static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { +#if CYTHON_USE_TYPE_SLOTS +#if PY_MAJOR_VERSION >= 3 + if (likely(PyUnicode_Check(n))) +#else + if (likely(PyString_Check(n))) +#endif + return __Pyx_PyObject_GetAttrStr(o, n); +#endif + return PyObject_GetAttr(o, n); +} + +/* HasAttr */ +static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) { + PyObject *r; + if 
(unlikely(!__Pyx_PyBaseString_Check(n))) { + PyErr_SetString(PyExc_TypeError, + "hasattr(): attribute name must be string"); + return -1; + } + r = __Pyx_GetAttr(o, n); + if (!r) { + PyErr_Clear(); + return 0; + } else { + Py_DECREF(r); + return 1; + } +} + +/* GetAttr3 */ +#if __PYX_LIMITED_VERSION_HEX < 0x030d00A1 +static PyObject *__Pyx_GetAttr3Default(PyObject *d) { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) + return NULL; + __Pyx_PyErr_Clear(); + Py_INCREF(d); + return d; +} +#endif +static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) { + PyObject *r; +#if __PYX_LIMITED_VERSION_HEX >= 0x030d00A1 + int res = PyObject_GetOptionalAttr(o, n, &r); + return (res != 0) ? r : __Pyx_NewRef(d); +#else + #if CYTHON_USE_TYPE_SLOTS + if (likely(PyString_Check(n))) { + r = __Pyx_PyObject_GetAttrStrNoError(o, n); + if (unlikely(!r) && likely(!PyErr_Occurred())) { + r = __Pyx_NewRef(d); + } + return r; + } + #endif + r = PyObject_GetAttr(o, n); + return (likely(r)) ? r : __Pyx_GetAttr3Default(d); +#endif +} + +/* Import */ +static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { + PyObject *module = 0; + PyObject *empty_dict = 0; + PyObject *empty_list = 0; + #if PY_MAJOR_VERSION < 3 + PyObject *py_import; + py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); + if (unlikely(!py_import)) + goto bad; + if (!from_list) { + empty_list = PyList_New(0); + if (unlikely(!empty_list)) + goto bad; + from_list = empty_list; + } + #endif + empty_dict = PyDict_New(); + if (unlikely(!empty_dict)) + goto bad; + { + #if PY_MAJOR_VERSION >= 3 + if (level == -1) { + if (strchr(__Pyx_MODULE_NAME, '.') != NULL) { + module = PyImport_ImportModuleLevelObject( + name, __pyx_d, empty_dict, from_list, 1); + if (unlikely(!module)) { + if (unlikely(!PyErr_ExceptionMatches(PyExc_ImportError))) + goto bad; + PyErr_Clear(); + } + } + level = 0; + } + #endif + if (!module) { + #if PY_MAJOR_VERSION < 3 + PyObject *py_level = PyInt_FromLong(level); + if (unlikely(!py_level)) + goto bad; + module = PyObject_CallFunctionObjArgs(py_import, + name, __pyx_d, empty_dict, from_list, py_level, (PyObject *)NULL); + Py_DECREF(py_level); + #else + module = PyImport_ImportModuleLevelObject( + name, __pyx_d, empty_dict, from_list, level); + #endif + } + } +bad: + Py_XDECREF(empty_dict); + Py_XDECREF(empty_list); + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(py_import); + #endif + return module; +} + +/* ImportFrom */ +static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { + PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); + if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { + const char* module_name_str = 0; + PyObject* module_name = 0; + PyObject* module_dot = 0; + PyObject* full_name = 0; + PyErr_Clear(); + module_name_str = PyModule_GetName(module); + if (unlikely(!module_name_str)) { goto modbad; } + module_name = PyUnicode_FromString(module_name_str); + if (unlikely(!module_name)) { goto modbad; } + module_dot = PyUnicode_Concat(module_name, __pyx_kp_u__8); + if (unlikely(!module_dot)) { goto modbad; } + full_name = PyUnicode_Concat(module_dot, name); + if (unlikely(!full_name)) { goto modbad; } + #if PY_VERSION_HEX < 0x030700A1 || (CYTHON_COMPILING_IN_PYPY && PYPY_VERSION_NUM < 0x07030400) + { + PyObject *modules = PyImport_GetModuleDict(); + if (unlikely(!modules)) + goto modbad; + value = PyObject_GetItem(modules, full_name); + } + #else + value 
= PyImport_GetModule(full_name); + #endif + modbad: + Py_XDECREF(full_name); + Py_XDECREF(module_dot); + Py_XDECREF(module_name); + } + if (unlikely(!value)) { + PyErr_Format(PyExc_ImportError, + #if PY_MAJOR_VERSION < 3 + "cannot import name %.230s", PyString_AS_STRING(name)); + #else + "cannot import name %S", name); + #endif + } + return value; +} + +/* ImportDottedModule */ +#if PY_MAJOR_VERSION >= 3 +static PyObject *__Pyx__ImportDottedModule_Error(PyObject *name, PyObject *parts_tuple, Py_ssize_t count) { + PyObject *partial_name = NULL, *slice = NULL, *sep = NULL; + if (unlikely(PyErr_Occurred())) { + PyErr_Clear(); + } + if (likely(PyTuple_GET_SIZE(parts_tuple) == count)) { + partial_name = name; + } else { + slice = PySequence_GetSlice(parts_tuple, 0, count); + if (unlikely(!slice)) + goto bad; + sep = PyUnicode_FromStringAndSize(".", 1); + if (unlikely(!sep)) + goto bad; + partial_name = PyUnicode_Join(sep, slice); + } + PyErr_Format( +#if PY_MAJOR_VERSION < 3 + PyExc_ImportError, + "No module named '%s'", PyString_AS_STRING(partial_name)); +#else +#if PY_VERSION_HEX >= 0x030600B1 + PyExc_ModuleNotFoundError, +#else + PyExc_ImportError, +#endif + "No module named '%U'", partial_name); +#endif +bad: + Py_XDECREF(sep); + Py_XDECREF(slice); + Py_XDECREF(partial_name); + return NULL; +} +#endif +#if PY_MAJOR_VERSION >= 3 +static PyObject *__Pyx__ImportDottedModule_Lookup(PyObject *name) { + PyObject *imported_module; +#if PY_VERSION_HEX < 0x030700A1 || (CYTHON_COMPILING_IN_PYPY && PYPY_VERSION_NUM < 0x07030400) + PyObject *modules = PyImport_GetModuleDict(); + if (unlikely(!modules)) + return NULL; + imported_module = __Pyx_PyDict_GetItemStr(modules, name); + Py_XINCREF(imported_module); +#else + imported_module = PyImport_GetModule(name); +#endif + return imported_module; +} +#endif +#if PY_MAJOR_VERSION >= 3 +static PyObject *__Pyx_ImportDottedModule_WalkParts(PyObject *module, PyObject *name, PyObject *parts_tuple) { + Py_ssize_t i, nparts; + nparts = PyTuple_GET_SIZE(parts_tuple); + for (i=1; i < nparts && module; i++) { + PyObject *part, *submodule; +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + part = PyTuple_GET_ITEM(parts_tuple, i); +#else + part = PySequence_ITEM(parts_tuple, i); +#endif + submodule = __Pyx_PyObject_GetAttrStrNoError(module, part); +#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) + Py_DECREF(part); +#endif + Py_DECREF(module); + module = submodule; + } + if (unlikely(!module)) { + return __Pyx__ImportDottedModule_Error(name, parts_tuple, i); + } + return module; +} +#endif +static PyObject *__Pyx__ImportDottedModule(PyObject *name, PyObject *parts_tuple) { +#if PY_MAJOR_VERSION < 3 + PyObject *module, *from_list, *star = __pyx_n_s__13; + CYTHON_UNUSED_VAR(parts_tuple); + from_list = PyList_New(1); + if (unlikely(!from_list)) + return NULL; + Py_INCREF(star); + PyList_SET_ITEM(from_list, 0, star); + module = __Pyx_Import(name, from_list, 0); + Py_DECREF(from_list); + return module; +#else + PyObject *imported_module; + PyObject *module = __Pyx_Import(name, NULL, 0); + if (!parts_tuple || unlikely(!module)) + return module; + imported_module = __Pyx__ImportDottedModule_Lookup(name); + if (likely(imported_module)) { + Py_DECREF(module); + return imported_module; + } + PyErr_Clear(); + return __Pyx_ImportDottedModule_WalkParts(module, name, parts_tuple); +#endif +} +static PyObject *__Pyx_ImportDottedModule(PyObject *name, PyObject *parts_tuple) { +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030400B1 + PyObject *module 
= __Pyx__ImportDottedModule_Lookup(name); + if (likely(module)) { + PyObject *spec = __Pyx_PyObject_GetAttrStrNoError(module, __pyx_n_s_spec); + if (likely(spec)) { + PyObject *unsafe = __Pyx_PyObject_GetAttrStrNoError(spec, __pyx_n_s_initializing); + if (likely(!unsafe || !__Pyx_PyObject_IsTrue(unsafe))) { + Py_DECREF(spec); + spec = NULL; + } + Py_XDECREF(unsafe); + } + if (likely(!spec)) { + PyErr_Clear(); + return module; + } + Py_DECREF(spec); + Py_DECREF(module); + } else if (PyErr_Occurred()) { + PyErr_Clear(); + } +#endif + return __Pyx__ImportDottedModule(name, parts_tuple); +} + +/* Py3UpdateBases */ +static PyObject* +__Pyx_PEP560_update_bases(PyObject *bases) +{ + Py_ssize_t i, j, size_bases; + PyObject *base, *meth, *new_base, *result, *new_bases = NULL; + size_bases = PyTuple_GET_SIZE(bases); + for (i = 0; i < size_bases; i++) { + base = PyTuple_GET_ITEM(bases, i); + if (PyType_Check(base)) { + if (new_bases) { + if (PyList_Append(new_bases, base) < 0) { + goto error; + } + } + continue; + } + meth = __Pyx_PyObject_GetAttrStrNoError(base, __pyx_n_s_mro_entries); + if (!meth && PyErr_Occurred()) { + goto error; + } + if (!meth) { + if (new_bases) { + if (PyList_Append(new_bases, base) < 0) { + goto error; + } + } + continue; + } + new_base = __Pyx_PyObject_CallOneArg(meth, bases); + Py_DECREF(meth); + if (!new_base) { + goto error; + } + if (!PyTuple_Check(new_base)) { + PyErr_SetString(PyExc_TypeError, + "__mro_entries__ must return a tuple"); + Py_DECREF(new_base); + goto error; + } + if (!new_bases) { + if (!(new_bases = PyList_New(i))) { + goto error; + } + for (j = 0; j < i; j++) { + base = PyTuple_GET_ITEM(bases, j); + PyList_SET_ITEM(new_bases, j, base); + Py_INCREF(base); + } + } + j = PyList_GET_SIZE(new_bases); + if (PyList_SetSlice(new_bases, j, j, new_base) < 0) { + goto error; + } + Py_DECREF(new_base); + } + if (!new_bases) { + Py_INCREF(bases); + return bases; + } + result = PyList_AsTuple(new_bases); + Py_DECREF(new_bases); + return result; +error: + Py_XDECREF(new_bases); + return NULL; +} + +/* CalculateMetaclass */ +static PyObject *__Pyx_CalculateMetaclass(PyTypeObject *metaclass, PyObject *bases) { + Py_ssize_t i, nbases; +#if CYTHON_ASSUME_SAFE_MACROS + nbases = PyTuple_GET_SIZE(bases); +#else + nbases = PyTuple_Size(bases); + if (nbases < 0) return NULL; +#endif + for (i=0; i < nbases; i++) { + PyTypeObject *tmptype; +#if CYTHON_ASSUME_SAFE_MACROS + PyObject *tmp = PyTuple_GET_ITEM(bases, i); +#else + PyObject *tmp = PyTuple_GetItem(bases, i); + if (!tmp) return NULL; +#endif + tmptype = Py_TYPE(tmp); +#if PY_MAJOR_VERSION < 3 + if (tmptype == &PyClass_Type) + continue; +#endif + if (!metaclass) { + metaclass = tmptype; + continue; + } + if (PyType_IsSubtype(metaclass, tmptype)) + continue; + if (PyType_IsSubtype(tmptype, metaclass)) { + metaclass = tmptype; + continue; + } + PyErr_SetString(PyExc_TypeError, + "metaclass conflict: " + "the metaclass of a derived class " + "must be a (non-strict) subclass " + "of the metaclasses of all its bases"); + return NULL; + } + if (!metaclass) { +#if PY_MAJOR_VERSION < 3 + metaclass = &PyClass_Type; +#else + metaclass = &PyType_Type; +#endif + } + Py_INCREF((PyObject*) metaclass); + return (PyObject*) metaclass; +} + +/* FixUpExtensionType */ +#if CYTHON_USE_TYPE_SPECS +static int __Pyx_fix_up_extension_type_from_spec(PyType_Spec *spec, PyTypeObject *type) { +#if PY_VERSION_HEX > 0x030900B1 || CYTHON_COMPILING_IN_LIMITED_API + CYTHON_UNUSED_VAR(spec); + CYTHON_UNUSED_VAR(type); +#else + const PyType_Slot *slot = 
spec->slots; + while (slot && slot->slot && slot->slot != Py_tp_members) + slot++; + if (slot && slot->slot == Py_tp_members) { + int changed = 0; +#if !(PY_VERSION_HEX <= 0x030900b1 && CYTHON_COMPILING_IN_CPYTHON) + const +#endif + PyMemberDef *memb = (PyMemberDef*) slot->pfunc; + while (memb && memb->name) { + if (memb->name[0] == '_' && memb->name[1] == '_') { +#if PY_VERSION_HEX < 0x030900b1 + if (strcmp(memb->name, "__weaklistoffset__") == 0) { + assert(memb->type == T_PYSSIZET); + assert(memb->flags == READONLY); + type->tp_weaklistoffset = memb->offset; + changed = 1; + } + else if (strcmp(memb->name, "__dictoffset__") == 0) { + assert(memb->type == T_PYSSIZET); + assert(memb->flags == READONLY); + type->tp_dictoffset = memb->offset; + changed = 1; + } +#if CYTHON_METH_FASTCALL + else if (strcmp(memb->name, "__vectorcalloffset__") == 0) { + assert(memb->type == T_PYSSIZET); + assert(memb->flags == READONLY); +#if PY_VERSION_HEX >= 0x030800b4 + type->tp_vectorcall_offset = memb->offset; +#else + type->tp_print = (printfunc) memb->offset; +#endif + changed = 1; + } +#endif +#else + if ((0)); +#endif +#if PY_VERSION_HEX <= 0x030900b1 && CYTHON_COMPILING_IN_CPYTHON + else if (strcmp(memb->name, "__module__") == 0) { + PyObject *descr; + assert(memb->type == T_OBJECT); + assert(memb->flags == 0 || memb->flags == READONLY); + descr = PyDescr_NewMember(type, memb); + if (unlikely(!descr)) + return -1; + if (unlikely(PyDict_SetItem(type->tp_dict, PyDescr_NAME(descr), descr) < 0)) { + Py_DECREF(descr); + return -1; + } + Py_DECREF(descr); + changed = 1; + } +#endif + } + memb++; + } + if (changed) + PyType_Modified(type); + } +#endif + return 0; +} +#endif + +/* FetchSharedCythonModule */ +static PyObject *__Pyx_FetchSharedCythonABIModule(void) { + return __Pyx_PyImport_AddModuleRef((char*) __PYX_ABI_MODULE_NAME); +} + +/* FetchCommonType */ +static int __Pyx_VerifyCachedType(PyObject *cached_type, + const char *name, + Py_ssize_t basicsize, + Py_ssize_t expected_basicsize) { + if (!PyType_Check(cached_type)) { + PyErr_Format(PyExc_TypeError, + "Shared Cython type %.200s is not a type object", name); + return -1; + } + if (basicsize != expected_basicsize) { + PyErr_Format(PyExc_TypeError, + "Shared Cython type %.200s has the wrong size, try recompiling", + name); + return -1; + } + return 0; +} +#if !CYTHON_USE_TYPE_SPECS +static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type) { + PyObject* abi_module; + const char* object_name; + PyTypeObject *cached_type = NULL; + abi_module = __Pyx_FetchSharedCythonABIModule(); + if (!abi_module) return NULL; + object_name = strrchr(type->tp_name, '.'); + object_name = object_name ? 
object_name+1 : type->tp_name; + cached_type = (PyTypeObject*) PyObject_GetAttrString(abi_module, object_name); + if (cached_type) { + if (__Pyx_VerifyCachedType( + (PyObject *)cached_type, + object_name, + cached_type->tp_basicsize, + type->tp_basicsize) < 0) { + goto bad; + } + goto done; + } + if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad; + PyErr_Clear(); + if (PyType_Ready(type) < 0) goto bad; + if (PyObject_SetAttrString(abi_module, object_name, (PyObject *)type) < 0) + goto bad; + Py_INCREF(type); + cached_type = type; +done: + Py_DECREF(abi_module); + return cached_type; +bad: + Py_XDECREF(cached_type); + cached_type = NULL; + goto done; +} +#else +static PyTypeObject *__Pyx_FetchCommonTypeFromSpec(PyObject *module, PyType_Spec *spec, PyObject *bases) { + PyObject *abi_module, *cached_type = NULL; + const char* object_name = strrchr(spec->name, '.'); + object_name = object_name ? object_name+1 : spec->name; + abi_module = __Pyx_FetchSharedCythonABIModule(); + if (!abi_module) return NULL; + cached_type = PyObject_GetAttrString(abi_module, object_name); + if (cached_type) { + Py_ssize_t basicsize; +#if CYTHON_COMPILING_IN_LIMITED_API + PyObject *py_basicsize; + py_basicsize = PyObject_GetAttrString(cached_type, "__basicsize__"); + if (unlikely(!py_basicsize)) goto bad; + basicsize = PyLong_AsSsize_t(py_basicsize); + Py_DECREF(py_basicsize); + py_basicsize = 0; + if (unlikely(basicsize == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; +#else + basicsize = likely(PyType_Check(cached_type)) ? ((PyTypeObject*) cached_type)->tp_basicsize : -1; +#endif + if (__Pyx_VerifyCachedType( + cached_type, + object_name, + basicsize, + spec->basicsize) < 0) { + goto bad; + } + goto done; + } + if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad; + PyErr_Clear(); + CYTHON_UNUSED_VAR(module); + cached_type = __Pyx_PyType_FromModuleAndSpec(abi_module, spec, bases); + if (unlikely(!cached_type)) goto bad; + if (unlikely(__Pyx_fix_up_extension_type_from_spec(spec, (PyTypeObject *) cached_type) < 0)) goto bad; + if (PyObject_SetAttrString(abi_module, object_name, cached_type) < 0) goto bad; +done: + Py_DECREF(abi_module); + assert(cached_type == NULL || PyType_Check(cached_type)); + return (PyTypeObject *) cached_type; +bad: + Py_XDECREF(cached_type); + cached_type = NULL; + goto done; +} +#endif + +/* PyVectorcallFastCallDict */ +#if CYTHON_METH_FASTCALL +static PyObject *__Pyx_PyVectorcall_FastCallDict_kw(PyObject *func, __pyx_vectorcallfunc vc, PyObject *const *args, size_t nargs, PyObject *kw) +{ + PyObject *res = NULL; + PyObject *kwnames; + PyObject **newargs; + PyObject **kwvalues; + Py_ssize_t i, pos; + size_t j; + PyObject *key, *value; + unsigned long keys_are_strings; + Py_ssize_t nkw = PyDict_GET_SIZE(kw); + newargs = (PyObject **)PyMem_Malloc((nargs + (size_t)nkw) * sizeof(args[0])); + if (unlikely(newargs == NULL)) { + PyErr_NoMemory(); + return NULL; + } + for (j = 0; j < nargs; j++) newargs[j] = args[j]; + kwnames = PyTuple_New(nkw); + if (unlikely(kwnames == NULL)) { + PyMem_Free(newargs); + return NULL; + } + kwvalues = newargs + nargs; + pos = i = 0; + keys_are_strings = Py_TPFLAGS_UNICODE_SUBCLASS; + while (PyDict_Next(kw, &pos, &key, &value)) { + keys_are_strings &= Py_TYPE(key)->tp_flags; + Py_INCREF(key); + Py_INCREF(value); + PyTuple_SET_ITEM(kwnames, i, key); + kwvalues[i] = value; + i++; + } + if (unlikely(!keys_are_strings)) { + PyErr_SetString(PyExc_TypeError, "keywords must be strings"); + goto cleanup; + } + res = vc(func, newargs, nargs, kwnames); 
+cleanup: + Py_DECREF(kwnames); + for (i = 0; i < nkw; i++) + Py_DECREF(kwvalues[i]); + PyMem_Free(newargs); + return res; +} +static CYTHON_INLINE PyObject *__Pyx_PyVectorcall_FastCallDict(PyObject *func, __pyx_vectorcallfunc vc, PyObject *const *args, size_t nargs, PyObject *kw) +{ + if (likely(kw == NULL) || PyDict_GET_SIZE(kw) == 0) { + return vc(func, args, nargs, NULL); + } + return __Pyx_PyVectorcall_FastCallDict_kw(func, vc, args, nargs, kw); +} +#endif + +/* CythonFunctionShared */ +#if CYTHON_COMPILING_IN_LIMITED_API +static CYTHON_INLINE int __Pyx__IsSameCyOrCFunction(PyObject *func, void *cfunc) { + if (__Pyx_CyFunction_Check(func)) { + return PyCFunction_GetFunction(((__pyx_CyFunctionObject*)func)->func) == (PyCFunction) cfunc; + } else if (PyCFunction_Check(func)) { + return PyCFunction_GetFunction(func) == (PyCFunction) cfunc; + } + return 0; +} +#else +static CYTHON_INLINE int __Pyx__IsSameCyOrCFunction(PyObject *func, void *cfunc) { + return __Pyx_CyOrPyCFunction_Check(func) && __Pyx_CyOrPyCFunction_GET_FUNCTION(func) == (PyCFunction) cfunc; +} +#endif +static CYTHON_INLINE void __Pyx__CyFunction_SetClassObj(__pyx_CyFunctionObject* f, PyObject* classobj) { +#if PY_VERSION_HEX < 0x030900B1 || CYTHON_COMPILING_IN_LIMITED_API + __Pyx_Py_XDECREF_SET( + __Pyx_CyFunction_GetClassObj(f), + ((classobj) ? __Pyx_NewRef(classobj) : NULL)); +#else + __Pyx_Py_XDECREF_SET( + ((PyCMethodObject *) (f))->mm_class, + (PyTypeObject*)((classobj) ? __Pyx_NewRef(classobj) : NULL)); +#endif +} +static PyObject * +__Pyx_CyFunction_get_doc(__pyx_CyFunctionObject *op, void *closure) +{ + CYTHON_UNUSED_VAR(closure); + if (unlikely(op->func_doc == NULL)) { +#if CYTHON_COMPILING_IN_LIMITED_API + op->func_doc = PyObject_GetAttrString(op->func, "__doc__"); + if (unlikely(!op->func_doc)) return NULL; +#else + if (((PyCFunctionObject*)op)->m_ml->ml_doc) { +#if PY_MAJOR_VERSION >= 3 + op->func_doc = PyUnicode_FromString(((PyCFunctionObject*)op)->m_ml->ml_doc); +#else + op->func_doc = PyString_FromString(((PyCFunctionObject*)op)->m_ml->ml_doc); +#endif + if (unlikely(op->func_doc == NULL)) + return NULL; + } else { + Py_INCREF(Py_None); + return Py_None; + } +#endif + } + Py_INCREF(op->func_doc); + return op->func_doc; +} +static int +__Pyx_CyFunction_set_doc(__pyx_CyFunctionObject *op, PyObject *value, void *context) +{ + CYTHON_UNUSED_VAR(context); + if (value == NULL) { + value = Py_None; + } + Py_INCREF(value); + __Pyx_Py_XDECREF_SET(op->func_doc, value); + return 0; +} +static PyObject * +__Pyx_CyFunction_get_name(__pyx_CyFunctionObject *op, void *context) +{ + CYTHON_UNUSED_VAR(context); + if (unlikely(op->func_name == NULL)) { +#if CYTHON_COMPILING_IN_LIMITED_API + op->func_name = PyObject_GetAttrString(op->func, "__name__"); +#elif PY_MAJOR_VERSION >= 3 + op->func_name = PyUnicode_InternFromString(((PyCFunctionObject*)op)->m_ml->ml_name); +#else + op->func_name = PyString_InternFromString(((PyCFunctionObject*)op)->m_ml->ml_name); +#endif + if (unlikely(op->func_name == NULL)) + return NULL; + } + Py_INCREF(op->func_name); + return op->func_name; +} +static int +__Pyx_CyFunction_set_name(__pyx_CyFunctionObject *op, PyObject *value, void *context) +{ + CYTHON_UNUSED_VAR(context); +#if PY_MAJOR_VERSION >= 3 + if (unlikely(value == NULL || !PyUnicode_Check(value))) +#else + if (unlikely(value == NULL || !PyString_Check(value))) +#endif + { + PyErr_SetString(PyExc_TypeError, + "__name__ must be set to a string object"); + return -1; + } + Py_INCREF(value); + __Pyx_Py_XDECREF_SET(op->func_name, value); + 
return 0; +} +static PyObject * +__Pyx_CyFunction_get_qualname(__pyx_CyFunctionObject *op, void *context) +{ + CYTHON_UNUSED_VAR(context); + Py_INCREF(op->func_qualname); + return op->func_qualname; +} +static int +__Pyx_CyFunction_set_qualname(__pyx_CyFunctionObject *op, PyObject *value, void *context) +{ + CYTHON_UNUSED_VAR(context); +#if PY_MAJOR_VERSION >= 3 + if (unlikely(value == NULL || !PyUnicode_Check(value))) +#else + if (unlikely(value == NULL || !PyString_Check(value))) +#endif + { + PyErr_SetString(PyExc_TypeError, + "__qualname__ must be set to a string object"); + return -1; + } + Py_INCREF(value); + __Pyx_Py_XDECREF_SET(op->func_qualname, value); + return 0; +} +static PyObject * +__Pyx_CyFunction_get_dict(__pyx_CyFunctionObject *op, void *context) +{ + CYTHON_UNUSED_VAR(context); + if (unlikely(op->func_dict == NULL)) { + op->func_dict = PyDict_New(); + if (unlikely(op->func_dict == NULL)) + return NULL; + } + Py_INCREF(op->func_dict); + return op->func_dict; +} +static int +__Pyx_CyFunction_set_dict(__pyx_CyFunctionObject *op, PyObject *value, void *context) +{ + CYTHON_UNUSED_VAR(context); + if (unlikely(value == NULL)) { + PyErr_SetString(PyExc_TypeError, + "function's dictionary may not be deleted"); + return -1; + } + if (unlikely(!PyDict_Check(value))) { + PyErr_SetString(PyExc_TypeError, + "setting function's dictionary to a non-dict"); + return -1; + } + Py_INCREF(value); + __Pyx_Py_XDECREF_SET(op->func_dict, value); + return 0; +} +static PyObject * +__Pyx_CyFunction_get_globals(__pyx_CyFunctionObject *op, void *context) +{ + CYTHON_UNUSED_VAR(context); + Py_INCREF(op->func_globals); + return op->func_globals; +} +static PyObject * +__Pyx_CyFunction_get_closure(__pyx_CyFunctionObject *op, void *context) +{ + CYTHON_UNUSED_VAR(op); + CYTHON_UNUSED_VAR(context); + Py_INCREF(Py_None); + return Py_None; +} +static PyObject * +__Pyx_CyFunction_get_code(__pyx_CyFunctionObject *op, void *context) +{ + PyObject* result = (op->func_code) ? 
op->func_code : Py_None; + CYTHON_UNUSED_VAR(context); + Py_INCREF(result); + return result; +} +static int +__Pyx_CyFunction_init_defaults(__pyx_CyFunctionObject *op) { + int result = 0; + PyObject *res = op->defaults_getter((PyObject *) op); + if (unlikely(!res)) + return -1; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + op->defaults_tuple = PyTuple_GET_ITEM(res, 0); + Py_INCREF(op->defaults_tuple); + op->defaults_kwdict = PyTuple_GET_ITEM(res, 1); + Py_INCREF(op->defaults_kwdict); + #else + op->defaults_tuple = __Pyx_PySequence_ITEM(res, 0); + if (unlikely(!op->defaults_tuple)) result = -1; + else { + op->defaults_kwdict = __Pyx_PySequence_ITEM(res, 1); + if (unlikely(!op->defaults_kwdict)) result = -1; + } + #endif + Py_DECREF(res); + return result; +} +static int +__Pyx_CyFunction_set_defaults(__pyx_CyFunctionObject *op, PyObject* value, void *context) { + CYTHON_UNUSED_VAR(context); + if (!value) { + value = Py_None; + } else if (unlikely(value != Py_None && !PyTuple_Check(value))) { + PyErr_SetString(PyExc_TypeError, + "__defaults__ must be set to a tuple object"); + return -1; + } + PyErr_WarnEx(PyExc_RuntimeWarning, "changes to cyfunction.__defaults__ will not " + "currently affect the values used in function calls", 1); + Py_INCREF(value); + __Pyx_Py_XDECREF_SET(op->defaults_tuple, value); + return 0; +} +static PyObject * +__Pyx_CyFunction_get_defaults(__pyx_CyFunctionObject *op, void *context) { + PyObject* result = op->defaults_tuple; + CYTHON_UNUSED_VAR(context); + if (unlikely(!result)) { + if (op->defaults_getter) { + if (unlikely(__Pyx_CyFunction_init_defaults(op) < 0)) return NULL; + result = op->defaults_tuple; + } else { + result = Py_None; + } + } + Py_INCREF(result); + return result; +} +static int +__Pyx_CyFunction_set_kwdefaults(__pyx_CyFunctionObject *op, PyObject* value, void *context) { + CYTHON_UNUSED_VAR(context); + if (!value) { + value = Py_None; + } else if (unlikely(value != Py_None && !PyDict_Check(value))) { + PyErr_SetString(PyExc_TypeError, + "__kwdefaults__ must be set to a dict object"); + return -1; + } + PyErr_WarnEx(PyExc_RuntimeWarning, "changes to cyfunction.__kwdefaults__ will not " + "currently affect the values used in function calls", 1); + Py_INCREF(value); + __Pyx_Py_XDECREF_SET(op->defaults_kwdict, value); + return 0; +} +static PyObject * +__Pyx_CyFunction_get_kwdefaults(__pyx_CyFunctionObject *op, void *context) { + PyObject* result = op->defaults_kwdict; + CYTHON_UNUSED_VAR(context); + if (unlikely(!result)) { + if (op->defaults_getter) { + if (unlikely(__Pyx_CyFunction_init_defaults(op) < 0)) return NULL; + result = op->defaults_kwdict; + } else { + result = Py_None; + } + } + Py_INCREF(result); + return result; +} +static int +__Pyx_CyFunction_set_annotations(__pyx_CyFunctionObject *op, PyObject* value, void *context) { + CYTHON_UNUSED_VAR(context); + if (!value || value == Py_None) { + value = NULL; + } else if (unlikely(!PyDict_Check(value))) { + PyErr_SetString(PyExc_TypeError, + "__annotations__ must be set to a dict object"); + return -1; + } + Py_XINCREF(value); + __Pyx_Py_XDECREF_SET(op->func_annotations, value); + return 0; +} +static PyObject * +__Pyx_CyFunction_get_annotations(__pyx_CyFunctionObject *op, void *context) { + PyObject* result = op->func_annotations; + CYTHON_UNUSED_VAR(context); + if (unlikely(!result)) { + result = PyDict_New(); + if (unlikely(!result)) return NULL; + op->func_annotations = result; + } + Py_INCREF(result); + return result; +} +static PyObject * 
+__Pyx_CyFunction_get_is_coroutine(__pyx_CyFunctionObject *op, void *context) { + int is_coroutine; + CYTHON_UNUSED_VAR(context); + if (op->func_is_coroutine) { + return __Pyx_NewRef(op->func_is_coroutine); + } + is_coroutine = op->flags & __Pyx_CYFUNCTION_COROUTINE; +#if PY_VERSION_HEX >= 0x03050000 + if (is_coroutine) { + PyObject *module, *fromlist, *marker = __pyx_n_s_is_coroutine; + fromlist = PyList_New(1); + if (unlikely(!fromlist)) return NULL; + Py_INCREF(marker); +#if CYTHON_ASSUME_SAFE_MACROS + PyList_SET_ITEM(fromlist, 0, marker); +#else + if (unlikely(PyList_SetItem(fromlist, 0, marker) < 0)) { + Py_DECREF(marker); + Py_DECREF(fromlist); + return NULL; + } +#endif + module = PyImport_ImportModuleLevelObject(__pyx_n_s_asyncio_coroutines, NULL, NULL, fromlist, 0); + Py_DECREF(fromlist); + if (unlikely(!module)) goto ignore; + op->func_is_coroutine = __Pyx_PyObject_GetAttrStr(module, marker); + Py_DECREF(module); + if (likely(op->func_is_coroutine)) { + return __Pyx_NewRef(op->func_is_coroutine); + } +ignore: + PyErr_Clear(); + } +#endif + op->func_is_coroutine = __Pyx_PyBool_FromLong(is_coroutine); + return __Pyx_NewRef(op->func_is_coroutine); +} +#if CYTHON_COMPILING_IN_LIMITED_API +static PyObject * +__Pyx_CyFunction_get_module(__pyx_CyFunctionObject *op, void *context) { + CYTHON_UNUSED_VAR(context); + return PyObject_GetAttrString(op->func, "__module__"); +} +static int +__Pyx_CyFunction_set_module(__pyx_CyFunctionObject *op, PyObject* value, void *context) { + CYTHON_UNUSED_VAR(context); + return PyObject_SetAttrString(op->func, "__module__", value); +} +#endif +static PyGetSetDef __pyx_CyFunction_getsets[] = { + {(char *) "func_doc", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0}, + {(char *) "__doc__", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0}, + {(char *) "func_name", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0}, + {(char *) "__name__", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0}, + {(char *) "__qualname__", (getter)__Pyx_CyFunction_get_qualname, (setter)__Pyx_CyFunction_set_qualname, 0, 0}, + {(char *) "func_dict", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0}, + {(char *) "__dict__", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0}, + {(char *) "func_globals", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0}, + {(char *) "__globals__", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0}, + {(char *) "func_closure", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0}, + {(char *) "__closure__", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0}, + {(char *) "func_code", (getter)__Pyx_CyFunction_get_code, 0, 0, 0}, + {(char *) "__code__", (getter)__Pyx_CyFunction_get_code, 0, 0, 0}, + {(char *) "func_defaults", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0}, + {(char *) "__defaults__", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0}, + {(char *) "__kwdefaults__", (getter)__Pyx_CyFunction_get_kwdefaults, (setter)__Pyx_CyFunction_set_kwdefaults, 0, 0}, + {(char *) "__annotations__", (getter)__Pyx_CyFunction_get_annotations, (setter)__Pyx_CyFunction_set_annotations, 0, 0}, + {(char *) "_is_coroutine", (getter)__Pyx_CyFunction_get_is_coroutine, 0, 0, 0}, +#if CYTHON_COMPILING_IN_LIMITED_API + {"__module__", (getter)__Pyx_CyFunction_get_module, (setter)__Pyx_CyFunction_set_module, 0, 0}, +#endif + {0, 0, 0, 0, 0} +}; +static PyMemberDef 
__pyx_CyFunction_members[] = { +#if !CYTHON_COMPILING_IN_LIMITED_API + {(char *) "__module__", T_OBJECT, offsetof(PyCFunctionObject, m_module), 0, 0}, +#endif +#if CYTHON_USE_TYPE_SPECS + {(char *) "__dictoffset__", T_PYSSIZET, offsetof(__pyx_CyFunctionObject, func_dict), READONLY, 0}, +#if CYTHON_METH_FASTCALL +#if CYTHON_BACKPORT_VECTORCALL + {(char *) "__vectorcalloffset__", T_PYSSIZET, offsetof(__pyx_CyFunctionObject, func_vectorcall), READONLY, 0}, +#else +#if !CYTHON_COMPILING_IN_LIMITED_API + {(char *) "__vectorcalloffset__", T_PYSSIZET, offsetof(PyCFunctionObject, vectorcall), READONLY, 0}, +#endif +#endif +#endif +#if PY_VERSION_HEX < 0x030500A0 || CYTHON_COMPILING_IN_LIMITED_API + {(char *) "__weaklistoffset__", T_PYSSIZET, offsetof(__pyx_CyFunctionObject, func_weakreflist), READONLY, 0}, +#else + {(char *) "__weaklistoffset__", T_PYSSIZET, offsetof(PyCFunctionObject, m_weakreflist), READONLY, 0}, +#endif +#endif + {0, 0, 0, 0, 0} +}; +static PyObject * +__Pyx_CyFunction_reduce(__pyx_CyFunctionObject *m, PyObject *args) +{ + CYTHON_UNUSED_VAR(args); +#if PY_MAJOR_VERSION >= 3 + Py_INCREF(m->func_qualname); + return m->func_qualname; +#else + return PyString_FromString(((PyCFunctionObject*)m)->m_ml->ml_name); +#endif +} +static PyMethodDef __pyx_CyFunction_methods[] = { + {"__reduce__", (PyCFunction)__Pyx_CyFunction_reduce, METH_VARARGS, 0}, + {0, 0, 0, 0} +}; +#if PY_VERSION_HEX < 0x030500A0 || CYTHON_COMPILING_IN_LIMITED_API +#define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func_weakreflist) +#else +#define __Pyx_CyFunction_weakreflist(cyfunc) (((PyCFunctionObject*)cyfunc)->m_weakreflist) +#endif +static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject *op, PyMethodDef *ml, int flags, PyObject* qualname, + PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) { +#if !CYTHON_COMPILING_IN_LIMITED_API + PyCFunctionObject *cf = (PyCFunctionObject*) op; +#endif + if (unlikely(op == NULL)) + return NULL; +#if CYTHON_COMPILING_IN_LIMITED_API + op->func = PyCFunction_NewEx(ml, (PyObject*)op, module); + if (unlikely(!op->func)) return NULL; +#endif + op->flags = flags; + __Pyx_CyFunction_weakreflist(op) = NULL; +#if !CYTHON_COMPILING_IN_LIMITED_API + cf->m_ml = ml; + cf->m_self = (PyObject *) op; +#endif + Py_XINCREF(closure); + op->func_closure = closure; +#if !CYTHON_COMPILING_IN_LIMITED_API + Py_XINCREF(module); + cf->m_module = module; +#endif + op->func_dict = NULL; + op->func_name = NULL; + Py_INCREF(qualname); + op->func_qualname = qualname; + op->func_doc = NULL; +#if PY_VERSION_HEX < 0x030900B1 || CYTHON_COMPILING_IN_LIMITED_API + op->func_classobj = NULL; +#else + ((PyCMethodObject*)op)->mm_class = NULL; +#endif + op->func_globals = globals; + Py_INCREF(op->func_globals); + Py_XINCREF(code); + op->func_code = code; + op->defaults_pyobjects = 0; + op->defaults_size = 0; + op->defaults = NULL; + op->defaults_tuple = NULL; + op->defaults_kwdict = NULL; + op->defaults_getter = NULL; + op->func_annotations = NULL; + op->func_is_coroutine = NULL; +#if CYTHON_METH_FASTCALL + switch (ml->ml_flags & (METH_VARARGS | METH_FASTCALL | METH_NOARGS | METH_O | METH_KEYWORDS | METH_METHOD)) { + case METH_NOARGS: + __Pyx_CyFunction_func_vectorcall(op) = __Pyx_CyFunction_Vectorcall_NOARGS; + break; + case METH_O: + __Pyx_CyFunction_func_vectorcall(op) = __Pyx_CyFunction_Vectorcall_O; + break; + case METH_METHOD | METH_FASTCALL | METH_KEYWORDS: + __Pyx_CyFunction_func_vectorcall(op) = __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS_METHOD; + break; + case METH_FASTCALL 
| METH_KEYWORDS: + __Pyx_CyFunction_func_vectorcall(op) = __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS; + break; + case METH_VARARGS | METH_KEYWORDS: + __Pyx_CyFunction_func_vectorcall(op) = NULL; + break; + default: + PyErr_SetString(PyExc_SystemError, "Bad call flags for CyFunction"); + Py_DECREF(op); + return NULL; + } +#endif + return (PyObject *) op; +} +static int +__Pyx_CyFunction_clear(__pyx_CyFunctionObject *m) +{ + Py_CLEAR(m->func_closure); +#if CYTHON_COMPILING_IN_LIMITED_API + Py_CLEAR(m->func); +#else + Py_CLEAR(((PyCFunctionObject*)m)->m_module); +#endif + Py_CLEAR(m->func_dict); + Py_CLEAR(m->func_name); + Py_CLEAR(m->func_qualname); + Py_CLEAR(m->func_doc); + Py_CLEAR(m->func_globals); + Py_CLEAR(m->func_code); +#if !CYTHON_COMPILING_IN_LIMITED_API +#if PY_VERSION_HEX < 0x030900B1 + Py_CLEAR(__Pyx_CyFunction_GetClassObj(m)); +#else + { + PyObject *cls = (PyObject*) ((PyCMethodObject *) (m))->mm_class; + ((PyCMethodObject *) (m))->mm_class = NULL; + Py_XDECREF(cls); + } +#endif +#endif + Py_CLEAR(m->defaults_tuple); + Py_CLEAR(m->defaults_kwdict); + Py_CLEAR(m->func_annotations); + Py_CLEAR(m->func_is_coroutine); + if (m->defaults) { + PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m); + int i; + for (i = 0; i < m->defaults_pyobjects; i++) + Py_XDECREF(pydefaults[i]); + PyObject_Free(m->defaults); + m->defaults = NULL; + } + return 0; +} +static void __Pyx__CyFunction_dealloc(__pyx_CyFunctionObject *m) +{ + if (__Pyx_CyFunction_weakreflist(m) != NULL) + PyObject_ClearWeakRefs((PyObject *) m); + __Pyx_CyFunction_clear(m); + __Pyx_PyHeapTypeObject_GC_Del(m); +} +static void __Pyx_CyFunction_dealloc(__pyx_CyFunctionObject *m) +{ + PyObject_GC_UnTrack(m); + __Pyx__CyFunction_dealloc(m); +} +static int __Pyx_CyFunction_traverse(__pyx_CyFunctionObject *m, visitproc visit, void *arg) +{ + Py_VISIT(m->func_closure); +#if CYTHON_COMPILING_IN_LIMITED_API + Py_VISIT(m->func); +#else + Py_VISIT(((PyCFunctionObject*)m)->m_module); +#endif + Py_VISIT(m->func_dict); + Py_VISIT(m->func_name); + Py_VISIT(m->func_qualname); + Py_VISIT(m->func_doc); + Py_VISIT(m->func_globals); + Py_VISIT(m->func_code); +#if !CYTHON_COMPILING_IN_LIMITED_API + Py_VISIT(__Pyx_CyFunction_GetClassObj(m)); +#endif + Py_VISIT(m->defaults_tuple); + Py_VISIT(m->defaults_kwdict); + Py_VISIT(m->func_is_coroutine); + if (m->defaults) { + PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m); + int i; + for (i = 0; i < m->defaults_pyobjects; i++) + Py_VISIT(pydefaults[i]); + } + return 0; +} +static PyObject* +__Pyx_CyFunction_repr(__pyx_CyFunctionObject *op) +{ +#if PY_MAJOR_VERSION >= 3 + return PyUnicode_FromFormat("<cyfunction %U at %p>", + op->func_qualname, (void *)op); +#else + return PyString_FromFormat("<cyfunction %s at %p>", + PyString_AsString(op->func_qualname), (void *)op); +#endif +} +static PyObject * __Pyx_CyFunction_CallMethod(PyObject *func, PyObject *self, PyObject *arg, PyObject *kw) { +#if CYTHON_COMPILING_IN_LIMITED_API + PyObject *f = ((__pyx_CyFunctionObject*)func)->func; + PyObject *py_name = NULL; + PyCFunction meth; + int flags; + meth = PyCFunction_GetFunction(f); + if (unlikely(!meth)) return NULL; + flags = PyCFunction_GetFlags(f); + if (unlikely(flags < 0)) return NULL; +#else + PyCFunctionObject* f = (PyCFunctionObject*)func; + PyCFunction meth = f->m_ml->ml_meth; + int flags = f->m_ml->ml_flags; +#endif + Py_ssize_t size; + switch (flags & (METH_VARARGS | METH_KEYWORDS | METH_NOARGS | METH_O)) { + case METH_VARARGS: + if (likely(kw == NULL || PyDict_Size(kw) == 0)) + return (*meth)(self, arg); + break;
+ case METH_VARARGS | METH_KEYWORDS: + return (*(PyCFunctionWithKeywords)(void*)meth)(self, arg, kw); + case METH_NOARGS: + if (likely(kw == NULL || PyDict_Size(kw) == 0)) { +#if CYTHON_ASSUME_SAFE_MACROS + size = PyTuple_GET_SIZE(arg); +#else + size = PyTuple_Size(arg); + if (unlikely(size < 0)) return NULL; +#endif + if (likely(size == 0)) + return (*meth)(self, NULL); +#if CYTHON_COMPILING_IN_LIMITED_API + py_name = __Pyx_CyFunction_get_name((__pyx_CyFunctionObject*)func, NULL); + if (!py_name) return NULL; + PyErr_Format(PyExc_TypeError, + "%.200S() takes no arguments (%" CYTHON_FORMAT_SSIZE_T "d given)", + py_name, size); + Py_DECREF(py_name); +#else + PyErr_Format(PyExc_TypeError, + "%.200s() takes no arguments (%" CYTHON_FORMAT_SSIZE_T "d given)", + f->m_ml->ml_name, size); +#endif + return NULL; + } + break; + case METH_O: + if (likely(kw == NULL || PyDict_Size(kw) == 0)) { +#if CYTHON_ASSUME_SAFE_MACROS + size = PyTuple_GET_SIZE(arg); +#else + size = PyTuple_Size(arg); + if (unlikely(size < 0)) return NULL; +#endif + if (likely(size == 1)) { + PyObject *result, *arg0; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + arg0 = PyTuple_GET_ITEM(arg, 0); + #else + arg0 = __Pyx_PySequence_ITEM(arg, 0); if (unlikely(!arg0)) return NULL; + #endif + result = (*meth)(self, arg0); + #if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) + Py_DECREF(arg0); + #endif + return result; + } +#if CYTHON_COMPILING_IN_LIMITED_API + py_name = __Pyx_CyFunction_get_name((__pyx_CyFunctionObject*)func, NULL); + if (!py_name) return NULL; + PyErr_Format(PyExc_TypeError, + "%.200S() takes exactly one argument (%" CYTHON_FORMAT_SSIZE_T "d given)", + py_name, size); + Py_DECREF(py_name); +#else + PyErr_Format(PyExc_TypeError, + "%.200s() takes exactly one argument (%" CYTHON_FORMAT_SSIZE_T "d given)", + f->m_ml->ml_name, size); +#endif + return NULL; + } + break; + default: + PyErr_SetString(PyExc_SystemError, "Bad call flags for CyFunction"); + return NULL; + } +#if CYTHON_COMPILING_IN_LIMITED_API + py_name = __Pyx_CyFunction_get_name((__pyx_CyFunctionObject*)func, NULL); + if (!py_name) return NULL; + PyErr_Format(PyExc_TypeError, "%.200S() takes no keyword arguments", + py_name); + Py_DECREF(py_name); +#else + PyErr_Format(PyExc_TypeError, "%.200s() takes no keyword arguments", + f->m_ml->ml_name); +#endif + return NULL; +} +static CYTHON_INLINE PyObject *__Pyx_CyFunction_Call(PyObject *func, PyObject *arg, PyObject *kw) { + PyObject *self, *result; +#if CYTHON_COMPILING_IN_LIMITED_API + self = PyCFunction_GetSelf(((__pyx_CyFunctionObject*)func)->func); + if (unlikely(!self) && PyErr_Occurred()) return NULL; +#else + self = ((PyCFunctionObject*)func)->m_self; +#endif + result = __Pyx_CyFunction_CallMethod(func, self, arg, kw); + return result; +} +static PyObject *__Pyx_CyFunction_CallAsMethod(PyObject *func, PyObject *args, PyObject *kw) { + PyObject *result; + __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *) func; +#if CYTHON_METH_FASTCALL + __pyx_vectorcallfunc vc = __Pyx_CyFunction_func_vectorcall(cyfunc); + if (vc) { +#if CYTHON_ASSUME_SAFE_MACROS + return __Pyx_PyVectorcall_FastCallDict(func, vc, &PyTuple_GET_ITEM(args, 0), (size_t)PyTuple_GET_SIZE(args), kw); +#else + (void) &__Pyx_PyVectorcall_FastCallDict; + return PyVectorcall_Call(func, args, kw); +#endif + } +#endif + if ((cyfunc->flags & __Pyx_CYFUNCTION_CCLASS) && !(cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD)) { + Py_ssize_t argc; + PyObject *new_args; + PyObject *self; +#if CYTHON_ASSUME_SAFE_MACROS + 
argc = PyTuple_GET_SIZE(args); +#else + argc = PyTuple_Size(args); + if (unlikely(argc < 0)) return NULL; +#endif + new_args = PyTuple_GetSlice(args, 1, argc); + if (unlikely(!new_args)) + return NULL; + self = PyTuple_GetItem(args, 0); + if (unlikely(!self)) { + Py_DECREF(new_args); +#if PY_MAJOR_VERSION > 2 + PyErr_Format(PyExc_TypeError, + "unbound method %.200S() needs an argument", + cyfunc->func_qualname); +#else + PyErr_SetString(PyExc_TypeError, + "unbound method needs an argument"); +#endif + return NULL; + } + result = __Pyx_CyFunction_CallMethod(func, self, new_args, kw); + Py_DECREF(new_args); + } else { + result = __Pyx_CyFunction_Call(func, args, kw); + } + return result; +} +#if CYTHON_METH_FASTCALL +static CYTHON_INLINE int __Pyx_CyFunction_Vectorcall_CheckArgs(__pyx_CyFunctionObject *cyfunc, Py_ssize_t nargs, PyObject *kwnames) +{ + int ret = 0; + if ((cyfunc->flags & __Pyx_CYFUNCTION_CCLASS) && !(cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD)) { + if (unlikely(nargs < 1)) { + PyErr_Format(PyExc_TypeError, "%.200s() needs an argument", + ((PyCFunctionObject*)cyfunc)->m_ml->ml_name); + return -1; + } + ret = 1; + } + if (unlikely(kwnames) && unlikely(PyTuple_GET_SIZE(kwnames))) { + PyErr_Format(PyExc_TypeError, + "%.200s() takes no keyword arguments", ((PyCFunctionObject*)cyfunc)->m_ml->ml_name); + return -1; + } + return ret; +} +static PyObject * __Pyx_CyFunction_Vectorcall_NOARGS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames) +{ + __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *)func; + PyMethodDef* def = ((PyCFunctionObject*)cyfunc)->m_ml; +#if CYTHON_BACKPORT_VECTORCALL + Py_ssize_t nargs = (Py_ssize_t)nargsf; +#else + Py_ssize_t nargs = PyVectorcall_NARGS(nargsf); +#endif + PyObject *self; + switch (__Pyx_CyFunction_Vectorcall_CheckArgs(cyfunc, nargs, kwnames)) { + case 1: + self = args[0]; + args += 1; + nargs -= 1; + break; + case 0: + self = ((PyCFunctionObject*)cyfunc)->m_self; + break; + default: + return NULL; + } + if (unlikely(nargs != 0)) { + PyErr_Format(PyExc_TypeError, + "%.200s() takes no arguments (%" CYTHON_FORMAT_SSIZE_T "d given)", + def->ml_name, nargs); + return NULL; + } + return def->ml_meth(self, NULL); +} +static PyObject * __Pyx_CyFunction_Vectorcall_O(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames) +{ + __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *)func; + PyMethodDef* def = ((PyCFunctionObject*)cyfunc)->m_ml; +#if CYTHON_BACKPORT_VECTORCALL + Py_ssize_t nargs = (Py_ssize_t)nargsf; +#else + Py_ssize_t nargs = PyVectorcall_NARGS(nargsf); +#endif + PyObject *self; + switch (__Pyx_CyFunction_Vectorcall_CheckArgs(cyfunc, nargs, kwnames)) { + case 1: + self = args[0]; + args += 1; + nargs -= 1; + break; + case 0: + self = ((PyCFunctionObject*)cyfunc)->m_self; + break; + default: + return NULL; + } + if (unlikely(nargs != 1)) { + PyErr_Format(PyExc_TypeError, + "%.200s() takes exactly one argument (%" CYTHON_FORMAT_SSIZE_T "d given)", + def->ml_name, nargs); + return NULL; + } + return def->ml_meth(self, args[0]); +} +static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames) +{ + __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *)func; + PyMethodDef* def = ((PyCFunctionObject*)cyfunc)->m_ml; +#if CYTHON_BACKPORT_VECTORCALL + Py_ssize_t nargs = (Py_ssize_t)nargsf; +#else + Py_ssize_t nargs = PyVectorcall_NARGS(nargsf); +#endif + PyObject *self; + switch
(__Pyx_CyFunction_Vectorcall_CheckArgs(cyfunc, nargs, NULL)) { + case 1: + self = args[0]; + args += 1; + nargs -= 1; + break; + case 0: + self = ((PyCFunctionObject*)cyfunc)->m_self; + break; + default: + return NULL; + } + return ((__Pyx_PyCFunctionFastWithKeywords)(void(*)(void))def->ml_meth)(self, args, nargs, kwnames); +} +static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS_METHOD(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames) +{ + __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *)func; + PyMethodDef* def = ((PyCFunctionObject*)cyfunc)->m_ml; + PyTypeObject *cls = (PyTypeObject *) __Pyx_CyFunction_GetClassObj(cyfunc); +#if CYTHON_BACKPORT_VECTORCALL + Py_ssize_t nargs = (Py_ssize_t)nargsf; +#else + Py_ssize_t nargs = PyVectorcall_NARGS(nargsf); +#endif + PyObject *self; + switch (__Pyx_CyFunction_Vectorcall_CheckArgs(cyfunc, nargs, NULL)) { + case 1: + self = args[0]; + args += 1; + nargs -= 1; + break; + case 0: + self = ((PyCFunctionObject*)cyfunc)->m_self; + break; + default: + return NULL; + } + return ((__Pyx_PyCMethod)(void(*)(void))def->ml_meth)(self, cls, args, (size_t)nargs, kwnames); +} +#endif +#if CYTHON_USE_TYPE_SPECS +static PyType_Slot __pyx_CyFunctionType_slots[] = { + {Py_tp_dealloc, (void *)__Pyx_CyFunction_dealloc}, + {Py_tp_repr, (void *)__Pyx_CyFunction_repr}, + {Py_tp_call, (void *)__Pyx_CyFunction_CallAsMethod}, + {Py_tp_traverse, (void *)__Pyx_CyFunction_traverse}, + {Py_tp_clear, (void *)__Pyx_CyFunction_clear}, + {Py_tp_methods, (void *)__pyx_CyFunction_methods}, + {Py_tp_members, (void *)__pyx_CyFunction_members}, + {Py_tp_getset, (void *)__pyx_CyFunction_getsets}, + {Py_tp_descr_get, (void *)__Pyx_PyMethod_New}, + {0, 0}, +}; +static PyType_Spec __pyx_CyFunctionType_spec = { + __PYX_TYPE_MODULE_PREFIX "cython_function_or_method", + sizeof(__pyx_CyFunctionObject), + 0, +#ifdef Py_TPFLAGS_METHOD_DESCRIPTOR + Py_TPFLAGS_METHOD_DESCRIPTOR | +#endif +#if (defined(_Py_TPFLAGS_HAVE_VECTORCALL) && CYTHON_METH_FASTCALL) + _Py_TPFLAGS_HAVE_VECTORCALL | +#endif + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE, + __pyx_CyFunctionType_slots +}; +#else +static PyTypeObject __pyx_CyFunctionType_type = { + PyVarObject_HEAD_INIT(0, 0) + __PYX_TYPE_MODULE_PREFIX "cython_function_or_method", + sizeof(__pyx_CyFunctionObject), + 0, + (destructor) __Pyx_CyFunction_dealloc, +#if !CYTHON_METH_FASTCALL + 0, +#elif CYTHON_BACKPORT_VECTORCALL + (printfunc)offsetof(__pyx_CyFunctionObject, func_vectorcall), +#else + offsetof(PyCFunctionObject, vectorcall), +#endif + 0, + 0, +#if PY_MAJOR_VERSION < 3 + 0, +#else + 0, +#endif + (reprfunc) __Pyx_CyFunction_repr, + 0, + 0, + 0, + 0, + __Pyx_CyFunction_CallAsMethod, + 0, + 0, + 0, + 0, +#ifdef Py_TPFLAGS_METHOD_DESCRIPTOR + Py_TPFLAGS_METHOD_DESCRIPTOR | +#endif +#if defined(_Py_TPFLAGS_HAVE_VECTORCALL) && CYTHON_METH_FASTCALL + _Py_TPFLAGS_HAVE_VECTORCALL | +#endif + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE, + 0, + (traverseproc) __Pyx_CyFunction_traverse, + (inquiry) __Pyx_CyFunction_clear, + 0, +#if PY_VERSION_HEX < 0x030500A0 + offsetof(__pyx_CyFunctionObject, func_weakreflist), +#else + offsetof(PyCFunctionObject, m_weakreflist), +#endif + 0, + 0, + __pyx_CyFunction_methods, + __pyx_CyFunction_members, + __pyx_CyFunction_getsets, + 0, + 0, + __Pyx_PyMethod_New, + 0, + offsetof(__pyx_CyFunctionObject, func_dict), + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, +#if PY_VERSION_HEX >= 0x030400a1 + 0, +#endif +#if PY_VERSION_HEX >= 0x030800b1 && 
(!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) + 0, +#endif +#if __PYX_NEED_TP_PRINT_SLOT + 0, +#endif +#if PY_VERSION_HEX >= 0x030C0000 + 0, +#endif +#if PY_VERSION_HEX >= 0x030d00A4 + 0, +#endif +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000 + 0, +#endif +}; +#endif +static int __pyx_CyFunction_init(PyObject *module) { +#if CYTHON_USE_TYPE_SPECS + __pyx_CyFunctionType = __Pyx_FetchCommonTypeFromSpec(module, &__pyx_CyFunctionType_spec, NULL); +#else + CYTHON_UNUSED_VAR(module); + __pyx_CyFunctionType = __Pyx_FetchCommonType(&__pyx_CyFunctionType_type); +#endif + if (unlikely(__pyx_CyFunctionType == NULL)) { + return -1; + } + return 0; +} +static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *func, size_t size, int pyobjects) { + __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; + m->defaults = PyObject_Malloc(size); + if (unlikely(!m->defaults)) + return PyErr_NoMemory(); + memset(m->defaults, 0, size); + m->defaults_pyobjects = pyobjects; + m->defaults_size = size; + return m->defaults; +} +static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *func, PyObject *tuple) { + __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; + m->defaults_tuple = tuple; + Py_INCREF(tuple); +} +static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *func, PyObject *dict) { + __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; + m->defaults_kwdict = dict; + Py_INCREF(dict); +} +static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *func, PyObject *dict) { + __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; + m->func_annotations = dict; + Py_INCREF(dict); +} + +/* CythonFunction */ +static PyObject *__Pyx_CyFunction_New(PyMethodDef *ml, int flags, PyObject* qualname, + PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) { + PyObject *op = __Pyx_CyFunction_Init( + PyObject_GC_New(__pyx_CyFunctionObject, __pyx_CyFunctionType), + ml, flags, qualname, closure, module, globals, code + ); + if (likely(op)) { + PyObject_GC_Track(op); + } + return op; +} + +/* PyObjectLookupSpecial */ +#if CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx__PyObject_LookupSpecial(PyObject* obj, PyObject* attr_name, int with_error) { + PyObject *res; + PyTypeObject *tp = Py_TYPE(obj); +#if PY_MAJOR_VERSION < 3 + if (unlikely(PyInstance_Check(obj))) + return with_error ? 
__Pyx_PyObject_GetAttrStr(obj, attr_name) : __Pyx_PyObject_GetAttrStrNoError(obj, attr_name); +#endif + res = _PyType_Lookup(tp, attr_name); + if (likely(res)) { + descrgetfunc f = Py_TYPE(res)->tp_descr_get; + if (!f) { + Py_INCREF(res); + } else { + res = f(res, obj, (PyObject *)tp); + } + } else if (with_error) { + PyErr_SetObject(PyExc_AttributeError, attr_name); + } + return res; +} +#endif + +/* Py3ClassCreate */ +static PyObject *__Pyx_Py3MetaclassPrepare(PyObject *metaclass, PyObject *bases, PyObject *name, + PyObject *qualname, PyObject *mkw, PyObject *modname, PyObject *doc) { + PyObject *ns; + if (metaclass) { + PyObject *prep = __Pyx_PyObject_GetAttrStrNoError(metaclass, __pyx_n_s_prepare); + if (prep) { + PyObject *pargs[3] = {NULL, name, bases}; + ns = __Pyx_PyObject_FastCallDict(prep, pargs+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, mkw); + Py_DECREF(prep); + } else { + if (unlikely(PyErr_Occurred())) + return NULL; + ns = PyDict_New(); + } + } else { + ns = PyDict_New(); + } + if (unlikely(!ns)) + return NULL; + if (unlikely(PyObject_SetItem(ns, __pyx_n_s_module, modname) < 0)) goto bad; +#if PY_VERSION_HEX >= 0x03030000 + if (unlikely(PyObject_SetItem(ns, __pyx_n_s_qualname, qualname) < 0)) goto bad; +#else + CYTHON_MAYBE_UNUSED_VAR(qualname); +#endif + if (unlikely(doc && PyObject_SetItem(ns, __pyx_n_s_doc, doc) < 0)) goto bad; + return ns; +bad: + Py_DECREF(ns); + return NULL; +} +#if PY_VERSION_HEX < 0x030600A4 && CYTHON_PEP487_INIT_SUBCLASS +static int __Pyx_SetNamesPEP487(PyObject *type_obj) { + PyTypeObject *type = (PyTypeObject*) type_obj; + PyObject *names_to_set, *key, *value, *set_name, *tmp; + Py_ssize_t i = 0; +#if CYTHON_USE_TYPE_SLOTS + names_to_set = PyDict_Copy(type->tp_dict); +#else + { + PyObject *d = PyObject_GetAttr(type_obj, __pyx_n_s_dict); + names_to_set = NULL; + if (likely(d)) { + PyObject *names_to_set = PyDict_New(); + int ret = likely(names_to_set) ? PyDict_Update(names_to_set, d) : -1; + Py_DECREF(d); + if (unlikely(ret < 0)) + Py_CLEAR(names_to_set); + } + } +#endif + if (unlikely(names_to_set == NULL)) + goto bad; + while (PyDict_Next(names_to_set, &i, &key, &value)) { + set_name = __Pyx_PyObject_LookupSpecialNoError(value, __pyx_n_s_set_name); + if (unlikely(set_name != NULL)) { + tmp = __Pyx_PyObject_Call2Args(set_name, type_obj, key); + Py_DECREF(set_name); + if (unlikely(tmp == NULL)) { + __Pyx_TypeName value_type_name = + __Pyx_PyType_GetName(Py_TYPE(value)); + __Pyx_TypeName type_name = __Pyx_PyType_GetName(type); + PyErr_Format(PyExc_RuntimeError, +#if PY_MAJOR_VERSION >= 3 + "Error calling __set_name__ on '" __Pyx_FMT_TYPENAME "' instance %R " "in '" __Pyx_FMT_TYPENAME "'", + value_type_name, key, type_name); +#else + "Error calling __set_name__ on '" __Pyx_FMT_TYPENAME "' instance %.100s in '" __Pyx_FMT_TYPENAME "'", + value_type_name, + PyString_Check(key) ? 
PyString_AS_STRING(key) : "?", + type_name); +#endif + goto bad; + } else { + Py_DECREF(tmp); + } + } + else if (unlikely(PyErr_Occurred())) { + goto bad; + } + } + Py_DECREF(names_to_set); + return 0; +bad: + Py_XDECREF(names_to_set); + return -1; +} +static PyObject *__Pyx_InitSubclassPEP487(PyObject *type_obj, PyObject *mkw) { +#if CYTHON_USE_TYPE_SLOTS && CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + PyTypeObject *type = (PyTypeObject*) type_obj; + PyObject *mro = type->tp_mro; + Py_ssize_t i, nbases; + if (unlikely(!mro)) goto done; + (void) &__Pyx_GetBuiltinName; + Py_INCREF(mro); + nbases = PyTuple_GET_SIZE(mro); + assert(PyTuple_GET_ITEM(mro, 0) == type_obj); + for (i = 1; i < nbases-1; i++) { + PyObject *base, *dict, *meth; + base = PyTuple_GET_ITEM(mro, i); + dict = ((PyTypeObject *)base)->tp_dict; + meth = __Pyx_PyDict_GetItemStrWithError(dict, __pyx_n_s_init_subclass); + if (unlikely(meth)) { + descrgetfunc f = Py_TYPE(meth)->tp_descr_get; + PyObject *res; + Py_INCREF(meth); + if (likely(f)) { + res = f(meth, NULL, type_obj); + Py_DECREF(meth); + if (unlikely(!res)) goto bad; + meth = res; + } + res = __Pyx_PyObject_FastCallDict(meth, NULL, 0, mkw); + Py_DECREF(meth); + if (unlikely(!res)) goto bad; + Py_DECREF(res); + goto done; + } else if (unlikely(PyErr_Occurred())) { + goto bad; + } + } +done: + Py_XDECREF(mro); + return type_obj; +bad: + Py_XDECREF(mro); + Py_DECREF(type_obj); + return NULL; +#else + PyObject *super_type, *super, *func, *res; +#if CYTHON_COMPILING_IN_PYPY && !defined(PySuper_Type) + super_type = __Pyx_GetBuiltinName(__pyx_n_s_super); +#else + super_type = (PyObject*) &PySuper_Type; + (void) &__Pyx_GetBuiltinName; +#endif + super = likely(super_type) ? __Pyx_PyObject_Call2Args(super_type, type_obj, type_obj) : NULL; +#if CYTHON_COMPILING_IN_PYPY && !defined(PySuper_Type) + Py_XDECREF(super_type); +#endif + if (unlikely(!super)) { + Py_CLEAR(type_obj); + goto done; + } + func = __Pyx_PyObject_GetAttrStrNoError(super, __pyx_n_s_init_subclass); + Py_DECREF(super); + if (likely(!func)) { + if (unlikely(PyErr_Occurred())) + Py_CLEAR(type_obj); + goto done; + } + res = __Pyx_PyObject_FastCallDict(func, NULL, 0, mkw); + Py_DECREF(func); + if (unlikely(!res)) + Py_CLEAR(type_obj); + Py_XDECREF(res); +done: + return type_obj; +#endif +} +#endif +static PyObject *__Pyx_Py3ClassCreate(PyObject *metaclass, PyObject *name, PyObject *bases, + PyObject *dict, PyObject *mkw, + int calculate_metaclass, int allow_py2_metaclass) { + PyObject *result; + PyObject *owned_metaclass = NULL; + PyObject *margs[4] = {NULL, name, bases, dict}; + if (allow_py2_metaclass) { + owned_metaclass = PyObject_GetItem(dict, __pyx_n_s_metaclass); + if (owned_metaclass) { + metaclass = owned_metaclass; + } else if (likely(PyErr_ExceptionMatches(PyExc_KeyError))) { + PyErr_Clear(); + } else { + return NULL; + } + } + if (calculate_metaclass && (!metaclass || PyType_Check(metaclass))) { + metaclass = __Pyx_CalculateMetaclass((PyTypeObject*) metaclass, bases); + Py_XDECREF(owned_metaclass); + if (unlikely(!metaclass)) + return NULL; + owned_metaclass = metaclass; + } + result = __Pyx_PyObject_FastCallDict(metaclass, margs+1, 3 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, +#if PY_VERSION_HEX < 0x030600A4 + (metaclass == (PyObject*)&PyType_Type) ? 
NULL : mkw +#else + mkw +#endif + ); + Py_XDECREF(owned_metaclass); +#if PY_VERSION_HEX < 0x030600A4 && CYTHON_PEP487_INIT_SUBCLASS + if (likely(result) && likely(PyType_Check(result))) { + if (unlikely(__Pyx_SetNamesPEP487(result) < 0)) { + Py_CLEAR(result); + } else { + result = __Pyx_InitSubclassPEP487(result, mkw); + } + } +#else + (void) &__Pyx_GetBuiltinName; +#endif + return result; +} + +/* CLineInTraceback */ +#ifndef CYTHON_CLINE_IN_TRACEBACK +static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line) { + PyObject *use_cline; + PyObject *ptype, *pvalue, *ptraceback; +#if CYTHON_COMPILING_IN_CPYTHON + PyObject **cython_runtime_dict; +#endif + CYTHON_MAYBE_UNUSED_VAR(tstate); + if (unlikely(!__pyx_cython_runtime)) { + return c_line; + } + __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); +#if CYTHON_COMPILING_IN_CPYTHON + cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); + if (likely(cython_runtime_dict)) { + __PYX_PY_DICT_LOOKUP_IF_MODIFIED( + use_cline, *cython_runtime_dict, + __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) + } else +#endif + { + PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStrNoError(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); + if (use_cline_obj) { + use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True; + Py_DECREF(use_cline_obj); + } else { + PyErr_Clear(); + use_cline = NULL; + } + } + if (!use_cline) { + c_line = 0; + (void) PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); + } + else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { + c_line = 0; + } + __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); + return c_line; +} +#endif + +/* CodeObjectCache */ +#if !CYTHON_COMPILING_IN_LIMITED_API +static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { + int start = 0, mid = 0, end = count - 1; + if (end >= 0 && code_line > entries[end].code_line) { + return count; + } + while (start < end) { + mid = start + (end - start) / 2; + if (code_line < entries[mid].code_line) { + end = mid; + } else if (code_line > entries[mid].code_line) { + start = mid + 1; + } else { + return mid; + } + } + if (code_line <= entries[mid].code_line) { + return mid; + } else { + return mid + 1; + } +} +static PyCodeObject *__pyx_find_code_object(int code_line) { + PyCodeObject* code_object; + int pos; + if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { + return NULL; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { + return NULL; + } + code_object = __pyx_code_cache.entries[pos].code_object; + Py_INCREF(code_object); + return code_object; +} +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { + int pos, i; + __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; + if (unlikely(!code_line)) { + return; + } + if (unlikely(!entries)) { + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); + if (likely(entries)) { + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = 64; + __pyx_code_cache.count = 1; + entries[0].code_line = code_line; + entries[0].code_object = code_object; + Py_INCREF(code_object); + } + return; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + 
if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { + PyCodeObject* tmp = entries[pos].code_object; + entries[pos].code_object = code_object; + Py_DECREF(tmp); + return; + } + if (__pyx_code_cache.count == __pyx_code_cache.max_count) { + int new_max = __pyx_code_cache.max_count + 64; + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( + __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); + if (unlikely(!entries)) { + return; + } + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = new_max; + } + for (i=__pyx_code_cache.count; i>pos; i--) { + entries[i] = entries[i-1]; + } + entries[pos].code_line = code_line; + entries[pos].code_object = code_object; + __pyx_code_cache.count++; + Py_INCREF(code_object); +} +#endif + +/* AddTraceback */ +#include "compile.h" +#include "frameobject.h" +#include "traceback.h" +#if PY_VERSION_HEX >= 0x030b00a6 && !CYTHON_COMPILING_IN_LIMITED_API + #ifndef Py_BUILD_CORE + #define Py_BUILD_CORE 1 + #endif + #include "internal/pycore_frame.h" +#endif +#if CYTHON_COMPILING_IN_LIMITED_API +static PyObject *__Pyx_PyCode_Replace_For_AddTraceback(PyObject *code, PyObject *scratch_dict, + PyObject *firstlineno, PyObject *name) { + PyObject *replace = NULL; + if (unlikely(PyDict_SetItemString(scratch_dict, "co_firstlineno", firstlineno))) return NULL; + if (unlikely(PyDict_SetItemString(scratch_dict, "co_name", name))) return NULL; + replace = PyObject_GetAttrString(code, "replace"); + if (likely(replace)) { + PyObject *result; + result = PyObject_Call(replace, __pyx_empty_tuple, scratch_dict); + Py_DECREF(replace); + return result; + } + PyErr_Clear(); + #if __PYX_LIMITED_VERSION_HEX < 0x030780000 + { + PyObject *compiled = NULL, *result = NULL; + if (unlikely(PyDict_SetItemString(scratch_dict, "code", code))) return NULL; + if (unlikely(PyDict_SetItemString(scratch_dict, "type", (PyObject*)(&PyType_Type)))) return NULL; + compiled = Py_CompileString( + "out = type(code)(\n" + " code.co_argcount, code.co_kwonlyargcount, code.co_nlocals, code.co_stacksize,\n" + " code.co_flags, code.co_code, code.co_consts, code.co_names,\n" + " code.co_varnames, code.co_filename, co_name, co_firstlineno,\n" + " code.co_lnotab)\n", "", Py_file_input); + if (!compiled) return NULL; + result = PyEval_EvalCode(compiled, scratch_dict, scratch_dict); + Py_DECREF(compiled); + if (!result) PyErr_Print(); + Py_DECREF(result); + result = PyDict_GetItemString(scratch_dict, "out"); + if (result) Py_INCREF(result); + return result; + } + #else + return NULL; + #endif +} +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename) { + PyObject *code_object = NULL, *py_py_line = NULL, *py_funcname = NULL, *dict = NULL; + PyObject *replace = NULL, *getframe = NULL, *frame = NULL; + PyObject *exc_type, *exc_value, *exc_traceback; + int success = 0; + if (c_line) { + (void) __pyx_cfilenm; + (void) __Pyx_CLineForTraceback(__Pyx_PyThreadState_Current, c_line); + } + PyErr_Fetch(&exc_type, &exc_value, &exc_traceback); + code_object = Py_CompileString("_getframe()", filename, Py_eval_input); + if (unlikely(!code_object)) goto bad; + py_py_line = PyLong_FromLong(py_line); + if (unlikely(!py_py_line)) goto bad; + py_funcname = PyUnicode_FromString(funcname); + if (unlikely(!py_funcname)) goto bad; + dict = PyDict_New(); + if (unlikely(!dict)) goto bad; + { + PyObject *old_code_object = code_object; + code_object = __Pyx_PyCode_Replace_For_AddTraceback(code_object, 
dict, py_py_line, py_funcname); + Py_DECREF(old_code_object); + } + if (unlikely(!code_object)) goto bad; + getframe = PySys_GetObject("_getframe"); + if (unlikely(!getframe)) goto bad; + if (unlikely(PyDict_SetItemString(dict, "_getframe", getframe))) goto bad; + frame = PyEval_EvalCode(code_object, dict, dict); + if (unlikely(!frame) || frame == Py_None) goto bad; + success = 1; + bad: + PyErr_Restore(exc_type, exc_value, exc_traceback); + Py_XDECREF(code_object); + Py_XDECREF(py_py_line); + Py_XDECREF(py_funcname); + Py_XDECREF(dict); + Py_XDECREF(replace); + if (success) { + PyTraceBack_Here( + (struct _frame*)frame); + } + Py_XDECREF(frame); +} +#else +static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( + const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = NULL; + PyObject *py_funcname = NULL; + #if PY_MAJOR_VERSION < 3 + PyObject *py_srcfile = NULL; + py_srcfile = PyString_FromString(filename); + if (!py_srcfile) goto bad; + #endif + if (c_line) { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + if (!py_funcname) goto bad; + #else + py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + if (!py_funcname) goto bad; + funcname = PyUnicode_AsUTF8(py_funcname); + if (!funcname) goto bad; + #endif + } + else { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromString(funcname); + if (!py_funcname) goto bad; + #endif + } + #if PY_MAJOR_VERSION < 3 + py_code = __Pyx_PyCode_New( + 0, + 0, + 0, + 0, + 0, + 0, + __pyx_empty_bytes, /*PyObject *code,*/ + __pyx_empty_tuple, /*PyObject *consts,*/ + __pyx_empty_tuple, /*PyObject *names,*/ + __pyx_empty_tuple, /*PyObject *varnames,*/ + __pyx_empty_tuple, /*PyObject *freevars,*/ + __pyx_empty_tuple, /*PyObject *cellvars,*/ + py_srcfile, /*PyObject *filename,*/ + py_funcname, /*PyObject *name,*/ + py_line, + __pyx_empty_bytes /*PyObject *lnotab*/ + ); + Py_DECREF(py_srcfile); + #else + py_code = PyCode_NewEmpty(filename, funcname, py_line); + #endif + Py_XDECREF(py_funcname); + return py_code; +bad: + Py_XDECREF(py_funcname); + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(py_srcfile); + #endif + return NULL; +} +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyFrameObject *py_frame = 0; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject *ptype, *pvalue, *ptraceback; + if (c_line) { + c_line = __Pyx_CLineForTraceback(tstate, c_line); + } + py_code = __pyx_find_code_object(c_line ? -c_line : py_line); + if (!py_code) { + __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); + py_code = __Pyx_CreateCodeObjectForTraceback( + funcname, c_line, py_line, filename); + if (!py_code) { + /* If the code object creation fails, then we should clear the + fetched exception references and propagate the new exception */ + Py_XDECREF(ptype); + Py_XDECREF(pvalue); + Py_XDECREF(ptraceback); + goto bad; + } + __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); + __pyx_insert_code_object(c_line ? 
-c_line : py_line, py_code); + } + py_frame = PyFrame_New( + tstate, /*PyThreadState *tstate,*/ + py_code, /*PyCodeObject *code,*/ + __pyx_d, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ + ); + if (!py_frame) goto bad; + __Pyx_PyFrame_SetLineNumber(py_frame, py_line); + PyTraceBack_Here(py_frame); +bad: + Py_XDECREF(py_code); + Py_XDECREF(py_frame); +} +#endif + +/* CIntToPy */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const long neg_one = (long) -1, const_zero = (long) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(long) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(long) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(long) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + unsigned char *bytes = (unsigned char *)&value; +#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4 + if (is_unsigned) { + return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1); + } else { + return PyLong_FromNativeBytes(bytes, sizeof(value), -1); + } +#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000 + int one = 1; int little = (int)*(unsigned char *)&one; + return _PyLong_FromByteArray(bytes, sizeof(long), + little, !is_unsigned); +#else + int one = 1; int little = (int)*(unsigned char *)&one; + PyObject *from_bytes, *result = NULL; + PyObject *py_bytes = NULL, *arg_tuple = NULL, *kwds = NULL, *order_str = NULL; + from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes"); + if (!from_bytes) return NULL; + py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(long)); + if (!py_bytes) goto limited_bad; + order_str = PyUnicode_FromString(little ? 
"little" : "big"); + if (!order_str) goto limited_bad; + arg_tuple = PyTuple_Pack(2, py_bytes, order_str); + if (!arg_tuple) goto limited_bad; + if (!is_unsigned) { + kwds = PyDict_New(); + if (!kwds) goto limited_bad; + if (PyDict_SetItemString(kwds, "signed", __Pyx_NewRef(Py_True))) goto limited_bad; + } + result = PyObject_Call(from_bytes, arg_tuple, kwds); + limited_bad: + Py_XDECREF(kwds); + Py_XDECREF(arg_tuple); + Py_XDECREF(order_str); + Py_XDECREF(py_bytes); + Py_XDECREF(from_bytes); + return result; +#endif + } +} + +/* FormatTypeName */ +#if CYTHON_COMPILING_IN_LIMITED_API +static __Pyx_TypeName +__Pyx_PyType_GetName(PyTypeObject* tp) +{ + PyObject *name = __Pyx_PyObject_GetAttrStr((PyObject *)tp, + __pyx_n_s_name_2); + if (unlikely(name == NULL) || unlikely(!PyUnicode_Check(name))) { + PyErr_Clear(); + Py_XDECREF(name); + name = __Pyx_NewRef(__pyx_n_s__51); + } + return name; +} +#endif + +/* CIntFromPyVerify */ +#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) +#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) +#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ + {\ + func_type value = func_value;\ + if (sizeof(target_type) < sizeof(func_type)) {\ + if (unlikely(value != (func_type) (target_type) value)) {\ + func_type zero = 0;\ + if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ + return (target_type) -1;\ + if (is_unsigned && unlikely(value < zero))\ + goto raise_neg_overflow;\ + else\ + goto raise_overflow;\ + }\ + }\ + return (target_type) value;\ + } + +/* CIntFromPy */ +static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const long neg_one = (long) -1, const_zero = (long) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if ((sizeof(long) < sizeof(long))) { + __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (long) val; + } + } +#endif + if (unlikely(!PyLong_Check(x))) { + long val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (long) -1; + val = __Pyx_PyInt_As_long(tmp); + Py_DECREF(tmp); + return val; + } + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + if (unlikely(__Pyx_PyLong_IsNeg(x))) { + goto raise_neg_overflow; + } else if (__Pyx_PyLong_IsCompact(x)) { + __PYX_VERIFY_RETURN_INT(long, __Pyx_compact_upylong, __Pyx_PyLong_CompactValueUnsigned(x)) + } else { + const digit* digits = __Pyx_PyLong_Digits(x); + assert(__Pyx_PyLong_DigitCount(x) > 1); + switch (__Pyx_PyLong_DigitCount(x)) { + case 2: + if ((8 * sizeof(long) > 1 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(long) >= 2 * PyLong_SHIFT)) { + return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 3: + if ((8 * sizeof(long) > 2 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned 
long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(long) >= 3 * PyLong_SHIFT)) { + return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 4: + if ((8 * sizeof(long) > 3 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(long) >= 4 * PyLong_SHIFT)) { + return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + } + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030C00A7 + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (long) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if ((sizeof(long) <= sizeof(unsigned long))) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if ((sizeof(long) <= sizeof(unsigned PY_LONG_LONG))) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + if (__Pyx_PyLong_IsCompact(x)) { + __PYX_VERIFY_RETURN_INT(long, __Pyx_compact_pylong, __Pyx_PyLong_CompactValue(x)) + } else { + const digit* digits = __Pyx_PyLong_Digits(x); + assert(__Pyx_PyLong_DigitCount(x) > 1); + switch (__Pyx_PyLong_SignedDigitCount(x)) { + case -2: + if ((8 * sizeof(long) - 1 > 1 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(long) - 1 > 2 * PyLong_SHIFT)) { + return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 2: + if ((8 * sizeof(long) > 1 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(long) - 1 > 2 * PyLong_SHIFT)) { + return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -3: + if ((8 * sizeof(long) - 1 > 2 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(long) - 1 > 3 * PyLong_SHIFT)) { + return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 3: + if ((8 * sizeof(long) > 2 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(long) - 1 > 3 * PyLong_SHIFT)) { + return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + 
break; + case -4: + if ((8 * sizeof(long) - 1 > 3 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(long) - 1 > 4 * PyLong_SHIFT)) { + return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 4: + if ((8 * sizeof(long) > 3 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(long) - 1 > 4 * PyLong_SHIFT)) { + return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + } + } +#endif + if ((sizeof(long) <= sizeof(long))) { + __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if ((sizeof(long) <= sizeof(PY_LONG_LONG))) { + __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { + long val; + int ret = -1; +#if PY_VERSION_HEX >= 0x030d00A6 && !CYTHON_COMPILING_IN_LIMITED_API + Py_ssize_t bytes_copied = PyLong_AsNativeBytes( + x, &val, sizeof(val), Py_ASNATIVEBYTES_NATIVE_ENDIAN | (is_unsigned ? Py_ASNATIVEBYTES_UNSIGNED_BUFFER | Py_ASNATIVEBYTES_REJECT_NEGATIVE : 0)); + if (unlikely(bytes_copied == -1)) { + } else if (unlikely(bytes_copied > (Py_ssize_t) sizeof(val))) { + goto raise_overflow; + } else { + ret = 0; + } +#elif PY_VERSION_HEX < 0x030d0000 && !(CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API) || defined(_PyLong_AsByteArray) + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + ret = _PyLong_AsByteArray((PyLongObject *)x, + bytes, sizeof(val), + is_little, !is_unsigned); +#else + PyObject *v; + PyObject *stepval = NULL, *mask = NULL, *shift = NULL; + int bits, remaining_bits, is_negative = 0; + int chunk_size = (sizeof(long) < 8) ? 
30 : 62; + if (likely(PyLong_CheckExact(x))) { + v = __Pyx_NewRef(x); + } else { + v = PyNumber_Long(x); + if (unlikely(!v)) return (long) -1; + assert(PyLong_CheckExact(v)); + } + { + int result = PyObject_RichCompareBool(v, Py_False, Py_LT); + if (unlikely(result < 0)) { + Py_DECREF(v); + return (long) -1; + } + is_negative = result == 1; + } + if (is_unsigned && unlikely(is_negative)) { + Py_DECREF(v); + goto raise_neg_overflow; + } else if (is_negative) { + stepval = PyNumber_Invert(v); + Py_DECREF(v); + if (unlikely(!stepval)) + return (long) -1; + } else { + stepval = v; + } + v = NULL; + val = (long) 0; + mask = PyLong_FromLong((1L << chunk_size) - 1); if (unlikely(!mask)) goto done; + shift = PyLong_FromLong(chunk_size); if (unlikely(!shift)) goto done; + for (bits = 0; bits < (int) sizeof(long) * 8 - chunk_size; bits += chunk_size) { + PyObject *tmp, *digit; + long idigit; + digit = PyNumber_And(stepval, mask); + if (unlikely(!digit)) goto done; + idigit = PyLong_AsLong(digit); + Py_DECREF(digit); + if (unlikely(idigit < 0)) goto done; + val |= ((long) idigit) << bits; + tmp = PyNumber_Rshift(stepval, shift); + if (unlikely(!tmp)) goto done; + Py_DECREF(stepval); stepval = tmp; + } + Py_DECREF(shift); shift = NULL; + Py_DECREF(mask); mask = NULL; + { + long idigit = PyLong_AsLong(stepval); + if (unlikely(idigit < 0)) goto done; + remaining_bits = ((int) sizeof(long) * 8) - bits - (is_unsigned ? 0 : 1); + if (unlikely(idigit >= (1L << remaining_bits))) + goto raise_overflow; + val |= ((long) idigit) << bits; + } + if (!is_unsigned) { + if (unlikely(val & (((long) 1) << (sizeof(long) * 8 - 1)))) + goto raise_overflow; + if (is_negative) + val = ~val; + } + ret = 0; + done: + Py_XDECREF(shift); + Py_XDECREF(mask); + Py_XDECREF(stepval); +#endif + if (unlikely(ret)) + return (long) -1; + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to long"); + return (long) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to long"); + return (long) -1; +} + +/* CIntFromPy */ +static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const int neg_one = (int) -1, const_zero = (int) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if ((sizeof(int) < sizeof(long))) { + __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (int) val; + } + } +#endif + if (unlikely(!PyLong_Check(x))) { + int val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (int) -1; + val = __Pyx_PyInt_As_int(tmp); + Py_DECREF(tmp); + return val; + } + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + if (unlikely(__Pyx_PyLong_IsNeg(x))) { + goto raise_neg_overflow; + } else if (__Pyx_PyLong_IsCompact(x)) { + __PYX_VERIFY_RETURN_INT(int, __Pyx_compact_upylong, __Pyx_PyLong_CompactValueUnsigned(x)) + } else { + const digit* digits = __Pyx_PyLong_Digits(x); + assert(__Pyx_PyLong_DigitCount(x) > 1); + switch (__Pyx_PyLong_DigitCount(x)) { + case 2: + if ((8 * sizeof(int) > 1 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | 
(unsigned long)digits[0]))) + } else if ((8 * sizeof(int) >= 2 * PyLong_SHIFT)) { + return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 3: + if ((8 * sizeof(int) > 2 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(int) >= 3 * PyLong_SHIFT)) { + return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 4: + if ((8 * sizeof(int) > 3 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(int) >= 4 * PyLong_SHIFT)) { + return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + } + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030C00A7 + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (int) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if ((sizeof(int) <= sizeof(unsigned long))) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if ((sizeof(int) <= sizeof(unsigned PY_LONG_LONG))) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + if (__Pyx_PyLong_IsCompact(x)) { + __PYX_VERIFY_RETURN_INT(int, __Pyx_compact_pylong, __Pyx_PyLong_CompactValue(x)) + } else { + const digit* digits = __Pyx_PyLong_Digits(x); + assert(__Pyx_PyLong_DigitCount(x) > 1); + switch (__Pyx_PyLong_SignedDigitCount(x)) { + case -2: + if ((8 * sizeof(int) - 1 > 1 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(int) - 1 > 2 * PyLong_SHIFT)) { + return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 2: + if ((8 * sizeof(int) > 1 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(int) - 1 > 2 * PyLong_SHIFT)) { + return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -3: + if ((8 * sizeof(int) - 1 > 2 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(int) - 1 > 3 * PyLong_SHIFT)) { + return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 3: + if ((8 * sizeof(int) > 2 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, 
(((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(int) - 1 > 3 * PyLong_SHIFT)) { + return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -4: + if ((8 * sizeof(int) - 1 > 3 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(int) - 1 > 4 * PyLong_SHIFT)) { + return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 4: + if ((8 * sizeof(int) > 3 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(int) - 1 > 4 * PyLong_SHIFT)) { + return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + } + } +#endif + if ((sizeof(int) <= sizeof(long))) { + __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if ((sizeof(int) <= sizeof(PY_LONG_LONG))) { + __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { + int val; + int ret = -1; +#if PY_VERSION_HEX >= 0x030d00A6 && !CYTHON_COMPILING_IN_LIMITED_API + Py_ssize_t bytes_copied = PyLong_AsNativeBytes( + x, &val, sizeof(val), Py_ASNATIVEBYTES_NATIVE_ENDIAN | (is_unsigned ? Py_ASNATIVEBYTES_UNSIGNED_BUFFER | Py_ASNATIVEBYTES_REJECT_NEGATIVE : 0)); + if (unlikely(bytes_copied == -1)) { + } else if (unlikely(bytes_copied > (Py_ssize_t) sizeof(val))) { + goto raise_overflow; + } else { + ret = 0; + } +#elif PY_VERSION_HEX < 0x030d0000 && !(CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API) || defined(_PyLong_AsByteArray) + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + ret = _PyLong_AsByteArray((PyLongObject *)x, + bytes, sizeof(val), + is_little, !is_unsigned); +#else + PyObject *v; + PyObject *stepval = NULL, *mask = NULL, *shift = NULL; + int bits, remaining_bits, is_negative = 0; + int chunk_size = (sizeof(long) < 8) ? 
30 : 62; + if (likely(PyLong_CheckExact(x))) { + v = __Pyx_NewRef(x); + } else { + v = PyNumber_Long(x); + if (unlikely(!v)) return (int) -1; + assert(PyLong_CheckExact(v)); + } + { + int result = PyObject_RichCompareBool(v, Py_False, Py_LT); + if (unlikely(result < 0)) { + Py_DECREF(v); + return (int) -1; + } + is_negative = result == 1; + } + if (is_unsigned && unlikely(is_negative)) { + Py_DECREF(v); + goto raise_neg_overflow; + } else if (is_negative) { + stepval = PyNumber_Invert(v); + Py_DECREF(v); + if (unlikely(!stepval)) + return (int) -1; + } else { + stepval = v; + } + v = NULL; + val = (int) 0; + mask = PyLong_FromLong((1L << chunk_size) - 1); if (unlikely(!mask)) goto done; + shift = PyLong_FromLong(chunk_size); if (unlikely(!shift)) goto done; + for (bits = 0; bits < (int) sizeof(int) * 8 - chunk_size; bits += chunk_size) { + PyObject *tmp, *digit; + long idigit; + digit = PyNumber_And(stepval, mask); + if (unlikely(!digit)) goto done; + idigit = PyLong_AsLong(digit); + Py_DECREF(digit); + if (unlikely(idigit < 0)) goto done; + val |= ((int) idigit) << bits; + tmp = PyNumber_Rshift(stepval, shift); + if (unlikely(!tmp)) goto done; + Py_DECREF(stepval); stepval = tmp; + } + Py_DECREF(shift); shift = NULL; + Py_DECREF(mask); mask = NULL; + { + long idigit = PyLong_AsLong(stepval); + if (unlikely(idigit < 0)) goto done; + remaining_bits = ((int) sizeof(int) * 8) - bits - (is_unsigned ? 0 : 1); + if (unlikely(idigit >= (1L << remaining_bits))) + goto raise_overflow; + val |= ((int) idigit) << bits; + } + if (!is_unsigned) { + if (unlikely(val & (((int) 1) << (sizeof(int) * 8 - 1)))) + goto raise_overflow; + if (is_negative) + val = ~val; + } + ret = 0; + done: + Py_XDECREF(shift); + Py_XDECREF(mask); + Py_XDECREF(stepval); +#endif + if (unlikely(ret)) + return (int) -1; + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to int"); + return (int) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to int"); + return (int) -1; +} + +/* CheckBinaryVersion */ +static unsigned long __Pyx_get_runtime_version(void) { +#if __PYX_LIMITED_VERSION_HEX >= 0x030B00A4 + return Py_Version & ~0xFFUL; +#else + const char* rt_version = Py_GetVersion(); + unsigned long version = 0; + unsigned long factor = 0x01000000UL; + unsigned int digit = 0; + int i = 0; + while (factor) { + while ('0' <= rt_version[i] && rt_version[i] <= '9') { + digit = digit * 10 + (unsigned int) (rt_version[i] - '0'); + ++i; + } + version += factor * digit; + if (rt_version[i] != '.') + break; + digit = 0; + factor >>= 8; + ++i; + } + return version; +#endif +} +static int __Pyx_check_binary_version(unsigned long ct_version, unsigned long rt_version, int allow_newer) { + const unsigned long MAJOR_MINOR = 0xFFFF0000UL; + if ((rt_version & MAJOR_MINOR) == (ct_version & MAJOR_MINOR)) + return 0; + if (likely(allow_newer && (rt_version & MAJOR_MINOR) > (ct_version & MAJOR_MINOR))) + return 1; + { + char message[200]; + PyOS_snprintf(message, sizeof(message), + "compile time Python version %d.%d " + "of module '%.100s' " + "%s " + "runtime version %d.%d", + (int) (ct_version >> 24), (int) ((ct_version >> 16) & 0xFF), + __Pyx_MODULE_NAME, + (allow_newer) ? 
"was newer than" : "does not match", + (int) (rt_version >> 24), (int) ((rt_version >> 16) & 0xFF) + ); + return PyErr_WarnEx(NULL, message, 1); + } +} + +/* InitStrings */ +#if PY_MAJOR_VERSION >= 3 +static int __Pyx_InitString(__Pyx_StringTabEntry t, PyObject **str) { + if (t.is_unicode | t.is_str) { + if (t.intern) { + *str = PyUnicode_InternFromString(t.s); + } else if (t.encoding) { + *str = PyUnicode_Decode(t.s, t.n - 1, t.encoding, NULL); + } else { + *str = PyUnicode_FromStringAndSize(t.s, t.n - 1); + } + } else { + *str = PyBytes_FromStringAndSize(t.s, t.n - 1); + } + if (!*str) + return -1; + if (PyObject_Hash(*str) == -1) + return -1; + return 0; +} +#endif +static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { + while (t->p) { + #if PY_MAJOR_VERSION >= 3 + __Pyx_InitString(*t, t->p); + #else + if (t->is_unicode) { + *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); + } else if (t->intern) { + *t->p = PyString_InternFromString(t->s); + } else { + *t->p = PyString_FromStringAndSize(t->s, t->n - 1); + } + if (!*t->p) + return -1; + if (PyObject_Hash(*t->p) == -1) + return -1; + #endif + ++t; + } + return 0; +} + +#include +static CYTHON_INLINE Py_ssize_t __Pyx_ssize_strlen(const char *s) { + size_t len = strlen(s); + if (unlikely(len > (size_t) PY_SSIZE_T_MAX)) { + PyErr_SetString(PyExc_OverflowError, "byte string is too long"); + return -1; + } + return (Py_ssize_t) len; +} +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { + Py_ssize_t len = __Pyx_ssize_strlen(c_str); + if (unlikely(len < 0)) return NULL; + return __Pyx_PyUnicode_FromStringAndSize(c_str, len); +} +static CYTHON_INLINE PyObject* __Pyx_PyByteArray_FromString(const char* c_str) { + Py_ssize_t len = __Pyx_ssize_strlen(c_str); + if (unlikely(len < 0)) return NULL; + return PyByteArray_FromStringAndSize(c_str, len); +} +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { + Py_ssize_t ignore; + return __Pyx_PyObject_AsStringAndSize(o, &ignore); +} +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +#if !CYTHON_PEP393_ENABLED +static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + char* defenc_c; + PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); + if (!defenc) return NULL; + defenc_c = PyBytes_AS_STRING(defenc); +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + { + char* end = defenc_c + PyBytes_GET_SIZE(defenc); + char* c; + for (c = defenc_c; c < end; c++) { + if ((unsigned char) (*c) >= 128) { + PyUnicode_AsASCIIString(o); + return NULL; + } + } + } +#endif + *length = PyBytes_GET_SIZE(defenc); + return defenc_c; +} +#else +static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + if (likely(PyUnicode_IS_ASCII(o))) { + *length = PyUnicode_GET_LENGTH(o); + return PyUnicode_AsUTF8(o); + } else { + PyUnicode_AsASCIIString(o); + return NULL; + } +#else + return PyUnicode_AsUTF8AndSize(o, length); +#endif +} +#endif +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT + if ( +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + __Pyx_sys_getdefaultencoding_not_ascii && +#endif + PyUnicode_Check(o)) { + return __Pyx_PyUnicode_AsStringAndSize(o, length); + } else +#endif +#if 
(!CYTHON_COMPILING_IN_PYPY && !CYTHON_COMPILING_IN_LIMITED_API) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) + if (PyByteArray_Check(o)) { + *length = PyByteArray_GET_SIZE(o); + return PyByteArray_AS_STRING(o); + } else +#endif + { + char* result; + int r = PyBytes_AsStringAndSize(o, &result, length); + if (unlikely(r < 0)) { + return NULL; + } else { + return result; + } + } +} +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { + int is_true = x == Py_True; + if (is_true | (x == Py_False) | (x == Py_None)) return is_true; + else return PyObject_IsTrue(x); +} +static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { + int retval; + if (unlikely(!x)) return -1; + retval = __Pyx_PyObject_IsTrue(x); + Py_DECREF(x); + return retval; +} +static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { + __Pyx_TypeName result_type_name = __Pyx_PyType_GetName(Py_TYPE(result)); +#if PY_MAJOR_VERSION >= 3 + if (PyLong_Check(result)) { + if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, + "__int__ returned non-int (type " __Pyx_FMT_TYPENAME "). " + "The ability to return an instance of a strict subclass of int is deprecated, " + "and may be removed in a future version of Python.", + result_type_name)) { + __Pyx_DECREF_TypeName(result_type_name); + Py_DECREF(result); + return NULL; + } + __Pyx_DECREF_TypeName(result_type_name); + return result; + } +#endif + PyErr_Format(PyExc_TypeError, + "__%.4s__ returned non-%.4s (type " __Pyx_FMT_TYPENAME ")", + type_name, type_name, result_type_name); + __Pyx_DECREF_TypeName(result_type_name); + Py_DECREF(result); + return NULL; +} +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { +#if CYTHON_USE_TYPE_SLOTS + PyNumberMethods *m; +#endif + const char *name = NULL; + PyObject *res = NULL; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x) || PyLong_Check(x))) +#else + if (likely(PyLong_Check(x))) +#endif + return __Pyx_NewRef(x); +#if CYTHON_USE_TYPE_SLOTS + m = Py_TYPE(x)->tp_as_number; + #if PY_MAJOR_VERSION < 3 + if (m && m->nb_int) { + name = "int"; + res = m->nb_int(x); + } + else if (m && m->nb_long) { + name = "long"; + res = m->nb_long(x); + } + #else + if (likely(m && m->nb_int)) { + name = "int"; + res = m->nb_int(x); + } + #endif +#else + if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { + res = PyNumber_Int(x); + } +#endif + if (likely(res)) { +#if PY_MAJOR_VERSION < 3 + if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { +#else + if (unlikely(!PyLong_CheckExact(res))) { +#endif + return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); + } + } + else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_TypeError, + "an integer is required"); + } + return res; +} +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { + Py_ssize_t ival; + PyObject *x; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact(b))) { + if (sizeof(Py_ssize_t) >= sizeof(long)) + return PyInt_AS_LONG(b); + else + return PyInt_AsSsize_t(b); + } +#endif + if (likely(PyLong_CheckExact(b))) { + #if CYTHON_USE_PYLONG_INTERNALS + if (likely(__Pyx_PyLong_IsCompact(b))) { + return __Pyx_PyLong_CompactValue(b); + } else { + const digit* digits = __Pyx_PyLong_Digits(b); + const Py_ssize_t size = __Pyx_PyLong_SignedDigitCount(b); + switch (size) { + case 2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -2: + if (8 * sizeof(Py_ssize_t) > 2 * 
PyLong_SHIFT) { + return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + } + } + #endif + return PyLong_AsSsize_t(b); + } + x = PyNumber_Index(b); + if (!x) return -1; + ival = PyInt_AsSsize_t(x); + Py_DECREF(x); + return ival; +} +static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject* o) { + if (sizeof(Py_hash_t) == sizeof(Py_ssize_t)) { + return (Py_hash_t) __Pyx_PyIndex_AsSsize_t(o); +#if PY_MAJOR_VERSION < 3 + } else if (likely(PyInt_CheckExact(o))) { + return PyInt_AS_LONG(o); +#endif + } else { + Py_ssize_t ival; + PyObject *x; + x = PyNumber_Index(o); + if (!x) return -1; + ival = PyInt_AsLong(x); + Py_DECREF(x); + return ival; + } +} +static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { + return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); +} +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { + return PyInt_FromSize_t(ival); +} + + +/* #### Code section: utility_code_pragmas_end ### */ +#ifdef _MSC_VER +#pragma warning( pop ) +#endif + + + +/* #### Code section: end ### */ +#endif /* Py_PYTHON_H */ diff --git a/infer_4_47_1/lib/python3.10/site-packages/fontTools/feaLib/lexer.py b/infer_4_47_1/lib/python3.10/site-packages/fontTools/feaLib/lexer.py new file mode 100644 index 0000000000000000000000000000000000000000..4b6499d06f591efbdb5e603cbcd256840695f431 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/fontTools/feaLib/lexer.py @@ -0,0 +1,287 @@ +from fontTools.feaLib.error import FeatureLibError, IncludedFeaNotFound +from fontTools.feaLib.location import FeatureLibLocation +import re +import os + +try: + import cython +except ImportError: + # if cython not installed, use mock module with no-op decorators and types + from fontTools.misc import cython + + +class Lexer(object): + NUMBER = "NUMBER" + HEXADECIMAL = "HEXADECIMAL" + OCTAL = "OCTAL" + NUMBERS = (NUMBER, HEXADECIMAL, OCTAL) + FLOAT = "FLOAT" + STRING = "STRING" + NAME = "NAME" + FILENAME = "FILENAME" + GLYPHCLASS = "GLYPHCLASS" + CID = "CID" + SYMBOL = "SYMBOL" + COMMENT = "COMMENT" + NEWLINE = "NEWLINE" + ANONYMOUS_BLOCK = "ANONYMOUS_BLOCK" + + CHAR_WHITESPACE_ = " \t" + CHAR_NEWLINE_ = "\r\n" + CHAR_SYMBOL_ = ",;:-+'{}[]<>()=" + CHAR_DIGIT_ = "0123456789" + CHAR_HEXDIGIT_ = "0123456789ABCDEFabcdef" + CHAR_LETTER_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + CHAR_NAME_START_ = CHAR_LETTER_ + "_+*:.^~!\\" + CHAR_NAME_CONTINUATION_ = CHAR_LETTER_ + CHAR_DIGIT_ + "_.+*:^~!/-" + + RE_GLYPHCLASS = re.compile(r"^[A-Za-z_0-9.\-]+$") + + MODE_NORMAL_ = "NORMAL" + MODE_FILENAME_ = "FILENAME" + + def __init__(self, text, filename): + self.filename_ = 
filename + self.line_ = 1 + self.pos_ = 0 + self.line_start_ = 0 + self.text_ = text + self.text_length_ = len(text) + self.mode_ = Lexer.MODE_NORMAL_ + + def __iter__(self): + return self + + def next(self): # Python 2 + return self.__next__() + + def __next__(self): # Python 3 + while True: + token_type, token, location = self.next_() + if token_type != Lexer.NEWLINE: + return (token_type, token, location) + + def location_(self): + column = self.pos_ - self.line_start_ + 1 + return FeatureLibLocation(self.filename_ or "", self.line_, column) + + def next_(self): + self.scan_over_(Lexer.CHAR_WHITESPACE_) + location = self.location_() + start = self.pos_ + text = self.text_ + limit = len(text) + if start >= limit: + raise StopIteration() + cur_char = text[start] + next_char = text[start + 1] if start + 1 < limit else None + + if cur_char == "\n": + self.pos_ += 1 + self.line_ += 1 + self.line_start_ = self.pos_ + return (Lexer.NEWLINE, None, location) + if cur_char == "\r": + self.pos_ += 2 if next_char == "\n" else 1 + self.line_ += 1 + self.line_start_ = self.pos_ + return (Lexer.NEWLINE, None, location) + if cur_char == "#": + self.scan_until_(Lexer.CHAR_NEWLINE_) + return (Lexer.COMMENT, text[start : self.pos_], location) + + if self.mode_ is Lexer.MODE_FILENAME_: + if cur_char != "(": + raise FeatureLibError("Expected '(' before file name", location) + self.scan_until_(")") + cur_char = text[self.pos_] if self.pos_ < limit else None + if cur_char != ")": + raise FeatureLibError("Expected ')' after file name", location) + self.pos_ += 1 + self.mode_ = Lexer.MODE_NORMAL_ + return (Lexer.FILENAME, text[start + 1 : self.pos_ - 1], location) + + if cur_char == "\\" and next_char in Lexer.CHAR_DIGIT_: + self.pos_ += 1 + self.scan_over_(Lexer.CHAR_DIGIT_) + return (Lexer.CID, int(text[start + 1 : self.pos_], 10), location) + if cur_char == "@": + self.pos_ += 1 + self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) + glyphclass = text[start + 1 : self.pos_] + if len(glyphclass) < 1: + raise FeatureLibError("Expected glyph class name", location) + if not Lexer.RE_GLYPHCLASS.match(glyphclass): + raise FeatureLibError( + "Glyph class names must consist of letters, digits, " + "underscore, period or hyphen", + location, + ) + return (Lexer.GLYPHCLASS, glyphclass, location) + if cur_char in Lexer.CHAR_NAME_START_: + self.pos_ += 1 + self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) + token = text[start : self.pos_] + if token == "include": + self.mode_ = Lexer.MODE_FILENAME_ + return (Lexer.NAME, token, location) + if cur_char == "0" and next_char in "xX": + self.pos_ += 2 + self.scan_over_(Lexer.CHAR_HEXDIGIT_) + return (Lexer.HEXADECIMAL, int(text[start : self.pos_], 16), location) + if cur_char == "0" and next_char in Lexer.CHAR_DIGIT_: + self.scan_over_(Lexer.CHAR_DIGIT_) + return (Lexer.OCTAL, int(text[start : self.pos_], 8), location) + if cur_char in Lexer.CHAR_DIGIT_: + self.scan_over_(Lexer.CHAR_DIGIT_) + if self.pos_ >= limit or text[self.pos_] != ".": + return (Lexer.NUMBER, int(text[start : self.pos_], 10), location) + self.scan_over_(".") + self.scan_over_(Lexer.CHAR_DIGIT_) + return (Lexer.FLOAT, float(text[start : self.pos_]), location) + if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_: + self.pos_ += 1 + self.scan_over_(Lexer.CHAR_DIGIT_) + if self.pos_ >= limit or text[self.pos_] != ".": + return (Lexer.NUMBER, int(text[start : self.pos_], 10), location) + self.scan_over_(".") + self.scan_over_(Lexer.CHAR_DIGIT_) + return (Lexer.FLOAT, float(text[start : self.pos_]), location) + if 
cur_char in Lexer.CHAR_SYMBOL_: + self.pos_ += 1 + return (Lexer.SYMBOL, cur_char, location) + if cur_char == '"': + self.pos_ += 1 + self.scan_until_('"') + if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"': + self.pos_ += 1 + # strip newlines embedded within a string + string = re.sub("[\r\n]", "", text[start + 1 : self.pos_ - 1]) + return (Lexer.STRING, string, location) + else: + raise FeatureLibError("Expected '\"' to terminate string", location) + raise FeatureLibError("Unexpected character: %r" % cur_char, location) + + def scan_over_(self, valid): + p = self.pos_ + while p < self.text_length_ and self.text_[p] in valid: + p += 1 + self.pos_ = p + + def scan_until_(self, stop_at): + p = self.pos_ + while p < self.text_length_ and self.text_[p] not in stop_at: + p += 1 + self.pos_ = p + + def scan_anonymous_block(self, tag): + location = self.location_() + tag = tag.strip() + self.scan_until_(Lexer.CHAR_NEWLINE_) + self.scan_over_(Lexer.CHAR_NEWLINE_) + regexp = r"}\s*" + tag + r"\s*;" + split = re.split(regexp, self.text_[self.pos_ :], maxsplit=1) + if len(split) != 2: + raise FeatureLibError( + "Expected '} %s;' to terminate anonymous block" % tag, location + ) + self.pos_ += len(split[0]) + return (Lexer.ANONYMOUS_BLOCK, split[0], location) + + +class IncludingLexer(object): + """A Lexer that follows include statements. + + The OpenType feature file specification states that due to + historical reasons, relative imports should be resolved in this + order: + + 1. If the source font is UFO format, then relative to the UFO's + font directory + 2. relative to the top-level include file + 3. relative to the parent include file + + We only support 1 (via includeDir) and 2. + """ + + def __init__(self, featurefile, *, includeDir=None): + """Initializes an IncludingLexer. + + Behavior: + If includeDir is passed, it will be used to determine the top-level + include directory to use for all encountered include statements. If it is + not passed, ``os.path.dirname(featurefile)`` will be considered the + include directory. 
+ """ + + self.lexers_ = [self.make_lexer_(featurefile)] + self.featurefilepath = self.lexers_[0].filename_ + self.includeDir = includeDir + + def __iter__(self): + return self + + def next(self): # Python 2 + return self.__next__() + + def __next__(self): # Python 3 + while self.lexers_: + lexer = self.lexers_[-1] + try: + token_type, token, location = next(lexer) + except StopIteration: + self.lexers_.pop() + continue + if token_type is Lexer.NAME and token == "include": + fname_type, fname_token, fname_location = lexer.next() + if fname_type is not Lexer.FILENAME: + raise FeatureLibError("Expected file name", fname_location) + # semi_type, semi_token, semi_location = lexer.next() + # if semi_type is not Lexer.SYMBOL or semi_token != ";": + # raise FeatureLibError("Expected ';'", semi_location) + if os.path.isabs(fname_token): + path = fname_token + else: + if self.includeDir is not None: + curpath = self.includeDir + elif self.featurefilepath is not None: + curpath = os.path.dirname(self.featurefilepath) + else: + # if the IncludingLexer was initialized from an in-memory + # file-like stream, it doesn't have a 'name' pointing to + # its filesystem path, therefore we fall back to using the + # current working directory to resolve relative includes + curpath = os.getcwd() + path = os.path.join(curpath, fname_token) + if len(self.lexers_) >= 5: + raise FeatureLibError("Too many recursive includes", fname_location) + try: + self.lexers_.append(self.make_lexer_(path)) + except FileNotFoundError as err: + raise IncludedFeaNotFound(fname_token, fname_location) from err + else: + return (token_type, token, location) + raise StopIteration() + + @staticmethod + def make_lexer_(file_or_path): + if hasattr(file_or_path, "read"): + fileobj, closing = file_or_path, False + else: + filename, closing = file_or_path, True + fileobj = open(filename, "r", encoding="utf-8-sig") + data = fileobj.read() + filename = getattr(fileobj, "name", None) + if closing: + fileobj.close() + return Lexer(data, filename) + + def scan_anonymous_block(self, tag): + return self.lexers_[-1].scan_anonymous_block(tag) + + +class NonIncludingLexer(IncludingLexer): + """Lexer that does not follow `include` statements, emits them as-is.""" + + def __next__(self): # Python 3 + return next(self.lexers_[0]) diff --git a/infer_4_47_1/lib/python3.10/site-packages/fontTools/feaLib/location.py b/infer_4_47_1/lib/python3.10/site-packages/fontTools/feaLib/location.py new file mode 100644 index 0000000000000000000000000000000000000000..50f761d2d2a13bd101a7db9c259fedc98eed52cf --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/fontTools/feaLib/location.py @@ -0,0 +1,12 @@ +from typing import NamedTuple + + +class FeatureLibLocation(NamedTuple): + """A location in a feature file""" + + file: str + line: int + column: int + + def __str__(self): + return f"{self.file}:{self.line}:{self.column}" diff --git a/infer_4_47_1/lib/python3.10/site-packages/fontTools/ufoLib/__pycache__/etree.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/fontTools/ufoLib/__pycache__/etree.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05ac26f9d3e37811e00eba8912c92ec81328a16c Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/fontTools/ufoLib/__pycache__/etree.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/fontTools/ufoLib/__pycache__/filenames.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/fontTools/ufoLib/__pycache__/filenames.cpython-310.pyc 
new file mode 100644 index 0000000000000000000000000000000000000000..919ea4059e041ea50ea8e19adaea1c2b469063ac Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/fontTools/ufoLib/__pycache__/filenames.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/fontTools/ufoLib/__pycache__/kerning.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/fontTools/ufoLib/__pycache__/kerning.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7524d048d1de9e252b57c56c6ccc45a60d16cf51 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/fontTools/ufoLib/__pycache__/kerning.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/fontTools/ufoLib/__pycache__/utils.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/fontTools/ufoLib/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..794291eb628d783e45f0e67f86102d7daf2e06b9 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/fontTools/ufoLib/__pycache__/utils.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/fontTools/ufoLib/__pycache__/validators.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/fontTools/ufoLib/__pycache__/validators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8afd30feb6473d752742492c754e655d3ca38f3a Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/fontTools/ufoLib/__pycache__/validators.cpython-310.pyc differ
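
Note on the fontTools.feaLib lexer added above: `Lexer` tokenizes a feature-file string into `(type, value, location)` triples, while `IncludingLexer` additionally follows `include` statements, resolving relative paths against `includeDir` when it is given, otherwise against the top-level feature file's directory (or the current working directory for in-memory streams), as its docstring describes. The snippet below is a minimal usage sketch, not part of the diff; the feature text is invented for illustration and only names visible in the source above (`Lexer`, `IncludingLexer`, the keyword-only `includeDir` parameter) are used.

```python
# Minimal usage sketch (illustration only; the feature text below is invented).
from io import StringIO

from fontTools.feaLib.lexer import IncludingLexer, Lexer

fea = StringIO("feature liga {\n    sub f i by f_i;\n} liga;\n")

# IncludingLexer accepts a path or a file-like object; the optional, keyword-only
# includeDir argument overrides where relative include(...) paths are resolved.
for token_type, token, location in IncludingLexer(fea):
    if token_type == Lexer.COMMENT:
        # Comments are emitted as tokens; NEWLINE tokens are already skipped.
        continue
    print(token_type, token, location)
```

Passing a filesystem path instead of a stream makes `os.path.dirname()` of that path the default include directory, which corresponds to resolution rule 2 in the `IncludingLexer` docstring.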