Dataset schema (one row per source file; "nullable" marks columns that may be empty):

| column | type | length / range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3–1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3–972 |
| max_stars_repo_name | string | length 6–130 |
| max_stars_repo_head_hexsha | string | length 40–78 |
| max_stars_repo_licenses | sequence | length 1–10 |
| max_stars_count | int64 | 1–191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3–972 |
| max_issues_repo_name | string | length 6–130 |
| max_issues_repo_head_hexsha | string | length 40–78 |
| max_issues_repo_licenses | sequence | length 1–10 |
| max_issues_count | int64 | 1–116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3–972 |
| max_forks_repo_name | string | length 6–130 |
| max_forks_repo_head_hexsha | string | length 40–78 |
| max_forks_repo_licenses | sequence | length 1–10 |
| max_forks_count | int64 | 1–105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 3–1.03M |
| avg_line_length | float64 | 1.13–941k |
| max_line_length | int64 | 2–941k |
| alphanum_fraction | float64 | 0–1 |

Each data row below gives these fields in this order, pipe-separated, with the file content printed in full between the fork metadata and the three trailing line statistics.
13806032a5703310f410b87dafb215a89e72b385 | 4,991 | py | Python | sib_api_v3_sdk/models/remaining_credit_model_reseller.py | Edraak/APIv3-python-library | 4a97bf479d92ca08d5a2881ac37e397d3a1846b4 | ["MIT"] | null | null | null | sib_api_v3_sdk/models/remaining_credit_model_reseller.py | Edraak/APIv3-python-library | 4a97bf479d92ca08d5a2881ac37e397d3a1846b4 | ["MIT"] | null | null | null | sib_api_v3_sdk/models/remaining_credit_model_reseller.py | Edraak/APIv3-python-library | 4a97bf479d92ca08d5a2881ac37e397d3a1846b4 | ["MIT"] | null | null | null |
# coding: utf-8
"""
SendinBlue API
SendinBlue provide a RESTFul API that can be used with any languages. With this API, you will be able to : - Manage your campaigns and get the statistics - Manage your contacts - Send transactional Emails and SMS - and much more... You can download our wrappers at https://github.com/orgs/sendinblue **Possible responses** | Code | Message | | :-------------: | ------------- | | 200 | OK. Successful Request | | 201 | OK. Successful Creation | | 202 | OK. Request accepted | | 204 | OK. Successful Update/Deletion | | 400 | Error. Bad Request | | 401 | Error. Authentication Needed | | 402 | Error. Not enough credit, plan upgrade needed | | 403 | Error. Permission denied | | 404 | Error. Object does not exist | | 405 | Error. Method not allowed | | 406 | Error. Not Acceptable | # noqa: E501
OpenAPI spec version: 3.0.0
Contact: contact@sendinblue.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class RemainingCreditModelReseller(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'sms': 'int',
'email': 'int'
}
attribute_map = {
'sms': 'sms',
'email': 'email'
}
def __init__(self, sms=None, email=None): # noqa: E501
"""RemainingCreditModelReseller - a model defined in Swagger""" # noqa: E501
self._sms = None
self._email = None
self.discriminator = None
self.sms = sms
self.email = email
@property
def sms(self):
"""Gets the sms of this RemainingCreditModelReseller. # noqa: E501
SMS Credits remaining for reseller account # noqa: E501
:return: The sms of this RemainingCreditModelReseller. # noqa: E501
:rtype: int
"""
return self._sms
@sms.setter
def sms(self, sms):
"""Sets the sms of this RemainingCreditModelReseller.
SMS Credits remaining for reseller account # noqa: E501
:param sms: The sms of this RemainingCreditModelReseller. # noqa: E501
:type: int
"""
if sms is None:
raise ValueError("Invalid value for `sms`, must not be `None`") # noqa: E501
self._sms = sms
@property
def email(self):
"""Gets the email of this RemainingCreditModelReseller. # noqa: E501
Email Credits remaining for reseller account # noqa: E501
:return: The email of this RemainingCreditModelReseller. # noqa: E501
:rtype: int
"""
return self._email
@email.setter
def email(self, email):
"""Sets the email of this RemainingCreditModelReseller.
Email Credits remaining for reseller account # noqa: E501
:param email: The email of this RemainingCreditModelReseller. # noqa: E501
:type: int
"""
if email is None:
raise ValueError("Invalid value for `email`, must not be `None`") # noqa: E501
self._email = email
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(RemainingCreditModelReseller, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RemainingCreditModelReseller):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 33.722973 | 856 | 0.582248 |
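A minimal usage sketch for the generated model above. It is hypothetical in the sense that the SDK normally returns this object from its reseller endpoints rather than having callers build it by hand; the credit values are illustrative.

```python
from sib_api_v3_sdk.models.remaining_credit_model_reseller import RemainingCreditModelReseller

# Both fields are required; passing None raises ValueError in the setters.
credits = RemainingCreditModelReseller(sms=1200, email=50000)

print(credits.to_dict())   # {'sms': 1200, 'email': 50000}
print(credits)             # pretty-printed via to_str()/__repr__
```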
5cd304ab73b9bd75e6d4da823332247af506b3cf | 2,042 | py | Python | seq2seqRL/model/decoder.py | heiseish/DLPlayground | 24b528779bfdcbea3e295cba847514bf840b9c06 | ["MIT"] | 1 | 2019-03-16T04:45:33.000Z | 2019-03-16T04:45:33.000Z | seq2seqRL/model/decoder.py | heiseish/DLPlayground | 24b528779bfdcbea3e295cba847514bf840b9c06 | ["MIT"] | null | null | null | seq2seqRL/model/decoder.py | heiseish/DLPlayground | 24b528779bfdcbea3e295cba847514bf840b9c06 | ["MIT"] | null | null | null |
# Package
import torch  # needed for torch.cat / torch.tanh used in forward()
import torch.nn as nn
import torch.nn.functional as F
# Local files
from model.attention import *
class LuongAttnDecoderRNN(nn.Module):
def __init__(self, attn_model, embedding, hidden_size, output_size, n_layers=1, dropout=0.1):
super(LuongAttnDecoderRNN, self).__init__()
# Keep for reference
self.attn_model = attn_model
self.hidden_size = hidden_size
self.output_size = output_size
self.n_layers = n_layers
self.dropout = dropout
# Define layers
self.embedding = embedding
self.embedding_dropout = nn.Dropout(dropout)
self.gru = nn.GRU(hidden_size, hidden_size, n_layers, dropout=(0 if n_layers == 1 else dropout))
self.concat = nn.Linear(hidden_size * 2, hidden_size)
self.out = nn.Linear(hidden_size, output_size)
self.attn = Attn(attn_model, hidden_size)
def forward(self, input_step, last_hidden, encoder_outputs):
# Note: we run this one step (word) at a time
# Get embedding of current input word
embedded = self.embedding(input_step)
embedded = self.embedding_dropout(embedded)
# Forward through unidirectional GRU
rnn_output, hidden = self.gru(embedded, last_hidden)
# Calculate attention weights from the current GRU output
attn_weights = self.attn(rnn_output, encoder_outputs)
# Multiply attention weights to encoder outputs to get new "weighted sum" context vector
context = attn_weights.bmm(encoder_outputs.transpose(0, 1))
# Concatenate weighted context vector and GRU output using Luong eq. 5
rnn_output = rnn_output.squeeze(0)
context = context.squeeze(1)
concat_input = torch.cat((rnn_output, context), 1)
concat_output = torch.tanh(self.concat(concat_input))
# Predict next word using Luong eq. 6
output = self.out(concat_output)
output = F.softmax(output, dim=1)
# Return output and final hidden state
        return output, hidden
| 41.673469 | 104 | 0.681195 |
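A hedged, single-step usage sketch for the decoder above. The module path `model.decoder`, the `'dot'` attention method, and the behaviour of `Attn` from `model.attention` (assumed to take `(attn_model, hidden_size)` and return weights of shape `(batch, 1, src_len)`, as in the standard PyTorch chatbot tutorial) are assumptions, since `attention.py` is not part of this row; tensor shapes follow the comments in `forward()`.

```python
import torch
import torch.nn as nn
from model.decoder import LuongAttnDecoderRNN  # assumed import path for this file

hidden_size, vocab_size, batch, src_len = 500, 7000, 4, 10
embedding = nn.Embedding(vocab_size, hidden_size)
decoder = LuongAttnDecoderRNN('dot', embedding, hidden_size, vocab_size, n_layers=1)

input_step = torch.full((1, batch), 1, dtype=torch.long)    # one token id per batch element
last_hidden = torch.zeros(1, batch, hidden_size)            # initial GRU state (n_layers, batch, hidden)
encoder_outputs = torch.randn(src_len, batch, hidden_size)  # stand-in for real encoder outputs

output, hidden = decoder(input_step, last_hidden, encoder_outputs)
print(output.shape)  # torch.Size([4, 7000]): softmax over the vocabulary for the next token
```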
ba717f444d606b1f0f3d1541d1664d21e826de05 | 347 | py | Python | utils/align.py | catskillsresearch/xview2-catskills | 5671cff323c8121c0ae251e360e454a1e8568f58 | ["BSD-3-Clause"] | null | null | null | utils/align.py | catskillsresearch/xview2-catskills | 5671cff323c8121c0ae251e360e454a1e8568f58 | ["BSD-3-Clause"] | null | null | null | utils/align.py | catskillsresearch/xview2-catskills | 5671cff323c8121c0ae251e360e454a1e8568f58 | ["BSD-3-Clause"] | null | null | null |
DISASTER='santa-rosa-wildfire'
IMAGEID='00000030'
XBD='/home/catskills/Desktop/dataxv2/xBD'
im_reference=XBD+'/'+DISASTER+'/images/'+DISASTER+'_'+IMAGEID+'_pre_disaster.png'
im_target=XBD+'/'+ DISASTER+'/images/'+DISASTER+'_'+IMAGEID+'_post_disaster.png'
from arosics import COREG
CR = COREG(im_reference, im_target)
CR.calculate_spatial_shifts()
| 38.555556 | 81 | 0.775216 |
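The script above only estimates the X/Y shift between the pre- and post-disaster tiles. A possible follow-up, sketched here under the assumption that the arosics `COREG` API exposes `path_out`/`fmt_out` and `correct_shifts()` as its documentation describes, would write the co-registered post-disaster image (the output path is illustrative):

```python
# Hedged sketch: re-run co-registration and apply the estimated shift.
CR = COREG(im_reference, im_target,
           path_out='/tmp/' + DISASTER + '_' + IMAGEID + '_post_disaster_aligned.tif',
           fmt_out='GTIFF')
CR.calculate_spatial_shifts()
CR.correct_shifts()  # writes the shifted post-disaster raster to path_out
```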
fbf45c311c0bd43c887534b7137dbb22d36faf47 | 4,990 | py | Python | lstm_predictor.py | mgorkove/Time-Series-Prediction-with-LSTM-Recurrent-Neural-Networks-in-Python-with-Keras | 71937e6b25736c17bdc68abea0519f88f7410077 | ["MIT"] | 10 | 2017-05-23T09:02:16.000Z | 2021-08-04T22:52:59.000Z | lstm_predictor.py | alastairrough/Time-Series-Prediction-with-LSTM-Recurrent-Neural-Networks-in-Python-with-Keras | 71937e6b25736c17bdc68abea0519f88f7410077 | ["MIT"] | null | null | null | lstm_predictor.py | alastairrough/Time-Series-Prediction-with-LSTM-Recurrent-Neural-Networks-in-Python-with-Keras | 71937e6b25736c17bdc68abea0519f88f7410077 | ["MIT"] | 7 | 2018-03-11T16:47:15.000Z | 2021-07-21T17:24:32.000Z |
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.python.framework import dtypes
from tensorflow.contrib import learn
import logging
logging.basicConfig(level=logging.INFO)
def x_sin(x):
return x * np.sin(x)
def sin_cos(x):
return pd.DataFrame(dict(a=np.sin(x), b=np.cos(x)), index=x)
def rnn_data(data, time_steps, labels=False):
"""
creates new data frame based on previous observation
* example:
l = [1, 2, 3, 4, 5]
time_steps = 2
-> labels == False [[1, 2], [2, 3], [3, 4]]
-> labels == True [2, 3, 4, 5]
"""
rnn_df = []
for i in range(len(data) - time_steps):
if labels:
try:
rnn_df.append(data.iloc[i + time_steps].as_matrix())
except AttributeError:
rnn_df.append(data.iloc[i + time_steps])
else:
data_ = data.iloc[i: i + time_steps].as_matrix()
rnn_df.append(data_ if len(data_.shape) > 1 else [[i] for i in data_])
return np.array(rnn_df)
def split_data(data, val_size=0.1, test_size=0.1):
"""
splits data to training, validation and testing parts
"""
ntest = int(round(len(data) * (1 - test_size)))
nval = int(round(len(data.iloc[:ntest]) * (1 - val_size)))
df_train, df_val, df_test = data.iloc[:nval], data.iloc[nval:ntest], data.iloc[ntest:]
return df_train, df_val, df_test
def prepare_data(data, time_steps, labels=False, val_size=0.05, test_size=0.05):
"""
Given the number of `time_steps` and some data,
prepares training, validation and test data for an lstm cell.
"""
df_train, df_val, df_test = split_data(data, val_size, test_size)
return (rnn_data(df_train, time_steps, labels=labels),
rnn_data(df_val, time_steps, labels=labels),
rnn_data(df_test, time_steps, labels=labels))
def generate_data(fct, x, time_steps, seperate=False):
"""generates data with based on a function fct"""
data = fct(x)
if not isinstance(data, pd.DataFrame):
data = pd.DataFrame(data)
train_x, val_x, test_x = prepare_data(data['a'] if seperate else data, time_steps)
train_y, val_y, test_y = prepare_data(data['b'] if seperate else data, time_steps, labels=True)
return dict(train=train_x, val=val_x, test=test_x), dict(train=train_y, val=val_y, test=test_y)
def load_csvdata(rawdata, time_steps, seperate=False):
data = rawdata
if not isinstance(data, pd.DataFrame):
data = pd.DataFrame(data)
train_x, val_x, test_x = prepare_data(data['a'] if seperate else data, time_steps)
train_y, val_y, test_y = prepare_data(data['b'] if seperate else data, time_steps, labels=True)
return dict(train=train_x, val=val_x, test=test_x), dict(train=train_y, val=val_y, test=test_y)
def lstm_model(time_steps, rnn_layers, dense_layers=None):
"""
Creates a deep model based on:
* stacked lstm cells
* an optional dense layers
:param time_steps: the number of time steps the model will be looking at.
:param rnn_layers: list of int or dict
* list of int: the steps used to instantiate the `BasicLSTMCell` cell
* list of dict: [{steps: int, keep_prob: int}, ...]
:param dense_layers: list of nodes for each layer
:return: the model definition
"""
def lstm_cells(layers):
if isinstance(layers[0], dict):
return [tf.nn.rnn_cell.DropoutWrapper(tf.nn.rnn_cell.BasicLSTMCell(layer['steps'],
state_is_tuple=True),
layer['keep_prob'])
if layer.get('keep_prob') else tf.nn.rnn_cell.BasicLSTMCell(layer['steps'],
state_is_tuple=True)
for layer in layers]
return [tf.nn.rnn_cell.BasicLSTMCell(steps, state_is_tuple=True) for steps in layers]
def dnn_layers(input_layers, layers):
if layers and isinstance(layers, dict):
return learn.ops.dnn(input_layers,
layers['layers'],
activation=layers.get('activation'),
dropout=layers.get('dropout'))
elif layers:
return learn.ops.dnn(input_layers, layers)
else:
return input_layers
def _lstm_model(X, y):
stacked_lstm = tf.nn.rnn_cell.MultiRNNCell(lstm_cells(rnn_layers), state_is_tuple=True)
x_ = learn.ops.split_squeeze(1, time_steps, X)
output, layers = tf.nn.rnn(stacked_lstm, x_, dtype=dtypes.float32)
output = dnn_layers(output[-1], dense_layers)
return learn.models.linear_regression(output, y)
    return _lstm_model
| 41.239669 | 101 | 0.597796 |
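A hedged sketch of how the helpers above fit together: sample the x·sin(x) series defined at the top of the file, window it into train/validation/test splits, and obtain the model-building closure. Hyperparameters are illustrative, and the code relies on the legacy pandas and TF 1.x `tf.contrib.learn` APIs this module was written against; actually training the model would hand `model_fn` to that old Estimator interface.

```python
import numpy as np
from lstm_predictor import x_sin, generate_data, lstm_model

TIMESTEPS = 10
RNN_LAYERS = [{'steps': TIMESTEPS}]   # one BasicLSTMCell; add dicts (or ints) to stack more
DENSE_LAYERS = None                   # no dense head after the LSTM stack

# Windowed splits of x*sin(x) sampled on [0, 100].
X, y = generate_data(x_sin, np.linspace(0, 100, 10000), TIMESTEPS)
print(X['train'].shape, y['train'].shape)  # windowed inputs and aligned next-step targets

# Closure that builds the stacked-LSTM regression graph when given (X, y) tensors.
model_fn = lstm_model(TIMESTEPS, RNN_LAYERS, DENSE_LAYERS)
```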
64def41c572d639c5ea149c44713349b5af6dfde | 492 | py | Python | ecos/orchestrator.py | jinho-park/ECOS | f50c095d7eb9c6b77432711b9a5b32e883835802 | ["MIT"] | null | null | null | ecos/orchestrator.py | jinho-park/ECOS | f50c095d7eb9c6b77432711b9a5b32e883835802 | ["MIT"] | null | null | null | ecos/orchestrator.py | jinho-park/ECOS | f50c095d7eb9c6b77432711b9a5b32e883835802 | ["MIT"] | null | null | null |
from ecos.simulator import Simulator
import random
class Orchestrator:
def __init__(self, _policy):
self.policy = _policy
def offloading_target(self, task, source):
collaborationTarget = 0
simul = Simulator.get_instance()
if self.policy == "RANDOM":
num_of_edge = simul.get_num_of_edge()
selectServer = random.randrange(0, num_of_edge + 1)
collaborationTarget = selectServer
return collaborationTarget
| 24.6 | 63 | 0.660569 |
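A small hedged sketch of how the orchestrator is queried. It assumes the ECOS `Simulator` singleton has already been created and configured elsewhere, because the `RANDOM` policy calls `Simulator.get_instance().get_num_of_edge()` internally; with that policy the `task` and `source` arguments are not inspected.

```python
from ecos.orchestrator import Orchestrator

orchestrator = Orchestrator("RANDOM")
# Picks a server index in [0, num_of_edge]; unknown policies fall through and return 0.
target = orchestrator.offloading_target(task=None, source=None)
print("offloading target:", target)
```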
086b739114d84967d7c6b851f0fd96ee797c98f5 | 53,164 | py | Python | submission_files/transformer.py | asafmaman101/transformer_exercise | 444c69daab33df706c5f2317a35056926e855dc0 | ["MIT"] | null | null | null | submission_files/transformer.py | asafmaman101/transformer_exercise | 444c69daab33df706c5f2317a35056926e855dc0 | ["MIT"] | null | null | null | submission_files/transformer.py | asafmaman101/transformer_exercise | 444c69daab33df706c5f2317a35056926e855dc0 | ["MIT"] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.distributed import fsdp_wrap
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import (
AdaptiveSoftmax,
BaseLayer,
FairseqDropout,
LayerDropModuleList,
LayerNorm,
PositionalEmbedding,
SinusoidalPositionalEmbedding,
TransformerDecoderLayer,
TransformerEncoderLayer,
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
from torch import Tensor
DEFAULT_MAX_SOURCE_POSITIONS = 1024
DEFAULT_MAX_TARGET_POSITIONS = 1024
DEFAULT_MIN_PARAMS_TO_WRAP = int(1e8)
@register_model("transformer")
class TransformerModel(FairseqEncoderDecoderModel):
"""
Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017)
<https://arxiv.org/abs/1706.03762>`_.
Args:
encoder (TransformerEncoder): the encoder
decoder (TransformerDecoder): the decoder
The Transformer model provides the following named architectures and
command-line arguments:
.. argparse::
:ref: fairseq.models.transformer_parser
:prog:
"""
@classmethod
def hub_models(cls):
# fmt: off
def moses_subword(path):
return {
'path': path,
'tokenizer': 'moses',
'bpe': 'subword_nmt',
}
def moses_fastbpe(path):
return {
'path': path,
'tokenizer': 'moses',
'bpe': 'fastbpe',
}
def spm(path):
return {
'path': path,
'bpe': 'sentencepiece',
'tokenizer': 'space',
}
return {
'transformer.wmt14.en-fr': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-fr.joined-dict.transformer.tar.bz2'),
'transformer.wmt16.en-de': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt16.en-de.joined-dict.transformer.tar.bz2',
'transformer.wmt18.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt18.en-de.ensemble.tar.gz'),
'transformer.wmt19.en-de': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.ensemble.tar.gz'),
'transformer.wmt19.en-ru': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.ensemble.tar.gz'),
'transformer.wmt19.de-en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.ensemble.tar.gz'),
'transformer.wmt19.ru-en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.ensemble.tar.gz'),
'transformer.wmt19.en-de.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.single_model.tar.gz'),
'transformer.wmt19.en-ru.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.single_model.tar.gz'),
'transformer.wmt19.de-en.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.single_model.tar.gz'),
'transformer.wmt19.ru-en.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.single_model.tar.gz'),
'transformer.wmt20.en-ta': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-ta.single.tar.gz'),
'transformer.wmt20.en-iu.news': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-iu.news.single.tar.gz'),
'transformer.wmt20.en-iu.nh': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-iu.nh.single.tar.gz'),
'transformer.wmt20.ta-en': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.ta-en.single.tar.gz'),
'transformer.wmt20.iu-en.news': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.iu-en.news.single.tar.gz'),
'transformer.wmt20.iu-en.nh': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.iu-en.nh.single.tar.gz'),
}
# fmt: on
def __init__(self, args, encoder, decoder):
super().__init__(encoder, decoder)
self.args = args
self.supports_align_args = True
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--activation-fn',
choices=utils.get_available_activation_fns(),
help='activation function to use')
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',
help='dropout probability after activation in FFN.')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N',
help='num encoder layers')
parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
help='num encoder attention heads')
parser.add_argument('--encoder-normalize-before', action='store_true',
help='apply layernorm before each encoder block')
parser.add_argument('--encoder-learned-pos', action='store_true',
help='use learned positional embeddings in the encoder')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N',
help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
help='num decoder attention heads')
parser.add_argument('--decoder-learned-pos', action='store_true',
help='use learned positional embeddings in the decoder')
parser.add_argument('--decoder-normalize-before', action='store_true',
help='apply layernorm before each decoder block')
parser.add_argument('--decoder-output-dim', type=int, metavar='N',
help='decoder output dimension (extra linear layer '
'if different from decoder embed dim')
parser.add_argument('--share-decoder-input-output-embed', action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--share-all-embeddings', action='store_true',
help='share encoder, decoder and output embeddings'
' (requires shared dictionary and embed dim)')
parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
help='if set, disables positional embeddings (outside self attention)')
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
help='comma separated list of adaptive softmax cutoff points. '
'Must be used with adaptive_loss criterion'),
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
help='sets adaptive softmax dropout for the tail projections')
parser.add_argument('--layernorm-embedding', action='store_true',
help='add layernorm to embedding')
parser.add_argument('--no-scale-embedding', action='store_true',
help='if True, dont scale embeddings')
parser.add_argument('--checkpoint-activations', action='store_true',
help='checkpoint activations at each layer, which saves GPU '
'memory usage at the cost of some additional compute')
parser.add_argument('--offload-activations', action='store_true',
help='checkpoint activations at each layer, then save to gpu. Sets --checkpoint-activations.')
# args for "Cross+Self-Attention for Transformer Models" (Peitz et al., 2019)
parser.add_argument('--no-cross-attention', default=False, action='store_true',
help='do not perform cross-attention')
parser.add_argument('--cross-self-attention', default=False, action='store_true',
help='perform cross+self-attention')
# args for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019)
parser.add_argument('--encoder-layerdrop', type=float, metavar='D', default=0,
help='LayerDrop probability for encoder')
parser.add_argument('--decoder-layerdrop', type=float, metavar='D', default=0,
help='LayerDrop probability for decoder')
parser.add_argument('--encoder-layers-to-keep', default=None,
help='which layers to *keep* when pruning as a comma-separated list')
parser.add_argument('--decoder-layers-to-keep', default=None,
help='which layers to *keep* when pruning as a comma-separated list')
# args for Training with Quantization Noise for Extreme Model Compression ({Fan*, Stock*} et al., 2020)
parser.add_argument('--quant-noise-pq', type=float, metavar='D', default=0,
help='iterative PQ quantization noise at training time')
parser.add_argument('--quant-noise-pq-block-size', type=int, metavar='D', default=8,
help='block size of quantization noise at training time')
parser.add_argument('--quant-noise-scalar', type=float, metavar='D', default=0,
help='scalar quantization noise and scalar quantization at training time')
# args for Fully Sharded Data Parallel (FSDP) training
parser.add_argument(
'--min-params-to-wrap', type=int, metavar='D', default=DEFAULT_MIN_PARAMS_TO_WRAP,
help=(
'minimum number of params for a layer to be wrapped with FSDP() when '
'training with --ddp-backend=fully_sharded. Smaller values will '
'improve memory efficiency, but may make torch.distributed '
'communication less efficient due to smaller input sizes. This option '
'is set to 0 (i.e., always wrap) when --checkpoint-activations or '
'--offload-activations are passed.'
)
)
# fmt: on
parser.add_argument('--mask-layer', type=int, metavar='N', default=-1,
help='index of layer to mask one of its heads')
parser.add_argument('--mask-head', type=int, metavar='N', default=-1,
help='index of head to mask')
parser.add_argument('--mask-layer-type', type=str, metavar='N', default="",
help='type of attention to mask a head')
parser.add_argument('--enc-layer-configuration', type=str, metavar='N', default="",
help='MHA as A and FFN as F"')
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
if args.encoder_layers_to_keep:
args.encoder_layers = len(args.encoder_layers_to_keep.split(","))
if args.decoder_layers_to_keep:
args.decoder_layers = len(args.decoder_layers_to_keep.split(","))
if getattr(args, "max_source_positions", None) is None:
args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
if getattr(args, "max_target_positions", None) is None:
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
if args.share_all_embeddings:
if src_dict != tgt_dict:
raise ValueError("--share-all-embeddings requires a joined dictionary")
if args.encoder_embed_dim != args.decoder_embed_dim:
raise ValueError(
"--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim"
)
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path
):
raise ValueError(
"--share-all-embeddings not compatible with --decoder-embed-path"
)
encoder_embed_tokens = cls.build_embedding(
args, src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = cls.build_embedding(
args, src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = cls.build_embedding(
args, tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
)
if getattr(args, "offload_activations", False):
args.checkpoint_activations = True # offloading implies checkpointing
encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
if not args.share_all_embeddings:
min_params_to_wrap = getattr(
args, "min_params_to_wrap", DEFAULT_MIN_PARAMS_TO_WRAP
)
# fsdp_wrap is a no-op when --ddp-backend != fully_sharded
encoder = fsdp_wrap(encoder, min_num_params=min_params_to_wrap)
decoder = fsdp_wrap(decoder, min_num_params=min_params_to_wrap)
return cls(args, encoder, decoder)
@classmethod
def build_embedding(cls, args, dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
# if provided, load from preloaded dictionaries
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return TransformerEncoder(args, src_dict, embed_tokens)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
return TransformerDecoder(
args,
tgt_dict,
embed_tokens,
no_encoder_attn=getattr(args, "no_cross_attention", False),
)
# TorchScript doesn't support optional arguments with variable length (**kwargs).
# Current workaround is to add union of all arguments in child classes.
def forward(
self,
src_tokens,
src_lengths,
prev_output_tokens,
return_all_hiddens: bool = True,
features_only: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
"""
Run the forward pass for an encoder-decoder model.
Copied from the base class, but without ``**kwargs``,
which are not supported by TorchScript.
"""
encoder_out = self.encoder(
src_tokens, src_lengths=src_lengths, return_all_hiddens=return_all_hiddens
)
decoder_out = self.decoder(
prev_output_tokens,
encoder_out=encoder_out,
features_only=features_only,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
src_lengths=src_lengths,
return_all_hiddens=return_all_hiddens,
)
return decoder_out
# Since get_normalized_probs is in the Fairseq Model which is not scriptable,
# I rewrite the get_normalized_probs from Base Class to call the
# helper function in the Base Class.
@torch.jit.export
def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
"""Get normalized probabilities (or log probs) from a net's output."""
return self.get_normalized_probs_scriptable(net_output, log_probs, sample)
class TransformerEncoder(FairseqEncoder):
"""
Transformer encoder consisting of *args.encoder_layers* layers. Each layer
is a :class:`TransformerEncoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): encoding dictionary
embed_tokens (torch.nn.Embedding): input embedding
"""
def __init__(self, args, dictionary, embed_tokens):
self.args = args
super().__init__(dictionary)
self.register_buffer("version", torch.Tensor([3]))
self.dropout_module = FairseqDropout(
args.dropout, module_name=self.__class__.__name__
)
self.encoder_layerdrop = args.encoder_layerdrop
embed_dim = embed_tokens.embedding_dim
self.padding_idx = embed_tokens.padding_idx
self.max_source_positions = args.max_source_positions
self.embed_tokens = embed_tokens
self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)
self.embed_positions = (
PositionalEmbedding(
args.max_source_positions,
embed_dim,
self.padding_idx,
learned=args.encoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
if getattr(args, "layernorm_embedding", False):
self.layernorm_embedding = LayerNorm(embed_dim)
else:
self.layernorm_embedding = None
if not args.adaptive_input and args.quant_noise_pq > 0:
self.quant_noise = apply_quant_noise_(
nn.Linear(embed_dim, embed_dim, bias=False),
args.quant_noise_pq,
args.quant_noise_pq_block_size,
)
else:
self.quant_noise = None
if self.encoder_layerdrop > 0.0:
self.layers = LayerDropModuleList(p=self.encoder_layerdrop)
else:
self.layers = nn.ModuleList([])
if not args.enc_layer_configuration:
self.layers.extend(
[
self.build_encoder_layer(args, layer_index=layer_index)
for layer_index in range(args.encoder_layers)
]
)
else:
self.layers.extend(
[
self.build_encoder_layer(args, sublayer_key=sublayer_key)
for sublayer_key in args.enc_layer_configuration
]
)
self.num_layers = len(self.layers)
if args.encoder_normalize_before:
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
def build_encoder_layer(self, args, layer_index=None, sublayer_key=None):
if not sublayer_key:
layer = TransformerEncoderLayer(args, layer_index=layer_index)
else:
layer = TransformerEncoderLayer(args, sublayer_key=sublayer_key)
checkpoint = getattr(args, "checkpoint_activations", False)
if checkpoint:
offload_to_cpu = getattr(args, "offload_activations", False)
layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu)
# if we are checkpointing, enforce that FSDP always wraps the
# checkpointed layer, regardless of layer size
min_params_to_wrap = (
getattr(args, "min_params_to_wrap", DEFAULT_MIN_PARAMS_TO_WRAP)
if not checkpoint else 0
)
layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap)
return layer
def forward_embedding(
self, src_tokens, token_embedding: Optional[torch.Tensor] = None
):
# embed tokens and positions
if token_embedding is None:
token_embedding = self.embed_tokens(src_tokens)
x = embed = self.embed_scale * token_embedding
if self.embed_positions is not None:
x = embed + self.embed_positions(src_tokens)
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
if self.quant_noise is not None:
x = self.quant_noise(x)
return x, embed
def forward(
self,
src_tokens,
src_lengths: Optional[torch.Tensor] = None,
return_all_hiddens: bool = False,
token_embeddings: Optional[torch.Tensor] = None,
):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
token_embeddings (torch.Tensor, optional): precomputed embeddings
default `None` will recompute embeddings
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
- **encoder_embedding** (Tensor): the (scaled) embedding lookup
of shape `(batch, src_len, embed_dim)`
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *return_all_hiddens* is True.
"""
return self.forward_scriptable(src_tokens,
src_lengths,
return_all_hiddens,
token_embeddings)
# TorchScript doesn't support super() method so that the scriptable Subclass
# can't access the base class model in Torchscript.
# Current workaround is to add a helper function with different name and
# call the helper function from scriptable Subclass.
def forward_scriptable(
self,
src_tokens,
src_lengths: Optional[torch.Tensor] = None,
return_all_hiddens: bool = False,
token_embeddings: Optional[torch.Tensor] = None,
):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
token_embeddings (torch.Tensor, optional): precomputed embeddings
default `None` will recompute embeddings
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
- **encoder_embedding** (Tensor): the (scaled) embedding lookup
of shape `(batch, src_len, embed_dim)`
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *return_all_hiddens* is True.
"""
# compute padding mask
encoder_padding_mask = src_tokens.eq(self.padding_idx)
has_pads = (src_tokens.device.type == "xla" or encoder_padding_mask.any())
x, encoder_embedding = self.forward_embedding(src_tokens, token_embeddings)
# account for padding while computing the representation
if has_pads:
x = x * (1 - encoder_padding_mask.unsqueeze(-1).type_as(x))
# B x T x C -> T x B x C
x = x.transpose(0, 1)
encoder_states = []
if return_all_hiddens:
encoder_states.append(x)
# encoder layers
for layer in self.layers:
x = layer(
x, encoder_padding_mask=encoder_padding_mask if has_pads else None
)
if return_all_hiddens:
assert encoder_states is not None
encoder_states.append(x)
if self.layer_norm is not None:
x = self.layer_norm(x)
# The Pytorch Mobile lite interpreter does not supports returning NamedTuple in
# `forward` so we use a dictionary instead.
# TorchScript does not support mixed values so the values are all lists.
# The empty list is equivalent to None.
return {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [encoder_padding_mask], # B x T
"encoder_embedding": [encoder_embedding], # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [],
"src_lengths": [],
}
@torch.jit.export
def reorder_encoder_out(self, encoder_out: Dict[str, List[Tensor]], new_order):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
if len(encoder_out["encoder_out"]) == 0:
new_encoder_out = []
else:
new_encoder_out = [encoder_out["encoder_out"][0].index_select(1, new_order)]
if len(encoder_out["encoder_padding_mask"]) == 0:
new_encoder_padding_mask = []
else:
new_encoder_padding_mask = [
encoder_out["encoder_padding_mask"][0].index_select(0, new_order)
]
if len(encoder_out["encoder_embedding"]) == 0:
new_encoder_embedding = []
else:
new_encoder_embedding = [
encoder_out["encoder_embedding"][0].index_select(0, new_order)
]
if len(encoder_out["src_tokens"]) == 0:
src_tokens = []
else:
src_tokens = [(encoder_out["src_tokens"][0]).index_select(0, new_order)]
if len(encoder_out["src_lengths"]) == 0:
src_lengths = []
else:
src_lengths = [(encoder_out["src_lengths"][0]).index_select(0, new_order)]
encoder_states = encoder_out["encoder_states"]
if len(encoder_states) > 0:
for idx, state in enumerate(encoder_states):
encoder_states[idx] = state.index_select(1, new_order)
return {
"encoder_out": new_encoder_out, # T x B x C
"encoder_padding_mask": new_encoder_padding_mask, # B x T
"encoder_embedding": new_encoder_embedding, # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": src_tokens, # B x T
"src_lengths": src_lengths, # B x 1
}
def max_positions(self):
"""Maximum input length supported by the encoder."""
if self.embed_positions is None:
return self.max_source_positions
return min(self.max_source_positions, self.embed_positions.max_positions)
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = "{}.embed_positions.weights".format(name)
if weights_key in state_dict:
print("deleting {0}".format(weights_key))
del state_dict[weights_key]
state_dict[
"{}.embed_positions._float_tensor".format(name)
] = torch.FloatTensor(1)
for i in range(self.num_layers):
# update layer norms
self.layers[i].upgrade_state_dict_named(
state_dict, "{}.layers.{}".format(name, i)
)
version_key = "{}.version".format(name)
if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
class TransformerDecoder(FairseqIncrementalDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(
self,
args,
dictionary,
embed_tokens,
no_encoder_attn=False,
output_projection=None,
):
self.args = args
super().__init__(dictionary)
self.register_buffer("version", torch.Tensor([3]))
self._future_mask = torch.empty(0)
self.dropout_module = FairseqDropout(
args.dropout, module_name=self.__class__.__name__
)
self.decoder_layerdrop = args.decoder_layerdrop
self.share_input_output_embed = args.share_decoder_input_output_embed
input_embed_dim = embed_tokens.embedding_dim
embed_dim = args.decoder_embed_dim
self.embed_dim = embed_dim
self.output_embed_dim = args.decoder_output_dim
self.padding_idx = embed_tokens.padding_idx
self.max_target_positions = args.max_target_positions
self.embed_tokens = embed_tokens
self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)
if not args.adaptive_input and args.quant_noise_pq > 0:
self.quant_noise = apply_quant_noise_(
nn.Linear(embed_dim, embed_dim, bias=False),
args.quant_noise_pq,
args.quant_noise_pq_block_size,
)
else:
self.quant_noise = None
self.project_in_dim = (
Linear(input_embed_dim, embed_dim, bias=False)
if embed_dim != input_embed_dim
else None
)
self.embed_positions = (
PositionalEmbedding(
self.max_target_positions,
embed_dim,
self.padding_idx,
learned=args.decoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
if getattr(args, "layernorm_embedding", False):
self.layernorm_embedding = LayerNorm(embed_dim)
else:
self.layernorm_embedding = None
self.cross_self_attention = getattr(args, "cross_self_attention", False)
if self.decoder_layerdrop > 0.0:
self.layers = LayerDropModuleList(p=self.decoder_layerdrop)
else:
self.layers = nn.ModuleList([])
self.layers.extend(
[
self.build_decoder_layer(args, no_encoder_attn, layer_index=layer_index)
for layer_index in range(args.decoder_layers)
]
)
self.num_layers = len(self.layers)
if args.decoder_normalize_before and not getattr(
args, "no_decoder_final_norm", False
):
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
self.project_out_dim = (
Linear(embed_dim, self.output_embed_dim, bias=False)
if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights
else None
)
self.adaptive_softmax = None
self.output_projection = output_projection
if self.output_projection is None:
self.build_output_projection(args, dictionary, embed_tokens)
def build_output_projection(self, args, dictionary, embed_tokens):
if args.adaptive_softmax_cutoff is not None:
self.adaptive_softmax = AdaptiveSoftmax(
len(dictionary),
self.output_embed_dim,
utils.eval_str_list(args.adaptive_softmax_cutoff, type=int),
dropout=args.adaptive_softmax_dropout,
adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,
factor=args.adaptive_softmax_factor,
tie_proj=args.tie_adaptive_proj,
)
elif self.share_input_output_embed:
self.output_projection = nn.Linear(
self.embed_tokens.weight.shape[1],
self.embed_tokens.weight.shape[0],
bias=False,
)
self.output_projection.weight = self.embed_tokens.weight
else:
self.output_projection = nn.Linear(
self.output_embed_dim, len(dictionary), bias=False
)
nn.init.normal_(
self.output_projection.weight, mean=0, std=self.output_embed_dim ** -0.5
)
num_base_layers = getattr(args, "base_layers", 0)
for i in range(num_base_layers):
self.layers.insert(((i+1) * args.decoder_layers) // (num_base_layers + 1), BaseLayer(args))
def build_decoder_layer(self, args, no_encoder_attn=False, layer_index=None):
layer = TransformerDecoderLayer(args, no_encoder_attn, layer_index=layer_index)
checkpoint = getattr(args, "checkpoint_activations", False)
if checkpoint:
offload_to_cpu = getattr(args, "offload_activations", False)
layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu)
# if we are checkpointing, enforce that FSDP always wraps the
# checkpointed layer, regardless of layer size
min_params_to_wrap = (
getattr(args, "min_params_to_wrap", DEFAULT_MIN_PARAMS_TO_WRAP)
if not checkpoint else 0
)
layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap)
return layer
def forward(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
features_only: bool = False,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
src_lengths: Optional[Any] = None,
return_all_hiddens: bool = False,
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (optional): output from the encoder, used for
encoder-side attention, should be of size T x B x C
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
features_only (bool, optional): only return features without
applying output layer (default: False).
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
x, extra = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
full_context_alignment=full_context_alignment,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
)
if not features_only:
x = self.output_layer(x)
return x, extra
def extract_features(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]],
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
return self.extract_features_scriptable(
prev_output_tokens,
encoder_out,
incremental_state,
full_context_alignment,
alignment_layer,
alignment_heads,
)
"""
A scriptable subclass of this class has an extract_features method and calls
super().extract_features, but super() is not supported in torchscript. A copy of
this function is made to be used in the subclass instead.
"""
def extract_features_scriptable(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]],
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
"""
Similar to *forward* but only return features.
Includes several features from "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
alignment_layer (int, optional): return mean alignment over
heads at this layer (default: last layer).
alignment_heads (int, optional): only average alignment over
this many heads (default: all heads).
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
bs, slen = prev_output_tokens.size()
if alignment_layer is None:
alignment_layer = self.num_layers - 1
enc: Optional[Tensor] = None
padding_mask: Optional[Tensor] = None
if encoder_out is not None and len(encoder_out["encoder_out"]) > 0:
enc = encoder_out["encoder_out"][0]
assert (
enc.size()[1] == bs
), f"Expected enc.shape == (t, {bs}, c) got {enc.shape}"
if encoder_out is not None and len(encoder_out["encoder_padding_mask"]) > 0:
padding_mask = encoder_out["encoder_padding_mask"][0]
# embed positions
positions = None
if self.embed_positions is not None:
positions = self.embed_positions(
prev_output_tokens, incremental_state=incremental_state
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.quant_noise is not None:
x = self.quant_noise(x)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
self_attn_padding_mask: Optional[Tensor] = None
if self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any():
self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)
# decoder layers
attn: Optional[Tensor] = None
inner_states: List[Optional[Tensor]] = [x]
for idx, layer in enumerate(self.layers):
if incremental_state is None and not full_context_alignment:
self_attn_mask = self.buffered_future_mask(x)
else:
self_attn_mask = None
x, layer_attn, _ = layer(
x,
enc,
padding_mask,
incremental_state,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask,
need_attn=bool((idx == alignment_layer)),
need_head_weights=bool((idx == alignment_layer)),
)
inner_states.append(x)
if layer_attn is not None and idx == alignment_layer:
attn = layer_attn.float().to(x)
if attn is not None:
if alignment_heads is not None:
attn = attn[:alignment_heads]
# average probabilities over heads
attn = attn.mean(dim=0)
if self.layer_norm is not None:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
return x, {"attn": [attn], "inner_states": inner_states}
def output_layer(self, features):
"""Project features to the vocabulary size."""
if self.adaptive_softmax is None:
# project back to size of vocabulary
return self.output_projection(features)
else:
return features
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.embed_positions is None:
return self.max_target_positions
return min(self.max_target_positions, self.embed_positions.max_positions)
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
# self._future_mask.device != tensor.device is not working in TorchScript. This is a workaround.
if (
self._future_mask.size(0) == 0
or (not self._future_mask.device == tensor.device)
or self._future_mask.size(0) < dim
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(torch.zeros([dim, dim])), 1
)
self._future_mask = self._future_mask.to(tensor)
return self._future_mask[:dim, :dim]
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = "{}.embed_positions.weights".format(name)
if weights_key in state_dict:
del state_dict[weights_key]
state_dict[
"{}.embed_positions._float_tensor".format(name)
] = torch.FloatTensor(1)
if f"{name}.output_projection.weight" not in state_dict:
if self.share_input_output_embed:
embed_out_key = f"{name}.embed_tokens.weight"
else:
embed_out_key = f"{name}.embed_out"
if embed_out_key in state_dict:
state_dict[f"{name}.output_projection.weight"] = state_dict[
embed_out_key
]
if not self.share_input_output_embed:
del state_dict[embed_out_key]
for i in range(self.num_layers):
# update layer norms
layer_norm_map = {
"0": "self_attn_layer_norm",
"1": "encoder_attn_layer_norm",
"2": "final_layer_norm",
}
for old, new in layer_norm_map.items():
for m in ("weight", "bias"):
k = "{}.layers.{}.layer_norms.{}.{}".format(name, i, old, m)
if k in state_dict:
state_dict[
"{}.layers.{}.{}.{}".format(name, i, new, m)
] = state_dict[k]
del state_dict[k]
version_key = "{}.version".format(name)
if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
@register_model_architecture("transformer", "transformer_tiny")
def tiny_architecture(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 64)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 64)
args.encoder_layers = getattr(args, "encoder_layers", 2)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 2)
args.decoder_layers = getattr(args, "decoder_layers", 2)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 2)
return base_architecture(args)
@register_model_architecture("transformer", "transformer")
def base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.no_cross_attention = getattr(args, "no_cross_attention", False)
args.cross_self_attention = getattr(args, "cross_self_attention", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.checkpoint_activations = getattr(args, "checkpoint_activations", False)
args.offload_activations = getattr(args, "offload_activations", False)
if args.offload_activations:
args.checkpoint_activations = True
args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None)
args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None)
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8)
args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0)
@register_model_architecture("transformer", "transformer_iwslt_de_en")
def transformer_iwslt_de_en(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.decoder_layers = getattr(args, "decoder_layers", 6)
base_architecture(args)
@register_model_architecture("transformer", "transformer_wmt_en_de")
def transformer_wmt_en_de(args):
base_architecture(args)
# parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017)
@register_model_architecture("transformer", "transformer_vaswani_wmt_en_de_big")
def transformer_vaswani_wmt_en_de_big(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.dropout = getattr(args, "dropout", 0.3)
base_architecture(args)
@register_model_architecture("transformer", "transformer_vaswani_wmt_en_fr_big")
def transformer_vaswani_wmt_en_fr_big(args):
args.dropout = getattr(args, "dropout", 0.1)
transformer_vaswani_wmt_en_de_big(args)
@register_model_architecture("transformer", "transformer_wmt_en_de_big")
def transformer_wmt_en_de_big(args):
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
transformer_vaswani_wmt_en_de_big(args)
# default parameters used in tensor2tensor implementation
@register_model_architecture("transformer", "transformer_wmt_en_de_big_t2t")
def transformer_wmt_en_de_big_t2t(args):
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.activation_dropout = getattr(args, "activation_dropout", 0.1)
transformer_vaswani_wmt_en_de_big(args)
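# A minimal sketch of how the same registration pattern could be extended (this
# variant is an illustration added here, not part of fairseq; the name
# "transformer_tiny_example" and the tiny dimensions are arbitrary):
@register_model_architecture("transformer", "transformer_tiny_example")
def transformer_tiny_example(args):
    args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 64)
    args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 128)
    args.encoder_layers = getattr(args, "encoder_layers", 2)
    args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 64)
    args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 128)
    args.decoder_layers = getattr(args, "decoder_layers", 2)
    base_architecture(args)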
| 44.119502 | 159 | 0.629844 |
a6774c397ab3bbe9657f9401f9c7015eda2c70e7 | 572 | py | Python | esphome/components/fujitsu_general/climate.py | huhuhugo1/esphome | eb895d2095861a4d51f1a5fcd582a97389c27b4f | [
"MIT"
] | null | null | null | esphome/components/fujitsu_general/climate.py | huhuhugo1/esphome | eb895d2095861a4d51f1a5fcd582a97389c27b4f | [
"MIT"
] | null | null | null | esphome/components/fujitsu_general/climate.py | huhuhugo1/esphome | eb895d2095861a4d51f1a5fcd582a97389c27b4f | [
"MIT"
] | null | null | null | import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import climate_ir
from esphome.const import CONF_ID
AUTO_LOAD = ['climate_ir']
fujitsu_general_ns = cg.esphome_ns.namespace('fujitsu_general')
FujitsuGeneralClimate = fujitsu_general_ns.class_('FujitsuGeneralClimate', climate_ir.ClimateIR)
CONFIG_SCHEMA = climate_ir.CLIMATE_IR_SCHEMA.extend({
cv.GenerateID(): cv.declare_id(FujitsuGeneralClimate),
})
def to_code(config):
var = cg.new_Pvariable(config[CONF_ID])
yield climate_ir.register_climate_ir(var, config)
| 30.105263 | 96 | 0.809441 |
31d5d2797d74d2ac9d22015d1b749f3d51f601ee | 1,559 | py | Python | config/includes.chroot/usr/local/share/S0lar0S/src/ranger/ranger/ext/human_readable.py | ddarksmith/S0lar0S | b91971000c089f77d1ff76a00262252a65680e5b | [
"WTFPL"
] | null | null | null | config/includes.chroot/usr/local/share/S0lar0S/src/ranger/ranger/ext/human_readable.py | ddarksmith/S0lar0S | b91971000c089f77d1ff76a00262252a65680e5b | [
"WTFPL"
] | null | null | null | config/includes.chroot/usr/local/share/S0lar0S/src/ranger/ranger/ext/human_readable.py | ddarksmith/S0lar0S | b91971000c089f77d1ff76a00262252a65680e5b | [
"WTFPL"
] | null | null | null | # This file is part of ranger, the console file manager.
# License: GNU GPL version 3, see the file "AUTHORS" for details.
def human_readable(byte, separator=' '):
"""Convert a large number of bytes to an easily readable format.
>>> human_readable(54)
'54 B'
>>> human_readable(1500)
'1.46 K'
>>> human_readable(2 ** 20 * 1023)
'1023 M'
"""
# I know this can be written much shorter, but this long version
# performs much better than what I had before. If you attempt to
# shorten this code, take performance into consideration.
if byte <= 0:
return '0'
if byte < 2**10:
return '%d%sB' % (byte, separator)
if byte < 2**10 * 999:
return '%.3g%sK' % (byte / 2**10.0, separator)
if byte < 2**20:
return '%.4g%sK' % (byte / 2**10.0, separator)
if byte < 2**20 * 999:
return '%.3g%sM' % (byte / 2**20.0, separator)
if byte < 2**30:
return '%.4g%sM' % (byte / 2**20.0, separator)
if byte < 2**30 * 999:
return '%.3g%sG' % (byte / 2**30.0, separator)
if byte < 2**40:
return '%.4g%sG' % (byte / 2**30.0, separator)
if byte < 2**40 * 999:
return '%.3g%sT' % (byte / 2**40.0, separator)
if byte < 2**50:
return '%.4g%sT' % (byte / 2**40.0, separator)
if byte < 2**50 * 999:
return '%.3g%sP' % (byte / 2**50.0, separator)
if byte < 2**60:
return '%.4g%sP' % (byte / 2**50.0, separator)
return '>9000'
if __name__ == '__main__':
import doctest
doctest.testmod()
| 32.479167 | 69 | 0.551636 |
96c72b09993c2130a4693b1563c79cdd8cdfed84 | 941 | py | Python | model/test/nhl/test_predict.py | shuyi1981/bayes-bet | 4f0715d31e726e8f6f4363dc9743f48fbb330b1d | [
"MIT"
] | 1 | 2021-08-20T12:59:34.000Z | 2021-08-20T12:59:34.000Z | model/test/nhl/test_predict.py | shuyi1981/bayes-bet | 4f0715d31e726e8f6f4363dc9743f48fbb330b1d | [
"MIT"
] | null | null | null | model/test/nhl/test_predict.py | shuyi1981/bayes-bet | 4f0715d31e726e8f6f4363dc9743f48fbb330b1d | [
"MIT"
] | null | null | null | import pandas as pd
import pytest
from bayesbet.nhl.predict import bayesian_poisson_pdf
from bayesbet.nhl.predict import bayesian_bernoulli_win_pdf
from bayesbet.nhl.predict import bayesian_goal_within_time
@pytest.fixture
def poisson_cases():
cases = [
(0.0, 0.1, 5,
[0.3678839711698872, 0.366049307392375, 0.18392385257827024, 0.06222208712974248,
0.015944512312484223, 0.00397626941724083]),
(0.0, 0.1, 6,
[0.3678839711698872, 0.366049307392375, 0.18392385257827024, 0.06222208712974248,
0.015944512312484223, 0.00397626941724083, 0.0006751111828385836])
]
return cases
def test_bayesian_poisson_pdf(poisson_cases):
for case in poisson_cases:
μ, σ, max_y, expected = case
poisson_pdf = bayesian_poisson_pdf(μ, σ, max_y)
assert poisson_pdf == expected, \
f"Did not get the expected bayesian Poisson pdf (μ={μ}, σ={σ}, max_y={max_y}" | 37.64 | 90 | 0.709883 |
4803ec425164b39d584896809bc74ac32898e57e | 1,814 | py | Python | fit_scripts/plot_binom.py | jensengroup/prohxms | 411f208efd1a1dcc06e988f1df11b5e43d406f8e | [
"BSD-2-Clause"
] | null | null | null | fit_scripts/plot_binom.py | jensengroup/prohxms | 411f208efd1a1dcc06e988f1df11b5e43d406f8e | [
"BSD-2-Clause"
] | null | null | null | fit_scripts/plot_binom.py | jensengroup/prohxms | 411f208efd1a1dcc06e988f1df11b5e43d406f8e | [
"BSD-2-Clause"
] | 1 | 2021-04-24T11:11:59.000Z | 2021-04-24T11:11:59.000Z | import numpy
from scipy.special import binom, gamma
from matplotlib import pyplot
import sys
def factorial(x):
try:
return gamma(x + 1)
except OverflowError:
print "Overflow, x =",x
exit(0)
def B(x, y):
return factorial(x - 1) * factorial(y - 1) / factorial(x + y - 1)
n = int(sys.argv[1])
mu = float(sys.argv[2])
sigma = float(sys.argv[3])
alpha = - mu * (mu * mu - mu * n + sigma * sigma) / (sigma * sigma * n + mu * mu - mu * n)
beta = (n * alpha) / mu - alpha
alpha = float(sys.argv[2])
beta = float(sys.argv[3])
if (alpha < 0.0) or (beta < 0.0):
print "ERROR: Negative parameter value:"
print "alpha =", alpha, "beta =", beta
exit(0)
sigma = numpy.sqrt( n * alpha * beta * (alpha + beta + n) / ((alpha + beta) * (alpha + beta) * (1 + alpha + beta)))
mu = n * alpha / (alpha + beta)
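# Note added for clarity (not in the original script): the two lines above are
# the standard beta-binomial moments,
#   mu      = n*alpha / (alpha + beta)
#   sigma^2 = n*alpha*beta*(alpha + beta + n) / ((alpha + beta)^2 * (alpha + beta + 1))
# while the earlier (overwritten) block inverts them to recover alpha and beta
# from a requested mu and sigma.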
print "alpha =", alpha, "beta =", beta
print "mu = %f sigma = %f" % (mu, sigma)
def beta_binom(k):
return binom(n, k) * B(k + alpha, n - k + beta) / B(alpha, beta)
for k in range(0, n + 1):
print "P(N =%3i) = %6.4f" % (k, beta_binom(k))
pyplot.rc('text', usetex=True)
pyplot.rc('font', family='serif')
vals = numpy.arange(0, n + 1)
probs = numpy.array([beta_binom(val) for val in vals])
bar_width = 0.55
pyplot.bar(vals + bar_width/2, probs, bar_width, color = 'DarkSlateBlue', alpha=0.6)
pyplot.title(r"$n = %i,\ \mu= %5.2f,\ \sigma = %5.2f\ (\alpha = %5.2f,\ \beta = %5.2f)$" % (n, mu, sigma, alpha, beta), fontsize=20)
val_texts = [r"$%i$" % (val) for val in vals]
pyplot.xlabel(r"$k$", fontsize=16)
pyplot.xticks(vals + bar_width, val_texts, fontsize=16)
pyplot.xlim([0.0, numpy.amax(vals) + bar_width*2])
pyplot.yticks(fontsize=16)
pyplot.ylabel(r"$P(N_\mathrm{HB}=k)$", fontsize=16)
pyplot.grid(True)
pyplot.savefig("bar.png")
| 24.849315 | 132 | 0.597574 |
01f6635719f9e7d73a486a1fc3df37350abf5ab6 | 2,150 | py | Python | samfp/tests/clustering.py | b1quint/samfp | 1cd9b85851c02dc61a2294d67a309f62083d358d | [
"BSD-3-Clause"
] | null | null | null | samfp/tests/clustering.py | b1quint/samfp | 1cd9b85851c02dc61a2294d67a309f62083d358d | [
"BSD-3-Clause"
] | 19 | 2016-07-15T21:32:59.000Z | 2017-09-12T00:31:26.000Z | samfp/tests/clustering.py | b1quint/samfp | 1cd9b85851c02dc61a2294d67a309f62083d358d | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import gridspec
from scipy import signal
from scipy import stats  # needed by kde_scipy below (missing in the original)
class Parabola:
def __init__(self, a, b, c):
self.a = a
self.b = b
self.c = c
def __call__(self, x):
y = self.a * x ** 2 + self.b * x + self.c
return y
# Use Kernel Density Estimation to identify the two different clusters
def kde_scipy(x, x_grid, bandwidth=0.2, **kwargs):
"""Kernel Density Estimation with Scipy"""
# Note that scipy weights its bandwidth by the covariance of the
# input data. To make the results comparable to the other methods,
# we divide the bandwidth by the sample standard deviation here.
kde = stats.gaussian_kde(x, bw_method=bandwidth / x.std(ddof=1),
**kwargs)
return kde.evaluate(x_grid)
# Create my initial parabola
fsr = 60
x = np.linspace(0, 10, 100)
p = Parabola(1, -10, 10)
y = p(x) + (np.random.random_sample(x.size) - 0.5) * 1
# Add a fake shift representing the fsr
y[y < 0] += fsr
# Add a second FSR
y = np.concatenate((y, y + fsr))
x = np.concatenate((x, x))
# Plot this
fig = plt.figure()
gs = gridspec.GridSpec(2, 1)
ax1 = fig.add_subplot(gs[0])
ax1.plot(y, x, 'k+')
ax1.set_xlabel("y")
ax1.set_ylabel("x")
# Create a lateral plot to visualize the distribution in 1D since I do not
# care about the 2D distribution.
x_ = -2 * np.ones_like(x) + np.random.random_sample(x.size) * 0.1
ax1.plot(y, x_, 'kx', alpha=0.10)
# Make a histogram
bins = np.linspace(y.min(), y.max(), 50)
ax2 = fig.add_subplot(gs[1])
ax2.hist(y, bins=bins, alpha=0.5)
# Split the sorted y values into clusters wherever there is an unusually
# large jump between consecutive values.
y_indexes = np.argsort(y)
y_ = np.sort(y)
yl_ = np.diff(y_)
ayl_ = np.abs(yl_)
# Suppress "ordinary" gaps (within one standard deviation of the median gap)
# so that only the large jumps between clusters remain.
ayl_[np.abs(ayl_ - np.median(ayl_)) < np.std(ayl_)] = 0
# The local maxima of the remaining gaps mark the cluster boundaries.
split_indexes = signal.argrelmax(ayl_)[0]
split_y_indexes = np.split(y_indexes, split_indexes + 1)
for (i, idx) in enumerate(split_y_indexes):
ax1.plot(y[idx], x[idx], 'o', alpha=0.25)
y[idx] -= fsr * i
ax1.plot(y[idx], x[idx], 'ko', alpha=0.10)
# Display the plot
plt.tight_layout()
plt.show() | 24.431818 | 74 | 0.650698 |
24760ad11b3013dfa626a8adda470d30d06893e6 | 2,304 | py | Python | userbot/plugins/antiflood.py | NoobRider/catuserbot | dea79d5d8b7174efefcc1c35ed3434516a490f58 | [
"MIT"
] | 2 | 2020-04-12T11:51:06.000Z | 2020-04-18T14:08:06.000Z | userbot/plugins/antiflood.py | NoobRider/catuserbot | dea79d5d8b7174efefcc1c35ed3434516a490f58 | [
"MIT"
] | null | null | null | userbot/plugins/antiflood.py | NoobRider/catuserbot | dea79d5d8b7174efefcc1c35ed3434516a490f58 | [
"MIT"
] | 1 | 2020-05-13T02:51:35.000Z | 2020-05-13T02:51:35.000Z | import asyncio
from telethon import events
from telethon.tl.functions.channels import EditBannedRequest
from telethon.tl.types import ChatBannedRights
from userbot.utils import admin_cmd
import userbot.plugins.sql_helper.antiflood_sql as sql
import userbot.utils
from userbot.utils import humanbytes, progress, time_formatter
CHAT_FLOOD = sql.__load_flood_settings()
# warn mode for anti flood
ANTI_FLOOD_WARN_MODE = ChatBannedRights(
until_date=None,
view_messages=None,
send_messages=True
)
@borg.on(admin_cmd(incoming=True))
async def _(event):
# logger.info(CHAT_FLOOD)
if not CHAT_FLOOD:
return
if not (str(event.chat_id) in CHAT_FLOOD):
return
# TODO: exempt admins from this
should_ban = sql.update_flood(event.chat_id, event.message.from_id)
if not should_ban:
return
try:
await event.client(EditBannedRequest(
event.chat_id,
event.message.from_id,
ANTI_FLOOD_WARN_MODE
))
except Exception as e: # pylint:disable=C0103,W0703
no_admin_privilege_message = await event.client.send_message(
entity=event.chat_id,
message="""**Automatic AntiFlooder**
@admin [User](tg://user?id={}) is flooding this chat.
`{}`""".format(event.message.from_id, str(e)),
reply_to=event.message.id
)
await asyncio.sleep(10)
await no_admin_privilege_message.edit(
"This is useless SPAM dude . stop this enjoy chat man ",
link_preview=False
)
else:
await event.client.send_message(
entity=event.chat_id,
message="""**Automatic AntiFlooder**
[User](tg://user?id={}) has been automatically restricted
because he reached the defined flood limit.""".format(event.message.from_id),
reply_to=event.message.id
)
@borg.on(admin_cmd(pattern="setflood (.*)"))
async def _(event):
    global CHAT_FLOOD  # update the module-level cache so the new limit takes effect
if event.fwd_from:
return
input_str = event.pattern_match.group(1)
try:
sql.set_flood(event.chat_id, input_str)
CHAT_FLOOD = sql.__load_flood_settings()
await event.edit("Antiflood updated to {} in the current chat".format(input_str))
except Exception as e: # pylint:disable=C0103,W0703
await event.edit(str(e))
| 32 | 89 | 0.676649 |
eb9319c4201da0c0ebcf8c5591a7e08747c681ee | 107 | py | Python | src/roscam_application/__init__.py | gaelfargeas/roscam_application | 7b8da48f5e6e468bbab8238ac3e5591d92f94a79 | [
"BSD-3-Clause"
] | null | null | null | src/roscam_application/__init__.py | gaelfargeas/roscam_application | 7b8da48f5e6e468bbab8238ac3e5591d92f94a79 | [
"BSD-3-Clause"
] | null | null | null | src/roscam_application/__init__.py | gaelfargeas/roscam_application | 7b8da48f5e6e468bbab8238ac3e5591d92f94a79 | [
"BSD-3-Clause"
] | null | null | null | from roscam_application import roscam_main
def main():
roscam_var = roscam_main.roscam_application()
| 17.833333 | 49 | 0.794393 |
fc60c128a7ced52334380d0c2c522d780c33447a | 457 | py | Python | env/lib/python3.8/site-packages/plotly/validators/layout/xaxis/_tickformat.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 76 | 2020-07-06T14:44:05.000Z | 2022-02-14T15:30:21.000Z | env/lib/python3.8/site-packages/plotly/validators/layout/xaxis/_tickformat.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-08-09T02:30:14.000Z | 2022-03-12T00:50:14.000Z | env/lib/python3.8/site-packages/plotly/validators/layout/xaxis/_tickformat.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-07-12T16:18:07.000Z | 2022-02-05T16:48:35.000Z | import _plotly_utils.basevalidators
class TickformatValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="tickformat", parent_name="layout.xaxis", **kwargs):
super(TickformatValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "ticks"),
role=kwargs.pop("role", "style"),
**kwargs
)
| 35.153846 | 87 | 0.66302 |
e30b0746a188c6e193cfc4d309b5a3e9497f99d8 | 1,877 | py | Python | app/utils/json_formater.py | a1136395507/Blog | e890dbe24bd2c3a82dad55e90f717db59a3e51a1 | [
"Unlicense"
] | null | null | null | app/utils/json_formater.py | a1136395507/Blog | e890dbe24bd2c3a82dad55e90f717db59a3e51a1 | [
"Unlicense"
] | null | null | null | app/utils/json_formater.py | a1136395507/Blog | e890dbe24bd2c3a82dad55e90f717db59a3e51a1 | [
"Unlicense"
] | null | null | null | import logging
import json
import datetime
import socket
REMOVE_ATTR = ["filename", "module", "exc_text", "stack_info", "created", "", "relativeCreated", "exc_info", "msg"]
class HostIp(object):
host_name = None
host_ip = None
@classmethod
def get_host_ip(cls):
if not cls.host_name or not cls.host_ip:
try:
cls.host_name = socket.gethostname()
cls.host_ip = socket.gethostbyname(cls.host_name)
except Exception as err:
cls.host_name = "unknown hostname"
cls.host_ip = "unknow hostip"
return cls.host_name,cls.host_ip
class JSONFormatter(logging.Formatter):
host_name, host_ip = HostIp.get_host_ip()
def format(self, record):
extra = self.build_record(record)
self.set_format_time(extra) # set time
self.set_host_ip(extra) # set host name and host ip
extra['message'] = record.msg # set message
# if record.exc_info:
# extra['exc_info'] = self.formatException(record.exc_info)
if self._fmt == 'pretty':
return json.dumps(extra, indent=1, ensure_ascii=False)
else:
return json.dumps(extra, ensure_ascii=False)
@classmethod
def build_record(cls, record):
return {
attr_name: record.__dict__[attr_name]
for attr_name in record.__dict__
if attr_name not in REMOVE_ATTR
}
@classmethod
def set_format_time(cls, extra):
now = datetime.datetime.utcnow()
format_time = now.strftime("%Y-%m-%dT%H:%M:%S" + ".%03d" % (now.microsecond / 1000) + "Z")
extra['@timestamp'] = format_time
return format_time
@classmethod
def set_host_ip(cls, extra):
extra['host_name'] = JSONFormatter.host_name
extra['host_ip'] = JSONFormatter.host_ip
| 30.274194 | 115 | 0.616942 |
8fafa28767e90db6657f8b75e3591da46982741c | 11,308 | py | Python | grammar_induction/earley_parser/nltk/tokenize/casual.py | tdonca/OpenBottle | f03d80e7b3645232fb97f91cf7fc2dc02f101ac2 | [
"MIT"
] | 6 | 2017-01-22T03:15:01.000Z | 2019-12-01T16:19:36.000Z | grammar_induction/earley_parser/nltk/tokenize/casual.py | tdonca/OpenBottle | f03d80e7b3645232fb97f91cf7fc2dc02f101ac2 | [
"MIT"
] | 3 | 2020-03-24T15:38:23.000Z | 2021-02-02T21:44:18.000Z | grammar_induction/earley_parser/nltk/tokenize/casual.py | tdonca/OpenBottle | f03d80e7b3645232fb97f91cf7fc2dc02f101ac2 | [
"MIT"
] | 6 | 2017-01-19T21:49:55.000Z | 2021-04-14T09:57:17.000Z | # coding: utf-8
#
# Natural Language Toolkit: Twitter Tokenizer
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Christopher Potts <cgpotts@stanford.edu>
# Ewan Klein <ewan@inf.ed.ac.uk> (modifications)
# Pierpaolo Pantone <> (modifications)
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
#
"""
Twitter-aware tokenizer, designed to be flexible and easy to adapt to new
domains and tasks. The basic logic is this:
1. The tuple regex_strings defines a list of regular expression
strings.
2. The regex_strings strings are put, in order, into a compiled
regular expression object called word_re.
3. The tokenization is done by word_re.findall(s), where s is the
user-supplied string, inside the tokenize() method of the class
Tokenizer.
4. When instantiating Tokenizer objects, there are three options:
    preserve_case, reduce_len and strip_handles. By default,
    preserve_case is set to True. If it is set to False, then the
    tokenizer will downcase everything except for emoticons.
"""
######################################################################
from __future__ import unicode_literals
import re
from nltk.compat import htmlentitydefs, int2byte, unichr
######################################################################
# The following strings are components in the regular expression
# that is used for tokenizing. It's important that phone_number
# appears first in the final regex (since it can contain whitespace).
# It also could matter that tags comes after emoticons, due to the
# possibility of having text like
#
# <:| and some text >:)
#
# Most importantly, the final element should always be last, since it
# does a last ditch whitespace-based tokenization of whatever is left.
# ToDo: Update with http://en.wikipedia.org/wiki/List_of_emoticons ?
# This particular element is used in a couple ways, so we define it
# with a name:
EMOTICONS = r"""
(?:
[<>]?
[:;=8] # eyes
[\-o\*\']? # optional nose
[\)\]\(\[dDpP/\:\}\{@\|\\] # mouth
|
[\)\]\(\[dDpP/\:\}\{@\|\\] # mouth
[\-o\*\']? # optional nose
[:;=8] # eyes
[<>]?
|
<3 # heart
)"""
# URL pattern due to John Gruber, modified by Tom Winzig. See
# https://gist.github.com/winzig/8894715
URLS = r""" # Capture 1: entire matched URL
(?:
https?: # URL protocol and colon
(?:
/{1,3} # 1-3 slashes
| # or
[a-z0-9%] # Single letter or digit or '%'
# (Trying not to match e.g. "URI::Escape")
)
| # or
# looks like domain name followed by a slash:
[a-z0-9.\-]+[.]
(?:[a-z]{2,13})
/
)
(?: # One or more:
[^\s()<>{}\[\]]+ # Run of non-space, non-()<>{}[]
| # or
\([^\s()]*?\([^\s()]+\)[^\s()]*?\) # balanced parens, one level deep: (...(...)...)
|
\([^\s]+?\) # balanced parens, non-recursive: (...)
)+
(?: # End with:
\([^\s()]*?\([^\s()]+\)[^\s()]*?\) # balanced parens, one level deep: (...(...)...)
|
\([^\s]+?\) # balanced parens, non-recursive: (...)
| # or
[^\s`!()\[\]{};:'".,<>?«»“”‘’] # not a space or one of these punct chars
)
| # OR, the following to match naked domains:
(?:
(?<!@) # not preceded by a @, avoid matching foo@_gmail.com_
[a-z0-9]+
(?:[.\-][a-z0-9]+)*
[.]
(?:[a-z]{2,13})
\b
/?
(?!@) # not succeeded by a @,
# avoid matching "foo.na" in "foo.na@example.com"
)
"""
# The components of the tokenizer:
REGEXPS = (
URLS,
# Phone numbers:
r"""
(?:
(?: # (international)
\+?[01]
[\-\s.]*
)?
(?: # (area code)
[\(]?
\d{3}
[\-\s.\)]*
)?
\d{3} # exchange
[\-\s.]*
\d{4} # base
)"""
,
# ASCII Emoticons
EMOTICONS
,
# HTML tags:
r"""<[^>\s]+>"""
,
# ASCII Arrows
r"""[\-]+>|<[\-]+"""
,
# Twitter username:
r"""(?:@[\w_]+)"""
,
# Twitter hashtags:
r"""(?:\#+[\w_]+[\w\'_\-]*[\w_]+)"""
,
# email addresses
r"""[\w.+-]+@[\w-]+\.(?:[\w-]\.?)+[\w-]"""
,
# Remaining word types:
r"""
(?:[^\W\d_](?:[^\W\d_]|['\-_])+[^\W\d_]) # Words with apostrophes or dashes.
|
(?:[+\-]?\d+[,/.:-]\d+[+\-]?) # Numbers, including fractions, decimals.
|
(?:[\w_]+) # Words without apostrophes or dashes.
|
(?:\.(?:\s*\.){1,}) # Ellipsis dots.
|
(?:\S) # Everything else that isn't whitespace.
"""
)
######################################################################
# This is the core tokenizing regex:
WORD_RE = re.compile(r"""(%s)""" % "|".join(REGEXPS), re.VERBOSE | re.I
| re.UNICODE)
# WORD_RE performs poorly on these patterns:
HANG_RE = re.compile(r'([^a-zA-Z0-9])\1{3,}')
# The emoticon string gets its own regex so that we can preserve case for
# them as needed:
EMOTICON_RE = re.compile(EMOTICONS, re.VERBOSE | re.I | re.UNICODE)
# These are for regularizing HTML entities to Unicode:
ENT_RE = re.compile(r'&(#?(x?))([^&;\s]+);')
######################################################################
# Functions for converting html entities
######################################################################
def _str_to_unicode(text, encoding=None, errors='strict'):
if encoding is None:
encoding = 'utf-8'
if isinstance(text, bytes):
return text.decode(encoding, errors)
return text
def _replace_html_entities(text, keep=(), remove_illegal=True, encoding='utf-8'):
"""
Remove entities from text by converting them to their
corresponding unicode character.
:param text: a unicode string or a byte string encoded in the given
`encoding` (which defaults to 'utf-8').
:param list keep: list of entity names which should not be replaced.\
This supports both numeric entities (``&#nnnn;`` and ``&#hhhh;``)
and named entities (such as `` `` or ``>``).
:param bool remove_illegal: If `True`, entities that can't be converted are\
removed. Otherwise, entities that can't be converted are kept "as
is".
:returns: A unicode string with the entities removed.
See https://github.com/scrapy/w3lib/blob/master/w3lib/html.py
>>> from nltk.tokenize.casual import _replace_html_entities
>>> _replace_html_entities(b'Price: £100')
'Price: \\xa3100'
>>> print(_replace_html_entities(b'Price: £100'))
Price: £100
>>>
"""
def _convert_entity(match):
entity_body = match.group(3)
if match.group(1):
try:
if match.group(2):
number = int(entity_body, 16)
else:
number = int(entity_body, 10)
# Numeric character references in the 80-9F range are typically
# interpreted by browsers as representing the characters mapped
# to bytes 80-9F in the Windows-1252 encoding. For more info
# see: http://en.wikipedia.org/wiki/Character_encodings_in_HTML
if 0x80 <= number <= 0x9f:
return int2byte(number).decode('cp1252')
except ValueError:
number = None
else:
if entity_body in keep:
return match.group(0)
else:
number = htmlentitydefs.name2codepoint.get(entity_body)
if number is not None:
try:
return unichr(number)
except ValueError:
pass
return "" if remove_illegal else match.group(0)
return ENT_RE.sub(_convert_entity, _str_to_unicode(text, encoding))
######################################################################
class TweetTokenizer:
r"""
Tokenizer for tweets.
>>> from nltk.tokenize import TweetTokenizer
>>> tknzr = TweetTokenizer()
>>> s0 = "This is a cooool #dummysmiley: :-) :-P <3 and some arrows < > -> <--"
>>> tknzr.tokenize(s0)
['This', 'is', 'a', 'cooool', '#dummysmiley', ':', ':-)', ':-P', '<3', 'and', 'some', 'arrows', '<', '>', '->', '<--']
Examples using `strip_handles` and `reduce_len parameters`:
>>> tknzr = TweetTokenizer(strip_handles=True, reduce_len=True)
>>> s1 = '@remy: This is waaaaayyyy too much for you!!!!!!'
>>> tknzr.tokenize(s1)
[':', 'This', 'is', 'waaayyy', 'too', 'much', 'for', 'you', '!', '!', '!']
"""
def __init__(self, preserve_case=True, reduce_len=False, strip_handles=False):
self.preserve_case = preserve_case
self.reduce_len = reduce_len
self.strip_handles = strip_handles
def tokenize(self, text):
"""
:param text: str
:rtype: list(str)
:return: a tokenized list of strings; concatenating this list returns\
the original string if `preserve_case=False`
"""
# Fix HTML character entities:
text = _replace_html_entities(text)
# Remove username handles
if self.strip_handles:
text = remove_handles(text)
# Normalize word lengthening
if self.reduce_len:
text = reduce_lengthening(text)
# Shorten problematic sequences of characters
safe_text = HANG_RE.sub(r'\1\1\1', text)
# Tokenize:
words = WORD_RE.findall(safe_text)
# Possibly alter the case, but avoid changing emoticons like :D into :d:
if not self.preserve_case:
words = list(map((lambda x : x if EMOTICON_RE.search(x) else
x.lower()), words))
return words
######################################################################
# Normalization Functions
######################################################################
def reduce_lengthening(text):
"""
Replace repeated character sequences of length 3 or greater with sequences
of length 3.
"""
pattern = re.compile(r"(.)\1{2,}")
return pattern.sub(r"\1\1\1", text)
def remove_handles(text):
"""
Remove Twitter username handles from text.
"""
pattern = re.compile(r"(?<![A-Za-z0-9_!@#\$%&*])@(([A-Za-z0-9_]){20}(?!@))|(?<![A-Za-z0-9_!@#\$%&*])@(([A-Za-z0-9_]){1,19})(?![A-Za-z0-9_]*@)")
    # Substitute handles with ' ' to ensure that text on either side of removed handles is tokenized correctly
return pattern.sub(' ', text)
######################################################################
# Tokenization Function
######################################################################
def casual_tokenize(text, preserve_case=True, reduce_len=False, strip_handles=False):
"""
Convenience function for wrapping the tokenizer.
"""
return TweetTokenizer(preserve_case=preserve_case, reduce_len=reduce_len,
strip_handles=strip_handles).tokenize(text)
###############################################################################
| 32.872093 | 147 | 0.51583 |
262da14fedd979db34ec549bca9143fccf31c2c2 | 2,219 | py | Python | package/spack-perl-test-cleannamespaces/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | 1 | 2018-07-17T07:45:09.000Z | 2018-07-17T07:45:09.000Z | package/spack-perl-test-cleannamespaces/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | null | null | null | package/spack-perl-test-cleannamespaces/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | null | null | null | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PerlTestCleannamespaces(PerlPackage):
"""This module lets you check your module's namespaces for imported
functions you might have forgotten to remove"""
homepage = "http://search.cpan.org/~ether/Test-CleanNamespaces-0.22/lib/Test/CleanNamespaces.pm"
url = "http://search.cpan.org/CPAN/authors/id/E/ET/ETHER/Test-CleanNamespaces-0.22.tar.gz"
version('0.22', '8c48bb0427f2077edce57c50491468ec')
depends_on('perl-sub-exporter', type=('build', 'run'))
depends_on('perl-module-runtime', type=('build', 'run'))
depends_on('perl-test-needs', type=('build', 'run'))
depends_on('perl-test-deep', type=('build', 'run'))
depends_on('perl-test-warnings', type=('build', 'run'))
depends_on('perl-file-pushd', type=('build', 'run'))
depends_on('perl-package-stash', type=('build', 'run'))
depends_on('perl-sub-identify', type=('build', 'run'))
depends_on('perl-namespace-clean', type=('build', 'run'))
| 48.23913 | 100 | 0.673727 |
b68c9a3d0ae6bd8591c0725be24beb016a161907 | 684 | py | Python | src/PGAN/data_loading.py | konstantinjdobler/gan-n1 | 8813fa5efd7a64603b60d1dd0722e8aecdec5763 | [
"MIT"
] | null | null | null | src/PGAN/data_loading.py | konstantinjdobler/gan-n1 | 8813fa5efd7a64603b60d1dd0722e8aecdec5763 | [
"MIT"
] | 10 | 2020-07-07T15:19:21.000Z | 2020-07-30T20:12:31.000Z | src/PGAN/data_loading.py | konstantinjdobler/gan-n1 | 8813fa5efd7a64603b60d1dd0722e8aecdec5763 | [
"MIT"
] | null | null | null | import torchvision.datasets as dset
import torch.utils.data as data
import torch
# from https://github.com/caffeinism/cDC-GAN-pytorch
class ImageFeatureFolder(dset.ImageFolder):
def __init__(self, image_root, attribute_file, transform):
super(ImageFeatureFolder, self).__init__(
root=image_root, transform=transform)
with open(attribute_file, 'r') as f:
data = f.read()
data = data.strip().split('\n')
self.attrs = torch.FloatTensor(
[list(map(float, line.split()[1:])) for line in data[2:]])
def __getitem__(self, index):
img, _ = super().__getitem__(index)
return img, self.attrs[index] | 34.2 | 70 | 0.654971 |
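# Usage sketch (illustrative only; the paths, image size and attribute file
# follow the CelebA-style layout this loader assumes and are not part of the
# code):
#
#   import torchvision.transforms as transforms
#   dataset = ImageFeatureFolder(
#       image_root='/data/celeba/img_align_celeba',
#       attribute_file='/data/celeba/list_attr_celeba.txt',
#       transform=transforms.Compose([transforms.Resize(64), transforms.ToTensor()]))
#   img, attrs = dataset[0]   # image tensor and its +/-1 attribute vector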
015e566c67f5a553d01a65f1bb0598f31f4cb16a | 9,777 | py | Python | dr/coordinates.py | rscalzo/sami | 7ac5632e018cdf2384f5ff067c503177684f61c8 | [
"BSD-3-Clause"
] | 1 | 2021-12-07T08:30:38.000Z | 2021-12-07T08:30:38.000Z | dr/coordinates.py | rscalzo/sami | 7ac5632e018cdf2384f5ff067c503177684f61c8 | [
"BSD-3-Clause"
] | null | null | null | dr/coordinates.py | rscalzo/sami | 7ac5632e018cdf2384f5ff067c503177684f61c8 | [
"BSD-3-Clause"
] | 3 | 2021-02-15T19:51:59.000Z | 2021-05-04T05:48:46.000Z | """
Functions for modifying fibre coordinates in SAMI FITS files. These were
necessary to correct the files produced during the March 2013 run (the first
with the upgraded SAMI instrument), which had two problems:
* The probes were numbered in the wrong order (1-13 instead of 13-1)
* The position angles were calculated 180deg out
The top-level function correct_coordinates checks which of these issues
affects a given file, and makes the necessary corrections.
These functions will presumably never be needed again, but should be kept
for reference.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import astropy.io.fits as pf
import numpy as np
from sami.utils.other import find_fibre_table
from scipy.optimize import leastsq
import os
def reverse_probes(fibre_table):
"""Reverse the order of the probes in the fibre table.
This function is to correct a fault before 6th March 2013 in which the
probe numbers were in the wrong order. The code in fact changes the
fibre numbers (SPEC_ID) to match the given probe numbers, and then
sorts by SPEC_ID.
"""
# Correct each fibre number (SPEC_ID)
for fibre in fibre_table:
probenum_0 = fibre['PROBENUM'] - 1
# This is the correct mapping for the fibre numbers
if 'SKY' in fibre['PROBENAME']:
fibre['SPEC_ID'] = 820 - fibre['SPEC_ID']
else:
rel_spec_id = fibre['SPEC_ID'] - 63 * probenum_0
fibre['SPEC_ID'] = 63 * (12 - probenum_0) + rel_spec_id
# Sort the fibre_table by fibre number
fibre_table.sort(order='SPEC_ID')
return
def rotate_all_hexas(fibre_table):
"""Rotate all hexabundles by 180 degrees.
See rotate_probe for further details.
"""
for probenum in range(1,14):
# Have to do things as a slice to avoid copying the data back and forth
this_probe = np.where((fibre_table['PROBENUM'] == probenum) &
(fibre_table['TYPE'] == 'P'))[0]
if np.size(this_probe) > 0:
fibre_table_hexa = fibre_table[this_probe[0]:this_probe[-1]+1]
rotate_hexa(fibre_table_hexa)
return
def rotate_hexa(fibre_table_hexa):
"""Rotate hexabundle by 180 degrees.
This function is to correct a fault before 1st April 2013 in which the
hexabundles were given a rotation of 0 degrees, when they should have
had 180 degrees.
We know that FIPBOS_X/Y is on a nice square coordinate system, so these
coordinates are rotated by 180 degrees, and then converted into all
other coordinate systems by interpolating between the original
FIBPOS_X/Y values.
"""
# Define the centre of the hexabundle
alpha, beta = define_hexa_centre(fibre_table_hexa)
# Rotate FIBPOS_X/Y, but don't overwrite the old coordinates yet
cen_x, cen_y = coordinate_centre(
fibre_table_hexa, 'FIBPOS_X', 'FIBPOS_Y', alpha, beta)
new_fibpos_x = cen_x - (fibre_table_hexa['FIBPOS_X'] - cen_x)
new_fibpos_y = cen_y - (fibre_table_hexa['FIBPOS_Y'] - cen_y)
# Now rotate each other coordinate pair in turn, using interpolation
name_pair_list = [('XPOS', 'YPOS'),
('FIB_MRA', 'FIB_MDEC'),
('FIB_ARA', 'FIB_ADEC')]
for x_name, y_name in name_pair_list:
interpolate(fibre_table_hexa, x_name, y_name,
new_fibpos_x, new_fibpos_y)
# Update the FIBPOS_X/Y positions
fibre_table_hexa['FIBPOS_X'][:] = np.round(new_fibpos_x).astype(int)
fibre_table_hexa['FIBPOS_Y'][:] = np.round(new_fibpos_y).astype(int)
# Update the PORIENT values
fibre_table_hexa['PORIENT'][:] = 180.0
return
def define_hexa_centre(fibre_table_hexa):
"""Define the centre of a hexabundle relative to fibres 1-3.
x_cen = x_0 + alpha * (x_1 - x_0) + beta * (x_2 - x_0)
y_cen = y_0 + alpha * (y_1 - y_0) + beta * (y_2 - y_0)
"""
order = np.argsort(fibre_table_hexa['FIBNUM'])
x = fibre_table_hexa['FIB_PX'][order].astype(float)
y = fibre_table_hexa['FIB_PY'][order].astype(float)
alpha = ((y[0] * (x[2] - x[0]) - x[0] * (y[2] - y[0])) /
((x[1] - x[0]) * (y[2] - y[0]) -
(y[1] - y[0]) * (x[2] - x[0])))
beta = ((y[0] * (x[1] - x[0]) - x[0] * (y[1] - y[0])) /
((x[2] - x[0]) * (y[1] - y[0]) -
(y[2] - y[0]) * (x[1] - x[0])))
return alpha, beta
def coordinate_centre(fibre_table_hexa, x_name, y_name, alpha, beta):
"""Return the centre of the hexabundle in the given coordinates."""
order = np.argsort(fibre_table_hexa['FIBNUM'])
x = fibre_table_hexa[x_name][order]
y = fibre_table_hexa[y_name][order]
cen_x = x[0] + alpha * (x[1] - x[0]) + beta * (x[2] - x[0])
cen_y = y[0] + alpha * (y[1] - y[0]) + beta * (y[2] - y[0])
return cen_x, cen_y
def interpolate(fibre_table_hexa, x_name, y_name, new_fibpos_x, new_fibpos_y):
"""Update the coordinates in x/y_name to the new fibpos_x/y positions.
Works by interpolating between the old fibpos_x/y positions, which are
in fibre_table_hexa. The coordinates are assumed to relate to
fibpos_x/y according to:
x = x_0 + a_x * fibpos_x + b_x * fibpos_y
y = y_0 + a_y * fibpos_x + b_y * fibpos_y
x_0, a_x, b_x, y_0, a_y, b_y are found by fitting to the old coordinates.
"""
old_coords_x = fibre_table_hexa[x_name]
old_coords_y = fibre_table_hexa[y_name]
old_fibpos_x = fibre_table_hexa['FIBPOS_X']
old_fibpos_y = fibre_table_hexa['FIBPOS_Y']
# Define the function to fit
fitfunc = lambda par, fibpos_x, fibpos_y: \
par[0] + par[1]*fibpos_x + par[2]*fibpos_y
errfunc = lambda par, fibpos_x, fibpos_y, coords: \
coords - fitfunc(par, fibpos_x, fibpos_y)
# Initial guess for x
par_x_0 = np.zeros(3)
par_x_0[1] = ((old_coords_x.max() - old_coords_x.min()) /
(old_fibpos_x.max() - old_fibpos_x.min()))
par_x_0[0] = old_coords_x.mean() / (par_x_0[1] * old_fibpos_x.mean())
# Do the fit for x
args_x = (old_fibpos_x, old_fibpos_y, old_coords_x)
par_x = leastsq(errfunc, par_x_0, args=args_x)[0]
    # Initial guess for y
par_y_0 = np.zeros(3)
par_y_0[2] = ((old_coords_y.max() - old_coords_y.min()) /
(old_fibpos_y.max() - old_fibpos_y.min()))
par_y_0[0] = old_coords_y.mean() / (par_y_0[2] * old_fibpos_y.mean())
    # Do the fit for y
args_y = (old_fibpos_x, old_fibpos_y, old_coords_y)
par_y = leastsq(errfunc, par_y_0, args=args_y)[0]
# Now use the new_fibpos_x/y to get the new coordinates
new_coords_x = fitfunc(par_x, new_fibpos_x, new_fibpos_y)
new_coords_y = fitfunc(par_y, new_fibpos_x, new_fibpos_y)
# Finally, save the new coordinates
fibre_table_hexa[x_name][:] = new_coords_x
fibre_table_hexa[y_name][:] = new_coords_y
return
def copy_coords(hdulist):
"""Copy the fibre coordinate information into a new fibre table."""
fibre_table_extension = hdulist[find_fibre_table(hdulist)]
new_extension = fibre_table_extension.copy()
# Name the extension so it can be found later
new_extension.header['EXTNAME'] = 'OLD_COORDS'
hdulist.append(new_extension)
return
def correct_coordinates(filename):
"""See which corrections are necessary and apply them to the file.
If the hexabundles have PORIENT = 0.0, they will be rotated 180
degrees. If the probes are in the wrong order, they will be
re-ordered. If neither of these is the case, nothing is done.
If either has been done, the old coordinates will be put in an
extension named OLD_COORDS."""
hdulist = pf.open(filename, 'update')
try:
fibre_table_extno = find_fibre_table(hdulist)
except KeyError:
# No fibres to correct
return
fibre_table = hdulist[fibre_table_extno].data
epoch = hdulist[0].header['EPOCH']
# Check if the probes need to be rotated
if np.all(fibre_table['PORIENT'] == 0.0) and epoch >= 2013.0:
do_rotate = True
else:
do_rotate = False
# Check if the probes need to be switched
if (np.all(fibre_table['PROBENUM'][31+63*np.arange(13)] ==
(1+np.arange(13))) and epoch >= 2013.0):
do_switch = True
else:
do_switch = False
# If anything needs doing...
if do_rotate or do_switch:
header = hdulist[0].header
try:
# First try to copy the old coordinates back into the fibre table
hdulist[fibre_table_extno].data = hdulist['OLD_COORDS'].data
except KeyError:
# That didn't work, so we must need to create the OLD_COORDS
# extension instead
copy_coords(hdulist)
# Do the manipulations
if do_rotate:
rotate_all_hexas(fibre_table)
header['COORDROT'] = (True,
'The hexabundle coordinates were rotated')
else:
header['COORDROT'] = (False,
'The hexabundle coordinates were rotated')
if do_switch:
reverse_probes(fibre_table)
header['COORDREV'] = (True,
'The hexabundle probe allocations were reversed')
else:
header['COORDREV'] = (False,
'The hexabundle probe allocations were reversed')
hdulist.close()
def correct_all_coordinates(root='.'):
"""Run correct_coordinates on all files in all subdirectories."""
for dirname, subdir_list, filename_list in os.walk(root):
for filename in filename_list:
if filename.endswith('.fits'):
print(filename)
correct_coordinates(os.path.join(dirname, filename))
return
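# Typical use (an illustrative note, not part of the original module; the file
# and directory names below are placeholders):
#
#   correct_coordinates('12mar10045red.fits')        # fix a single file
#   correct_all_coordinates(root='/path/to/reduced/data')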
| 42.324675 | 82 | 0.646517 |
d9b1467d9e24c4a39f09be5ee61819205f6f49d9 | 447 | py | Python | padpo/checkers/empty.py | christopheNan/padpo | 429ef81277452db4c2563f6ab5c71547b5e519e3 | [
"BSD-3-Clause"
] | 4 | 2019-11-05T16:47:40.000Z | 2020-01-04T17:38:29.000Z | padpo/checkers/empty.py | christopheNan/padpo | 429ef81277452db4c2563f6ab5c71547b5e519e3 | [
"BSD-3-Clause"
] | 46 | 2019-11-06T10:23:16.000Z | 2020-12-04T08:47:54.000Z | padpo/checkers/empty.py | christopheNan/padpo | 429ef81277452db4c2563f6ab5c71547b5e519e3 | [
"BSD-3-Clause"
] | 5 | 2019-11-06T13:08:58.000Z | 2020-10-15T11:10:30.000Z | """Checker for missing translations."""
from padpo.checkers.baseclass import Checker
from padpo.pofile import PoItem
class EmptyChecker(Checker):
"""Checker for missing translations."""
name = "Empty"
def check_item(self, item: PoItem):
"""Check an item in a `*.po` file."""
if item.msgid_full_content and not item.msgstr_full_content:
item.add_warning(self.name, "This entry is not translated yet.")
| 26.294118 | 76 | 0.686801 |
2870df0e324989eaf6172ba4d0e34a3cac2c86ff | 6,931 | py | Python | lwrl/models/ddpg_model.py | sealday/lwrl | 52bcd67751e605c38db4afa609c58938c7034e8d | [
"MIT"
] | 2 | 2019-04-11T11:55:48.000Z | 2020-05-29T18:09:51.000Z | lwrl/models/ddpg_model.py | sealday/lwrl | 52bcd67751e605c38db4afa609c58938c7034e8d | [
"MIT"
] | 6 | 2021-06-01T22:21:00.000Z | 2022-03-11T23:24:36.000Z | lwrl/models/ddpg_model.py | sealday/lwrl | 52bcd67751e605c38db4afa609c58938c7034e8d | [
"MIT"
] | 1 | 2019-04-12T03:09:47.000Z | 2019-04-12T03:09:47.000Z | import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
import lwrl.utils.th_helper as H
from lwrl.models import DistributionModel
from lwrl.optimizers import optimizer_factory
class DDPGCriticModel(nn.Module):
def __init__(self, state_spec, action_spec, hidden1=400, hidden2=300):
super().__init__()
state_shape = state_spec['shape']
#action_shape = action_spec['shape']
assert len(state_shape) == 1
self.action_size = 1
self.fc1 = nn.Linear(state_shape[0], hidden1)
self.fc2 = nn.Linear(hidden1 + self.action_size, hidden2)
self.fc3 = nn.Linear(hidden2, 1)
#nn.init.uniform_(self.fc3.weight)
#nn.init.uniform_(self.fc3.bias)
def forward(self, s, a):
a = a.view(-1, self.action_size)
out = F.relu(self.fc1(s))
out = F.relu(self.fc2(torch.cat([out, a], 1)))
out = self.fc3(out)
return out.squeeze()
class DDPGModel(DistributionModel):
def __init__(self,
state_spec,
action_spec,
network_spec,
exploration_schedule,
optimizer,
saver_spec,
discount_factor,
update_target_freq,
update_target_weight,
critic_network_spec,
critic_optimizer,
state_preprocess_pipeline=None):
self.network_spec = network_spec
self.critic_network_spec = critic_network_spec
self.critic_optimizer = critic_optimizer
self.update_target_freq = update_target_freq
self.update_target_weight = update_target_weight
super().__init__(
state_spec=state_spec,
action_spec=action_spec,
network_spec=network_spec,
exploration_schedule=exploration_schedule,
optimizer=optimizer,
saver_spec=saver_spec,
discount_factor=discount_factor,
state_preprocess_pipeline=state_preprocess_pipeline,
require_deterministic=True)
def init_model(self):
super().init_model()
self.target_network = self.create_network(
self.network_spec, self.action_spec).type(H.float_tensor)
hidden1 = self.critic_network_spec['hidden1']
hidden2 = self.critic_network_spec['hidden2']
self.critic_network = DDPGCriticModel(
self.state_spec,
self.action_spec,
hidden1=hidden1,
hidden2=hidden2).type(H.float_tensor)
self.target_critic_network = DDPGCriticModel(
self.state_spec,
self.action_spec,
hidden1=hidden1,
hidden2=hidden2).type(H.float_tensor)
self.critic_optimizer = optimizer_factory(
self.critic_optimizer['type'], self.critic_network.parameters(),
**self.critic_optimizer['args'])
self.target_network.load_state_dict(self.network.state_dict())
self.target_critic_network.load_state_dict(
self.critic_network.state_dict())
def get_target_network_action(self, obs, random_action):
with torch.no_grad():
dist_param = self.target_network(H.Variable(obs))
action = self.target_network.sample(
dist_param,
deterministic=(not random_action) or self.require_deterministic)
return action
def predict_target_q(self, obs_batch, action_batch, reward_batch,
neg_done_mask):
q_value = self.target_critic_network(obs_batch, action_batch)
return reward_batch + neg_done_mask * self.discount_factor * q_value
def update_target_model(self, target_model, model):
for target_param, param in zip(target_model.parameters(),
model.parameters()):
target_param.data.copy_(
(1 - self.update_target_weight) * param.data +
self.update_target_weight * target_param.data)
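        # Note added for clarity (not in the original code): this is the usual
        # Polyak / soft target update,
        #     theta_target <- tau * theta_target + (1 - tau) * theta_online,
        # with tau = self.update_target_weight.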
def update(self, obs_batch, action_batch, reward_batch, next_obs_batch,
done_mask):
obs_batch = self.preprocess_state(
H.Variable(torch.from_numpy(obs_batch).type(H.float_tensor)))
next_obs_batch = self.preprocess_state(
H.Variable(torch.from_numpy(next_obs_batch).type(H.float_tensor)))
if self.action_spec['type'] == 'int':
action_batch = H.Variable(torch.from_numpy(action_batch).long())
else:
action_batch = H.Variable(torch.from_numpy(action_batch))
reward_batch = H.Variable(torch.from_numpy(reward_batch))
neg_done_mask = H.Variable(
torch.from_numpy(1.0 - done_mask).type(H.float_tensor))
if H.use_cuda:
action_batch = action_batch.cuda()
reward_batch = reward_batch.cuda()
# predict action using target network
next_target_actions = self.get_target_network_action(
next_obs_batch, random_action=False)
# predict Q values for next states
next_q_values = self.predict_target_q(
next_obs_batch, next_target_actions, reward_batch,
neg_done_mask).detach()
q_values = self.critic_network(obs_batch, action_batch)
#critic_loss = (q_values - next_q_values).pow(2).mean()
critic_loss = F.smooth_l1_loss(q_values, next_q_values)
# update critic
self.critic_optimizer.step(critic_loss)
# update actor
predicted_actions = self.get_action(
obs_batch, random_action=False, update=True)
actor_loss = -self.critic_network(obs_batch, predicted_actions).mean()
self.optimizer.step(actor_loss)
self.num_updates += 1
# target networks <- online networks
if self.num_updates % self.update_target_freq == 0:
self.update_target_model(self.target_network, self.network)
self.update_target_model(self.target_critic_network,
self.critic_network)
def save(self, timestep):
self.saver.save(
{
'global_step': timestep,
'network': self.network.state_dict(),
'target_network': self.target_network.state_dict(),
'critic_network': self.critic_network.state_dict(),
'target_critic_network':
self.target_critic_network.state_dict(),
}, timestep)
def restore(self):
checkpoint = self.saver.restore()
self.global_step = checkpoint['global_step']
self.network.load_state_dict(checkpoint['network'])
self.target_network.load_state_dict(checkpoint['target_network'])
self.critic_network.load_state_dict(checkpoint['critic_network'])
self.target_critic_network.load_state_dict(
checkpoint['target_critic_network'])
| 38.292818 | 78 | 0.635983 |
795b4aff0a0fc4f3af01aa38c585c7745e6fe11b | 5,350 | py | Python | test/Deprecated/Options/help.py | EmanueleCannizzaro/scons | 6baa4e65cdf4df6951473545b69435711864e509 | [
"MIT"
] | 1 | 2019-09-18T06:37:02.000Z | 2019-09-18T06:37:02.000Z | test/Deprecated/Options/help.py | EmanueleCannizzaro/scons | 6baa4e65cdf4df6951473545b69435711864e509 | [
"MIT"
] | null | null | null | test/Deprecated/Options/help.py | EmanueleCannizzaro/scons | 6baa4e65cdf4df6951473545b69435711864e509 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/Deprecated/Options/help.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Test the Options help messages.
"""
import os
import re
import TestSCons
str_True = str(True)
str_False = str(False)
test = TestSCons.TestSCons(match = TestSCons.match_re_dotall)
workpath = test.workpath()
qtpath = os.path.join(workpath, 'qt')
libpath = os.path.join(qtpath, 'lib')
libdirvar = os.path.join('$qtdir', 'lib')
qtpath_re = re.escape(qtpath)
libpath_re = re.escape(libpath)
libdirvar_re = re.escape(libdirvar)
test.subdir(qtpath)
test.subdir(libpath)
test.write('SConstruct', """
from SCons.Options import BoolOption, EnumOption, ListOption, \
PackageOption, PathOption
list_of_libs = Split('x11 gl qt ical')
qtdir = r'%(qtpath)s'
opts = Options(args=ARGUMENTS)
opts.AddOptions(
BoolOption('warnings', 'compilation with -Wall and similiar', 1),
BoolOption('profile', 'create profiling informations', 0),
EnumOption('debug', 'debug output and symbols', 'no',
allowed_values=('yes', 'no', 'full'),
map={}, ignorecase=0), # case sensitive
EnumOption('guilib', 'gui lib to use', 'gtk',
allowed_values=('motif', 'gtk', 'kde'),
map={}, ignorecase=1), # case insensitive
EnumOption('some', 'some option', 'xaver',
allowed_values=('xaver', 'eins'),
map={}, ignorecase=2), # make lowercase
ListOption('shared',
'libraries to build as shared libraries',
'all',
names = list_of_libs),
PackageOption('x11',
'use X11 installed here (yes = search some places)',
'yes'),
PathOption('qtdir', 'where the root of Qt is installed', qtdir),
PathOption('qt_libraries',
'where the Qt library is installed',
r'%(libdirvar)s'),
)
env = Environment(options=opts)
Help(opts.GenerateHelpText(env))
print env['warnings']
print env['profile']
Default(env.Alias('dummy', None))
""" % locals())
expected_stdout = """\
scons: Reading SConscript files ...
%(str_True)s
%(str_False)s
scons: done reading SConscript files.
warnings: compilation with -Wall and similiar \\(yes|no\\)
default: 1
actual: %(str_True)s
profile: create profiling informations \\(yes|no\\)
default: 0
actual: %(str_False)s
debug: debug output and symbols \\(yes|no|full\\)
default: no
actual: no
guilib: gui lib to use \\(motif|gtk|kde\\)
default: gtk
actual: gtk
some: some option \\(xaver|eins\\)
default: xaver
actual: xaver
shared: libraries to build as shared libraries
\\(all|none|comma-separated list of names\\)
allowed names: x11 gl qt ical
default: all
actual: x11 gl qt ical
x11: use X11 installed here \\(yes = search some places\\)
\\( yes | no | /path/to/x11 \\)
default: yes
actual: %(str_True)s
qtdir: where the root of Qt is installed \\( /path/to/qtdir \\)
default: %(qtpath_re)s
actual: %(qtpath_re)s
qt_libraries: where the Qt library is installed \\( /path/to/qt_libraries \\)
default: %(libdirvar_re)s
actual: %(libpath_re)s
Use scons -H for help about command-line options.
""" % locals()
file_expr = TestSCons.file_expr
expected_stderr = """
scons: warning: The Options class is deprecated; use the Variables class instead.
%(file_expr)s
scons: warning: The BoolOption\\(\\) function is deprecated; use the BoolVariable\\(\\) function instead.
%(file_expr)s
scons: warning: The EnumOption\\(\\) function is deprecated; use the EnumVariable\\(\\) function instead.
%(file_expr)s
scons: warning: The ListOption\\(\\) function is deprecated; use the ListVariable\\(\\) function instead.
%(file_expr)s
scons: warning: The PackageOption\\(\\) function is deprecated; use the PackageVariable\\(\\) function instead.
%(file_expr)s
scons: warning: The PathOption\\(\\) function is deprecated; use the PathVariable\\(\\) function instead.
%(file_expr)s""" % locals()
test.run(arguments='-h', stdout=expected_stdout, stderr=expected_stderr)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 31.28655 | 111 | 0.68729 |
9a4af4bfac167a0a72dcb286327dfe1376e404dc | 12,020 | py | Python | sdk/containerinstance/azure-mgmt-containerinstance/azure/mgmt/containerinstance/aio/operations/_location_operations.py | adewaleo/azure-sdk-for-python | 169457edbea5e3c5557246cfcf8bd635d528bae4 | [
"MIT"
] | 1 | 2021-09-07T18:35:07.000Z | 2021-09-07T18:35:07.000Z | sdk/containerinstance/azure-mgmt-containerinstance/azure/mgmt/containerinstance/aio/operations/_location_operations.py | adewaleo/azure-sdk-for-python | 169457edbea5e3c5557246cfcf8bd635d528bae4 | [
"MIT"
] | null | null | null | sdk/containerinstance/azure-mgmt-containerinstance/azure/mgmt/containerinstance/aio/operations/_location_operations.py | adewaleo/azure-sdk-for-python | 169457edbea5e3c5557246cfcf8bd635d528bae4 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LocationOperations:
"""LocationOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerinstance.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_usage(
self,
location: str,
**kwargs
) -> AsyncIterable["models.UsageListResult"]:
"""Get the usage for a subscription.
:param location: The identifier for the physical azure location.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either UsageListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerinstance.models.UsageListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.UsageListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_usage.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'location': self._serialize.url("location", location, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('UsageListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_usage.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.ContainerInstance/locations/{location}/usages'} # type: ignore
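    # Illustrative call pattern (a note added here, not generated code; the
    # operations-group attribute name "location" on the management client is
    # an assumption):
    #
    #     async for usage in client.location.list_usage("westus"):
    #         print(usage)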
def list_cached_images(
self,
location: str,
**kwargs
) -> AsyncIterable["models.CachedImagesListResult"]:
"""Get the list of cached images.
Get the list of cached images on specific OS type for a subscription in a region.
:param location: The identifier for the physical azure location.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CachedImagesListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerinstance.models.CachedImagesListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CachedImagesListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_cached_images.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'location': self._serialize.url("location", location, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('CachedImagesListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_cached_images.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.ContainerInstance/locations/{location}/cachedImages'} # type: ignore
def list_capabilities(
self,
location: str,
**kwargs
) -> AsyncIterable["models.CapabilitiesListResult"]:
"""Get the list of capabilities of the location.
Get the list of CPU/memory/GPU capabilities of a region.
:param location: The identifier for the physical azure location.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CapabilitiesListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerinstance.models.CapabilitiesListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CapabilitiesListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_capabilities.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'location': self._serialize.url("location", location, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('CapabilitiesListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_capabilities.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.ContainerInstance/locations/{location}/capabilities'} # type: ignore
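# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated operations file): one way the
# async pager returned by `list_usage` is typically consumed. The client class,
# its `location` attribute and the credential type are assumptions based on the
# azure-mgmt-containerinstance / azure-identity packages, not on this module.
async def _example_list_usage(subscription_id: str, region: str) -> None:
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.containerinstance.aio import ContainerInstanceManagementClient
    async with DefaultAzureCredential() as credential:
        async with ContainerInstanceManagementClient(credential, subscription_id) as client:
            # AsyncItemPaged follows server-side paging transparently.
            async for usage in client.location.list_usage(region):
                print(usage.current_value, usage.limit)
# e.g. asyncio.run(_example_list_usage("<subscription-id>", "westus"))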
| 46.770428 | 164 | 0.644509 |
ec95ae097cf9fd53015b5bcbc1bb713ddca586e8 | 8,724 | py | Python | test/vanilla/version-tolerant/AcceptanceTests/asynctests/test_xml.py | cfculhane/autorest.python | 8cbca95faee88d933a58bbbd17b76834faa8d387 | [
"MIT"
] | 35 | 2018-04-03T12:15:53.000Z | 2022-03-11T14:03:34.000Z | test/vanilla/version-tolerant/AcceptanceTests/asynctests/test_xml.py | cfculhane/autorest.python | 8cbca95faee88d933a58bbbd17b76834faa8d387 | [
"MIT"
] | 652 | 2017-08-28T22:44:41.000Z | 2022-03-31T21:20:31.000Z | test/vanilla/version-tolerant/AcceptanceTests/asynctests/test_xml.py | cfculhane/autorest.python | 8cbca95faee88d933a58bbbd17b76834faa8d387 | [
"MIT"
] | 29 | 2017-08-28T20:57:01.000Z | 2022-03-11T14:03:38.000Z | # --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import logging
from ..serializer import deserialize_base64
from async_generator import yield_, async_generator
from xmlserviceversiontolerant.aio import AutoRestSwaggerBATXMLService
import pytest
_LOGGER = logging.getLogger(__name__)
@pytest.fixture
@async_generator
async def client():
async with AutoRestSwaggerBATXMLService() as client:
await yield_(client)
async def _assert_with_log(func, *args, **kwargs):
def raise_for_status(response, deserialized, headers):
response.http_response._internal_response.raise_for_status()
try:
http_response = await func(*args, cls=raise_for_status, **kwargs)
except Exception as err:
print(err.response.text())
pytest.fail()
@pytest.mark.asyncio
async def test_json_xml(client):
await client.xml.json_input({"id": 42})
result = await client.xml.json_output()
assert result['id'] == 42
@pytest.mark.asyncio
async def test_simple(client):
# Slideshow
slideshow = await client.xml.get_simple()
assert slideshow.attrib['title'] == "Sample Slide Show"
assert slideshow.attrib['date'] == "Date of publication"
assert slideshow.attrib['author'] == "Yours Truly"
slides = list(slideshow.iterfind('slide'))
assert len(slides) == 2
slide1 = slides[0]
assert slide1.attrib['type'] == "all"
assert next(slide1.iterfind('title')).text == "Wake up to WonderWidgets!"
assert len(list(slide1.iterfind('item'))) == 0
slide2 = slides[1]
assert slide2.attrib['type'] == "all"
assert next(slide2.iterfind('title')).text == "Overview"
items = list(slide2.iterfind('item'))
assert len(items) == 3
assert items[0].text == "Why WonderWidgets are great"
assert items[1].text == None
assert items[2].text == "Who buys WonderWidgets"
await _assert_with_log(client.xml.put_simple, slideshow)
@pytest.mark.asyncio
async def test_empty_child_element(client):
banana = await client.xml.get_empty_child_element()
assert banana.attrib == {} # That's the point of this test, it was an empty node.
await _assert_with_log(client.xml.put_empty_child_element, banana)
@pytest.mark.asyncio
async def test_empty_root_list(client):
bananas = await client.xml.get_empty_root_list()
assert bananas.tag == 'bananas'
assert bananas.attrib == {}
await _assert_with_log(client.xml.put_empty_root_list, bananas)
@pytest.mark.asyncio
async def test_root_list_single_item(client):
xml_body = await client.xml.get_root_list_single_item()
bananas = list(xml_body.iterfind('banana'))
assert len(bananas) == 1
assert next(bananas[0].iterfind('name')).text == "Cavendish"
await _assert_with_log(client.xml.put_root_list_single_item, xml_body)
@pytest.mark.asyncio
async def test_root_list(client):
xml_body = await client.xml.get_root_list()
bananas = list(xml_body.iterfind('banana'))
assert len(bananas) == 2
await _assert_with_log(client.xml.put_root_list, xml_body)
@pytest.mark.asyncio
async def test_empty_wrapped_lists(client):
bananas = await client.xml.get_empty_wrapped_lists()
assert [a for a in bananas.iterfind('GoodApples') if a.text] == []
assert [a for a in bananas.iterfind('BadApples') if a.text] == []
await _assert_with_log(client.xml.put_empty_wrapped_lists, bananas)
@pytest.mark.asyncio
async def test_get_empty(client):
slideshow = await client.xml.get_empty_list()
await _assert_with_log(client.xml.put_empty_list, slideshow)
@pytest.mark.asyncio
async def test_wrapped_lists(client):
bananas = await client.xml.get_wrapped_lists()
good_apples = bananas.find('GoodApples')
assert [a.text for a in good_apples.iterfind('Apple')] == ['Fuji', 'Gala']
bad_apples = bananas.find('BadApples')
assert [a.text for a in bad_apples.iterfind('Apple')] == ['Red Delicious']
await _assert_with_log(client.xml.put_wrapped_lists, bananas)
@pytest.mark.asyncio
async def test_complex_types(client):
root = await client.xml.get_complex_type_ref_no_meta()
ref_to_model = root.find('RefToModel')
assert ref_to_model.find('ID').text == "myid"
await client.xml.put_complex_type_ref_no_meta(root)
root = await client.xml.get_complex_type_ref_with_meta()
ref_to_model = root.find('XMLComplexTypeWithMeta')
assert ref_to_model.find('ID').text == "myid"
await client.xml.put_complex_type_ref_with_meta(root)
@pytest.mark.asyncio
async def test_list_containers(client):
xml_body = await client.xml.list_containers()
containers = xml_body.find('Containers')
container_list = list(containers.iterfind('Container'))
assert len(container_list) == 3
@pytest.mark.asyncio
async def test_list_blobs(client):
xml_body = await client.xml.list_blobs()
blobs_xml_body = xml_body.find('Blobs')
blobs = list(blobs_xml_body.iterfind('Blob'))
assert len(blobs) == 5
assert blobs_xml_body.find('BlobPrefix') is None
blob = blobs[0]
assert blob.find('Name').text == "blob1.txt"
properties = blob.find('Properties')
assert properties.find('Last-Modified').text == 'Wed, 09 Sep 2009 09:20:02 GMT'
assert properties.find('Etag').text == "0x8CBFF45D8A29A19"
assert properties.find('Content-Length').text == "100"
assert properties.find('Content-Type').text == "text/html"
# Check that an empty field in the XML is empty string
assert properties.find('Content-Encoding').text is None
assert properties.find('Content-Language').text == "en-US"
assert properties.find('Content-MD5').text is None
assert properties.find('Cache-Control').text == "no-cache"
assert properties.find('BlobType').text == "BlockBlob"
# Check that a field NOT in the XML is None
assert properties.find('Destination-Snapshot') is None
metadata_body = blob.find('Metadata')
assert metadata_body.find("Color").text == "blue"
assert metadata_body.find("BlobNumber").text == "01"
assert metadata_body.find("SomeMetadataName").text == "SomeMetadataValue"
@pytest.mark.asyncio
async def test_service_properties(client):
properties = await client.xml.get_service_properties()
assert properties.find('HourMetrics') is not None
assert properties.find('MinuteMetrics') is not None
await _assert_with_log(client.xml.put_service_properties, properties)
@pytest.mark.asyncio
async def test_acls(client):
acls = await client.xml.get_acls()
signed_identifiers = list(acls.iterfind('SignedIdentifier'))
assert len(signed_identifiers) == 1
assert signed_identifiers[0].find('Id').text == 'MTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTI='
await _assert_with_log(client.xml.put_acls, acls)
@pytest.mark.asyncio
async def test_xms_text(client):
xml_object = await client.xml.get_xms_text()
assert xml_object.attrib['language'] == "english"
assert xml_object.text == "I am text"
@pytest.mark.asyncio
async def test_bytes(client):
bytes_object = await client.xml.get_bytes()
assert bytes_object.tag == 'ModelWithByteProperty'
assert deserialize_base64(bytes_object.find('Bytes').text) == b"Hello world"
await client.xml.put_binary(bytes_object)
@pytest.mark.asyncio
async def test_url(client):
url_object = await client.xml.get_uri()
assert url_object.tag == 'ModelWithUrlProperty'
assert url_object.find('Url').text == 'https://myaccount.blob.core.windows.net/'
await client.xml.put_uri(url_object)
| 40.766355 | 98 | 0.723063 |
5bbecd792100bd6e11a70569e4548b018e2ff8db | 10,294 | py | Python | torch_geometric/data/sampler.py | m30m/pytorch_geometric | 4e36103299debee269cefcb3c869d45b7977bcb3 | [
"MIT"
] | null | null | null | torch_geometric/data/sampler.py | m30m/pytorch_geometric | 4e36103299debee269cefcb3c869d45b7977bcb3 | [
"MIT"
] | null | null | null | torch_geometric/data/sampler.py | m30m/pytorch_geometric | 4e36103299debee269cefcb3c869d45b7977bcb3 | [
"MIT"
] | null | null | null | import copy
from typing import List, Optional, Tuple, NamedTuple, Union
import torch
from torch import Tensor
from torch_sparse import SparseTensor
from torch_geometric.utils.num_nodes import maybe_num_nodes
class EdgeIndex(NamedTuple):
edge_index: Tensor
e_id: Optional[Tensor]
size: Tuple[int, int]
def to(self, *args, **kwargs):
edge_index = self.edge_index.to(*args, **kwargs)
e_id = self.e_id.to(*args, **kwargs) if self.e_id is not None else None
return EdgeIndex(edge_index, e_id, self.size)
class Adj(NamedTuple):
adj_t: SparseTensor
e_id: Optional[Tensor]
size: Tuple[int, int]
def to(self, *args, **kwargs):
adj_t = self.adj_t.to(*args, **kwargs)
e_id = self.e_id.to(*args, **kwargs) if self.e_id is not None else None
return Adj(adj_t, e_id, self.size)
class NeighborSampler(torch.utils.data.DataLoader):
r"""The neighbor sampler from the `"Inductive Representation Learning on
Large Graphs" <https://arxiv.org/abs/1706.02216>`_ paper, which allows
for mini-batch training of GNNs on large-scale graphs where full-batch
training is not feasible.
Given a GNN with :math:`L` layers and a specific mini-batch of nodes
:obj:`node_idx` for which we want to compute embeddings, this module
iteratively samples neighbors and constructs bipartite graphs that simulate
the actual computation flow of GNNs.
    More specifically, :obj:`sizes` denotes how many neighbors we want to
sample for each node in each layer.
This module then takes in these :obj:`sizes` and iteratively samples
:obj:`sizes[l]` for each node involved in layer :obj:`l`.
In the next layer, sampling is repeated for the union of nodes that were
already encountered.
The actual computation graphs are then returned in reverse-mode, meaning
that we pass messages from a larger set of nodes to a smaller one, until we
reach the nodes for which we originally wanted to compute embeddings.
Hence, an item returned by :class:`NeighborSampler` holds the current
:obj:`batch_size`, the IDs :obj:`n_id` of all nodes involved in the
computation, and a list of bipartite graph objects via the tuple
:obj:`(edge_index, e_id, size)`, where :obj:`edge_index` represents the
bipartite edges between source and target nodes, :obj:`e_id` denotes the
IDs of original edges in the full graph, and :obj:`size` holds the shape
of the bipartite graph.
For each bipartite graph, target nodes are also included at the beginning
of the list of source nodes so that one can easily apply skip-connections
or add self-loops.
.. note::
For an example of using :obj:`NeighborSampler`, see
`examples/reddit.py
<https://github.com/rusty1s/pytorch_geometric/blob/master/examples/
reddit.py>`_ or
`examples/ogbn_products_sage.py
<https://github.com/rusty1s/pytorch_geometric/blob/master/examples/
ogbn_products_sage.py>`_.
Args:
edge_index (Tensor or SparseTensor): A :obj:`torch.LongTensor` or a
:obj:`torch_sparse.SparseTensor` that defines the underlying graph
connectivity/message passing flow.
:obj:`edge_index` holds the indices of a (sparse) symmetric
adjacency matrix.
If :obj:`edge_index` is of type :obj:`torch.LongTensor`, its shape
must be defined as :obj:`[2, num_edges]`, where messages from nodes
:obj:`edge_index[0]` are sent to nodes in :obj:`edge_index[1]`
(in case :obj:`flow="source_to_target"`).
If :obj:`edge_index` is of type :obj:`torch_sparse.SparseTensor`,
its sparse indices :obj:`(row, col)` should relate to
:obj:`row = edge_index[1]` and :obj:`col = edge_index[0]`.
The major difference between both formats is that we need to input
the *transposed* sparse adjacency matrix.
        sizes ([int]): The number of neighbors to sample for each node in each
layer. If set to :obj:`sizes[i] = -1`, all neighbors are included
in layer :obj:`l`.
node_idx (LongTensor, optional): The nodes that should be considered
for creating mini-batches. If set to :obj:`None`, all nodes will be
considered.
num_nodes (int, optional): The number of nodes in the graph.
(default: :obj:`None`)
return_e_id (bool, optional): If set to :obj:`False`, will not return
original edge indices of sampled edges. This is only useful in case
when operating on graphs without edge features to save memory.
(default: :obj:`True`)
**kwargs (optional): Additional arguments of
:class:`torch.utils.data.DataLoader`, such as :obj:`batch_size`,
:obj:`shuffle`, :obj:`drop_last` or :obj:`num_workers`.
"""
def __init__(self, edge_index: Union[Tensor, SparseTensor],
sizes: List[int], node_idx: Optional[Tensor] = None,
num_nodes: Optional[int] = None, return_e_id: bool = True,
**kwargs):
self.sizes = sizes
self.return_e_id = return_e_id
self.is_sparse_tensor = isinstance(edge_index, SparseTensor)
self.__val__ = None
# Obtain a *transposed* `SparseTensor` instance.
edge_index = edge_index.to('cpu')
if not self.is_sparse_tensor:
num_nodes = maybe_num_nodes(edge_index, num_nodes)
value = torch.arange(edge_index.size(1)) if return_e_id else None
self.adj_t = SparseTensor(row=edge_index[0], col=edge_index[1],
value=value,
sparse_sizes=(num_nodes, num_nodes)).t()
else:
adj_t = edge_index
if return_e_id:
self.__val__ = adj_t.storage.value()
value = torch.arange(adj_t.nnz()) if return_e_id else self.val
adj_t = adj_t.set_value(value, layout='coo')
self.adj_t = adj_t
self.adj_t.storage.rowptr()
if node_idx is None:
node_idx = torch.arange(self.adj_t.sparse_size(0))
elif node_idx.dtype == torch.bool:
node_idx = node_idx.nonzero(as_tuple=False).view(-1)
super(NeighborSampler, self).__init__(
node_idx.view(-1).tolist(), collate_fn=self.sample, **kwargs)
def sample(self, batch):
if not isinstance(batch, Tensor):
batch = torch.tensor(batch)
batch_size: int = len(batch)
adjs = []
n_id = batch
for size in self.sizes:
adj_t, n_id = self.adj_t.sample_adj(n_id, size, replace=False)
e_id = adj_t.storage.value()
size = adj_t.sparse_sizes()[::-1]
if self.__val__ is not None:
adj_t.set_value_(self.__val__[e_id], layout='coo')
if self.is_sparse_tensor:
adjs.append(Adj(adj_t, e_id, size))
else:
row, col, _ = adj_t.coo()
edge_index = torch.stack([col, row], dim=0)
adjs.append(EdgeIndex(edge_index, e_id, size))
if len(adjs) > 1:
return batch_size, n_id, adjs[::-1]
else:
return batch_size, n_id, adjs[0]
def __repr__(self):
return '{}(sizes={})'.format(self.__class__.__name__, self.sizes)
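# Hedged usage sketch (not part of the original module): it mirrors the
# pattern described in the NeighborSampler docstring above. The tiny chain
# graph and all hyper-parameters below are made up purely for illustration.
def _neighbor_sampler_example():
    num_nodes = 100
    row = torch.arange(num_nodes - 1)
    edge_index = torch.stack([row, row + 1], dim=0)  # simple path graph
    loader = NeighborSampler(edge_index, sizes=[10, 5], batch_size=32,
                             shuffle=True, num_nodes=num_nodes)
    for batch_size, n_id, adjs in loader:
        # `adjs` holds one bipartite graph per layer, ordered from the
        # outermost hop down to the target nodes of the mini-batch.
        for edge_index_l, e_id, size in adjs:
            pass  # a GNN layer would consume `edge_index_l` here
        break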
class RandomIndexSampler(torch.utils.data.Sampler):
def __init__(self, num_nodes: int, num_parts: int, shuffle: bool = False):
self.N = num_nodes
self.num_parts = num_parts
self.shuffle = shuffle
self.n_ids = self.get_node_indices()
def get_node_indices(self):
n_id = torch.randint(self.num_parts, (self.N, ), dtype=torch.long)
n_ids = [(n_id == i).nonzero(as_tuple=False).view(-1)
for i in range(self.num_parts)]
return n_ids
def __iter__(self):
if self.shuffle:
self.n_ids = self.get_node_indices()
return iter(self.n_ids)
def __len__(self):
return self.num_parts
class RandomNodeSampler(torch.utils.data.DataLoader):
r"""A data loader that randomly samples nodes within a graph and returns
their induced subgraph.
.. note::
For an example of using :obj:`RandomNodeSampler`, see
`examples/ogbn_proteins_deepgcn.py
<https://github.com/rusty1s/pytorch_geometric/blob/master/examples/
ogbn_proteins_deepgcn.py>`_.
Args:
data (torch_geometric.data.Data): The graph data object.
num_parts (int): The number of partitions.
shuffle (bool, optional): If set to :obj:`True`, the data is reshuffled
at every epoch (default: :obj:`False`).
**kwargs (optional): Additional arguments of
:class:`torch.utils.data.DataLoader`, such as :obj:`num_workers`.
"""
def __init__(self, data, num_parts: int, shuffle: bool = False, **kwargs):
assert data.edge_index is not None
self.N = N = data.num_nodes
self.E = data.num_edges
self.adj = SparseTensor(
row=data.edge_index[0], col=data.edge_index[1],
value=torch.arange(self.E, device=data.edge_index.device),
sparse_sizes=(N, N))
self.data = copy.copy(data)
self.data.edge_index = None
super(RandomNodeSampler, self).__init__(
self, batch_size=1,
sampler=RandomIndexSampler(self.N, num_parts, shuffle),
collate_fn=self.__collate__, **kwargs)
def __getitem__(self, idx):
return idx
def __collate__(self, node_idx):
node_idx = node_idx[0]
data = self.data.__class__()
data.num_nodes = node_idx.size(0)
adj, _ = self.adj.saint_subgraph(node_idx)
row, col, edge_idx = adj.coo()
data.edge_index = torch.stack([row, col], dim=0)
for key, item in self.data:
if isinstance(item, Tensor) and item.size(0) == self.N:
data[key] = item[node_idx]
elif isinstance(item, Tensor) and item.size(0) == self.E:
data[key] = item[edge_idx]
else:
data[key] = item
return data
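# Hedged usage sketch (not part of the original module): RandomNodeSampler
# partitions the node set at random and yields the induced subgraph of each
# part. `Data` is assumed to be the usual torch_geometric.data.Data class.
def _random_node_sampler_example():
    from torch_geometric.data import Data
    num_nodes = 100
    row = torch.arange(num_nodes - 1)
    data = Data(edge_index=torch.stack([row, row + 1], dim=0),
                x=torch.randn(num_nodes, 16))
    loader = RandomNodeSampler(data, num_parts=4, shuffle=True)
    for sub_data in loader:
        # Each item is a Data object holding one random node partition.
        print(sub_data.num_nodes, sub_data.edge_index.size())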
| 40.527559 | 79 | 0.629493 |
4740288f2f586e8c1a632338cbd08f2fdd2ed987 | 2,239 | py | Python | recipes/python/flask/{{cookiecutter.app_name}}/{{cookiecutter.app_name}}/app.py | roscopecoltran/sniperkit-cookiecutter | 50b7ecd87d4127875764c2b7d4668ede2ed4b299 | [
"BSD-3-Clause"
] | null | null | null | recipes/python/flask/{{cookiecutter.app_name}}/{{cookiecutter.app_name}}/app.py | roscopecoltran/sniperkit-cookiecutter | 50b7ecd87d4127875764c2b7d4668ede2ed4b299 | [
"BSD-3-Clause"
] | null | null | null | recipes/python/flask/{{cookiecutter.app_name}}/{{cookiecutter.app_name}}/app.py | roscopecoltran/sniperkit-cookiecutter | 50b7ecd87d4127875764c2b7d4668ede2ed4b299 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""The app module, containing the app factory function."""
from flask import Flask, render_template
from {{cookiecutter.app_name}} import commands, public, user
from {{cookiecutter.app_name}}.extensions import bcrypt, cache, csrf_protect, db, debug_toolbar, login_manager, migrate, webpack
from {{cookiecutter.app_name}}.settings import ProdConfig
def create_app(config_object=ProdConfig):
"""An application factory, as explained here: http://flask.pocoo.org/docs/patterns/appfactories/.
:param config_object: The configuration object to use.
"""
app = Flask(__name__.split('.')[0])
app.config.from_object(config_object)
register_extensions(app)
register_blueprints(app)
register_errorhandlers(app)
register_shellcontext(app)
register_commands(app)
return app
def register_extensions(app):
"""Register Flask extensions."""
bcrypt.init_app(app)
cache.init_app(app)
db.init_app(app)
csrf_protect.init_app(app)
login_manager.init_app(app)
debug_toolbar.init_app(app)
migrate.init_app(app, db)
webpack.init_app(app)
return None
def register_blueprints(app):
"""Register Flask blueprints."""
app.register_blueprint(public.views.blueprint)
app.register_blueprint(user.views.blueprint)
return None
def register_errorhandlers(app):
"""Register error handlers."""
def render_error(error):
"""Render error template."""
# If a HTTPException, pull the `code` attribute; default to 500
error_code = getattr(error, 'code', 500)
return render_template('{0}.html'.format(error_code)), error_code
for errcode in [401, 404, 500]:
app.errorhandler(errcode)(render_error)
return None
def register_shellcontext(app):
"""Register shell context objects."""
def shell_context():
"""Shell context objects."""
return {
'db': db,
'User': user.models.User}
app.shell_context_processor(shell_context)
def register_commands(app):
"""Register Click commands."""
app.cli.add_command(commands.test)
app.cli.add_command(commands.lint)
app.cli.add_command(commands.clean)
app.cli.add_command(commands.urls)
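# Hedged usage sketch (not part of the original module): the factory is
# normally consumed from the FLASK_APP entry point (e.g. autoapp.py).
# `DevConfig` is an assumption; cookiecutter-flask style projects usually
# define it alongside ProdConfig in settings.py.
def _example_create_dev_app():
    """Build an application instance with the development configuration."""
    from {{cookiecutter.app_name}}.settings import DevConfig
    return create_app(DevConfig)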
| 30.256757 | 128 | 0.702546 |
6b9f83123cb64bc92e203cd1384bad42d353dae0 | 1,566 | py | Python | app/utils.py | A-NL/simplelogin-app | f17f9aaf8c57373c09dc3393975d2509f37815b9 | [
"MIT"
] | 4 | 2021-07-06T14:51:24.000Z | 2021-07-23T16:40:53.000Z | app/utils.py | A-NL/simplelogin-app | f17f9aaf8c57373c09dc3393975d2509f37815b9 | [
"MIT"
] | 1 | 2021-05-11T13:02:48.000Z | 2021-05-11T13:03:32.000Z | app/utils.py | A-NL/simplelogin-app | f17f9aaf8c57373c09dc3393975d2509f37815b9 | [
"MIT"
] | null | null | null | import random
import string
import urllib.parse
from unidecode import unidecode
from .config import WORDS_FILE_PATH
from .log import LOG
with open(WORDS_FILE_PATH) as f:
LOG.d("load words file: %s", WORDS_FILE_PATH)
_words = f.read().split()
def random_word():
return random.choice(_words)
def word_exist(word):
return word in _words
def random_words():
"""Generate a random words. Used to generate user-facing string, for ex email addresses"""
# nb_words = random.randint(2, 3)
nb_words = 2
return "_".join([random.choice(_words) for i in range(nb_words)])
def random_string(length=10):
"""Generate a random string of fixed length """
letters = string.ascii_lowercase
return "".join(random.choice(letters) for _ in range(length))
def convert_to_id(s: str):
"""convert a string to id-like: remove space, remove special accent"""
s = s.replace(" ", "")
s = s.lower()
s = unidecode(s)
return s
_ALLOWED_CHARS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-."
def convert_to_alphanumeric(s: str) -> str:
ret = []
# drop all control characters like shift, separator, etc
for c in s:
if c not in _ALLOWED_CHARS:
ret.append("_")
else:
ret.append(c)
return "".join(ret)
def encode_url(url):
return urllib.parse.quote(url, safe="")
def sanitize_email(email_address: str) -> str:
if email_address:
return email_address.lower().strip().replace(" ", "").replace("\n", " ")
return email_address
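# Hedged illustration (not part of the original module): expected behaviour of
# the helpers above, derived directly from their implementations.
def _examples():
    assert convert_to_id("Café Owner") == "cafeowner"
    assert convert_to_alphanumeric("weird~name!") == "weird_name_"
    assert sanitize_email("  User@Example.COM \n") == "user@example.com"
    assert len(random_string(12)) == 12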
| 23.029412 | 94 | 0.66986 |
7e1fab5c4911e10452c7add69692028c87126ace | 904 | py | Python | geotrek/trekking/migrations/0017_auto_20200831_1406.py | pierreloicq/Geotrek-admin | 00cd29f29843f2cc25e5a3c7372fcccf14956887 | [
"BSD-2-Clause"
] | 50 | 2016-10-19T23:01:21.000Z | 2022-03-28T08:28:34.000Z | geotrek/trekking/migrations/0017_auto_20200831_1406.py | pierreloicq/Geotrek-admin | 00cd29f29843f2cc25e5a3c7372fcccf14956887 | [
"BSD-2-Clause"
] | 1,422 | 2016-10-27T10:39:40.000Z | 2022-03-31T13:37:10.000Z | geotrek/trekking/migrations/0017_auto_20200831_1406.py | pierreloicq/Geotrek-admin | 00cd29f29843f2cc25e5a3c7372fcccf14956887 | [
"BSD-2-Clause"
] | 46 | 2016-10-27T10:59:10.000Z | 2022-03-22T15:55:56.000Z | # Generated by Django 2.2.15 on 2020-08-31 14:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('trekking', '0016_auto_20200708_1608'),
]
operations = [
migrations.AlterField(
model_name='poi',
name='published',
field=models.BooleanField(default=False, help_text='Visible on Geotrek-rando', verbose_name='Published'),
),
migrations.AlterField(
model_name='servicetype',
name='published',
field=models.BooleanField(default=False, help_text='Visible on Geotrek-rando', verbose_name='Published'),
),
migrations.AlterField(
model_name='trek',
name='published',
field=models.BooleanField(default=False, help_text='Visible on Geotrek-rando', verbose_name='Published'),
),
]
| 31.172414 | 117 | 0.619469 |
fa391cf9f573f7b00bba1139516d99909b93dc56 | 1,210 | py | Python | test1.py | NarmadaBalasooriya/Climate-Change-AI | 2d773f6fa1c5f4669b5cd424ec0bc50a68bb47cc | [
"MIT"
] | 6 | 2019-03-29T04:57:18.000Z | 2021-07-16T07:16:20.000Z | test1.py | NarmadaBalasooriya/Climate-Change-AI | 2d773f6fa1c5f4669b5cd424ec0bc50a68bb47cc | [
"MIT"
] | null | null | null | test1.py | NarmadaBalasooriya/Climate-Change-AI | 2d773f6fa1c5f4669b5cd424ec0bc50a68bb47cc | [
"MIT"
] | 2 | 2019-04-14T17:51:58.000Z | 2022-01-11T13:39:38.000Z | import os
import sys
import os
from options.test_options import TestOptions
from data import create_dataset
from models import create_model
from util.visualizer import save_images
from util import html
from googlegeocoder import GoogleGeocoder
import google_streetview.api
google_key = "AIzaSyBDc5jaJG0k0o1k1NHoinwU7E89AMujmso"
search = sys.argv[1:]
search_addr = ",".join(search)
geocoder = GoogleGeocoder(google_key)
location = geocoder.get(search_addr)
location = location[0]
print('Address of ', search_addr, ' is ', location.formatted_address)
loc_lat = location.geometry.location.lat
loc_lng = location.geometry.location.lng
print('Latitude and Longitudes of ', search_addr, ' are ', [loc_lat, loc_lng])
loc_lat_lng = [loc_lat, loc_lng]
loc_lat_lng = ",".join(map(str,loc_lat_lng))
loc = str(loc_lat_lng)
print(loc)
params = {
'size': '600x300', # max 640x640 pixels
'location': loc,
'heading': '0;90;180;270;360',
'pitch': '0',
'key': google_key
}
api_list = google_streetview.helpers.api_list(params)
results = google_streetview.api.results(api_list)
results.download_links(str(search_addr))
results.save_metadata('metadata.json')
| 24.693878 | 79 | 0.740496 |
ae6d8e5ba3d0877e00ce29f93a259ab60106c243 | 68 | py | Python | src/Quotient.py | pgs8/IS601_Calculator-with-unit-tests | d6d2337b53a2c095d450bd31382bacdd3293e0b5 | [
"MIT"
] | null | null | null | src/Quotient.py | pgs8/IS601_Calculator-with-unit-tests | d6d2337b53a2c095d450bd31382bacdd3293e0b5 | [
"MIT"
] | null | null | null | src/Quotient.py | pgs8/IS601_Calculator-with-unit-tests | d6d2337b53a2c095d450bd31382bacdd3293e0b5 | [
"MIT"
] | null | null | null | def division(a, b):
return "{:.9f}".format(float(a) / float(b))
| 22.666667 | 47 | 0.573529 |
7b067c3a4147f4dd5ab37487bfca44ce8ed50043 | 4,080 | py | Python | CPythonLib/test/test_pow.py | doom38/jython_v2.2.1 | 0803a0c953c294e6d14f9fc7d08edf6a3e630a15 | [
"CNRI-Jython"
] | null | null | null | CPythonLib/test/test_pow.py | doom38/jython_v2.2.1 | 0803a0c953c294e6d14f9fc7d08edf6a3e630a15 | [
"CNRI-Jython"
] | null | null | null | CPythonLib/test/test_pow.py | doom38/jython_v2.2.1 | 0803a0c953c294e6d14f9fc7d08edf6a3e630a15 | [
"CNRI-Jython"
] | null | null | null | import sys
import test_support
def powtest(type):
if type != float:
print " Testing 2-argument pow() function..."
for i in range(-1000, 1000):
if pow(type(i), 0) != 1:
raise ValueError, 'pow('+str(i)+',0) != 1'
if pow(type(i), 1) != type(i):
raise ValueError, 'pow('+str(i)+',1) != '+str(i)
if pow(type(0), 1) != type(0):
raise ValueError, 'pow(0,'+str(i)+') != 0'
if pow(type(1), 1) != type(1):
raise ValueError, 'pow(1,'+str(i)+') != 1'
for i in range(-100, 100):
if pow(type(i), 3) != i*i*i:
raise ValueError, 'pow('+str(i)+',3) != '+str(i*i*i)
pow2 = 1
for i in range(0,31):
if pow(2, i) != pow2:
raise ValueError, 'pow(2,'+str(i)+') != '+str(pow2)
if i != 30 : pow2 = pow2*2
for othertype in int, long:
for i in range(-10, 0) + range(1, 10):
ii = type(i)
for j in range(1, 11):
jj = -othertype(j)
try:
pow(ii, jj)
except ValueError:
raise ValueError, "pow(%s, %s) failed" % (ii, jj)
for othertype in int, long, float:
for i in range(1, 100):
zero = type(0)
exp = -othertype(i/10.0)
if exp == 0:
continue
try:
pow(zero, exp)
except ZeroDivisionError:
pass # taking zero to any negative exponent should fail
else:
raise ValueError, "pow(%s, %s) did not fail" % (zero, exp)
print " Testing 3-argument pow() function..."
il, ih = -20, 20
jl, jh = -5, 5
kl, kh = -10, 10
compare = cmp
if type == float:
il = 1
compare = test_support.fcmp
elif type == int:
jl = 0
elif type == long:
jl, jh = 0, 15
for i in range(il, ih+1):
for j in range(jl, jh+1):
for k in range(kl, kh+1):
if k != 0:
if type == float or j < 0:
try:
pow(type(i),j,k)
except TypeError:
pass
else:
                            raise test_support.TestFailed(
                                "expected TypeError from pow%r" % ((type(i), j, k)))
continue
if compare(pow(type(i),j,k), pow(type(i),j)% type(k)):
raise ValueError, "pow(" +str(i)+ "," +str(j)+ \
"," +str(k)+ ") != pow(" +str(i)+ "," + \
str(j)+ ") % " +str(k)
print 'Testing integer mode...'
powtest(int)
print 'Testing long integer mode...'
powtest(long)
print 'Testing floating point mode...'
powtest(float)
# Other tests-- not very systematic
print 'The number in both columns should match.'
print `pow(3,3) % 8`, `pow(3,3,8)`
print `pow(3,3) % -8`, `pow(3,3,-8)`
print `pow(3,2) % -2`, `pow(3,2,-2)`
print `pow(-3,3) % 8`, `pow(-3,3,8)`
print `pow(-3,3) % -8`, `pow(-3,3,-8)`
print `pow(5,2) % -8`, `pow(5,2,-8)`
print
print `pow(3L,3L) % 8`, `pow(3L,3L,8)`
print `pow(3L,3L) % -8`, `pow(3L,3L,-8)`
print `pow(3L,2) % -2`, `pow(3L,2,-2)`
print `pow(-3L,3L) % 8`, `pow(-3L,3L,8)`
print `pow(-3L,3L) % -8`, `pow(-3L,3L,-8)`
print `pow(5L,2) % -8`, `pow(5L,2,-8)`
print
print
for i in range(-10, 11):
for j in range(0, 6):
for k in range(-7, 11):
if j >= 0 and k != 0:
o = pow(i,j) % k
n = pow(i,j,k)
if o != n: print 'Integer mismatch:', i,j,k
if j >= 0 and k != 0:
o = pow(long(i),j) % k
n = pow(long(i),j,k)
if o != n: print 'Integer mismatch:', i,j,k
class TestRpow:
def __rpow__(self, other):
return None
None ** TestRpow() # Won't fail when __rpow__ invoked. SF bug #643260.
| 32.380952 | 76 | 0.431127 |
c334b04d7d9773cc6b5b0f3d7b4db15c441f6dd5 | 437 | py | Python | _includes/code/search-a-2d-matrix/solution.py | rajat19/interview-questions | cb1fa382a76f2f287f1c12dd3d1fca9bfb7fa311 | [
"MIT"
] | null | null | null | _includes/code/search-a-2d-matrix/solution.py | rajat19/interview-questions | cb1fa382a76f2f287f1c12dd3d1fca9bfb7fa311 | [
"MIT"
] | 2 | 2022-03-01T06:30:35.000Z | 2022-03-13T07:05:50.000Z | _includes/code/search-a-2d-matrix/solution.py | rajat19/interview-questions | cb1fa382a76f2f287f1c12dd3d1fca9bfb7fa311 | [
"MIT"
] | 1 | 2022-02-09T12:13:36.000Z | 2022-02-09T12:13:36.000Z | from typing import List
class Solution:
    def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:
        # Staircase search: start at the top-right corner and discard one row
        # or one column per comparison, so the scan is O(n + m) overall.
        n, m = len(matrix), len(matrix[0])
        row, col = 0, m - 1
        while row < n and col >= 0:
            cell = matrix[row][col]
            if cell == target:
                return True
            if cell < target:
                # Everything to the left in this row is even smaller: move down.
                row += 1
            else:
                # Everything below in this column is even larger: move left.
                col -= 1
        return False
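# Hedged usage sketch (not part of the original solution file): the sample
# matrix is sorted along both rows and columns, which is what the staircase
# search above relies on.
if __name__ == "__main__":
    grid = [[1, 4, 7],
            [8, 11, 15],
            [20, 22, 30]]
    print(Solution().searchMatrix(grid, 11))  # True
    print(Solution().searchMatrix(grid, 5))   # False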
| 25.705882 | 73 | 0.459954 |
aeeafe143045fdf0156c51f775732a3524b34fcc | 72 | py | Python | src/mixcli/command/util/__init__.py | zhuoyanli/nuance_mix_pycli | 72fe76eb715d4e0be60616d282230fa90ad7250f | [
"MIT"
] | null | null | null | src/mixcli/command/util/__init__.py | zhuoyanli/nuance_mix_pycli | 72fe76eb715d4e0be60616d282230fa90ad7250f | [
"MIT"
] | null | null | null | src/mixcli/command/util/__init__.py | zhuoyanli/nuance_mix_pycli | 72fe76eb715d4e0be60616d282230fa90ad7250f | [
"MIT"
] | null | null | null | """
MixCli **util** command group for various **utility** use cases
"""
| 18 | 63 | 0.652778 |
8f607130081368c64b7d39d08ca5ef00f7c6b2bc | 169 | py | Python | ex12.py | AyeAyeNwe/python-exercises | 68c4152e3527c04e5c0f2a6c34f66ad54701d715 | [
"MIT"
] | null | null | null | ex12.py | AyeAyeNwe/python-exercises | 68c4152e3527c04e5c0f2a6c34f66ad54701d715 | [
"MIT"
] | null | null | null | ex12.py | AyeAyeNwe/python-exercises | 68c4152e3527c04e5c0f2a6c34f66ad54701d715 | [
"MIT"
] | null | null | null | age = input("How old are you?")
height = input("How tall are you?")
weight = input("How much do you weigh?")
print(f"So, you're {age} old, {height} tall, {weight} heavy.")
| 33.8 | 60 | 0.668639 |
bedeb81f769aa8cc54a27406469a5b74d213b13b | 4,320 | py | Python | tests/clpy_tests/statics_tests/test_meanvar.py | fixstars/clpy | 693485f85397cc110fa45803c36c30c24c297df0 | [
"BSD-3-Clause"
] | 142 | 2018-06-07T07:43:10.000Z | 2021-10-30T21:06:32.000Z | tests/clpy_tests/statics_tests/test_meanvar.py | fixstars/clpy | 693485f85397cc110fa45803c36c30c24c297df0 | [
"BSD-3-Clause"
] | 282 | 2018-06-07T08:35:03.000Z | 2021-03-31T03:14:32.000Z | tests/clpy_tests/statics_tests/test_meanvar.py | fixstars/clpy | 693485f85397cc110fa45803c36c30c24c297df0 | [
"BSD-3-Clause"
] | 19 | 2018-06-19T11:07:53.000Z | 2021-05-13T20:57:04.000Z | import unittest
from clpy import testing
@testing.gpu
class TestMeanVar(unittest.TestCase):
_multiprocess_can_split_ = True
@testing.for_all_dtypes()
@testing.numpy_clpy_allclose()
def test_mean_all(self, xp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype)
return a.mean()
@testing.for_all_dtypes()
@testing.numpy_clpy_allclose()
def test_external_mean_all(self, xp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype)
return xp.mean(a)
@testing.for_all_dtypes()
@testing.numpy_clpy_allclose()
def test_mean_axis(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
return a.mean(axis=1)
@testing.for_all_dtypes()
@testing.numpy_clpy_allclose()
def test_external_mean_axis(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
return xp.mean(a, axis=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_clpy_allclose()
def test_var_all(self, xp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype)
return a.var()
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_clpy_allclose()
def test_external_var_all(self, xp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype)
return xp.var(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_clpy_allclose()
def test_var_all_ddof(self, xp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype)
return a.var(ddof=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_clpy_allclose()
def test_external_var_all_ddof(self, xp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype)
return xp.var(a, ddof=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_clpy_allclose()
def test_var_axis(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
return a.var(axis=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_clpy_allclose()
def test_external_var_axis(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
return xp.var(a, axis=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_clpy_allclose()
def test_var_axis_ddof(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
return a.var(axis=1, ddof=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_clpy_allclose()
def test_external_var_axis_ddof(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
return xp.var(a, axis=1, ddof=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_clpy_allclose()
def test_std_all(self, xp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype)
return a.std()
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_clpy_allclose()
def test_external_std_all(self, xp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype)
return xp.std(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_clpy_allclose()
def test_std_all_ddof(self, xp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype)
return a.std(ddof=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_clpy_allclose()
def test_external_std_all_ddof(self, xp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype)
return xp.std(a, ddof=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_clpy_allclose()
def test_std_axis(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
return a.std(axis=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_clpy_allclose()
def test_external_std_axis(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
return xp.std(a, axis=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_clpy_allclose()
def test_std_axis_ddof(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
return a.std(axis=1, ddof=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_clpy_allclose()
def test_external_std_axis_ddof(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
return xp.std(a, axis=1, ddof=1)
| 33.230769 | 55 | 0.661343 |
0e15cef9603b721450b5f024c55f54491a74886e | 536 | py | Python | wafer/registration/templatetags/wafer_crispy.py | drnlm/wafer | 1d843190428c401df06fcdfb89d1f9d9af67229e | [
"ISC"
] | 41 | 2015-03-16T17:47:00.000Z | 2022-01-07T04:31:21.000Z | wafer/registration/templatetags/wafer_crispy.py | drnlm/wafer | 1d843190428c401df06fcdfb89d1f9d9af67229e | [
"ISC"
] | 338 | 2015-03-15T17:26:36.000Z | 2021-12-02T04:34:53.000Z | wafer/registration/templatetags/wafer_crispy.py | drnlm/wafer | 1d843190428c401df06fcdfb89d1f9d9af67229e | [
"ISC"
] | 28 | 2015-07-27T14:11:13.000Z | 2020-11-16T03:50:30.000Z | from django import template
import sys
register = template.Library()
@register.simple_tag(takes_context=True)
def wafer_form_helper(context, helper_name):
'''
Find the specified Crispy FormHelper and instantiate it.
Handy when you are crispyifying other apps' forms.
'''
request = context.request
module, class_name = helper_name.rsplit('.', 1)
if module not in sys.modules:
__import__(module)
mod = sys.modules[module]
class_ = getattr(mod, class_name)
return class_(request=request)
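# Hedged usage sketch (not part of the original module): typical template-side
# use of the tag above. The dotted helper path is a made-up example.
#
#     {% load crispy_forms_tags wafer_crispy %}
#     {% wafer_form_helper "myapp.forms.MyFormHelper" as helper %}
#     {% crispy form helper %}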
| 26.8 | 60 | 0.714552 |
581bda3b133a11f9bc14a9279b2a9d98497f451e | 89 | py | Python | app/app/mqtt/__init__.py | MartinHeinz/IoT-Cloud | 2e6fddcfe2624862c9351759334a6655a896e8c7 | [
"MIT"
] | 14 | 2019-11-17T23:49:20.000Z | 2022-02-04T23:28:45.000Z | app/app/mqtt/__init__.py | MartinHeinz/IoT-Cloud | 2e6fddcfe2624862c9351759334a6655a896e8c7 | [
"MIT"
] | 3 | 2019-12-02T18:26:11.000Z | 2021-04-30T20:46:06.000Z | app/app/mqtt/__init__.py | MartinHeinz/IoT-Cloud | 2e6fddcfe2624862c9351759334a6655a896e8c7 | [
"MIT"
] | 4 | 2018-12-28T13:41:44.000Z | 2020-09-13T14:14:06.000Z | from .mqtt import handle_on_connect, handle_on_log, handle_on_publish, handle_on_message
| 44.5 | 88 | 0.876404 |
107dd8de95264b97534c63c64ef1ae15b1f93e4b | 2,145 | py | Python | censusreporter/apps/census/management/commands/cache_to_s3.py | Durellg/censusreporter | c006c2f1c67fd29086fe532974f1eb57e70a0e2c | [
"MIT"
] | 1 | 2020-07-15T23:47:28.000Z | 2020-07-15T23:47:28.000Z | censusreporter/apps/census/management/commands/cache_to_s3.py | Durellg/censusreporter | c006c2f1c67fd29086fe532974f1eb57e70a0e2c | [
"MIT"
] | null | null | null | censusreporter/apps/census/management/commands/cache_to_s3.py | Durellg/censusreporter | c006c2f1c67fd29086fe532974f1eb57e70a0e2c | [
"MIT"
] | 1 | 2020-07-17T17:49:42.000Z | 2020-07-17T17:49:42.000Z | from django.core.management.base import BaseCommand
from multiprocessing import Pool
from traceback import format_exc
from boto.s3.connection import S3Connection
from boto.s3.key import Key
import json
import cStringIO
import gzip
from ...profile import geo_profile, enhance_api_data
import logging
logging.basicConfig(level=logging.WARN)
logger = logging.getLogger(__name__)
s3 = S3Connection()
def s3_keyname(geoid):
return '/1.0/data/profiles/%s.json' % geoid
def key(geoid):
bucket = s3.get_bucket('embed.censusreporter.org')
keyname = s3_keyname(geoid)
key = Key(bucket, keyname)
return key
def write_profile_json(s3_key, data):
s3_key.metadata['Content-Type'] = 'application/json'
s3_key.metadata['Content-Encoding'] = 'gzip'
# create gzipped version of json in memory
memfile = cStringIO.StringIO()
#memfile.write(data)
with gzip.GzipFile(filename=s3_key.key, mode='wb', fileobj=memfile) as gzip_data:
gzip_data.write(data)
memfile.seek(0)
# store static version on S3
s3_key.set_contents_from_file(memfile)
def seed(geoid):
logger.info("Working on {}".format(geoid))
try:
api_data = geo_profile(geoid)
api_data = enhance_api_data(api_data)
s3key = key(geoid)
write_profile_json(s3key, json.dumps(api_data))
logger.info("Wrote to key {}".format(s3key))
except Exception, e:
logger.error("Problem caching {}".format(geoid))
logger.exception(e)
logger.info("Done working on {}".format(geoid))
class Command(BaseCommand):
help = 'Pre-generates some Census Reporter content and places it on S3.'
def handle(self, *args, **options):
if not args:
print "Please include the name of a file containing the seed geo_ids."
return False
parallelism = 4
if 'parallelism' in options:
parallelism = int(options.get('parallelism'))
pool = Pool(parallelism)
seed_file = open(args[0], 'r')
for geoid in seed_file:
pool.apply_async(seed, (geoid.strip(),))
pool.close()
pool.join()
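# Hedged usage note (not part of the original module): with this old-style
# Django command signature the seed file is passed as a positional argument,
# for example
#
#     python manage.py cache_to_s3 seed_geoids.txt
#
# where each line of the file is a geoid whose profile JSON is gzipped to S3.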
| 27.151899 | 85 | 0.675524 |
62a3fb21823d64ea7d87192d63c3d02b9578f775 | 721 | py | Python | tests/shuffle_tests.py | kimdiep/algorithmic-complexity | e3ffa1728f87a1a6a841b41a2784e32a76722a46 | [
"MIT"
] | null | null | null | tests/shuffle_tests.py | kimdiep/algorithmic-complexity | e3ffa1728f87a1a6a841b41a2784e32a76722a46 | [
"MIT"
] | null | null | null | tests/shuffle_tests.py | kimdiep/algorithmic-complexity | e3ffa1728f87a1a6a841b41a2784e32a76722a46 | [
"MIT"
] | null | null | null | import pytest
import sys
sys.path.append('./')
from shuffle import *
# assumption made that array (list) input will not be empty []
def test_shuffle_for_empty_string():
arr = ['']
assert type(random_shuffle(arr)) is list
def test_shuffle_for_list_of_integers():
arr = [1,2,3,4,5]
assert type(random_shuffle(arr)) is list
def test_shuffle_for_list_of_strings():
arr = ['1','2','3','4','5']
assert type(random_shuffle(arr)) is list
def test_shuffle_for_list_of_strings_and_integers():
arr = ['1',2,'3',4,5]
assert type(random_shuffle(arr)) is list
def test_shuffle_for_list_of_strings_and_integers_with_words():
arr = ['car', 'truck', 8, 4, 'bus', 6, 1]
assert type(random_shuffle(arr)) is list
| 26.703704 | 63 | 0.71706 |
b7aa9354ab6dbf172990c2cd0a590ec6f5fe0f81 | 3,787 | py | Python | haiku/_src/initializers_test.py | madisonmay/dm-haiku | de95f6f83561edeb582d46b2e3bf135051792b91 | [
"Apache-2.0"
] | null | null | null | haiku/_src/initializers_test.py | madisonmay/dm-haiku | de95f6f83561edeb582d46b2e3bf135051792b91 | [
"Apache-2.0"
] | null | null | null | haiku/_src/initializers_test.py | madisonmay/dm-haiku | de95f6f83561edeb582d46b2e3bf135051792b91 | [
"Apache-2.0"
] | null | null | null | # Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.initializers."""
from absl.testing import absltest
from haiku._src import initializers
from haiku._src import test_utils
import jax.numpy as jnp
class InitializersTest(absltest.TestCase):
@test_utils.transform_and_run
def test_initializers(self):
# This just makes sure we can call the initializers in accordance to the
# API and get the right shapes and dtypes out.
inits = [
initializers.Constant(42.0),
initializers.RandomNormal(),
initializers.RandomNormal(2.0),
initializers.RandomUniform(),
initializers.RandomUniform(3.0),
initializers.VarianceScaling(),
initializers.VarianceScaling(2.0),
initializers.VarianceScaling(2.0, mode="fan_in"),
initializers.VarianceScaling(2.0, mode="fan_out"),
initializers.VarianceScaling(2.0, mode="fan_avg"),
initializers.VarianceScaling(2.0, distribution="truncated_normal"),
initializers.VarianceScaling(2.0, distribution="normal"),
initializers.VarianceScaling(2.0, distribution="uniform"),
initializers.UniformScaling(),
initializers.UniformScaling(2.0),
initializers.TruncatedNormal(),
initializers.Orthogonal(),
# Users are supposed to be able to use these.
jnp.zeros,
jnp.ones,
]
# TODO(ibab): Test other shapes as well.
shape = (20, 42)
dtype = jnp.float32
for init in inits:
generated = init(shape, dtype)
self.assertEqual(generated.shape, shape)
self.assertEqual(generated.dtype, dtype)
@test_utils.transform_and_run
def test_invalid_variance_scale(self):
with self.assertRaisesRegex(ValueError, "scale.*must be a positive float"):
initializers.VarianceScaling(scale=-1.0)
with self.assertRaisesRegex(ValueError, "Invalid `mode` argument*"):
initializers.VarianceScaling(mode="foo")
with self.assertRaisesRegex(ValueError, "Invalid `distribution` argument*"):
initializers.VarianceScaling(distribution="bar")
@test_utils.transform_and_run
def test_compute_fans(self):
fan_in_out1 = initializers._compute_fans([])
self.assertEqual(fan_in_out1, (1, 1))
fan_in_out2 = initializers._compute_fans([2])
self.assertEqual(fan_in_out2, (2, 2))
fan_in_out3 = initializers._compute_fans([3, 4])
self.assertEqual(fan_in_out3, (3, 4))
fan_in_out4 = initializers._compute_fans([1, 2, 3, 4])
self.assertEqual(fan_in_out4, (6, 8))
@test_utils.transform_and_run
def test_orthogonal_invalid_shape(self):
init = initializers.Orthogonal()
shape = (20,)
with self.assertRaisesRegex(
ValueError, "Orthogonal initializer requires at least a 2D shape."):
init(shape, jnp.float32)
@test_utils.transform_and_run
def test_orthogonal_orthogonal(self):
init = initializers.Orthogonal()
shape = (42, 20)
generated = init(shape, jnp.float32)
self.assertEqual(generated.shape, shape)
self.assertEqual(generated.dtype, jnp.float32)
if __name__ == "__main__":
absltest.main()
| 36.066667 | 80 | 0.70029 |
e459a6e688e3c9d51565d16f56827ef2e2a73d4d | 160 | py | Python | terraform_builder/release.py | mrlesmithjr/terraform-builder | 08ed71333e988682ce50c6ef865fdd8ba27de395 | [
"MIT"
] | 7 | 2020-03-21T20:40:50.000Z | 2022-02-17T17:17:53.000Z | terraform_builder/release.py | mrlesmithjr/terraform-builder | 08ed71333e988682ce50c6ef865fdd8ba27de395 | [
"MIT"
] | 39 | 2020-03-24T04:37:21.000Z | 2020-06-17T04:20:22.000Z | terraform_builder/release.py | mrlesmithjr/terraform-builder | 08ed71333e988682ce50c6ef865fdd8ba27de395 | [
"MIT"
] | null | null | null | """terraform_builder/release.py"""
# Version tracking for package.
__author__ = 'Larry Smith Jr.'
__version__ = '0.1.0'
__package_name__ = 'terraform_builder'
| 22.857143 | 38 | 0.75 |
e624a1941ef6c296e1795cce20b65a8ad6927785 | 1,169 | py | Python | orion/packages/utils/tests/test_nlp_utils.py | orion-search/orion-backend | b28815f85de1046612a777f290f982446b2a5ad7 | [
"MIT"
] | 19 | 2020-02-18T17:03:42.000Z | 2021-09-22T08:02:17.000Z | orion/packages/utils/tests/test_nlp_utils.py | orion-search/orion-backend | b28815f85de1046612a777f290f982446b2a5ad7 | [
"MIT"
] | 116 | 2020-01-10T10:02:52.000Z | 2022-03-01T23:10:10.000Z | orion/packages/utils/tests/test_nlp_utils.py | orion-search/orion-backend | b28815f85de1046612a777f290f982446b2a5ad7 | [
"MIT"
] | 2 | 2020-11-04T17:10:52.000Z | 2021-02-14T18:37:02.000Z | import pytest
from orion.packages.utils.nlp_utils import clean_name
from orion.packages.utils.nlp_utils import identity_tokenizer
def test_clean_name_from_double_initials():
name = "A. B. FooBar"
result = clean_name(name)
expected_result = None
assert result == expected_result
def test_clean_name_from_single_initial():
name = "A. FooBar"
result = clean_name(name)
expected_result = None
assert result == expected_result
def test_clean_name_from_single_initial_variation():
name = "Foo A. FooBar"
result = clean_name(name)
expected_result = "Foo FooBar"
assert result == expected_result
def test_clean_name_symbols():
name = "허준 ( Joon Hur ) 이용구 ( Yong Goo Lee )"
result = clean_name(name)
expected_result = "허준 Joon Hur 이용구 Yong Goo Lee"
assert result == expected_result
def test_clean_name():
name = "Foo FooBar"
result = clean_name(name)
expected_result = "Foo FooBar"
assert result == expected_result
def test_identity_tokenizer():
data = [1, 2, 3]
expected_result = [1, 2, 3]
result = identity_tokenizer(data)
assert result == expected_result
| 20.155172 | 61 | 0.704021 |
ea2eff0ec5fc319d8de4393ae9a6cd9d4f6d1e94 | 937 | py | Python | flask_wtforms_tutorial/routes.py | msmith2777/FinalProjectReal | 460b302b783aae0857742e23b70dfdd110169689 | [
"MIT"
] | null | null | null | flask_wtforms_tutorial/routes.py | msmith2777/FinalProjectReal | 460b302b783aae0857742e23b70dfdd110169689 | [
"MIT"
] | null | null | null | flask_wtforms_tutorial/routes.py | msmith2777/FinalProjectReal | 460b302b783aae0857742e23b70dfdd110169689 | [
"MIT"
] | 2 | 2020-12-08T01:28:41.000Z | 2020-12-08T01:32:16.000Z | from flask import current_app as app
from flask import redirect, render_template, url_for, request, flash
from .forms import *
#@app.route("/", methods=['GET', 'POST'])
@app.route("/", methods=['GET', 'POST'])
def user_options():
form = UserOptionForm()
if request.method == 'POST' and form.validate_on_submit():
option = request.form['option']
if option == "1":
return redirect('/admin')
else:
return redirect("/reservations")
return render_template("options.html", form=form, template="form-template")
@app.route("/admin", methods=['GET', 'POST'])
def admin():
form = AdminLoginForm()
return render_template("admin.html", form=form, template="form-template")
@app.route("/reservations", methods=['GET', 'POST'])
def reservations():
form = ReservationForm()
return render_template("reservations.html", form=form, template="form-template")
| 26.027778 | 84 | 0.649947 |
36f5f5cc29e885c74f5d25f35fc3a0ed20b52a2e | 144,350 | py | Python | upstream/emscripten/emcc.py | mkonicek/wasm | 47441e963566ecb159f457eaf635a9822ecea056 | [
"MIT"
] | 1 | 2021-04-25T23:39:18.000Z | 2021-04-25T23:39:18.000Z | emcc.py | intgr/emscripten | dff33368427fba16745c8ce52f11484a67b2855d | [
"MIT"
] | null | null | null | emcc.py | intgr/emscripten | dff33368427fba16745c8ce52f11484a67b2855d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2011 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
"""emcc - compiler helper script
=============================
emcc is a drop-in replacement for a compiler like gcc or clang.
See emcc --help for details.
emcc can be influenced by a few environment variables:
EMCC_DEBUG - "1" will log out useful information during compilation, as well as
save each compiler step as an emcc-* file in the temp dir
(by default /tmp/emscripten_temp). "2" will save additional emcc-*
steps, that would normally not be separately produced (so this
slows down compilation).
EMMAKEN_NO_SDK - Will tell emcc *not* to use the emscripten headers. Instead
your system headers will be used.
"""
from tools.toolchain_profiler import ToolchainProfiler
import base64
import json
import logging
import os
import re
import shlex
import shutil
import stat
import sys
import time
from enum import Enum
from subprocess import PIPE
from urllib.parse import quote
import emscripten
from tools import shared, system_libs
from tools import colored_logger, diagnostics, building
from tools.shared import unsuffixed, unsuffixed_basename, WINDOWS, safe_copy
from tools.shared import run_process, read_and_preprocess, exit_with_error, DEBUG
from tools.shared import do_replace
from tools.response_file import substitute_response_files
from tools.minimal_runtime_shell import generate_minimal_runtime_html
import tools.line_endings
from tools import js_manipulation
from tools import wasm2c
from tools import webassembly
from tools import config
from tools.settings import settings
logger = logging.getLogger('emcc')
# endings = dot + a suffix, safe to test by filename.endswith(endings)
C_ENDINGS = ('.c', '.i')
CXX_ENDINGS = ('.cpp', '.cxx', '.cc', '.c++', '.CPP', '.CXX', '.C', '.CC', '.C++', '.ii')
OBJC_ENDINGS = ('.m', '.mi')
OBJCXX_ENDINGS = ('.mm', '.mii')
ASSEMBLY_CPP_ENDINGS = ('.S',)
SPECIAL_ENDINGLESS_FILENAMES = (os.devnull,)
SOURCE_ENDINGS = C_ENDINGS + CXX_ENDINGS + OBJC_ENDINGS + OBJCXX_ENDINGS + SPECIAL_ENDINGLESS_FILENAMES + ASSEMBLY_CPP_ENDINGS
C_ENDINGS = C_ENDINGS + SPECIAL_ENDINGLESS_FILENAMES # consider the special endingless filenames like /dev/null to be C
EXECUTABLE_ENDINGS = ('.wasm', '.html', '.js', '.mjs', '.out', '')
DYNAMICLIB_ENDINGS = ('.dylib', '.so') # Windows .dll suffix is not included in this list, since those are never linked to directly on the command line.
STATICLIB_ENDINGS = ('.a',)
ASSEMBLY_ENDINGS = ('.ll', '.s')
HEADER_ENDINGS = ('.h', '.hxx', '.hpp', '.hh', '.H', '.HXX', '.HPP', '.HH')
# Supported LLD flags which we will pass through to the linker.
SUPPORTED_LINKER_FLAGS = (
'--start-group', '--end-group',
'-(', '-)',
'--whole-archive', '--no-whole-archive',
'-whole-archive', '-no-whole-archive'
)
# Unsupported LLD flags which we will ignore.
# Maps to true if the flag takes an argument.
UNSUPPORTED_LLD_FLAGS = {
# macOS-specific linker flag that libtool (ltmain.sh) will add if macOS is detected.
'-bind_at_load': False,
'-M': False,
# wasm-ld doesn't support soname or other dynamic linking flags (yet). Ignore them
# in order to aid build systems that want to pass these flags.
'-soname': True,
'-allow-shlib-undefined': False,
'-rpath': True,
'-rpath-link': True,
'-version-script': True,
}
DEFAULT_ASYNCIFY_IMPORTS = [
'emscripten_sleep', 'emscripten_wget', 'emscripten_wget_data', 'emscripten_idb_load',
'emscripten_idb_store', 'emscripten_idb_delete', 'emscripten_idb_exists',
'emscripten_idb_load_blob', 'emscripten_idb_store_blob', 'SDL_Delay',
'emscripten_scan_registers', 'emscripten_lazy_load_code',
'emscripten_fiber_swap',
'wasi_snapshot_preview1.fd_sync', '__wasi_fd_sync', '_emval_await']
# Mapping of emcc opt levels to llvm opt levels. We use llvm opt level 3 in emcc
# opt levels 2 and 3 (emcc 3 is unsafe opts, so unsuitable for the only level to
# get llvm opt level 3, and speed-wise emcc level 2 is already the slowest/most
# optimizing level)
LLVM_OPT_LEVEL = {
0: ['-O0'],
1: ['-O1'],
2: ['-O3'],
3: ['-O3'],
}
# Target options
final_js = None
UBSAN_SANITIZERS = {
'alignment',
'bool',
'builtin',
'bounds',
'enum',
'float-cast-overflow',
'float-divide-by-zero',
'function',
'implicit-unsigned-integer-truncation',
'implicit-signed-integer-truncation',
'implicit-integer-sign-change',
'integer-divide-by-zero',
'nonnull-attribute',
'null',
'nullability-arg',
'nullability-assign',
'nullability-return',
'object-size',
'pointer-overflow',
'return',
'returns-nonnull-attribute',
'shift',
'signed-integer-overflow',
'unreachable',
'unsigned-integer-overflow',
'vla-bound',
'vptr',
'undefined',
'undefined-trap',
'implicit-integer-truncation',
'implicit-integer-arithmetic-value-change',
'implicit-conversion',
'integer',
'nullability',
}
VALID_ENVIRONMENTS = ('web', 'webview', 'worker', 'node', 'shell')
SIMD_INTEL_FEATURE_TOWER = ['-msse', '-msse2', '-msse3', '-mssse3', '-msse4.1', '-msse4.2', '-mavx']
SIMD_NEON_FLAGS = ['-mfpu=neon']
# this function uses the global 'final' variable, which contains the current
# final output file. if a method alters final, and calls this method, then it
# must modify final globally (i.e. it can't receive final as a param and
# return it)
# TODO: refactor all this, a singleton that abstracts over the final output
# and saving of intermediates
def save_intermediate(name, suffix='js'):
if not DEBUG:
return
if not final_js:
logger.debug('(not saving intermediate %s because not generating JS)' % name)
return
building.save_intermediate(final_js, name + '.' + suffix)
def save_intermediate_with_wasm(name, wasm_binary):
if not DEBUG:
return
save_intermediate(name) # save the js
building.save_intermediate(wasm_binary, name + '.wasm')
class TimeLogger:
last = time.time()
@staticmethod
def update():
TimeLogger.last = time.time()
def log_time(name):
"""Log out times for emcc stages"""
if DEBUG:
now = time.time()
logger.debug('emcc step "%s" took %.2f seconds', name, now - TimeLogger.last)
TimeLogger.update()
def base64_encode(b):
b64 = base64.b64encode(b)
return b64.decode('ascii')
class OFormat(Enum):
WASM = 1
JS = 2
MJS = 3
HTML = 4
BARE = 5
class EmccOptions:
def __init__(self):
self.output_file = None
self.post_link = False
self.executable = False
self.compiler_wrapper = None
self.oformat = None
self.requested_debug = ''
self.profiling = False
self.profiling_funcs = False
self.tracing = False
self.emit_symbol_map = False
self.use_closure_compiler = None
self.closure_args = []
self.js_transform = None
self.pre_js = '' # before all js
self.post_js = '' # after all js
self.extern_pre_js = '' # before all js, external to optimized code
self.extern_post_js = '' # after all js, external to optimized code
self.preload_files = []
self.embed_files = []
self.exclude_files = []
self.ignore_dynamic_linking = False
self.shell_path = shared.path_from_root('src', 'shell.html')
self.source_map_base = ''
self.emrun = False
self.cpu_profiler = False
self.thread_profiler = False
self.memory_profiler = False
self.memory_init_file = None
self.use_preload_cache = False
self.use_preload_plugins = False
self.default_object_extension = '.o'
self.valid_abspaths = []
self.cfi = False
# Specifies the line ending format to use for all generated text files.
# Defaults to using the native EOL on each platform (\r\n on Windows, \n on
# Linux & MacOS)
self.output_eol = os.linesep
self.no_entry = False
self.shared = False
self.relocatable = False
def will_metadce():
# The metadce JS parsing code does not currently support the JS that gets generated
# when assertions are enabled.
if settings.ASSERTIONS:
return False
return settings.OPT_LEVEL >= 3 or settings.SHRINK_LEVEL >= 1
def setup_environment_settings():
# Environment setting based on user input
environments = settings.ENVIRONMENT.split(',')
if any([x for x in environments if x not in VALID_ENVIRONMENTS]):
exit_with_error('Invalid environment specified in "ENVIRONMENT": ' + settings.ENVIRONMENT + '. Should be one of: ' + ','.join(VALID_ENVIRONMENTS))
settings.ENVIRONMENT_MAY_BE_WEB = not settings.ENVIRONMENT or 'web' in environments
settings.ENVIRONMENT_MAY_BE_WEBVIEW = not settings.ENVIRONMENT or 'webview' in environments
settings.ENVIRONMENT_MAY_BE_NODE = not settings.ENVIRONMENT or 'node' in environments
settings.ENVIRONMENT_MAY_BE_SHELL = not settings.ENVIRONMENT or 'shell' in environments
# The worker case also includes Node.js workers when pthreads are
# enabled and Node.js is one of the supported environments for the build to
# run on. Node.js workers are detected as a combination of
# ENVIRONMENT_IS_WORKER and ENVIRONMENT_IS_NODE.
settings.ENVIRONMENT_MAY_BE_WORKER = \
not settings.ENVIRONMENT or \
'worker' in environments or \
(settings.ENVIRONMENT_MAY_BE_NODE and settings.USE_PTHREADS)
if not settings.ENVIRONMENT_MAY_BE_WORKER and settings.PROXY_TO_WORKER:
exit_with_error('If you specify --proxy-to-worker and specify a "-s ENVIRONMENT=" directive, it must include "worker" as a target! (Try e.g. -s ENVIRONMENT=web,worker)')
if not settings.ENVIRONMENT_MAY_BE_WORKER and settings.USE_PTHREADS:
exit_with_error('When building with multithreading enabled and a "-s ENVIRONMENT=" directive is specified, it must include "worker" as a target! (Try e.g. -s ENVIRONMENT=web,worker)')
def minify_whitespace():
return settings.OPT_LEVEL >= 2 and settings.DEBUG_LEVEL == 0
def embed_memfile():
return (settings.SINGLE_FILE or
(settings.MEM_INIT_METHOD == 0 and
(not settings.MAIN_MODULE and
not settings.SIDE_MODULE and
not settings.GENERATE_SOURCE_MAP)))
def expand_byte_size_suffixes(value):
"""Given a string with KB/MB size suffixes, such as "32MB", computes how
many bytes that is and returns it as an integer.
"""
value = value.strip()
match = re.match(r'^(\d+)\s*([kmgt]?b)?$', value, re.I)
if not match:
exit_with_error("invalid byte size `%s`. Valid suffixes are: kb, mb, gb, tb" % value)
value, suffix = match.groups()
value = int(value)
if suffix:
size_suffixes = {suffix: 1024 ** i for i, suffix in enumerate(['b', 'kb', 'mb', 'gb', 'tb'])}
value *= size_suffixes[suffix.lower()]
return value
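# Illustrative examples (comments only, not executed), following the regex and
# the 1024-based multipliers above:
#   expand_byte_size_suffixes('32MB') -> 33554432  (32 * 1024**2)
#   expand_byte_size_suffixes('64kb') -> 65536     (64 * 1024)
#   expand_byte_size_suffixes('512')  -> 512       (no suffix means plain bytes)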
def apply_settings(changes):
"""Take a map of users settings {NAME: VALUE} and apply them to the global
settings object.
"""
def standardize_setting_change(key, value):
# boolean NO_X settings are aliases for X
# (note that *non*-boolean setting values have special meanings,
# and we can't just flip them, so leave them as-is to be
# handled in a special way later)
if key.startswith('NO_') and value in ('0', '1'):
key = key[3:]
value = str(1 - int(value))
return key, value
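# Illustrative examples of the alias handling above (comments only, not executed):
#   ('NO_EXIT_RUNTIME', '1') -> ('EXIT_RUNTIME', '0')
#   ('NO_FILESYSTEM', '0')   -> ('FILESYSTEM', '1')
# Non-boolean values are left untouched and handled specially later.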
for key, value in changes.items():
key, value = standardize_setting_change(key, value)
if key in settings.internal_settings:
exit_with_error('%s is an internal setting and cannot be set from command line', key)
# map legacy settings which have aliases to the new names
# but keep the original key so errors are correctly reported via the `setattr` below
user_key = key
if key in settings.legacy_settings and key in settings.alt_names:
key = settings.alt_names[key]
# In those settings fields that represent amount of memory, translate suffixes to multiples of 1024.
if key in ('TOTAL_STACK', 'INITIAL_MEMORY', 'MEMORY_GROWTH_LINEAR_STEP', 'MEMORY_GROWTH_GEOMETRIC_CAP',
'GL_MAX_TEMP_BUFFER_SIZE', 'MAXIMUM_MEMORY', 'DEFAULT_PTHREAD_STACK_SIZE'):
value = str(expand_byte_size_suffixes(value))
if value and value[0] == '@':
filename = value[1:]
if not os.path.exists(filename):
exit_with_error('%s: file not found parsing argument: %s=%s' % (filename, key, value))
value = open(filename).read()
else:
value = value.replace('\\', '\\\\')
existing = getattr(settings, user_key, None)
expect_list = type(existing) == list
try:
value = parse_value(value, expect_list)
except Exception as e:
exit_with_error('a problem occurred in evaluating the content after a "-s", specifically "%s=%s": %s', key, value, str(e))
# Do some basic type checking by comparing to the existing settings.
# Sadly we can't do this generically in the SettingsManager since there are settings
# that do change types internally over time.
# We only currently worry about lists vs non-lists.
if expect_list != (type(value) == list):
exit_with_error('setting `%s` expects `%s` but got `%s`' % (user_key, type(existing), type(value)))
setattr(settings, user_key, value)
if key == 'EXPORTED_FUNCTIONS':
# used for warnings in emscripten.py
settings.USER_EXPORTED_FUNCTIONS = settings.EXPORTED_FUNCTIONS.copy()
# TODO(sbc): Remove this legacy way.
if key == 'WASM_OBJECT_FILES':
settings.LTO = 0 if value else 'full'
def is_ar_file_with_missing_index(archive_file):
# We parse the archive header ourselves because llvm-nm --print-armap is slower and less
# reliable.
# See: https://github.com/emscripten-core/emscripten/issues/10195
archive_header = b'!<arch>\n'
file_header_size = 60
with open(archive_file, 'rb') as f:
header = f.read(len(archive_header))
if header != archive_header:
# This is not even an ar file
return False
file_header = f.read(file_header_size)
if len(file_header) != file_header_size:
# We don't have any file entries at all so we don't consider the index missing
return False
name = file_header[:16].strip()
# If '/' is the name of the first file we have an index
return name != b'/'
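# For reference, a GNU ar archive that has an index starts roughly like this
# (illustrative sketch; each member header is a 60-byte fixed-width record):
#   !<arch>\n
#   /               <timestamp> ...   <- symbol index member, named '/'
#   foo.o/          <timestamp> ...
# If the first member is not named '/', the archive carries no symbol index.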
def ensure_archive_index(archive_file):
# Fastcomp linking works without archive indexes.
if not settings.AUTO_ARCHIVE_INDEXES:
return
if is_ar_file_with_missing_index(archive_file):
diagnostics.warning('emcc', '%s: archive is missing an index; Use emar when creating libraries to ensure an index is created', archive_file)
diagnostics.warning('emcc', '%s: adding index', archive_file)
run_process([shared.LLVM_RANLIB, archive_file])
def get_all_js_syms():
# Runs the js compiler to generate a list of all symbols available in the JS
# libraries. This must be done separately for each linker invocation since the
# list of symbols depends on what settings are used.
# TODO(sbc): Find a way to optimize this. Potentially we could add a super-set
# mode of the js compiler that would generate a list of all possible symbols
# that could be checked in.
old_full = settings.INCLUDE_FULL_LIBRARY
try:
# Temporarily define INCLUDE_FULL_LIBRARY since we want a full list
# of all available JS library functions.
settings.INCLUDE_FULL_LIBRARY = True
settings.ONLY_CALC_JS_SYMBOLS = True
emscripten.generate_struct_info()
glue, forwarded_data = emscripten.compile_settings()
forwarded_json = json.loads(forwarded_data)
library_fns = forwarded_json['Functions']['libraryFunctions']
library_fns_list = []
for name in library_fns:
if shared.is_c_symbol(name):
name = shared.demangle_c_symbol_name(name)
library_fns_list.append(name)
finally:
settings.ONLY_CALC_JS_SYMBOLS = False
settings.INCLUDE_FULL_LIBRARY = old_full
return library_fns_list
def filter_link_flags(flags, using_lld):
def is_supported(f):
if using_lld:
for flag, takes_arg in UNSUPPORTED_LLD_FLAGS.items():
# lld allows various flags to have either a single -foo or double --foo
if f.startswith(flag) or f.startswith('-' + flag):
diagnostics.warning('linkflags', 'ignoring unsupported linker flag: `%s`', f)
return False, takes_arg
return True, False
else:
if f in SUPPORTED_LINKER_FLAGS:
return True, False
# Silently ignore -l/-L flags when not using lld. If using lld allow
# them to pass through the linker
if f.startswith('-l') or f.startswith('-L'):
return False, False
diagnostics.warning('linkflags', 'ignoring unsupported linker flag: `%s`', f)
return False, False
results = []
skip_next = False
for f in flags:
if skip_next:
skip_next = False
continue
keep, skip_next = is_supported(f[1])
if keep:
results.append(f)
return results
def fix_windows_newlines(text):
# Avoid duplicating \r\n to \r\r\n when writing out text.
if WINDOWS:
text = text.replace('\r\n', '\n')
return text
def cxx_to_c_compiler(cxx):
# Convert C++ compiler name into C compiler name
dirname, basename = os.path.split(cxx)
basename = basename.replace('clang++', 'clang').replace('g++', 'gcc').replace('em++', 'emcc')
return os.path.join(dirname, basename)
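# Illustrative examples (comments only, not executed):
#   cxx_to_c_compiler('/usr/bin/clang++') -> '/usr/bin/clang'
#   cxx_to_c_compiler('em++')             -> 'emcc'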
def get_binaryen_passes():
# run the binaryen optimizer in -O2+. in -O0 we don't need it obviously, while
# in -O1 we don't run it as the LLVM optimizer has been run, and it does the
# great majority of the work; not running the binaryen optimizer in that case
# keeps -O1 mostly-optimized while compiling quickly and without rewriting
# DWARF etc.
run_binaryen_optimizer = settings.OPT_LEVEL >= 2
passes = []
# safe heap must run before post-emscripten, so post-emscripten can apply the sbrk ptr
if settings.SAFE_HEAP:
passes += ['--safe-heap']
if settings.MEMORY64 == 2:
passes += ['--memory64-lowering']
if run_binaryen_optimizer:
passes += ['--post-emscripten']
if not settings.EXIT_RUNTIME:
passes += ['--no-exit-runtime']
if run_binaryen_optimizer:
passes += [building.opt_level_to_str(settings.OPT_LEVEL, settings.SHRINK_LEVEL)]
elif settings.STANDALONE_WASM:
# even if not optimizing, make an effort to remove all unused imports and
# exports, to make the wasm as standalone as possible
passes += ['--remove-unused-module-elements']
# when optimizing, use the fact that low memory is never used (1024 is a
# hardcoded value in the binaryen pass)
if run_binaryen_optimizer and settings.GLOBAL_BASE >= 1024:
passes += ['--low-memory-unused']
if settings.AUTODEBUG:
# adding '--flatten' here may make these even more effective
passes += ['--instrument-locals']
passes += ['--log-execution']
passes += ['--instrument-memory']
if settings.LEGALIZE_JS_FFI:
# legalize it again now, as the instrumentation may need it
passes += ['--legalize-js-interface']
if settings.EMULATE_FUNCTION_POINTER_CASTS:
# note that this pass must run before asyncify, as if it runs afterwards we only
# generate the byn$fpcast_emu functions after asyncify runs, and so we wouldn't
# be able to further process them.
passes += ['--fpcast-emu']
if settings.ASYNCIFY:
passes += ['--asyncify']
if settings.ASSERTIONS:
passes += ['--pass-arg=asyncify-asserts']
if settings.ASYNCIFY_ADVISE:
passes += ['--pass-arg=asyncify-verbose']
if settings.ASYNCIFY_IGNORE_INDIRECT:
passes += ['--pass-arg=asyncify-ignore-indirect']
passes += ['--pass-arg=asyncify-imports@%s' % ','.join(settings.ASYNCIFY_IMPORTS)]
# shell escaping can be confusing; try to emit useful warnings
def check_human_readable_list(items):
for item in items:
if item.count('(') != item.count(')'):
logger.warning('''emcc: ASYNCIFY list contains an item without balanced parentheses ("(", ")"):''')
logger.warning(''' ''' + item)
logger.warning('''This may indicate improper escaping that led to splitting inside your names.''')
logger.warning('''Try to quote the entire argument, like this: -s 'ASYNCIFY_ONLY=["foo(int, char)", "bar"]' ''')
break
if settings.ASYNCIFY_REMOVE:
check_human_readable_list(settings.ASYNCIFY_REMOVE)
passes += ['--pass-arg=asyncify-removelist@%s' % ','.join(settings.ASYNCIFY_REMOVE)]
if settings.ASYNCIFY_ADD:
check_human_readable_list(settings.ASYNCIFY_ADD)
passes += ['--pass-arg=asyncify-addlist@%s' % ','.join(settings.ASYNCIFY_ADD)]
if settings.ASYNCIFY_ONLY:
check_human_readable_list(settings.ASYNCIFY_ONLY)
passes += ['--pass-arg=asyncify-onlylist@%s' % ','.join(settings.ASYNCIFY_ONLY)]
if settings.BINARYEN_IGNORE_IMPLICIT_TRAPS:
passes += ['--ignore-implicit-traps']
# normally we can assume the memory, if imported, has not been modified
# beforehand (in fact, in most cases the memory is not even imported anyhow,
# but it is still safe to pass the flag), and is therefore filled with zeros.
# the one exception is dynamic linking of a side module: the main module is ok
# as it is loaded first, but the side module may be assigned memory that was
# previously used.
if run_binaryen_optimizer and not settings.SIDE_MODULE:
passes += ['--zero-filled-memory']
if settings.BINARYEN_EXTRA_PASSES:
# BINARYEN_EXTRA_PASSES is comma-separated, and we support both '-'-prefixed and
# unprefixed pass names
extras = settings.BINARYEN_EXTRA_PASSES.split(',')
passes += [('--' + p) if p[0] != '-' else p for p in extras if p]
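# Illustrative example (comment only, not executed):
#   BINARYEN_EXTRA_PASSES='dce,--flatten' becomes ['--dce', '--flatten'] here.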
return passes
def make_js_executable(script):
src = open(script).read()
cmd = shared.shlex_join(config.JS_ENGINE)
if not os.path.isabs(config.JS_ENGINE[0]):
# TODO: use whereis etc. And how about non-*NIX?
cmd = '/usr/bin/env -S ' + cmd
logger.debug('adding `#!` to JavaScript file: %s' % cmd)
# add shebang
with open(script, 'w') as f:
f.write('#!%s\n' % cmd)
f.write(src)
try:
os.chmod(script, stat.S_IMODE(os.stat(script).st_mode) | stat.S_IXUSR) # make executable
except OSError:
pass # can fail if e.g. writing the executable to /dev/null
def do_split_module(wasm_file):
os.rename(wasm_file, wasm_file + '.orig')
args = ['--instrument']
building.run_binaryen_command('wasm-split', wasm_file + '.orig', outfile=wasm_file, args=args)
def is_dash_s_for_emcc(args, i):
# -s OPT=VALUE or -s OPT or -sOPT are all interpreted as emscripten flags.
# -s by itself is a linker option (alias for --strip-all)
if args[i] == '-s':
if len(args) <= i + 1:
return False
arg = args[i + 1]
else:
arg = args[i][2:]
arg = arg.split('=')[0]
return arg.isidentifier() and arg.isupper()
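# Illustrative examples (comments only, not executed):
#   ['-s', 'ASSERTIONS=1'], i=0 -> True   (emscripten setting)
#   ['-sEXPORT_ALL'], i=0       -> True
#   ['-s'] as the last arg      -> False  (treated as the linker's --strip-all)
#   ['-s', 'foo.o'], i=0        -> False  ('foo.o' is not an UPPERCASE identifier)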
def filter_out_dynamic_libs(options, inputs):
# Filters out "fake" dynamic libraries that are really just intermediate object files.
def check(input_file):
if get_file_suffix(input_file) in DYNAMICLIB_ENDINGS:
if not options.ignore_dynamic_linking:
diagnostics.warning('emcc', 'ignoring dynamic library %s because not compiling to JS or HTML, remember to link it when compiling to JS or HTML at the end', os.path.basename(input_file))
return False
else:
return True
return [f for f in inputs if check(f[1])]
def filter_out_duplicate_dynamic_libs(inputs):
seen = set()
# Filter out duplicate "fake" shared libraries (intermediate object files).
# See test_core.py:test_redundant_link
def check(input_file):
if get_file_suffix(input_file) in DYNAMICLIB_ENDINGS:
abspath = os.path.abspath(input_file)
if abspath in seen:
return False
seen.add(abspath)
return True
return [f for f in inputs if check(f[1])]
def process_dynamic_libs(dylibs):
for dylib in dylibs:
imports = webassembly.get_imports(dylib)
new_exports = []
for imp in imports:
if imp.kind not in (webassembly.ExternType.FUNC, webassembly.ExternType.GLOBAL):
continue
new_exports.append(imp.field)
logger.debug('Adding exports based on `%s`: %s', dylib, new_exports)
settings.EXPORTED_FUNCTIONS.extend(shared.asmjs_mangle(e) for e in new_exports)
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE.extend(new_exports)
building.user_requested_exports.update(shared.asmjs_mangle(e) for e in new_exports)
exports = webassembly.get_exports(dylib)
for export in exports:
settings.SIDE_MODULE_EXPORTS.append(export.name)
def unmangle_symbols_from_cmdline(symbols):
def unmangle(x):
return x.replace('.', ' ').replace('#', '&').replace('?', ',')
if type(symbols) is list:
return [unmangle(x) for x in symbols]
return unmangle(symbols)
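# Illustrative examples of the un-mangling above (comments only, not executed):
#   'foo(int.char*)' -> 'foo(int char*)'
#   'get?set'        -> 'get,set'
#   'operator#'      -> 'operator&'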
def parse_s_args(args):
settings_changes = []
for i in range(len(args)):
if args[i].startswith('-s'):
if is_dash_s_for_emcc(args, i):
if args[i] == '-s':
key = args[i + 1]
args[i + 1] = ''
else:
key = args[i][2:]
args[i] = ''
# If no '=' is specified, default to 1
if '=' not in key:
key += '=1'
# Special handling of browser version targets. A version -1 means that the specific version
# is not supported at all. Replace those with INT32_MAX to make it possible to compare e.g.
# #if MIN_FIREFOX_VERSION < 68
if re.match(r'MIN_.*_VERSION(=.*)?', key):
try:
if int(key.split('=')[1]) < 0:
key = key.split('=')[0] + '=0x7FFFFFFF'
except Exception:
pass
settings_changes.append(key)
newargs = [a for a in args if a]
return (settings_changes, newargs)
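# Illustrative example (comments only, not executed):
#   parse_s_args(['-s', 'ASSERTIONS=1', '-sMIN_FIREFOX_VERSION=-1', 'a.c'])
# yields settings_changes == ['ASSERTIONS=1', 'MIN_FIREFOX_VERSION=0x7FFFFFFF']
# and newargs == ['a.c'].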
def emsdk_ldflags(user_args):
if os.environ.get('EMMAKEN_NO_SDK'):
return []
library_paths = [
shared.Cache.get_lib_dir(absolute=True)
]
ldflags = ['-L' + l for l in library_paths]
if '-nostdlib' in user_args:
return ldflags
# TODO(sbc): Add system libraries here rather than conditionally including
# them via .symbols files.
libraries = []
ldflags += ['-l' + l for l in libraries]
return ldflags
def emsdk_cflags(user_args):
cflags = ['--sysroot=' + shared.Cache.get_sysroot_dir(absolute=True)]
def array_contains_any_of(hay, needles):
for n in needles:
if n in hay:
return True
if array_contains_any_of(user_args, SIMD_INTEL_FEATURE_TOWER) or array_contains_any_of(user_args, SIMD_NEON_FLAGS):
if '-msimd128' not in user_args:
exit_with_error('Passing any of ' + ', '.join(SIMD_INTEL_FEATURE_TOWER + SIMD_NEON_FLAGS) + ' flags also requires passing -msimd128!')
cflags += ['-D__SSE__=1']
if array_contains_any_of(user_args, SIMD_INTEL_FEATURE_TOWER[1:]):
cflags += ['-D__SSE2__=1']
if array_contains_any_of(user_args, SIMD_INTEL_FEATURE_TOWER[2:]):
cflags += ['-D__SSE3__=1']
if array_contains_any_of(user_args, SIMD_INTEL_FEATURE_TOWER[3:]):
cflags += ['-D__SSSE3__=1']
if array_contains_any_of(user_args, SIMD_INTEL_FEATURE_TOWER[4:]):
cflags += ['-D__SSE4_1__=1']
if array_contains_any_of(user_args, SIMD_INTEL_FEATURE_TOWER[5:]):
cflags += ['-D__SSE4_2__=1']
if array_contains_any_of(user_args, SIMD_INTEL_FEATURE_TOWER[6:]):
cflags += ['-D__AVX__=1']
if array_contains_any_of(user_args, SIMD_NEON_FLAGS):
cflags += ['-D__ARM_NEON__=1']
return cflags + ['-Xclang', '-iwithsysroot' + os.path.join('/include', 'compat')]
def get_clang_flags():
return ['-target', get_llvm_target()]
def get_llvm_target():
if settings.MEMORY64:
return 'wasm64-unknown-emscripten'
else:
return 'wasm32-unknown-emscripten'
cflags = None
def get_cflags(options, user_args):
global cflags
if cflags:
return cflags
# Flags we pass to the compiler when building C/C++ code
# We add these to the user's flags (newargs), but not when building .s or .S assembly files
cflags = get_clang_flags()
if options.tracing:
cflags.append('-D__EMSCRIPTEN_TRACING__=1')
if settings.USE_PTHREADS:
cflags.append('-D__EMSCRIPTEN_PTHREADS__=1')
if not settings.STRICT:
# The preprocessor define EMSCRIPTEN is deprecated. Don't pass it to code
# in strict mode. Code should use the define __EMSCRIPTEN__ instead.
cflags.append('-DEMSCRIPTEN')
# if exception catching is disabled, we can prevent that code from being
# generated in the frontend
if settings.DISABLE_EXCEPTION_CATCHING and not settings.EXCEPTION_HANDLING:
cflags.append('-fignore-exceptions')
if settings.INLINING_LIMIT:
cflags.append('-fno-inline-functions')
if settings.RELOCATABLE:
cflags.append('-fPIC')
cflags.append('-fvisibility=default')
if settings.LTO:
cflags.append('-flto=' + settings.LTO)
else:
# With LTO mode these args get passed instead
# at link time when the backend runs.
for a in building.llvm_backend_args():
cflags += ['-mllvm', a]
# Set the LIBCPP ABI version to at least 2 so that we get nicely aligned string
# data and other nice fixes.
cflags += [# '-fno-threadsafe-statics', # disabled due to issue 1289
'-D__EMSCRIPTEN_major__=' + str(shared.EMSCRIPTEN_VERSION_MAJOR),
'-D__EMSCRIPTEN_minor__=' + str(shared.EMSCRIPTEN_VERSION_MINOR),
'-D__EMSCRIPTEN_tiny__=' + str(shared.EMSCRIPTEN_VERSION_TINY),
'-D_LIBCPP_ABI_VERSION=2']
# For compatibility with the fastcomp compiler that defined these
cflags += ['-Dunix',
'-D__unix',
'-D__unix__']
# Changes to default clang behavior
# Implicit functions can cause horribly confusing function pointer type errors, see #2175
# If your codebase really needs them - very unrecommended! - you can disable the error with
# -Wno-error=implicit-function-declaration
# or disable even a warning about it with
# -Wno-implicit-function-declaration
cflags += ['-Werror=implicit-function-declaration']
system_libs.add_ports_cflags(cflags, settings)
if os.environ.get('EMMAKEN_NO_SDK') or '-nostdinc' in user_args:
return cflags
cflags += emsdk_cflags(user_args)
return cflags
def get_file_suffix(filename):
"""Parses the essential suffix of a filename, discarding Unix-style version
numbers in the name. For example for 'libz.so.1.2.8' returns '.so'"""
if filename in SPECIAL_ENDINGLESS_FILENAMES:
return filename
while filename:
filename, suffix = os.path.splitext(filename)
if not suffix[1:].isdigit():
return suffix
return ''
def get_secondary_target(target, ext):
# Depending on the output format emscripten creates zero or more secondary
# output files (e.g. the .wasm file when creating JS output, or the
# .js and the .wasm file when creating html output).
# This function names the secondary output files, while ensuring they
# never collide with the primary one.
base = unsuffixed(target)
if get_file_suffix(target) == ext:
base += '_'
return base + ext
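# Illustrative examples (comments only, not executed):
#   get_secondary_target('hello.js', '.wasm')   -> 'hello.wasm'
#   get_secondary_target('hello.wasm', '.wasm') -> 'hello_.wasm'  (avoids clobbering the primary output)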
def in_temp(name):
temp_dir = shared.get_emscripten_temp_dir()
return os.path.join(temp_dir, os.path.basename(name))
run_via_emxx = False
#
# Main run() function
#
def run(args):
# Additional compiler flags that we treat as if they were passed to us on the
# commandline
EMCC_CFLAGS = os.environ.get('EMCC_CFLAGS')
if DEBUG:
cmd = shared.shlex_join(args)
if EMCC_CFLAGS:
cmd += ' + ' + EMCC_CFLAGS
logger.warning('invocation: ' + cmd + ' (in ' + os.getcwd() + ')')
if EMCC_CFLAGS:
args.extend(shlex.split(EMCC_CFLAGS))
# Strip args[0] (program name)
args = args[1:]
misc_temp_files = shared.configuration.get_temp_files()
# Handle some global flags
# read response files very early on
try:
args = substitute_response_files(args)
except IOError as e:
exit_with_error(e)
if '--help' in args:
# Documentation for emcc and its options must be updated in:
# site/source/docs/tools_reference/emcc.rst
# This then gets built (via: `make -C site text`) to:
# site/build/text/docs/tools_reference/emcc.txt
# This then needs to be copied to its final home in docs/emcc.txt from where
# we read it here. We have CI rules that ensure it's always up-to-date.
with open(shared.path_from_root('docs', 'emcc.txt'), 'r') as f:
print(f.read())
print('''
------------------------------------------------------------------
emcc: supported targets: llvm bitcode, WebAssembly, NOT elf
(autoconf likes to see elf above to enable shared object support)
''')
return 0
if '--version' in args:
# if the emscripten folder is not a git repo, don't run git show - that can
# look up and find the revision in a parent directory that is a git repo
revision = ''
if os.path.exists(shared.path_from_root('.git')):
revision = run_process(['git', 'rev-parse', 'HEAD'], stdout=PIPE, stderr=PIPE, cwd=shared.path_from_root()).stdout.strip()
elif os.path.exists(shared.path_from_root('emscripten-revision.txt')):
revision = open(shared.path_from_root('emscripten-revision.txt')).read().strip()
if revision:
revision = ' (%s)' % revision
print('''%s%s
Copyright (C) 2014 the Emscripten authors (see AUTHORS.txt)
This is free and open source software under the MIT license.
There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
''' % (version_string(), revision))
return 0
if run_via_emxx:
clang = shared.CLANG_CXX
else:
clang = shared.CLANG_CC
if len(args) == 1 and args[0] == '-v': # -v with no inputs
# autoconf likes to see 'GNU' in the output to enable shared object support
print(version_string(), file=sys.stderr)
return shared.check_call([clang, '-v'] + get_clang_flags(), check=False).returncode
if '-dumpmachine' in args:
print(get_llvm_target())
return 0
if '-dumpversion' in args: # gcc's doc states "Print the compiler version [...] and don't do anything else."
print(shared.EMSCRIPTEN_VERSION)
return 0
if '--cflags' in args:
# fake running the command, to see the full args we pass to clang
args = [x for x in args if x != '--cflags']
with misc_temp_files.get_file(suffix='.o') as temp_target:
input_file = 'hello_world.c'
cmd = [shared.PYTHON, sys.argv[0], shared.path_from_root('tests', input_file), '-v', '-c', '-o', temp_target] + args
proc = run_process(cmd, stderr=PIPE, check=False)
if proc.returncode != 0:
print(proc.stderr)
exit_with_error('error getting cflags')
lines = [x for x in proc.stderr.splitlines() if clang in x and input_file in x]
parts = shlex.split(lines[0].replace('\\', '\\\\'))
parts = [x for x in parts if x not in ['-c', '-o', '-v', '-emit-llvm'] and input_file not in x and temp_target not in x]
print(shared.shlex_join(parts[1:]))
return 0
shared.check_sanity()
def get_language_mode(args):
return_next = False
for item in args:
if return_next:
return item
if item == '-x':
return_next = True
continue
if item.startswith('-x'):
return item[2:]
return ''
language_mode = get_language_mode(args)
EMMAKEN_CFLAGS = os.environ.get('EMMAKEN_CFLAGS')
if EMMAKEN_CFLAGS:
args += shlex.split(EMMAKEN_CFLAGS)
# ---------------- Utilities ---------------
seen_names = {}
def uniquename(name):
if name not in seen_names:
seen_names[name] = str(len(seen_names))
return unsuffixed(name) + '_' + seen_names[name] + shared.suffix(name)
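# Illustrative example (comments only, not executed), starting from an empty cache:
#   uniquename('a.cpp') -> 'a_0.cpp'
#   uniquename('b.cpp') -> 'b_1.cpp'
#   uniquename('a.cpp') -> 'a_0.cpp'   (the same input keeps its first id)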
# ---------------- End configs -------------
with ToolchainProfiler.profile_block('parse arguments and setup'):
## Parse args
newargs = args.copy()
# Scan and strip emscripten specific cmdline warning flags.
# This needs to run before other cmdline flags have been parsed, so that
# warnings are properly printed during arg parse.
newargs = diagnostics.capture_warnings(newargs)
for i in range(len(newargs)):
if newargs[i] in ('-l', '-L', '-I'):
# Scan for individual -l/-L/-I arguments and concatenate the next arg on
# if there is no suffix
newargs[i] += newargs[i + 1]
newargs[i + 1] = ''
options, settings_changes, user_js_defines, newargs = parse_args(newargs)
if options.post_link or options.oformat == OFormat.BARE:
diagnostics.warning('experimental', '--oformat=bare/--post-link are experimental and subject to change.')
if '-print-search-dirs' in newargs:
return run_process([clang, '-print-search-dirs'], check=False).returncode
if options.emrun:
options.pre_js += open(shared.path_from_root('src', 'emrun_prejs.js')).read() + '\n'
options.post_js += open(shared.path_from_root('src', 'emrun_postjs.js')).read() + '\n'
# emrun mode waits on program exit
settings.EXIT_RUNTIME = 1
if options.cpu_profiler:
options.post_js += open(shared.path_from_root('src', 'cpuprofiler.js')).read() + '\n'
if options.memory_profiler:
settings.MEMORYPROFILER = 1
if options.thread_profiler:
options.post_js += open(shared.path_from_root('src', 'threadprofiler.js')).read() + '\n'
if options.memory_init_file is None:
options.memory_init_file = settings.OPT_LEVEL >= 2
# TODO: support source maps with js_transform
if options.js_transform and settings.GENERATE_SOURCE_MAP:
logger.warning('disabling source maps because a js transform is being done')
settings.GENERATE_SOURCE_MAP = 0
explicit_settings_changes, newargs = parse_s_args(newargs)
settings_changes += explicit_settings_changes
# Find input files
# These three arrays are used to store arguments of different types for
# type-specific processing. In order to shuffle the arguments back together
# after processing, all of these arrays hold tuples (original_index, value).
# Note that the index part of the tuple can have a fractional part for input
# arguments that expand into multiple processed arguments, as in -Wl,-f1,-f2.
input_files = []
libs = []
link_flags = []
has_header_inputs = False
lib_dirs = []
has_dash_c = '-c' in newargs
has_dash_S = '-S' in newargs
has_dash_E = '-E' in newargs
compile_only = has_dash_c or has_dash_S or has_dash_E
def add_link_flag(i, f):
if f.startswith('-l'):
libs.append((i, f[2:]))
if f.startswith('-L'):
lib_dirs.append(f[2:])
link_flags.append((i, f))
# find input files with a simple heuristic. we should really analyze
# based on a full understanding of gcc params; right now we just assume that
# what is left contains no more |-x OPT| things
skip = False
for i in range(len(newargs)):
if skip:
skip = False
continue
arg = newargs[i]
if arg in ('-MT', '-MF', '-MJ', '-MQ', '-D', '-U', '-o', '-x',
'-Xpreprocessor', '-include', '-imacros', '-idirafter',
'-iprefix', '-iwithprefix', '-iwithprefixbefore',
'-isysroot', '-imultilib', '-A', '-isystem', '-iquote',
'-install_name', '-compatibility_version',
'-current_version', '-I', '-L', '-include-pch',
'-Xlinker', '-Xclang'):
skip = True
if not arg.startswith('-'):
# we already removed -o <target>, so all these should be inputs
newargs[i] = ''
# os.devnull should always be reported as existing but there is a bug in Windows
# python before 3.8:
# https://bugs.python.org/issue1311
if not os.path.exists(arg) and arg != os.devnull:
exit_with_error('%s: No such file or directory ("%s" was expected to be an input file, based on the commandline arguments provided)', arg, arg)
file_suffix = get_file_suffix(arg)
if file_suffix in HEADER_ENDINGS:
has_header_inputs = True
if file_suffix in STATICLIB_ENDINGS and not building.is_ar(arg):
if building.is_bitcode(arg):
message = arg + ': File has a suffix of a static library ' + str(STATICLIB_ENDINGS) + ', but instead is an LLVM bitcode file! When linking LLVM bitcode files use .bc or .o.'
else:
message = arg + ': Unknown format, not a static library!'
exit_with_error(message)
if file_suffix in DYNAMICLIB_ENDINGS and not building.is_bitcode(arg) and not building.is_wasm(arg):
# For shared libraries that are neither bitcode nor wasm, assume it's a local native
# library and attempt to find a library by the same name in our own library path.
# TODO(sbc): Do we really need this feature? See test_other.py:test_local_link
libname = unsuffixed_basename(arg).lstrip('lib')
libs.append((i, libname))
else:
input_files.append((i, arg))
elif arg.startswith('-L'):
add_link_flag(i, arg)
newargs[i] = ''
elif arg.startswith('-l'):
add_link_flag(i, arg)
newargs[i] = ''
elif arg.startswith('-Wl,'):
# Multiple comma separated link flags can be specified. Create fake
# fractional indices for these: -Wl,a,b,c,d at index 4 becomes:
# (4, a), (4.25, b), (4.5, c), (4.75, d)
link_flags_to_add = arg.split(',')[1:]
for flag_index, flag in enumerate(link_flags_to_add):
add_link_flag(i + float(flag_index) / len(link_flags_to_add), flag)
newargs[i] = ''
elif arg == '-Xlinker':
add_link_flag(i + 1, newargs[i + 1])
newargs[i] = ''
newargs[i + 1] = ''
elif arg == '-s':
# -s and some other compiler flags are normally passed onto the linker
# TODO(sbc): Pass this and other flags through when using lld
# link_flags.append((i, arg))
newargs[i] = ''
elif arg == '-':
input_files.append((i, arg))
newargs[i] = ''
if not input_files and not link_flags:
exit_with_error('no input files')
newargs = [a for a in newargs if a]
settings_map = {}
for s in settings_changes:
key, value = s.split('=', 1)
settings_map[key] = value
# Libraries are searched before settings_changes are applied, so apply the
# value for STRICT from command line already now.
strict_cmdline = settings_map.get('STRICT')
if strict_cmdline:
settings.STRICT = int(strict_cmdline)
# Apply optimization level settings
if settings.OPT_LEVEL >= 1:
settings.ASSERTIONS = 0
if settings.SHRINK_LEVEL >= 2:
settings.EVAL_CTORS = 1
# For users that opt out of WARN_ON_UNDEFINED_SYMBOLS we assume they also
# want to opt out of ERROR_ON_UNDEFINED_SYMBOLS.
if settings_map.get('WARN_ON_UNDEFINED_SYMBOLS') == '0':
settings.ERROR_ON_UNDEFINED_SYMBOLS = 0
if settings.MINIMAL_RUNTIME or settings_map.get('MINIMAL_RUNTIME') in ('1', '2'):
# Remove the default exported functions 'malloc', 'free', etc.; those should only be linked in if used
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE = []
# Apply -s settings in newargs here (after optimization levels, so they can override them)
apply_settings(settings_map)
specified_target = options.output_file
if os.environ.get('EMMAKEN_JUST_CONFIGURE') or 'conftest.c' in args:
# configure tests want a more shell-like style, where we emit return codes on exit()
settings.EXIT_RUNTIME = 1
# use node.js raw filesystem access, to behave just like a native executable
settings.NODERAWFS = 1
# Add `#!` line to output JS and make it executable.
options.executable = True
# Autoconf expects the executable output file to be called `a.out`
default_target_name = 'a.out'
elif settings.SIDE_MODULE:
default_target_name = 'a.out.wasm'
else:
default_target_name = 'a.out.js'
# specified_target is the user-specified one, target is what we will generate
if specified_target:
target = specified_target
# check for the existence of the output directory now, to avoid having
# to do so repeatedly when each of the various output files (.mem, .wasm,
# etc) are written. This gives a more useful error message than the
# IOError and python backtrace that users would otherwise see.
dirname = os.path.dirname(target)
if dirname and not os.path.isdir(dirname):
exit_with_error("specified output file (%s) is in a directory that does not exist" % target)
else:
target = default_target_name
settings.TARGET_BASENAME = target_basename = unsuffixed_basename(target)
if settings.EXTRA_EXPORTED_RUNTIME_METHODS:
diagnostics.warning('deprecated', 'EXTRA_EXPORTED_RUNTIME_METHODS is deprecated, please use EXPORTED_RUNTIME_METHODS instead')
settings.EXPORTED_RUNTIME_METHODS += settings.EXTRA_EXPORTED_RUNTIME_METHODS
final_suffix = get_file_suffix(target)
if has_dash_c or has_dash_S or has_dash_E or '-M' in newargs or '-MM' in newargs:
if has_dash_c:
if '-emit-llvm' in newargs:
options.default_object_extension = '.bc'
elif has_dash_S:
if '-emit-llvm' in newargs:
options.default_object_extension = '.ll'
else:
options.default_object_extension = '.s'
elif '-M' in newargs or '-MM' in newargs:
options.default_object_extension = '.mout' # not bitcode, not js; but just dependency rule of the input file
if specified_target:
if len(input_files) > 1:
exit_with_error('cannot specify -o with -c/-S/-E/-M and multiple source files')
else:
target = target_basename + options.default_object_extension
# If no output format was specified we try to infer the format based on
# the output filename extension.
if not options.oformat:
if settings.SIDE_MODULE or final_suffix == '.wasm':
options.oformat = OFormat.WASM
elif final_suffix == '.mjs':
options.oformat = OFormat.MJS
elif final_suffix == '.html':
options.oformat = OFormat.HTML
else:
options.oformat = OFormat.JS
if options.oformat == OFormat.MJS:
settings.EXPORT_ES6 = 1
settings.MODULARIZE = 1
if options.oformat in (OFormat.WASM, OFormat.BARE):
# If the user asks directly for a wasm file then this *is* the target
wasm_target = target
else:
# Otherwise the wasm file is produced alongside the final target.
wasm_target = get_secondary_target(target, '.wasm')
# Apply user -jsD settings
for s in user_js_defines:
settings[s[0]] = s[1]
shared.verify_settings()
if (options.oformat == OFormat.WASM or settings.PURE_WASI) and not settings.SIDE_MODULE:
# if the output is just a wasm file, it will normally be a standalone one,
# as there is no JS. An exception is side modules, as we can't tell at
# compile time whether JS will be involved or not - the main module may
# have JS, and the side module is expected to link against that.
# we also do not support standalone mode in fastcomp.
settings.STANDALONE_WASM = 1
if settings.LZ4:
settings.EXPORTED_RUNTIME_METHODS += ['LZ4']
if settings.WASM2C:
# wasm2c only makes sense with standalone wasm - there will be no JS,
# just wasm and then C
settings.STANDALONE_WASM = 1
# wasm2c doesn't need any special handling of i64, we have proper i64
# handling on the FFI boundary, which is exactly like the case of JS with
# BigInt support
settings.WASM_BIGINT = 1
if options.no_entry:
settings.EXPECT_MAIN = 0
elif settings.STANDALONE_WASM:
if '_main' in settings.EXPORTED_FUNCTIONS:
# TODO(sbc): Make this into a warning?
logger.debug('including `_main` in EXPORTED_FUNCTIONS is not necessary in standalone mode')
else:
# In normal non-standalone mode we have special handling of `_main` in EXPORTED_FUNCTIONS.
# 1. If the user specifies exports, but doesn't include `_main` we assume they want to build a
# reactor.
# 2. If the user doesn't export anything we default to exporting `_main` (unless `--no-entry`
# is specified (see above).
if 'EXPORTED_FUNCTIONS' in settings_map:
if '_main' not in settings.USER_EXPORTED_FUNCTIONS:
settings.EXPECT_MAIN = 0
else:
assert not settings.EXPORTED_FUNCTIONS
settings.EXPORTED_FUNCTIONS = ['_main']
if settings.STANDALONE_WASM:
# In STANDALONE_WASM mode we either build a command or a reactor.
# See https://github.com/WebAssembly/WASI/blob/main/design/application-abi.md
# For a command we always want EXIT_RUNTIME=1
# For a reactor we always want EXIT_RUNTIME=0
if 'EXIT_RUNTIME' in settings_map:
exit_with_error('Explicitly setting EXIT_RUNTIME is not compatible with STANDALONE_WASM. EXIT_RUNTIME will always be True for programs (with a main function) and False for reactors (no main function).')
settings.EXIT_RUNTIME = settings.EXPECT_MAIN
# Note the exports the user requested
building.user_requested_exports.update(settings.EXPORTED_FUNCTIONS)
def default_setting(name, new_default):
if name not in settings_map:
setattr(settings, name, new_default)
# -s ASSERTIONS=1 implies basic stack overflow checks, and ASSERTIONS=2
# implies full stack overflow checks.
if settings.ASSERTIONS:
# However, we don't set this default in PURE_WASI, or when we are linking without standard
# libraries because STACK_OVERFLOW_CHECK depends on emscripten_stack_get_end which is defined
# in libcompiler-rt.
if not settings.PURE_WASI and '-nostdlib' not in newargs and '-nodefaultlibs' not in newargs:
default_setting('STACK_OVERFLOW_CHECK', max(settings.ASSERTIONS, settings.STACK_OVERFLOW_CHECK))
if settings.LLD_REPORT_UNDEFINED or settings.STANDALONE_WASM:
# Reporting undefined symbols at wasm-ld time requires us to know if we have a `main` function
# or not, as does standalone wasm mode.
# TODO(sbc): Remove this once this becomes the default
settings.IGNORE_MISSING_MAIN = 0
# It is unlikely that developers targeting "native web" APIs with MINIMAL_RUNTIME need
# errno support by default.
if settings.MINIMAL_RUNTIME:
default_setting('SUPPORT_ERRNO', 0)
if settings.STRICT:
default_setting('STRICT_JS', 1)
default_setting('AUTO_JS_LIBRARIES', 0)
default_setting('AUTO_NATIVE_LIBRARIES', 0)
default_setting('AUTO_ARCHIVE_INDEXES', 0)
default_setting('IGNORE_MISSING_MAIN', 0)
default_setting('DEFAULT_TO_CXX', 0)
# Default to TEXTDECODER=2 (always use TextDecoder to decode UTF-8 strings)
# in -Oz builds, since custom decoder for UTF-8 takes up space.
# In pthreads enabled builds, TEXTDECODER==2 may not work, see
# https://github.com/whatwg/encoding/issues/172
# When supporting shell environments, do not do this as TextDecoder is not
# widely supported there.
if settings.SHRINK_LEVEL >= 2 and not settings.USE_PTHREADS and \
not settings.ENVIRONMENT_MAY_BE_SHELL:
default_setting('TEXTDECODER', 2)
# If set to 1, we will run the autodebugger (the automatic debugging tool, see
# tools/autodebugger). Note that this will disable inclusion of libraries. This
# is useful because including dlmalloc makes it hard to compare native and js
# builds
if os.environ.get('EMCC_AUTODEBUG'):
settings.AUTODEBUG = 1
# Use settings
if settings.DEBUG_LEVEL > 1 and options.use_closure_compiler:
diagnostics.warning('emcc', 'disabling closure because debug info was requested')
options.use_closure_compiler = False
if settings.WASM == 2 and settings.SINGLE_FILE:
exit_with_error('cannot have both WASM=2 and SINGLE_FILE enabled at the same time')
if settings.SEPARATE_DWARF and settings.WASM2JS:
exit_with_error('cannot have both SEPARATE_DWARF and WASM2JS at the same time (as there is no wasm file)')
if settings.MINIMAL_RUNTIME_STREAMING_WASM_COMPILATION and settings.MINIMAL_RUNTIME_STREAMING_WASM_INSTANTIATION:
exit_with_error('MINIMAL_RUNTIME_STREAMING_WASM_COMPILATION and MINIMAL_RUNTIME_STREAMING_WASM_INSTANTIATION are mutually exclusive!')
if options.emrun:
if settings.MINIMAL_RUNTIME:
exit_with_error('--emrun is not compatible with -s MINIMAL_RUNTIME=1')
settings.EXPORTED_RUNTIME_METHODS.append('addOnExit')
if options.use_closure_compiler:
settings.USE_CLOSURE_COMPILER = options.use_closure_compiler
if settings.CLOSURE_WARNINGS not in ['quiet', 'warn', 'error']:
exit_with_error('Invalid option -s CLOSURE_WARNINGS=%s specified! Allowed values are "quiet", "warn" or "error".' % settings.CLOSURE_WARNINGS)
# Include dynCall() function by default in DYNCALLS builds in classic runtime; in MINIMAL_RUNTIME, must add this explicitly.
if settings.DYNCALLS and not settings.MINIMAL_RUNTIME:
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$dynCall']
if settings.MAIN_MODULE:
assert not settings.SIDE_MODULE
if settings.MAIN_MODULE == 1:
settings.INCLUDE_FULL_LIBRARY = 1
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$preloadDylibs']
elif settings.SIDE_MODULE:
assert not settings.MAIN_MODULE
# memory init file is not supported with side modules, must be executable synchronously (for dlopen)
options.memory_init_file = False
# If we are including the entire JS library then we know for sure we will, by definition,
# require all the reverse dependencies.
if settings.INCLUDE_FULL_LIBRARY:
default_setting('REVERSE_DEPS', 'all')
if settings.MAIN_MODULE or settings.SIDE_MODULE:
if settings.MAIN_MODULE == 1 or settings.SIDE_MODULE == 1:
settings.LINKABLE = 1
settings.EXPORT_ALL = 1
settings.RELOCATABLE = 1
if settings.MAIN_MODULE:
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$getDylinkMetadata']
if settings.RELOCATABLE:
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += [
'$reportUndefinedSymbols',
'$relocateExports',
'$GOTHandler',
'__heap_base',
'__stack_pointer',
]
settings.EXPORTED_FUNCTIONS += [
# This needs to be exported on the Module object too so it's visible
# to side modules too.
'___heap_base',
# Unconditional dependency in library_dylink.js
'_setThrew',
]
if settings.MINIMAL_RUNTIME:
exit_with_error('MINIMAL_RUNTIME is not compatible with relocatable output')
if settings.WASM2JS:
exit_with_error('WASM2JS is not compatible with relocatable output')
# shared modules need memory utilities to allocate their memory
settings.EXPORTED_RUNTIME_METHODS += ['allocate']
settings.ALLOW_TABLE_GROWTH = 1
# various settings require sbrk() access
if settings.DETERMINISTIC or \
settings.EMSCRIPTEN_TRACING or \
settings.MALLOC == 'emmalloc' or \
settings.SAFE_HEAP or \
settings.MEMORYPROFILER:
settings.EXPORTED_FUNCTIONS += ['_sbrk']
if settings.MEMORYPROFILER:
settings.EXPORTED_FUNCTIONS += ['___heap_base',
'_emscripten_stack_get_base',
'_emscripten_stack_get_end',
'_emscripten_stack_get_current']
if settings.ASYNCIFY_LAZY_LOAD_CODE:
settings.ASYNCIFY = 1
if settings.ASYNCIFY:
# See: https://github.com/emscripten-core/emscripten/issues/12065
# See: https://github.com/emscripten-core/emscripten/issues/12066
settings.DYNCALLS = 1
settings.EXPORTED_FUNCTIONS += ['_emscripten_stack_get_base',
'_emscripten_stack_get_end',
'_emscripten_stack_set_limits']
settings.ASYNCIFY_ADD = unmangle_symbols_from_cmdline(settings.ASYNCIFY_ADD)
settings.ASYNCIFY_REMOVE = unmangle_symbols_from_cmdline(settings.ASYNCIFY_REMOVE)
settings.ASYNCIFY_ONLY = unmangle_symbols_from_cmdline(settings.ASYNCIFY_ONLY)
# SSEx is implemented on top of SIMD128 instruction set, but do not pass SSE flags to LLVM
# so it won't think about generating native x86 SSE code.
newargs = [x for x in newargs if x not in SIMD_INTEL_FEATURE_TOWER and x not in SIMD_NEON_FLAGS]
link_to_object = False
if options.shared or options.relocatable:
# Until we have a better story for actually producing runtime shared libraries
# we support a compatibility mode where shared libraries are actually just
# object files linked with `wasm-ld --relocatable` or `llvm-link` in the case
# of LTO.
if final_suffix in EXECUTABLE_ENDINGS:
diagnostics.warning('emcc', '-shared/-r used with executable output suffix. This behaviour is deprecated. Please remove -shared/-r to build an executable or avoid the executable suffix (%s) when building object files.' % final_suffix)
else:
if options.shared:
diagnostics.warning('emcc', 'linking a library with `-shared` will emit a static object file. This is a form of emulation to support existing build systems. If you want to build a runtime shared library use the SIDE_MODULE setting.')
link_to_object = True
if settings.SUPPORT_BIG_ENDIAN:
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += [
'$LE_HEAP_STORE_U16',
'$LE_HEAP_STORE_I16',
'$LE_HEAP_STORE_U32',
'$LE_HEAP_STORE_I32',
'$LE_HEAP_STORE_F32',
'$LE_HEAP_STORE_F64',
'$LE_HEAP_LOAD_U16',
'$LE_HEAP_LOAD_I16',
'$LE_HEAP_LOAD_U32',
'$LE_HEAP_LOAD_I32',
'$LE_HEAP_LOAD_F32',
'$LE_HEAP_LOAD_F64'
]
if settings.STACK_OVERFLOW_CHECK:
# The basic writeStackCookie/checkStackCookie mechanism just needs to know where the end
# of the stack is.
settings.EXPORTED_FUNCTIONS += ['_emscripten_stack_get_end', '_emscripten_stack_get_free']
if settings.STACK_OVERFLOW_CHECK == 2:
# The full checking done by binaryen's `StackCheck` pass also needs to know the base of the
# stack.
settings.EXPORTED_FUNCTIONS += ['_emscripten_stack_get_base']
# We call one of these two functions during startup which caches the stack limits
# in wasm globals allowing get_base/get_free to be super fast.
# See compiler-rt/stack_limits.S.
if settings.RELOCATABLE:
settings.EXPORTED_FUNCTIONS += ['_emscripten_stack_set_limits']
else:
settings.EXPORTED_FUNCTIONS += ['_emscripten_stack_init']
if settings.MODULARIZE:
if settings.PROXY_TO_WORKER:
exit_with_error('-s MODULARIZE=1 is not compatible with --proxy-to-worker (if you want to run in a worker with -s MODULARIZE=1, you likely want to do the worker side setup manually)')
# in MINIMAL_RUNTIME we may not need to emit the Promise code, as the
# HTML output creates a singleton instance, and it does so without the
# Promise. However, in Pthreads mode the Promise is used for worker
# creation.
if settings.MINIMAL_RUNTIME and options.oformat == OFormat.HTML and not settings.USE_PTHREADS:
settings.EXPORT_READY_PROMISE = 0
if settings.LEGACY_VM_SUPPORT:
if settings.WASM2JS:
settings.POLYFILL_OLD_MATH_FUNCTIONS = 1
# Support all old browser versions
settings.MIN_FIREFOX_VERSION = 0
settings.MIN_SAFARI_VERSION = 0
settings.MIN_IE_VERSION = 0
settings.MIN_EDGE_VERSION = 0
settings.MIN_CHROME_VERSION = 0
if settings.MIN_CHROME_VERSION <= 37:
settings.WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG = 1
setup_environment_settings()
# Silently drop any individual backwards compatibility emulation flags that are known never to occur on browsers that support WebAssembly.
if not settings.WASM2JS:
settings.POLYFILL_OLD_MATH_FUNCTIONS = 0
settings.WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG = 0
forced_stdlibs = []
if settings.STB_IMAGE and final_suffix in EXECUTABLE_ENDINGS:
forced_stdlibs.append('libstb_image')
settings.EXPORTED_FUNCTIONS += ['_stbi_load', '_stbi_load_from_memory', '_stbi_image_free']
if settings.USE_WEBGL2:
settings.MAX_WEBGL_VERSION = 2
# MIN_WEBGL_VERSION=2 implies MAX_WEBGL_VERSION=2
if settings.MIN_WEBGL_VERSION == 2:
default_setting('MAX_WEBGL_VERSION', 2)
if settings.MIN_WEBGL_VERSION > settings.MAX_WEBGL_VERSION:
exit_with_error('MIN_WEBGL_VERSION must be smaller or equal to MAX_WEBGL_VERSION!')
if not settings.GL_SUPPORT_SIMPLE_ENABLE_EXTENSIONS and settings.GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS:
exit_with_error('-s GL_SUPPORT_SIMPLE_ENABLE_EXTENSIONS=0 only makes sense with -s GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=0!')
if settings.ASMFS and final_suffix in EXECUTABLE_ENDINGS:
forced_stdlibs.append('libasmfs')
settings.FILESYSTEM = 0
settings.SYSCALLS_REQUIRE_FILESYSTEM = 0
settings.FETCH = 1
settings.SYSTEM_JS_LIBRARIES.append((0, shared.path_from_root('src', 'library_asmfs.js')))
# Explicitly drop linking in a malloc implementation if the program is not using any dynamic allocation calls.
if not settings.USES_DYNAMIC_ALLOC:
settings.MALLOC = 'none'
if settings.MALLOC == 'emmalloc':
settings.SYSTEM_JS_LIBRARIES.append((0, shared.path_from_root('src', 'library_emmalloc.js')))
if settings.FETCH and final_suffix in EXECUTABLE_ENDINGS:
forced_stdlibs.append('libfetch')
settings.SYSTEM_JS_LIBRARIES.append((0, shared.path_from_root('src', 'library_fetch.js')))
if settings.USE_PTHREADS:
settings.FETCH_WORKER_FILE = unsuffixed(os.path.basename(target)) + '.fetch.js'
if settings.DEMANGLE_SUPPORT:
settings.EXPORTED_FUNCTIONS += ['___cxa_demangle']
if settings.FULL_ES3:
settings.FULL_ES2 = 1
settings.MAX_WEBGL_VERSION = max(2, settings.MAX_WEBGL_VERSION)
if settings.EMBIND:
forced_stdlibs.append('libembind')
settings.EXPORTED_FUNCTIONS += ['_stackSave', '_stackRestore', '_stackAlloc']
if not settings.STANDALONE_WASM:
# in standalone mode, crt1 will call the constructors from inside the wasm
settings.EXPORTED_FUNCTIONS.append('___wasm_call_ctors')
if settings.RELOCATABLE and not settings.DYNAMIC_EXECUTION:
exit_with_error('cannot have both DYNAMIC_EXECUTION=0 and RELOCATABLE enabled at the same time, since RELOCATABLE needs to eval()')
if settings.SIDE_MODULE and settings.GLOBAL_BASE != -1:
exit_with_error('Cannot set GLOBAL_BASE when building SIDE_MODULE')
if settings.RELOCATABLE or settings.LINKABLE:
default_setting('ERROR_ON_UNDEFINED_SYMBOLS', 0)
default_setting('WARN_ON_UNDEFINED_SYMBOLS', 0)
if 'DISABLE_EXCEPTION_CATCHING' in settings_map and 'EXCEPTION_CATCHING_ALLOWED' in settings_map:
# If we get here then the user specified both DISABLE_EXCEPTION_CATCHING and EXCEPTION_CATCHING_ALLOWED
# on the command line. This is no longer valid so report either an error or a warning (for
# backwards compat with the old `DISABLE_EXCEPTION_CATCHING=2`).
if settings_map['DISABLE_EXCEPTION_CATCHING'] in ('0', '2'):
diagnostics.warning('deprecated', 'DISABLE_EXCEPTION_CATCHING=X is no longer needed when specifying EXCEPTION_CATCHING_ALLOWED')
else:
exit_with_error('DISABLE_EXCEPTION_CATCHING and EXCEPTION_CATCHING_ALLOWED are mutually exclusive')
if settings.EXCEPTION_CATCHING_ALLOWED:
settings.DISABLE_EXCEPTION_CATCHING = 0
if settings.DISABLE_EXCEPTION_THROWING and not settings.DISABLE_EXCEPTION_CATCHING:
exit_with_error("DISABLE_EXCEPTION_THROWING was set (probably from -fno-exceptions) but is not compatible with enabling exception catching (DISABLE_EXCEPTION_CATCHING=0). If you don't want exceptions, set DISABLE_EXCEPTION_CATCHING to 1; if you do want exceptions, don't link with -fno-exceptions")
if options.use_preload_plugins or len(options.preload_files) or len(options.embed_files):
if settings.NODERAWFS:
exit_with_error('--preload-file and --embed-file cannot be used with NODERAWFS which disables virtual filesystem')
# if we include any files, or intend to use preload plugins, then we definitely need filesystem support
settings.FORCE_FILESYSTEM = 1
if settings.PROXY_TO_WORKER or options.use_preload_plugins:
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$Browser']
if not settings.MINIMAL_RUNTIME:
# In non-MINIMAL_RUNTIME, the core runtime depends on these functions being present.
# (In MINIMAL_RUNTIME, they are no longer always bundled in.)
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += [
'$keepRuntimeAlive',
'$demangle',
'$demangleAll',
'$jsStackTrace',
'$stackTrace'
]
if settings.FILESYSTEM:
# to flush streams on FS exit, we need to be able to call fflush
# we only include it if the runtime is exitable, or when ASSERTIONS
# (ASSERTIONS will check that streams do not need to be flushed,
# helping people see when they should have enabled EXIT_RUNTIME)
if settings.EXIT_RUNTIME or settings.ASSERTIONS:
settings.EXPORTED_FUNCTIONS += ['_fflush']
if settings.SUPPORT_ERRNO:
# so setErrNo JS library function can report errno back to C
settings.EXPORTED_FUNCTIONS += ['___errno_location']
if settings.SAFE_HEAP:
# SAFE_HEAP check includes calling emscripten_get_sbrk_ptr() from wasm
settings.EXPORTED_FUNCTIONS += ['_emscripten_get_sbrk_ptr', '_emscripten_stack_get_base']
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$unSign']
if not settings.DECLARE_ASM_MODULE_EXPORTS:
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$exportAsmFunctions']
if settings.ALLOW_MEMORY_GROWTH:
# Setting ALLOW_MEMORY_GROWTH turns off ABORTING_MALLOC, as in that mode we default to
# the behavior of trying to grow and returning 0 from malloc on failure, like
# a standard system would. However, if the user sets the flag it
# overrides that.
default_setting('ABORTING_MALLOC', 0)
if settings.USE_PTHREADS:
if settings.USE_PTHREADS == 2:
exit_with_error('USE_PTHREADS=2 is no longer supported')
if settings.ALLOW_MEMORY_GROWTH:
diagnostics.warning('pthreads-mem-growth', 'USE_PTHREADS + ALLOW_MEMORY_GROWTH may run non-wasm code slowly, see https://github.com/WebAssembly/design/issues/1271')
# UTF8Decoder.decode may not work with a view of a SharedArrayBuffer, see https://github.com/whatwg/encoding/issues/172
settings.TEXTDECODER = 0
settings.SYSTEM_JS_LIBRARIES.append((0, shared.path_from_root('src', 'library_pthread.js')))
newargs += ['-pthread']
settings.EXPORTED_FUNCTIONS += [
'___emscripten_pthread_data_constructor',
'___pthread_tsd_run_dtors',
'__emscripten_call_on_thread',
'__emscripten_do_dispatch_to_thread',
'__emscripten_main_thread_futex',
'__emscripten_thread_init',
'_emscripten_current_thread_process_queued_calls',
'__emscripten_allow_main_runtime_queued_calls',
'_emscripten_futex_wake',
'_emscripten_get_global_libc',
'_emscripten_main_browser_thread_id',
'_emscripten_main_thread_process_queued_calls',
'_emscripten_register_main_browser_thread_id',
'_emscripten_run_in_main_runtime_thread_js',
'_emscripten_stack_set_limits',
'_emscripten_sync_run_in_main_thread_2',
'_emscripten_sync_run_in_main_thread_4',
'_emscripten_tls_init',
'_pthread_self',
]
# Some of these symbols are used by worker.js but are otherwise unreferenced.
# Because emitDCEGraph only considers the main js file, and not worker.js,
# we explicitly mark these symbols as user-exported so that they will be
# kept alive through DCE.
# TODO: Find a less hacky way to do this, perhaps by also scanning worker.js
# for roots.
building.user_requested_exports.add('_emscripten_tls_init')
building.user_requested_exports.add('_emscripten_current_thread_process_queued_calls')
# set location of worker.js
settings.PTHREAD_WORKER_FILE = unsuffixed(os.path.basename(target)) + '.worker.js'
else:
settings.SYSTEM_JS_LIBRARIES.append((0, shared.path_from_root('src', 'library_pthread_stub.js')))
if settings.FORCE_FILESYSTEM and not settings.MINIMAL_RUNTIME:
# when the filesystem is forced, we export by default methods that filesystem usage
# may need, including filesystem usage from standalone file packager output (i.e.
# file packages not built together with emcc, but that are loaded at runtime
# separately, and they need emcc's output to contain the support they need)
if not settings.ASMFS:
settings.EXPORTED_RUNTIME_METHODS += [
'FS_createPath',
'FS_createDataFile',
'FS_createPreloadedFile',
'FS_createLazyFile',
'FS_createDevice',
'FS_unlink'
]
settings.EXPORTED_RUNTIME_METHODS += [
'addRunDependency',
'removeRunDependency',
]
if not settings.MINIMAL_RUNTIME or settings.EXIT_RUNTIME:
# MINIMAL_RUNTIME only needs callRuntimeCallbacks in certain cases, but the normal runtime
# always does.
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$callRuntimeCallbacks']
if settings.USE_PTHREADS:
# memalign is used to ensure allocated thread stacks are aligned.
settings.EXPORTED_FUNCTIONS += ['_memalign']
if settings.MINIMAL_RUNTIME:
building.user_requested_exports.add('exit')
if settings.PROXY_TO_PTHREAD:
settings.EXPORTED_FUNCTIONS += ['_emscripten_proxy_main']
# pthread stack setup and other necessary utilities
def include_and_export(name):
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$' + name]
settings.EXPORTED_FUNCTIONS += [name]
include_and_export('establishStackSpace')
include_and_export('invokeEntryPoint')
if not settings.MINIMAL_RUNTIME:
# noExitRuntime does not apply to MINIMAL_RUNTIME.
include_and_export('keepRuntimeAlive')
if settings.MODULARIZE:
if settings.EXPORT_NAME == 'Module':
exit_with_error('pthreads + MODULARIZE currently require you to set -s EXPORT_NAME=Something (see settings.js) to Something != Module, so that the .worker.js file can work')
# MODULARIZE+USE_PTHREADS mode requires extra exports out to Module so that worker.js
# can access them:
# general threading variables:
settings.EXPORTED_RUNTIME_METHODS += ['PThread']
# To keep code size to a minimum, MINIMAL_RUNTIME does not utilize the global ExitStatus
# object; only the regular runtime has it.
if not settings.MINIMAL_RUNTIME:
settings.EXPORTED_RUNTIME_METHODS += ['ExitStatus']
if settings.SIDE_MODULE:
diagnostics.warning('experimental', '-s SIDE_MODULE + pthreads is experimental')
elif settings.MAIN_MODULE:
diagnostics.warning('experimental', '-s MAIN_MODULE + pthreads is experimental')
elif settings.LINKABLE:
diagnostics.warning('experimental', '-s LINKABLE + pthreads is experimental')
if settings.PROXY_TO_WORKER:
exit_with_error('--proxy-to-worker is not supported with -s USE_PTHREADS>0! Use the option -s PROXY_TO_PTHREAD=1 if you want to run the main thread of a multithreaded application in a web worker.')
else:
if settings.PROXY_TO_PTHREAD:
exit_with_error('-s PROXY_TO_PTHREAD=1 requires -s USE_PTHREADS to work!')
def check_memory_setting(setting):
if settings[setting] % webassembly.WASM_PAGE_SIZE != 0:
exit_with_error(f'{setting} must be a multiple of WebAssembly page size (64KiB), was {settings[setting]}')
check_memory_setting('INITIAL_MEMORY')
if settings.INITIAL_MEMORY >= 2 * 1024 * 1024 * 1024:
exit_with_error('INITIAL_MEMORY must be less than 2GB due to current spec limitations')
if settings.INITIAL_MEMORY < settings.TOTAL_STACK:
exit_with_error(f'INITIAL_MEMORY must be larger than TOTAL_STACK, was {settings.INITIAL_MEMORY} (TOTAL_STACK={settings.TOTAL_STACK})')
if settings.MAXIMUM_MEMORY != -1:
check_memory_setting('MAXIMUM_MEMORY')
if settings.MEMORY_GROWTH_LINEAR_STEP != -1:
check_memory_setting('MEMORY_GROWTH_LINEAR_STEP')
if settings.USE_PTHREADS and settings.ALLOW_MEMORY_GROWTH and settings.MAXIMUM_MEMORY == -1:
exit_with_error('If pthreads and memory growth are enabled, MAXIMUM_MEMORY must be set')
if settings.EXPORT_ES6 and not settings.MODULARIZE:
# EXPORT_ES6 requires output to be a module
if 'MODULARIZE' in settings_map:
exit_with_error('EXPORT_ES6 requires MODULARIZE to be set')
settings.MODULARIZE = 1
if settings.MODULARIZE and not settings.DECLARE_ASM_MODULE_EXPORTS:
# When the MODULARIZE option is used, we currently require all module exports to be
# declared individually - TODO: this could be optimized
exit_with_error('DECLARE_ASM_MODULE_EXPORTS=0 is not compatible with MODULARIZE')
# When not declaring wasm module exports in outer scope one by one, disable minifying
# wasm module export names so that the names can be passed directly to the outer scope.
# Also, if using library_exports.js API, disable minification so that the feature can work.
if not settings.DECLARE_ASM_MODULE_EXPORTS or 'exports.js' in [x for _, x in libs]:
settings.MINIFY_ASMJS_EXPORT_NAMES = 0
# Enable minification of wasm imports and exports when appropriate, if we
# are emitting an optimized JS+wasm combo (then the JS knows how to load the minified names).
# Things that process the JS after this operation would be done must disable this.
# For example, ASYNCIFY_LAZY_LOAD_CODE needs to identify import names.
if will_metadce() and \
settings.OPT_LEVEL >= 2 and \
settings.DEBUG_LEVEL <= 2 and \
options.oformat not in (OFormat.WASM, OFormat.BARE) and \
not settings.LINKABLE and \
not settings.STANDALONE_WASM and \
not settings.AUTODEBUG and \
not settings.ASSERTIONS and \
not settings.RELOCATABLE and \
not settings.ASYNCIFY_LAZY_LOAD_CODE and \
settings.MINIFY_ASMJS_EXPORT_NAMES:
settings.MINIFY_WASM_IMPORTS_AND_EXPORTS = 1
settings.MINIFY_WASM_IMPORTED_MODULES = 1
if settings.MINIMAL_RUNTIME:
# Minimal runtime uses a different default shell file
if options.shell_path == shared.path_from_root('src', 'shell.html'):
options.shell_path = shared.path_from_root('src', 'shell_minimal_runtime.html')
if settings.ASSERTIONS and settings.MINIMAL_RUNTIME:
# In ASSERTIONS builds, the functions UTF8ArrayToString() and stringToUTF8Array() (which are not JS library functions)
# both use warnOnce(), which in MINIMAL_RUNTIME is a JS library function, so we have to explicitly mark the dependency
# on warnOnce() in that case. If the string functions are turned into library functions in the future, then JS
# dependency tracking can be used and this special directive can be dropped.
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$warnOnce']
# Require explicit -lfoo.js flags to link with JS libraries.
settings.AUTO_JS_LIBRARIES = 0
if settings.MODULARIZE and settings.EXPORT_NAME == 'Module' and options.oformat == OFormat.HTML and \
(options.shell_path == shared.path_from_root('src', 'shell.html') or options.shell_path == shared.path_from_root('src', 'shell_minimal.html')):
exit_with_error('Due to collision in variable name "Module", the shell file "' + options.shell_path + '" is not compatible with build options "-s MODULARIZE=1 -s EXPORT_NAME=Module". Either provide your own shell file or change the name of the export to something else to avoid the name collision. (see https://github.com/emscripten-core/emscripten/issues/7950 for details)')
if settings.STANDALONE_WASM:
if settings.USE_PTHREADS:
exit_with_error('STANDALONE_WASM does not support pthreads yet')
if settings.MINIMAL_RUNTIME:
exit_with_error('MINIMAL_RUNTIME reduces JS size, and is incompatible with STANDALONE_WASM which focuses on ignoring JS anyhow and being 100% wasm')
# the wasm must be runnable without the JS, so there cannot be anything that
# requires JS legalization
settings.LEGALIZE_JS_FFI = 0
# TODO(sbc): Remove WASM2JS here once the size regression it would introduce has been fixed.
if settings.USE_PTHREADS or settings.RELOCATABLE or settings.ASYNCIFY_LAZY_LOAD_CODE or settings.WASM2JS:
settings.IMPORTED_MEMORY = 1
if settings.WASM_BIGINT:
settings.LEGALIZE_JS_FFI = 0
if settings.SINGLE_FILE:
settings.GENERATE_SOURCE_MAP = 0
if options.use_closure_compiler == 2 and not settings.WASM2JS:
exit_with_error('closure compiler mode 2 assumes the code is asm.js, so not meaningful for wasm')
if 'MEM_INIT_METHOD' in settings_map:
exit_with_error('MEM_INIT_METHOD is not supported in wasm. Memory will be embedded in the wasm binary if threads are not used, and included in a separate file if threads are used.')
if settings.WASM2JS:
settings.MAYBE_WASM2JS = 1
# when using wasm2js, if the memory segments are in the wasm then they
# end up converted by wasm2js into base64 encoded JS. alternatively, we
# can use a .mem file like asm.js used to.
# generally we follow what the options tell us to do (which is to use
# a .mem file in most cases, since it is binary & compact). however, for
# pthreads we must keep the memory segments in the wasm as they will be
# passive segments which the .mem format cannot handle.
settings.MEM_INIT_IN_WASM = not options.memory_init_file or settings.SINGLE_FILE or settings.USE_PTHREADS
else:
# wasm includes the mem init in the wasm binary. The exception is
# wasm2js, which behaves more like js.
options.memory_init_file = True
settings.MEM_INIT_IN_WASM = True
# wasm side modules have suffix .wasm
if settings.SIDE_MODULE and target.endswith('.js'):
diagnostics.warning('emcc', 'output suffix .js requested, but wasm side modules are just wasm files; emitting only a .wasm, no .js')
sanitize = set()
for arg in newargs:
if arg.startswith('-fsanitize='):
sanitize.update(arg.split('=', 1)[1].split(','))
elif arg.startswith('-fno-sanitize='):
sanitize.difference_update(arg.split('=', 1)[1].split(','))
if sanitize:
settings.USE_OFFSET_CONVERTER = 1
settings.EXPORTED_FUNCTIONS += [
'_memalign',
'_emscripten_builtin_memalign',
'_emscripten_builtin_malloc',
'_emscripten_builtin_free',
'___heap_base',
'___global_base'
]
if settings.USE_OFFSET_CONVERTER and settings.USE_PTHREADS:
settings.EXPORTED_RUNTIME_METHODS += ['WasmOffsetConverter']
if sanitize & UBSAN_SANITIZERS:
if '-fsanitize-minimal-runtime' in newargs:
settings.UBSAN_RUNTIME = 1
else:
settings.UBSAN_RUNTIME = 2
if 'leak' in sanitize:
settings.USE_LSAN = 1
settings.EXIT_RUNTIME = 1
if settings.LINKABLE:
exit_with_error('LSan does not support dynamic linking')
if 'address' in sanitize:
settings.USE_ASAN = 1
if not settings.UBSAN_RUNTIME:
settings.UBSAN_RUNTIME = 2
settings.EXPORTED_FUNCTIONS += [
'_emscripten_builtin_memset',
'_asan_c_load_1', '_asan_c_load_1u',
'_asan_c_load_2', '_asan_c_load_2u',
'_asan_c_load_4', '_asan_c_load_4u',
'_asan_c_load_f', '_asan_c_load_d',
'_asan_c_store_1', '_asan_c_store_1u',
'_asan_c_store_2', '_asan_c_store_2u',
'_asan_c_store_4', '_asan_c_store_4u',
'_asan_c_store_f', '_asan_c_store_d',
]
if settings.ASAN_SHADOW_SIZE != -1:
diagnostics.warning('emcc', 'ASAN_SHADOW_SIZE is ignored and will be removed in a future release')
if settings.GLOBAL_BASE != -1:
exit_with_error("ASan does not support custom GLOBAL_BASE")
max_mem = settings.INITIAL_MEMORY
if settings.ALLOW_MEMORY_GROWTH:
max_mem = settings.MAXIMUM_MEMORY
if max_mem == -1:
exit_with_error('ASan requires a finite MAXIMUM_MEMORY')
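# ASan reserves the low 1/8th of the address space as shadow memory (the shadow starts
# at 0), so place the globals (GLOBAL_BASE) immediately after the shadow region.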
shadow_size = max_mem // 8
settings.GLOBAL_BASE = shadow_size
if settings.SAFE_HEAP:
# SAFE_HEAP instruments ASan's shadow memory accesses.
# Since the shadow memory starts at 0, the act of accessing the shadow memory is detected
# by SAFE_HEAP as a null pointer dereference.
exit_with_error('ASan does not work with SAFE_HEAP')
if settings.LINKABLE:
exit_with_error('ASan does not support dynamic linking')
if sanitize and settings.GENERATE_SOURCE_MAP:
settings.LOAD_SOURCE_MAP = 1
if settings.LOAD_SOURCE_MAP and settings.USE_PTHREADS:
settings.EXPORTED_RUNTIME_METHODS += ['WasmSourceMap']
if settings.GLOBAL_BASE == -1:
# default if nothing else sets it
# a higher global base is useful for optimizing load/store offsets, as it
# enables the --post-emscripten pass
settings.GLOBAL_BASE = 1024
# various settings require malloc/free support from JS
if settings.RELOCATABLE or \
settings.BUILD_AS_WORKER or \
settings.USE_WEBGPU or \
settings.USE_PTHREADS or \
settings.OFFSCREENCANVAS_SUPPORT or \
settings.LEGACY_GL_EMULATION or \
not settings.DISABLE_EXCEPTION_CATCHING or \
settings.ASYNCIFY or \
settings.ASMFS or \
settings.DEMANGLE_SUPPORT or \
settings.FORCE_FILESYSTEM or \
settings.STB_IMAGE or \
settings.EMBIND or \
settings.FETCH or \
settings.PROXY_POSIX_SOCKETS or \
options.memory_profiler or \
sanitize:
settings.EXPORTED_FUNCTIONS += ['_malloc', '_free']
if not settings.DISABLE_EXCEPTION_CATCHING:
# If it were not for LTO builds, we could handle these by adding deps_info.py
# entries for the __cxa_find_matching_catch_* functions. However, under
# LTO these symbols don't exist prior to linking.
settings.EXPORTED_FUNCTIONS += ['___cxa_is_pointer_type', '___cxa_can_catch']
if settings.ASYNCIFY:
if not settings.ASYNCIFY_IGNORE_INDIRECT:
# if we are not ignoring indirect calls, then we must treat invoke_* as if
# they are indirect calls, since that is what they do - we can't see their
# targets statically.
settings.ASYNCIFY_IMPORTS += ['invoke_*']
# with pthreads we may call main through the __call_main mechanism, which can
# therefore reach anything in the program, so mark it as possibly causing a
# sleep (the asyncify analysis doesn't look through JS, just wasm, so it can't
# see what it itself calls)
if settings.USE_PTHREADS:
settings.ASYNCIFY_IMPORTS += ['__call_main']
# add the default imports
settings.ASYNCIFY_IMPORTS += DEFAULT_ASYNCIFY_IMPORTS
# return the full import name, including module. The name may
# already have a module prefix; if not, we assume it is "env".
def get_full_import_name(name):
if '.' in name:
return name
return 'env.' + name
settings.ASYNCIFY_IMPORTS = [get_full_import_name(i) for i in settings.ASYNCIFY_IMPORTS]
if settings.WASM2JS and settings.GENERATE_SOURCE_MAP:
exit_with_error('wasm2js does not support source maps yet (debug in wasm for now)')
if settings.NODE_CODE_CACHING:
if settings.WASM_ASYNC_COMPILATION:
exit_with_error('NODE_CODE_CACHING requires sync compilation (WASM_ASYNC_COMPILATION=0)')
if not shared.target_environment_may_be('node'):
exit_with_error('NODE_CODE_CACHING only works in node, but target environments do not include it')
if settings.SINGLE_FILE:
exit_with_error('NODE_CODE_CACHING saves a file on the side and is not compatible with SINGLE_FILE')
if options.tracing and settings.ALLOW_MEMORY_GROWTH:
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['emscripten_trace_report_memory_layout']
settings.EXPORTED_FUNCTIONS += ['_emscripten_stack_get_current',
'_emscripten_stack_get_base',
'_emscripten_stack_get_end']
# Any "pointers" passed to JS will now be i64's, in both modes.
if settings.MEMORY64:
if settings_map.get('WASM_BIGINT') == '0':
exit_with_error('MEMORY64 is not compatible with WASM_BIGINT=0')
settings.WASM_BIGINT = 1
# check if we can address the 2GB mark and higher: either if we start at
# 2GB, or if we allow growth to either any amount or to 2GB or more.
if settings.INITIAL_MEMORY > 2 * 1024 * 1024 * 1024 or \
(settings.ALLOW_MEMORY_GROWTH and
(settings.MAXIMUM_MEMORY < 0 or
settings.MAXIMUM_MEMORY > 2 * 1024 * 1024 * 1024)):
settings.CAN_ADDRESS_2GB = 1
settings.EMSCRIPTEN_VERSION = shared.EMSCRIPTEN_VERSION
settings.PROFILING_FUNCS = options.profiling_funcs
settings.SOURCE_MAP_BASE = options.source_map_base or ''
# exit block 'parse arguments and setup'
log_time('parse arguments and setup')
linker_inputs = []
if options.post_link:
process_libraries(libs, lib_dirs, linker_inputs)
if len(input_files) != 1:
exit_with_error('--post-link requires a single input file')
post_link(options, input_files[0][1], wasm_target, target)
return 0
## Compile source code to object files
logger.debug('compiling inputs')
with ToolchainProfiler.profile_block('compile inputs'):
def is_link_flag(flag):
if flag.startswith('-nostdlib'):
return True
return flag.startswith(('-l', '-L', '-Wl,'))
CXX = [shared.CLANG_CXX]
CC = [shared.CLANG_CC]
if config.COMPILER_WRAPPER:
logger.debug('using compiler wrapper: %s', config.COMPILER_WRAPPER)
CXX.insert(0, config.COMPILER_WRAPPER)
CC.insert(0, config.COMPILER_WRAPPER)
if 'EMMAKEN_COMPILER' in os.environ:
diagnostics.warning('deprecated', '`EMMAKEN_COMPILER` is deprecated.\n'
'To use an alternative LLVM build set `LLVM_ROOT` in the config file (or `EM_LLVM_ROOT` env var).\n'
'To wrap invocations of clang use the `COMPILER_WRAPPER` setting (or `EM_COMPILER_WRAPPER` env var).\n')
CXX = [os.environ['EMMAKEN_COMPILER']]
CC = [cxx_to_c_compiler(os.environ['EMMAKEN_COMPILER'])]
compile_args = [a for a in newargs if a and not is_link_flag(a)]
system_libs.ensure_sysroot()
def use_cxx(src):
if 'c++' in language_mode or run_via_emxx:
return True
# Next consider the filename
if src.endswith(C_ENDINGS + OBJC_ENDINGS):
return False
if src.endswith(CXX_ENDINGS):
return True
# Finally fall back to the default
if settings.DEFAULT_TO_CXX:
# Default to using C++ even when run as `emcc`.
# This means that emcc will act as a C++ linker when no source files are
# specified.
# This differs from clang and gcc, where the default is always C unless run as
# clang++/g++.
return True
return False
def get_compiler(cxx):
if cxx:
return CXX
return CC
def get_clang_command(src_file):
return get_compiler(use_cxx(src_file)) + get_cflags(options, args) + compile_args + [src_file]
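# Assembly inputs are compiled with only the base clang flags, not the full set of emcc cflags.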
def get_clang_command_asm(src_file):
return get_compiler(use_cxx(src_file)) + get_clang_flags() + compile_args + [src_file]
# preprocessor-only (-E) support
if has_dash_E or '-M' in newargs or '-MM' in newargs or '-fsyntax-only' in newargs:
for input_file in [x[1] for x in input_files]:
cmd = get_clang_command(input_file)
if specified_target:
cmd += ['-o', specified_target]
# Do not compile, but just output the result from the preprocessing stage or
# output the dependency rule. Warning: clang and gcc behave differently
# with -MF! (clang seems to not recognize it)
logger.debug(('just preprocessor ' if has_dash_E else 'just dependencies: ') + ' '.join(cmd))
shared.check_call(cmd)
return 0
# Precompiled headers support
if has_header_inputs:
headers = [header for _, header in input_files]
for header in headers:
if not header.endswith(HEADER_ENDINGS):
exit_with_error('cannot mix precompiled headers with non-header inputs: ' + str(headers) + ' : ' + header)
cmd = get_clang_command(header)
if specified_target:
cmd += ['-o', specified_target]
logger.debug("running (for precompiled headers): " + cmd[0] + ' ' + ' '.join(cmd[1:]))
shared.check_call(cmd)
return 0
def get_object_filename(input_file):
if compile_only:
# In compile-only mode we don't use any temp file. The object files
# are written directly to their final output locations.
if specified_target:
assert len(input_files) == 1
return specified_target
else:
return unsuffixed_basename(input_file) + options.default_object_extension
else:
return in_temp(unsuffixed(uniquename(input_file)) + options.default_object_extension)
def compile_source_file(i, input_file):
logger.debug('compiling source file: ' + input_file)
output_file = get_object_filename(input_file)
if not compile_only:
linker_inputs.append((i, output_file))
if get_file_suffix(input_file) in ASSEMBLY_ENDINGS:
cmd = get_clang_command_asm(input_file)
else:
cmd = get_clang_command(input_file)
if not has_dash_c:
cmd += ['-c']
cmd += ['-o', output_file]
shared.check_call(cmd)
if output_file not in ('-', os.devnull):
assert os.path.exists(output_file)
# First, generate LLVM bitcode. For each input file, we get base.o with bitcode
for i, input_file in input_files:
file_suffix = get_file_suffix(input_file)
if file_suffix in SOURCE_ENDINGS + ASSEMBLY_ENDINGS or (has_dash_c and file_suffix == '.bc'):
compile_source_file(i, input_file)
elif file_suffix in DYNAMICLIB_ENDINGS:
logger.debug('using shared library: ' + input_file)
linker_inputs.append((i, input_file))
elif building.is_ar(input_file):
logger.debug('using static library: ' + input_file)
ensure_archive_index(input_file)
linker_inputs.append((i, input_file))
elif language_mode:
compile_source_file(i, input_file)
elif input_file == '-':
exit_with_error('-E or -x required when input is from standard input')
else:
# Default to assuming the inputs are object files and pass them to the linker
logger.debug('using object file: ' + input_file)
linker_inputs.append((i, input_file))
# exit block 'compile inputs'
log_time('compile inputs')
if compile_only:
logger.debug('stopping after compile phase')
for flag in link_flags:
diagnostics.warning('unused-command-line-argument', "argument unused during compilation: '%s'" % flag[1])
for f in linker_inputs:
diagnostics.warning('unused-command-line-argument', "%s: linker input file unused because linking not done" % f[1])
return 0
if specified_target and specified_target.startswith('-'):
exit_with_error('invalid output filename: `%s`' % specified_target)
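# Append the implicit emsdk linker flags with a maximal index so that they sort
# after all user-provided inputs and flags on the final link command line.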
ldflags = emsdk_ldflags(newargs)
for f in ldflags:
add_link_flag(sys.maxsize, f)
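# wasm-ld is used for all links except when producing a relocatable object file under LTO.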
using_lld = not (link_to_object and settings.LTO)
link_flags = filter_link_flags(link_flags, using_lld)
# Decide what we will link
consumed = process_libraries(libs, lib_dirs, linker_inputs)
# Filter out libraries that are actually JS libs
link_flags = [l for l in link_flags if l[0] not in consumed]
# If we are linking to an intermediate object then ignore other
# "fake" dynamic libraries, since otherwise we will end up with
# multiple copies in the final executable.
if link_to_object or options.ignore_dynamic_linking:
linker_inputs = filter_out_dynamic_libs(options, linker_inputs)
else:
linker_inputs = filter_out_duplicate_dynamic_libs(linker_inputs)
if settings.MAIN_MODULE:
dylibs = [i[1] for i in linker_inputs if get_file_suffix(i[1]) in DYNAMICLIB_ENDINGS]
process_dynamic_libs(dylibs)
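# linker_inputs and link_flags are (index, value) pairs; sorting by index preserves
# the relative order in which they appeared on the command line.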
linker_arguments = [val for _, val in sorted(linker_inputs + link_flags)]
if link_to_object:
with ToolchainProfiler.profile_block('linking to object file'):
logger.debug('link_to_object: ' + str(linker_arguments) + ' -> ' + target)
building.link_to_object(linker_arguments, target)
logger.debug('stopping after linking to object file')
return 0
if final_suffix in ('.o', '.bc', '.so', '.dylib') and not settings.SIDE_MODULE:
diagnostics.warning('emcc', 'generating an executable with an object extension (%s). If you meant to build an object file please use `-c`, `-r`, or `-shared`' % final_suffix)
## Continue on to create JavaScript
with ToolchainProfiler.profile_block('calculate system libraries'):
extra_files_to_link = []
# link in ports and system libraries, if necessary
if not settings.SIDE_MODULE:
# Ports are always linked into the main module, never the side module.
extra_files_to_link += system_libs.get_ports_libs(settings)
if '-nostdlib' not in newargs and '-nodefaultlibs' not in newargs:
link_as_cxx = run_via_emxx
# Traditionally we always link as C++. For compatibility we continue to do that,
# unless running in strict mode.
if not settings.STRICT and '-nostdlib++' not in newargs:
link_as_cxx = True
extra_files_to_link += system_libs.calculate([f for _, f in sorted(linker_inputs)] + extra_files_to_link, link_as_cxx, forced=forced_stdlibs)
linker_arguments += extra_files_to_link
# exit block 'calculate system libraries'
log_time('calculate system libraries')
def dedup_list(lst):
rtn = []
for item in lst:
if item not in rtn:
rtn.append(item)
return rtn
# Make a final pass over settings.EXPORTED_FUNCTIONS to remove any
# duplication between functions added by the driver/libraries and function
# specified by the user
settings.EXPORTED_FUNCTIONS = dedup_list(settings.EXPORTED_FUNCTIONS)
with ToolchainProfiler.profile_block('link'):
logger.debug('linking: ' + str(linker_arguments))
# if EMCC_DEBUG=2 then we must link now, so the temp files are complete.
# if using the wasm backend, we might be using vanilla LLVM, which does not allow our
# fastcomp deferred linking opts.
# TODO: we could check if this is a fastcomp build, and still speed things up here
js_funcs = None
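# When lld is asked to report undefined symbols, pass it the list of symbols implemented
# in JS libraries so that they are not reported as undefined.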
if settings.LLD_REPORT_UNDEFINED and settings.ERROR_ON_UNDEFINED_SYMBOLS:
js_funcs = get_all_js_syms()
log_time('JS symbol generation')
building.link_lld(linker_arguments, wasm_target, external_symbol_list=js_funcs)
# Special handling for when the user passed '-Wl,--version'. In this case the linker
# does not create the output file, but just prints its version and exits with 0.
if '--version' in linker_arguments:
return 0
# exit block 'link'
log_time('link')
if target == os.devnull:
# TODO(sbc): In theory we should really run the whole pipeline even if the output is
# /dev/null, but that will take some refactoring
return 0
# Perform post-link steps (unless we are running bare mode)
if options.oformat != OFormat.BARE:
post_link(options, wasm_target, wasm_target, target)
return 0
def move_file(src, dst):
logging.debug('move: %s -> %s', src, dst)
if os.path.isdir(dst):
exit_with_error(f'cannot write output file `{dst}`: Is a directory')
src = os.path.abspath(src)
dst = os.path.abspath(dst)
if src == dst:
return
if dst == os.devnull:
return
shutil.move(src, dst)
def post_link(options, in_wasm, wasm_target, target):
global final_js
target_basename = unsuffixed_basename(target)
if options.oformat != OFormat.WASM:
final_js = in_temp(target_basename + '.js')
if settings.MEM_INIT_IN_WASM:
memfile = None
else:
memfile = shared.replace_or_append_suffix(target, '.mem')
with ToolchainProfiler.profile_block('emscript'):
# Emscripten
logger.debug('emscript')
if options.memory_init_file:
settings.MEM_INIT_METHOD = 1
else:
assert settings.MEM_INIT_METHOD != 1
if embed_memfile():
settings.SUPPORT_BASE64_EMBEDDING = 1
emscripten.run(in_wasm, wasm_target, final_js, memfile)
save_intermediate('original')
# exit block 'emscript'
log_time('emscript')
with ToolchainProfiler.profile_block('source transforms'):
# Embed and preload files
if len(options.preload_files) or len(options.embed_files):
logger.debug('setting up files')
file_args = ['--from-emcc', '--export-name=' + settings.EXPORT_NAME]
if len(options.preload_files):
file_args.append('--preload')
file_args += options.preload_files
if len(options.embed_files):
file_args.append('--embed')
file_args += options.embed_files
if len(options.exclude_files):
file_args.append('--exclude')
file_args += options.exclude_files
if options.use_preload_cache:
file_args.append('--use-preload-cache')
if settings.LZ4:
file_args.append('--lz4')
if options.use_preload_plugins:
file_args.append('--use-preload-plugins')
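# Run the file packager to emit the .data file, capturing the loader JS it prints
# on stdout so that it can be prepended to the pre-js below.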
file_code = shared.check_call([shared.FILE_PACKAGER, unsuffixed(target) + '.data'] + file_args, stdout=PIPE).stdout
options.pre_js = js_manipulation.add_files_pre_js(options.pre_js, file_code)
# Apply pre and postjs files
if final_js and (options.pre_js or options.post_js):
logger.debug('applying pre/postjses')
src = open(final_js).read()
final_js += '.pp.js'
with open(final_js, 'w') as f:
# pre-js code goes right after the Module integration code (so it
# can use Module), we have a marker for it
f.write(do_replace(src, '// {{PRE_JSES}}', fix_windows_newlines(options.pre_js)))
f.write(fix_windows_newlines(options.post_js))
options.pre_js = src = options.post_js = None
save_intermediate('pre-post')
# Apply a source code transformation, if requested
if options.js_transform:
safe_copy(final_js, final_js + '.tr.js')
final_js += '.tr.js'
posix = not shared.WINDOWS
logger.debug('applying transform: %s', options.js_transform)
shared.check_call(building.remove_quotes(shlex.split(options.js_transform, posix=posix) + [os.path.abspath(final_js)]))
save_intermediate('transformed')
# exit block 'source transforms'
log_time('source transforms')
if memfile and not settings.MINIMAL_RUNTIME:
# MINIMAL_RUNTIME doesn't use `var memoryInitializer` but instead expects Module['mem'] to
# be loaded before the module. See src/postamble_minimal.js.
with ToolchainProfiler.profile_block('memory initializer'):
# For the wasm backend, we don't have any memory info in JS. All we need to do
# is set the memory initializer url.
src = open(final_js).read()
src = do_replace(src, '// {{MEM_INITIALIZER}}', 'var memoryInitializer = "%s";' % os.path.basename(memfile))
open(final_js + '.mem.js', 'w').write(src)
final_js += '.mem.js'
log_time('memory initializer')
with ToolchainProfiler.profile_block('binaryen'):
do_binaryen(target, options, wasm_target)
log_time('binaryen')
# If we are not emitting any JS then we are all done now
if options.oformat == OFormat.WASM:
return
with ToolchainProfiler.profile_block('final emitting'):
# Remove some trivial whitespace
# TODO: do not run when compress has already been done on all parts of the code
# src = open(final_js).read()
# src = re.sub(r'\n+[ \n]*\n+', '\n', src)
# open(final_js, 'w').write(src)
if settings.USE_PTHREADS:
target_dir = os.path.dirname(os.path.abspath(target))
worker_output = os.path.join(target_dir, settings.PTHREAD_WORKER_FILE)
with open(worker_output, 'w') as f:
f.write(shared.read_and_preprocess(shared.path_from_root('src', 'worker.js'), expand_macros=True))
# Minify the worker.js file in optimized builds
if (settings.OPT_LEVEL >= 1 or settings.SHRINK_LEVEL >= 1) and not settings.DEBUG_LEVEL:
minified_worker = building.acorn_optimizer(worker_output, ['minifyWhitespace'], return_output=True)
open(worker_output, 'w').write(minified_worker)
# track files that will need native eols
generated_text_files_with_native_eols = []
if settings.MODULARIZE:
modularize()
module_export_name_substitution()
# Run a final regex pass to clean up items that Closure could not optimize, or suboptimal
# code left behind by processing steps that occurred after Closure.
if settings.MINIMAL_RUNTIME == 2 and settings.USE_CLOSURE_COMPILER and settings.DEBUG_LEVEL == 0 and not settings.SINGLE_FILE:
# Process .js runtime file. Note that we need to handle the license text
# here, so that it will not confuse the hacky script.
shared.JS.handle_license(final_js)
shared.run_process([shared.PYTHON, shared.path_from_root('tools', 'hacky_postprocess_around_closure_limitations.py'), final_js])
# Apply pre and postjs files
if options.extern_pre_js or options.extern_post_js:
logger.debug('applying extern pre/postjses')
src = open(final_js).read()
final_js += '.epp.js'
with open(final_js, 'w') as f:
f.write(fix_windows_newlines(options.extern_pre_js))
f.write(src)
f.write(fix_windows_newlines(options.extern_post_js))
save_intermediate('extern-pre-post')
shared.JS.handle_license(final_js)
if options.oformat in (OFormat.JS, OFormat.MJS):
js_target = target
else:
js_target = get_secondary_target(target, '.js')
# The JS is now final. Move it to its final location
move_file(final_js, js_target)
if not settings.SINGLE_FILE:
generated_text_files_with_native_eols += [js_target]
# If we were asked to also generate HTML, do that
if options.oformat == OFormat.HTML:
generate_html(target, options, js_target, target_basename,
wasm_target, memfile)
elif settings.PROXY_TO_WORKER:
generate_worker_js(target, js_target, target_basename)
if embed_memfile() and memfile:
shared.try_delete(memfile)
if settings.SPLIT_MODULE:
diagnostics.warning('experimental', 'The SPLIT_MODULE setting is experimental and subject to change')
do_split_module(wasm_target)
for f in generated_text_files_with_native_eols:
tools.line_endings.convert_line_endings_in_file(f, os.linesep, options.output_eol)
if options.executable:
make_js_executable(js_target)
log_time('final emitting')
# exit block 'final emitting'
return 0
def version_string():
return 'emcc (Emscripten gcc/clang-like replacement + linker emulating GNU ld) %s' % shared.EMSCRIPTEN_VERSION
def parse_args(newargs):
options = EmccOptions()
settings_changes = []
user_js_defines = []
should_exit = False
eh_enabled = False
wasm_eh_enabled = False
skip = False
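# 'skip' is set when an option consumes the following argument (e.g. -mllvm),
# so that argument is not re-parsed on the next iteration.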
for i in range(len(newargs)):
if skip:
skip = False
continue
# On Windows Vista (and possibly others), excessive spaces in the command line
# leak into the items in this array, so trim e.g. 'foo.cpp ' -> 'foo.cpp'
newargs[i] = newargs[i].strip()
arg = newargs[i]
arg_value = None
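# check_flag() and check_arg() consume emcc-specific options by blanking them out
# of newargs so that they are not passed on to clang.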
def check_flag(value):
# Check for and consume a flag
if arg == value:
newargs[i] = ''
return True
return False
def check_arg(name):
nonlocal arg_value
if arg.startswith(name) and '=' in arg:
arg_value = arg.split('=', 1)[1]
newargs[i] = ''
return True
if arg == name:
if len(newargs) <= i + 1:
exit_with_error("option '%s' requires an argument" % arg)
arg_value = newargs[i + 1]
newargs[i] = ''
newargs[i + 1] = ''
return True
return False
def consume_arg():
nonlocal arg_value
assert arg_value is not None
rtn = arg_value
arg_value = None
return rtn
def consume_arg_file():
name = consume_arg()
if not os.path.isfile(name):
exit_with_error("'%s': file not found: '%s'" % (arg, name))
return name
if arg.startswith('-O'):
# Let -O default to -O2, which is what gcc does.
options.requested_level = arg[2:] or '2'
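# -Os and -Oz are treated as -O2 with a SHRINK_LEVEL of 1 or 2 respectively.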
if options.requested_level == 's':
options.requested_level = 2
settings.SHRINK_LEVEL = 1
settings_changes.append('INLINING_LIMIT=1')
elif options.requested_level == 'z':
options.requested_level = 2
settings.SHRINK_LEVEL = 2
settings_changes.append('INLINING_LIMIT=1')
settings.OPT_LEVEL = validate_arg_level(options.requested_level, 3, 'Invalid optimization level: ' + arg, clamp=True)
elif check_arg('--js-opts'):
logger.warning('--js-opts ignored when using llvm backend')
consume_arg()
elif check_arg('--llvm-opts'):
diagnostics.warning('deprecated', '--llvm-opts is deprecated. All non-emcc args are passed through to clang.')
elif arg.startswith('-flto'):
if '=' in arg:
settings.LTO = arg.split('=')[1]
else:
settings.LTO = "full"
elif check_arg('--llvm-lto'):
logger.warning('--llvm-lto ignored when using llvm backend')
consume_arg()
elif check_arg('--closure-args'):
args = consume_arg()
options.closure_args += shlex.split(args)
elif check_arg('--closure'):
options.use_closure_compiler = int(consume_arg())
elif check_arg('--js-transform'):
options.js_transform = consume_arg()
elif check_arg('--pre-js'):
options.pre_js += open(consume_arg_file()).read() + '\n'
elif check_arg('--post-js'):
options.post_js += open(consume_arg_file()).read() + '\n'
elif check_arg('--extern-pre-js'):
options.extern_pre_js += open(consume_arg_file()).read() + '\n'
elif check_arg('--extern-post-js'):
options.extern_post_js += open(consume_arg_file()).read() + '\n'
elif check_arg('--compiler-wrapper'):
config.COMPILER_WRAPPER = consume_arg()
elif check_flag('--post-link'):
options.post_link = True
elif check_arg('--oformat'):
formats = [f.lower() for f in OFormat.__members__]
fmt = consume_arg()
if fmt not in formats:
exit_with_error('invalid output format: `%s` (must be one of %s)' % (fmt, formats))
options.oformat = getattr(OFormat, fmt.upper())
elif check_arg('--minify'):
arg = consume_arg()
if arg != '0':
exit_with_error('0 is the only supported option for --minify; 1 has been deprecated')
settings.DEBUG_LEVEL = max(1, settings.DEBUG_LEVEL)
elif arg.startswith('-g'):
options.requested_debug = arg
requested_level = arg[2:] or '3'
if is_int(requested_level):
# the -gX value is the debug level (-g1, -g2, etc.)
settings.DEBUG_LEVEL = validate_arg_level(requested_level, 4, 'Invalid debug level: ' + arg)
# if we don't need to preserve LLVM debug info, do not keep this flag
# for clang
if settings.DEBUG_LEVEL < 3:
newargs[i] = ''
else:
# for 3+, report -g to clang as -g4 etc. are not accepted
newargs[i] = '-g'
if settings.DEBUG_LEVEL == 4:
settings.GENERATE_SOURCE_MAP = 1
diagnostics.warning('deprecated', 'please replace -g4 with -gsource-map')
else:
if requested_level.startswith('force_dwarf'):
exit_with_error('gforce_dwarf was a temporary option and is no longer necessary (use -g)')
elif requested_level.startswith('separate-dwarf'):
# emit full DWARF but also emit it in a file on the side
newargs[i] = '-g'
# if a file is provided, use that; otherwise use the default location
# (note that we do not know the default location until all args have
# been parsed, so just note True for now).
if requested_level != 'separate-dwarf':
if not requested_level.startswith('separate-dwarf=') or requested_level.count('=') != 1:
exit_with_error('invalid -gseparate-dwarf=FILENAME notation')
settings.SEPARATE_DWARF = requested_level.split('=')[1]
else:
settings.SEPARATE_DWARF = True
elif requested_level == 'source-map':
settings.GENERATE_SOURCE_MAP = 1
newargs[i] = '-g'
# a non-integer level can be something like -gline-tables-only. keep
# the flag for the clang frontend to emit the appropriate DWARF info.
# set the emscripten debug level to 3 so that we do not remove that
# debug info during link (during compile, this does not make a
# difference).
settings.DEBUG_LEVEL = 3
elif check_flag('-profiling') or check_flag('--profiling'):
settings.DEBUG_LEVEL = max(settings.DEBUG_LEVEL, 2)
options.profiling = True
elif check_flag('-profiling-funcs') or check_flag('--profiling-funcs'):
options.profiling_funcs = True
elif newargs[i] == '--tracing' or newargs[i] == '--memoryprofiler':
if newargs[i] == '--memoryprofiler':
options.memory_profiler = True
options.tracing = True
newargs[i] = ''
settings_changes.append("EMSCRIPTEN_TRACING=1")
settings.SYSTEM_JS_LIBRARIES.append((0, shared.path_from_root('src', 'library_trace.js')))
elif check_flag('--emit-symbol-map'):
options.emit_symbol_map = True
settings.EMIT_SYMBOL_MAP = 1
elif check_flag('--bind'):
settings.EMBIND = 1
settings.SYSTEM_JS_LIBRARIES.append((0, shared.path_from_root('src', 'embind', 'emval.js')))
settings.SYSTEM_JS_LIBRARIES.append((0, shared.path_from_root('src', 'embind', 'embind.js')))
elif check_arg('--embed-file'):
options.embed_files.append(consume_arg())
elif check_arg('--preload-file'):
options.preload_files.append(consume_arg())
elif check_arg('--exclude-file'):
options.exclude_files.append(consume_arg())
elif check_flag('--use-preload-cache'):
options.use_preload_cache = True
elif check_flag('--no-heap-copy'):
diagnostics.warning('legacy-settings', 'ignoring legacy flag --no-heap-copy (that is the only mode supported now)')
elif check_flag('--use-preload-plugins'):
options.use_preload_plugins = True
elif check_flag('--ignore-dynamic-linking'):
options.ignore_dynamic_linking = True
elif arg == '-v':
shared.PRINT_STAGES = True
elif check_arg('--shell-file'):
options.shell_path = consume_arg_file()
elif check_arg('--source-map-base'):
options.source_map_base = consume_arg()
elif check_flag('--no-entry'):
options.no_entry = True
elif check_arg('--js-library'):
settings.SYSTEM_JS_LIBRARIES.append((i + 1, os.path.abspath(consume_arg_file())))
elif check_flag('--remove-duplicates'):
diagnostics.warning('legacy-settings', '--remove-duplicates is deprecated as it is no longer needed. If you cannot link without it, file a bug with a testcase')
elif check_flag('--jcache'):
logger.error('jcache is no longer supported')
elif check_flag('--clear-cache'):
logger.info('clearing cache as requested by --clear-cache')
shared.Cache.erase()
shared.check_sanity(force=True) # this is a good time for a sanity check
should_exit = True
elif check_flag('--clear-ports'):
logger.info('clearing ports and cache as requested by --clear-ports')
system_libs.Ports.erase()
shared.Cache.erase()
shared.check_sanity(force=True) # this is a good time for a sanity check
should_exit = True
elif check_flag('--check'):
print(version_string(), file=sys.stderr)
shared.check_sanity(force=True)
should_exit = True
elif check_flag('--show-ports'):
system_libs.show_ports()
should_exit = True
elif check_arg('--memory-init-file'):
options.memory_init_file = int(consume_arg())
elif check_flag('--proxy-to-worker'):
settings_changes.append('PROXY_TO_WORKER=1')
elif check_arg('--valid-abspath'):
options.valid_abspaths.append(consume_arg())
elif check_flag('--separate-asm'):
exit_with_error('cannot --separate-asm with the wasm backend, since not emitting asm.js')
elif arg.startswith(('-I', '-L')):
path_name = arg[2:]
if os.path.isabs(path_name) and not is_valid_abspath(options, path_name):
# Of course an absolute path to a non-system-specific library or header
# is fine, and you can ignore this warning. The danger is system headers
# that are e.g. x86 specific and nonportable. The emscripten bundled
# headers are modified to be portable; local system ones are generally not.
diagnostics.warning(
'absolute-paths', '-I or -L of an absolute path "' + arg +
'" encountered. If this is to a local system header/library, it may '
'cause problems (local system files make sense for compiling natively '
'on your system, but not necessarily to JavaScript).')
elif check_flag('--emrun'):
options.emrun = True
elif check_flag('--cpuprofiler'):
options.cpu_profiler = True
elif check_flag('--threadprofiler'):
options.thread_profiler = True
settings_changes.append('PTHREADS_PROFILING=1')
elif arg == '-fno-exceptions':
settings.DISABLE_EXCEPTION_CATCHING = 1
settings.DISABLE_EXCEPTION_THROWING = 1
settings.EXCEPTION_HANDLING = 0
elif arg == '-fexceptions':
eh_enabled = True
elif arg == '-fwasm-exceptions':
wasm_eh_enabled = True
elif arg == '-fignore-exceptions':
settings.DISABLE_EXCEPTION_CATCHING = 1
elif check_arg('--default-obj-ext'):
options.default_object_extension = consume_arg()
if not options.default_object_extension.startswith('.'):
options.default_object_extension = '.' + options.default_object_extension
elif arg == '-fsanitize=cfi':
options.cfi = True
elif check_arg('--output_eol'):
style = consume_arg()
if style.lower() == 'windows':
options.output_eol = '\r\n'
elif style.lower() == 'linux':
options.output_eol = '\n'
else:
exit_with_error('Invalid value "' + style + '" to --output_eol!')
elif check_arg('--generate-config'):
optarg = consume_arg()
path = os.path.expanduser(optarg)
if os.path.exists(path):
exit_with_error('File ' + optarg + ' passed to --generate-config already exists!')
else:
config.generate_config(optarg)
should_exit = True
# Record USE_PTHREADS setting because it controls whether --shared-memory is passed to lld
elif arg == '-pthread':
settings_changes.append('USE_PTHREADS=1')
elif arg in ('-fno-diagnostics-color', '-fdiagnostics-color=never'):
colored_logger.disable()
diagnostics.color_enabled = False
elif arg == '-fno-rtti':
settings.USE_RTTI = 0
elif arg == '-frtti':
settings.USE_RTTI = 1
elif arg.startswith('-jsD'):
key = arg[4:]
if '=' in key:
key, value = key.split('=')
else:
value = '1'
if key in settings.keys():
exit_with_error(arg + ': cannot change built-in settings values with a -jsD directive. Pass -s ' + key + '=' + value + ' instead!')
user_js_defines += [(key, value)]
newargs[i] = ''
elif check_flag('-shared'):
options.shared = True
elif check_flag('-r'):
options.relocatable = True
elif check_arg('-o'):
options.output_file = consume_arg()
elif arg.startswith('-o'):
options.output_file = arg[2:]
newargs[i] = ''
elif arg == '-mllvm':
# Ignore the next argument rather than trying to parse it. This is needed
# because llvm args could, for example, start with `-o` and we don't want
# to confuse that with a normal `-o` flag.
skip = True
if should_exit:
sys.exit(0)
# TODO Currently -fexceptions only means Emscripten EH. Switch to wasm
# exception handling by default for -fexceptions once wasm exception
# handling becomes stable.
if wasm_eh_enabled:
settings.EXCEPTION_HANDLING = 1
settings.DISABLE_EXCEPTION_THROWING = 1
settings.DISABLE_EXCEPTION_CATCHING = 1
elif eh_enabled:
settings.EXCEPTION_HANDLING = 0
settings.DISABLE_EXCEPTION_THROWING = 0
settings.DISABLE_EXCEPTION_CATCHING = 0
newargs = [a for a in newargs if a]
return options, settings_changes, user_js_defines, newargs
def do_binaryen(target, options, wasm_target):
global final_js
logger.debug('using binaryen')
if settings.GENERATE_SOURCE_MAP and not settings.SOURCE_MAP_BASE:
logger.warning("Wasm source map won't be usable in a browser without --source-map-base")
# whether we need to emit -g (function name debug info) in the final wasm
debug_info = settings.DEBUG_LEVEL >= 2 or options.profiling_funcs
# whether we need to emit -g in the intermediate binaryen invocations (but not necessarily at the very end).
# this is necessary for emitting a symbol map at the end.
intermediate_debug_info = bool(debug_info or options.emit_symbol_map or settings.ASYNCIFY_ONLY or settings.ASYNCIFY_REMOVE or settings.ASYNCIFY_ADD)
# note that wasm-ld can strip DWARF info for us too (--strip-debug), but it
# also strips the Names section. so to emit just the Names section we don't
# tell wasm-ld to strip anything, and we do it here.
strip_debug = settings.DEBUG_LEVEL < 3
strip_producers = not settings.EMIT_PRODUCERS_SECTION
# run wasm-opt if we have work for it: either passes, or if we are using
# source maps (which requires some extra processing to keep the source map
# but remove DWARF)
passes = get_binaryen_passes()
if passes or settings.GENERATE_SOURCE_MAP:
# if we need to strip certain sections, and we have wasm-opt passes
# to run anyhow, do it with them.
if strip_debug:
passes += ['--strip-debug']
if strip_producers:
passes += ['--strip-producers']
building.save_intermediate(wasm_target, 'pre-byn.wasm')
building.run_wasm_opt(wasm_target,
wasm_target,
args=passes,
debug=intermediate_debug_info)
elif strip_debug or strip_producers:
# we are not running wasm-opt. if we need to strip certain sections
# then do so using llvm-objcopy which is fast and does not rewrite the
# code (which is better for debug info)
building.save_intermediate(wasm_target, 'pre-strip.wasm')
building.strip(wasm_target, wasm_target, debug=strip_debug, producers=strip_producers)
if settings.EVAL_CTORS:
building.save_intermediate(wasm_target, 'pre-ctors.wasm')
building.eval_ctors(final_js, wasm_target, debug_info=intermediate_debug_info)
# after generating the wasm, do some final operations
# Add extra dylibs if needed.
if settings.RUNTIME_LINKED_LIBS:
webassembly.update_dylink_section(wasm_target, settings.RUNTIME_LINKED_LIBS)
if settings.EMIT_EMSCRIPTEN_METADATA:
diagnostics.warning('deprecated', 'We hope to remove support for EMIT_EMSCRIPTEN_METADATA. See https://github.com/emscripten-core/emscripten/issues/12231')
webassembly.add_emscripten_metadata(wasm_target)
if final_js:
if settings.SUPPORT_BIG_ENDIAN:
final_js = building.little_endian_heap(final_js)
# >=2GB heap support requires pointers in JS to be unsigned. rather than
# require all pointers to be unsigned by default, which increases code size
# a little, keep them signed, and just unsign them here if we need that.
if settings.CAN_ADDRESS_2GB:
final_js = building.use_unsigned_pointers_in_js(final_js)
# pthreads memory growth requires some additional JS fixups.
# note that we must do this after handling of unsigned pointers. unsigning
# adds some >>> 0 things, while growth will replace a HEAP8 with a call to
# a method to get the heap, and that call would not be recognized by the
# unsigning pass
if settings.USE_PTHREADS and settings.ALLOW_MEMORY_GROWTH:
final_js = building.apply_wasm_memory_growth(final_js)
if settings.USE_ASAN:
final_js = building.instrument_js_for_asan(final_js)
if settings.SAFE_HEAP:
final_js = building.instrument_js_for_safe_heap(final_js)
if settings.OPT_LEVEL >= 2 and settings.DEBUG_LEVEL <= 2:
# minify the JS. Do not minify whitespace if Closure is used, so that
# Closure can print out readable error messages (Closure will then
# minify whitespace afterwards)
save_intermediate_with_wasm('preclean', wasm_target)
final_js = building.minify_wasm_js(js_file=final_js,
wasm_file=wasm_target,
expensive_optimizations=will_metadce(),
minify_whitespace=minify_whitespace() and not options.use_closure_compiler,
debug_info=intermediate_debug_info)
save_intermediate_with_wasm('postclean', wasm_target)
if settings.ASYNCIFY_LAZY_LOAD_CODE:
building.asyncify_lazy_load_code(wasm_target, debug=intermediate_debug_info)
def preprocess_wasm2js_script():
return read_and_preprocess(shared.path_from_root('src', 'wasm2js.js'), expand_macros=True)
def run_closure_compiler():
global final_js
final_js = building.closure_compiler(final_js, pretty=not minify_whitespace(),
extra_closure_args=options.closure_args)
save_intermediate_with_wasm('closure', wasm_target)
if final_js and options.use_closure_compiler:
run_closure_compiler()
symbols_file = shared.replace_or_append_suffix(target, '.symbols') if options.emit_symbol_map else None
if settings.WASM2JS:
if settings.WASM == 2:
wasm2js_template = wasm_target + '.js'
open(wasm2js_template, 'w').write(preprocess_wasm2js_script())
# generate secondary file for JS symbols
symbols_file_js = shared.replace_or_append_suffix(wasm2js_template, '.symbols') if options.emit_symbol_map else None
else:
wasm2js_template = final_js
symbols_file_js = shared.replace_or_append_suffix(target, '.symbols') if options.emit_symbol_map else None
wasm2js = building.wasm2js(wasm2js_template,
wasm_target,
opt_level=settings.OPT_LEVEL,
minify_whitespace=minify_whitespace(),
use_closure_compiler=options.use_closure_compiler,
debug_info=debug_info,
symbols_file=symbols_file,
symbols_file_js=symbols_file_js)
shared.configuration.get_temp_files().note(wasm2js)
if settings.WASM == 2:
safe_copy(wasm2js, wasm2js_template)
if settings.WASM != 2:
final_js = wasm2js
# if we only target JS, we don't need the wasm any more
shared.try_delete(wasm_target)
save_intermediate('wasm2js')
# emit the final symbols, either in the binary or in a symbol map.
# this will also remove debug info if we only kept it around in the intermediate invocations.
# note that if we aren't emitting a binary (like in wasm2js) then we don't
# have anything to do here.
if options.emit_symbol_map and os.path.exists(wasm_target):
building.handle_final_wasm_symbols(wasm_file=wasm_target, symbols_file=symbols_file, debug_info=debug_info)
save_intermediate_with_wasm('symbolmap', wasm_target)
if settings.DEBUG_LEVEL >= 3 and settings.SEPARATE_DWARF and os.path.exists(wasm_target):
building.emit_debug_on_side(wasm_target, settings.SEPARATE_DWARF)
if settings.WASM2C:
wasm2c.do_wasm2c(wasm_target)
# replace placeholder strings with correct subresource locations
if final_js and settings.SINGLE_FILE and not settings.WASM2JS:
js = open(final_js).read()
if settings.MINIMAL_RUNTIME:
js = do_replace(js, '<<< WASM_BINARY_DATA >>>', base64_encode(open(wasm_target, 'rb').read()))
else:
js = do_replace(js, '<<< WASM_BINARY_FILE >>>', shared.JS.get_subresource_location(wasm_target))
shared.try_delete(wasm_target)
with open(final_js, 'w') as f:
f.write(js)
def modularize():
global final_js
logger.debug('Modularizing, assigning to var ' + settings.EXPORT_NAME)
src = open(final_js).read()
return_value = settings.EXPORT_NAME
if settings.WASM_ASYNC_COMPILATION:
return_value += '.ready'
if not settings.EXPORT_READY_PROMISE:
return_value = '{}'
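# With async wasm compilation the factory returns the module's ready promise;
# if the ready promise is not exported, return an empty object instead.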
src = '''
function(%(EXPORT_NAME)s) {
%(EXPORT_NAME)s = %(EXPORT_NAME)s || {};
%(src)s
return %(return_value)s
}
''' % {
'EXPORT_NAME': settings.EXPORT_NAME,
'src': src,
'return_value': return_value
}
if settings.MINIMAL_RUNTIME and not settings.USE_PTHREADS:
# Single threaded MINIMAL_RUNTIME programs do not need access to
# document.currentScript, so a simple export declaration is enough.
src = 'var %s=%s' % (settings.EXPORT_NAME, src)
else:
script_url_node = ""
# When MODULARIZE is used, this JS may be executed later,
# after document.currentScript is gone, so we save it.
# In EXPORT_ES6 + USE_PTHREADS the 'thread' is actually an ES6 module webworker running in strict mode,
# so it doesn't have access to 'document'. In this case use 'import.meta' instead.
if settings.EXPORT_ES6 and settings.USE_ES6_IMPORT_META:
script_url = "import.meta.url"
else:
script_url = "typeof document !== 'undefined' && document.currentScript ? document.currentScript.src : undefined"
if shared.target_environment_may_be('node'):
script_url_node = "if (typeof __filename !== 'undefined') _scriptDir = _scriptDir || __filename;"
src = '''
var %(EXPORT_NAME)s = (function() {
var _scriptDir = %(script_url)s;
%(script_url_node)s
return (%(src)s);
})();
''' % {
'EXPORT_NAME': settings.EXPORT_NAME,
'script_url': script_url,
'script_url_node': script_url_node,
'src': src
}
final_js += '.modular.js'
with open(final_js, 'w') as f:
f.write(src)
# Export using a UMD style export, or ES6 exports if selected
if settings.EXPORT_ES6:
f.write('export default %s;' % settings.EXPORT_NAME)
elif not settings.MINIMAL_RUNTIME:
f.write('''\
if (typeof exports === 'object' && typeof module === 'object')
module.exports = %(EXPORT_NAME)s;
else if (typeof define === 'function' && define['amd'])
define([], function() { return %(EXPORT_NAME)s; });
else if (typeof exports === 'object')
exports["%(EXPORT_NAME)s"] = %(EXPORT_NAME)s;
''' % {'EXPORT_NAME': settings.EXPORT_NAME})
shared.configuration.get_temp_files().note(final_js)
save_intermediate('modularized')
def module_export_name_substitution():
global final_js
logger.debug('Private module export name substitution with ' + settings.EXPORT_NAME)
with open(final_js) as f:
src = f.read()
final_js += '.module_export_name_substitution.js'
if settings.MINIMAL_RUNTIME:
# In MINIMAL_RUNTIME the Module object is always present to provide the .asm.js/.wasm content
replacement = settings.EXPORT_NAME
else:
replacement = "typeof %(EXPORT_NAME)s !== 'undefined' ? %(EXPORT_NAME)s : {}" % {"EXPORT_NAME": settings.EXPORT_NAME}
src = re.sub(r'{\s*[\'"]?__EMSCRIPTEN_PRIVATE_MODULE_EXPORT_NAME_SUBSTITUTION__[\'"]?:\s*1\s*}', replacement, src)
# For Node.js and other shell environments, create an unminified Module object so that
# loading an external .asm.js file that assigns to Module['asm'] works even when Closure is used.
if settings.MINIMAL_RUNTIME and (shared.target_environment_may_be('node') or shared.target_environment_may_be('shell')):
src = 'if(typeof Module==="undefined"){var Module={};}\n' + src
with open(final_js, 'w') as f:
f.write(src)
shared.configuration.get_temp_files().note(final_js)
save_intermediate('module_export_name_substitution')
def generate_traditional_runtime_html(target, options, js_target, target_basename,
wasm_target, memfile):
script = ScriptSource()
shell = read_and_preprocess(options.shell_path)
assert '{{{ SCRIPT }}}' in shell, 'HTML shell must contain {{{ SCRIPT }}} , see src/shell.html for an example'
base_js_target = os.path.basename(js_target)
if settings.PROXY_TO_WORKER:
proxy_worker_filename = (settings.PROXY_TO_WORKER_FILENAME or target_basename) + '.js'
worker_js = worker_js_script(proxy_worker_filename)
script.inline = ('''
var filename = '%s';
if ((',' + window.location.search.substr(1) + ',').indexOf(',noProxy,') < 0) {
console.log('running code in a web worker');
''' % shared.JS.get_subresource_location(proxy_worker_filename)) + worker_js + '''
} else {
console.log('running code on the main thread');
var fileBytes = tryParseAsDataURI(filename);
var script = document.createElement('script');
if (fileBytes) {
script.innerHTML = intArrayToString(fileBytes);
} else {
script.src = filename;
}
document.body.appendChild(script);
}
'''
else:
# Normal code generation path
script.src = base_js_target
if not settings.SINGLE_FILE:
if memfile and not settings.MINIMAL_RUNTIME:
# start to load the memory init file in the HTML, in parallel with the JS
script.un_src()
script.inline = ('''
var memoryInitializer = '%s';
memoryInitializer = Module['locateFile'] ? Module['locateFile'](memoryInitializer, '') : memoryInitializer;
Module['memoryInitializerRequestURL'] = memoryInitializer;
var meminitXHR = Module['memoryInitializerRequest'] = new XMLHttpRequest();
meminitXHR.open('GET', memoryInitializer, true);
meminitXHR.responseType = 'arraybuffer';
meminitXHR.send(null);
''' % shared.JS.get_subresource_location(memfile)) + script.inline
if not settings.WASM_ASYNC_COMPILATION:
# We need to load the wasm file before anything else, it has to be synchronously ready TODO: optimize
script.un_src()
script.inline = '''
var wasmURL = '%s';
var wasmXHR = new XMLHttpRequest();
wasmXHR.open('GET', wasmURL, true);
wasmXHR.responseType = 'arraybuffer';
wasmXHR.onload = function() {
if (wasmXHR.status === 200 || wasmXHR.status === 0) {
Module.wasmBinary = wasmXHR.response;
} else {
var wasmURLBytes = tryParseAsDataURI(wasmURL);
if (wasmURLBytes) {
Module.wasmBinary = wasmURLBytes.buffer;
}
}
%s
};
wasmXHR.send(null);
''' % (shared.JS.get_subresource_location(wasm_target), script.inline)
if settings.WASM == 2:
# If target browser does not support WebAssembly, we need to load the .wasm.js file before the main .js file.
script.un_src()
script.inline = '''
function loadMainJs() {
%s
}
if (!window.WebAssembly || location.search.indexOf('_rwasm=0') > 0) {
// Current browser does not support WebAssembly, load the .wasm.js JavaScript fallback
// before the main JS runtime.
var wasm2js = document.createElement('script');
wasm2js.src = '%s';
wasm2js.onload = loadMainJs;
document.body.appendChild(wasm2js);
} else {
// Current browser supports Wasm, proceed with loading the main JS runtime.
loadMainJs();
}
''' % (script.inline, shared.JS.get_subresource_location(wasm_target) + '.js')
# when script.inline isn't empty, add required helper functions such as tryParseAsDataURI
if script.inline:
for filename in ('arrayUtils.js', 'base64Utils.js', 'URIUtils.js'):
content = read_and_preprocess(shared.path_from_root('src', filename))
script.inline = content + script.inline
script.inline = 'var ASSERTIONS = %s;\n%s' % (settings.ASSERTIONS, script.inline)
# inline script for SINGLE_FILE output
if settings.SINGLE_FILE:
js_contents = script.inline or ''
if script.src:
js_contents += open(js_target).read()
shared.try_delete(js_target)
script.src = None
script.inline = js_contents
html_contents = do_replace(shell, '{{{ SCRIPT }}}', script.replacement())
html_contents = tools.line_endings.convert_line_endings(html_contents, '\n', options.output_eol)
try:
with open(target, 'wb') as f:
# Force UTF-8 output for consistency across platforms and with the web.
f.write(html_contents.encode('utf-8'))
except OSError as e:
exit_with_error(f'cannot write output file: {e}')
def minify_html(filename):
if settings.DEBUG_LEVEL >= 2:
return
opts = []
# -g1 and greater retain whitespace and comments in source
if settings.DEBUG_LEVEL == 0:
opts += ['--collapse-whitespace',
'--collapse-inline-tag-whitespace',
'--remove-comments',
'--remove-tag-whitespace',
'--sort-attributes',
'--sort-class-name']
# -g2 and greater do not minify HTML at all
if settings.DEBUG_LEVEL <= 1:
opts += ['--decode-entities',
'--collapse-boolean-attributes',
'--remove-attribute-quotes',
'--remove-redundant-attributes',
'--remove-script-type-attributes',
'--remove-style-link-type-attributes',
'--use-short-doctype',
'--minify-css', 'true',
'--minify-js', 'true']
# html-minifier also has the following options, but they look unsafe for use:
# '--remove-optional-tags': removes e.g. <head></head> and <body></body> tags from the page.
# (Breaks at least browser.test_sdl2glshader)
# '--remove-empty-attributes': removes all attributes with whitespace-only values.
# (Breaks at least browser.test_asmfs_hello_file)
# '--remove-empty-elements': removes all elements with empty contents.
# (Breaks at least browser.test_asm_swapping)
logger.debug('minifying HTML file ' + filename)
size_before = os.path.getsize(filename)
start_time = time.time()
shared.check_call(shared.get_npm_cmd('html-minifier-terser') + [filename, '-o', filename] + opts, env=shared.env_with_node_in_path())
elapsed_time = time.time() - start_time
size_after = os.path.getsize(filename)
delta = size_after - size_before
  logger.debug(
      'HTML minification took {:.2f} seconds, and shrunk size of {} from {} to {} bytes, '
      'delta={} ({:+.2f}%)'.format(elapsed_time, filename, size_before, size_after,
                                   delta, delta * 100.0 / size_before))
def generate_html(target, options, js_target, target_basename,
wasm_target, memfile):
logger.debug('generating HTML')
if settings.EXPORT_NAME != 'Module' and \
not settings.MINIMAL_RUNTIME and \
options.shell_path == shared.path_from_root('src', 'shell.html'):
# the minimal runtime shell HTML is designed to support changing the export
# name, but the normal one does not support that currently
exit_with_error('Customizing EXPORT_NAME requires that the HTML be customized to use that name (see https://github.com/emscripten-core/emscripten/issues/10086)')
if settings.MINIMAL_RUNTIME:
generate_minimal_runtime_html(target, options, js_target, target_basename)
else:
generate_traditional_runtime_html(target, options, js_target, target_basename,
wasm_target, memfile)
if settings.MINIFY_HTML and (settings.OPT_LEVEL >= 1 or settings.SHRINK_LEVEL >= 1):
minify_html(target)
def generate_worker_js(target, js_target, target_basename):
# compiler output is embedded as base64
if settings.SINGLE_FILE:
proxy_worker_filename = shared.JS.get_subresource_location(js_target)
# compiler output goes in .worker.js file
else:
move_file(js_target, unsuffixed(js_target) + '.worker.js')
worker_target_basename = target_basename + '.worker'
proxy_worker_filename = (settings.PROXY_TO_WORKER_FILENAME or worker_target_basename) + '.js'
target_contents = worker_js_script(proxy_worker_filename)
  with open(target, 'w') as f:
    f.write(target_contents)
def worker_js_script(proxy_worker_filename):
web_gl_client_src = open(shared.path_from_root('src', 'webGLClient.js')).read()
idb_store_src = open(shared.path_from_root('src', 'IDBStore.js')).read()
proxy_client_src = open(shared.path_from_root('src', 'proxyClient.js')).read()
proxy_client_src = do_replace(proxy_client_src, '{{{ filename }}}', proxy_worker_filename)
proxy_client_src = do_replace(proxy_client_src, '{{{ IDBStore.js }}}', idb_store_src)
return web_gl_client_src + '\n' + proxy_client_src
def process_libraries(libs, lib_dirs, linker_inputs):
libraries = []
consumed = []
suffixes = STATICLIB_ENDINGS + DYNAMICLIB_ENDINGS
# Find library files
for i, lib in libs:
logger.debug('looking for library "%s"', lib)
found = False
for suff in suffixes:
name = 'lib' + lib + suff
for lib_dir in lib_dirs:
path = os.path.join(lib_dir, name)
if os.path.exists(path):
logger.debug('found library "%s" at %s', lib, path)
linker_inputs.append((i, path))
consumed.append(i)
found = True
break
if found:
break
if not found:
jslibs = building.map_to_js_libs(lib)
if jslibs is not None:
libraries += [(i, jslib) for jslib in jslibs]
consumed.append(i)
elif building.map_and_apply_to_settings(lib):
consumed.append(i)
settings.SYSTEM_JS_LIBRARIES += libraries
# At this point processing SYSTEM_JS_LIBRARIES is finished, no more items will be added to it.
# Sort the input list from (order, lib_name) pairs to a flat array in the right order.
settings.SYSTEM_JS_LIBRARIES.sort(key=lambda lib: lib[0])
settings.SYSTEM_JS_LIBRARIES = [lib[1] for lib in settings.SYSTEM_JS_LIBRARIES]
return consumed
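# A minimal, self-contained sketch of the library search above (illustration only;
# this helper is never called from the driver and the suffix list is an assumption):
def _example_find_library(lib, lib_dirs, suffixes=('.a', '.so')):
  for suffix in suffixes:
    name = 'lib' + lib + suffix
    for lib_dir in lib_dirs:
      path = os.path.join(lib_dir, name)
      if os.path.exists(path):
        return path
  return None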
class ScriptSource:
def __init__(self):
self.src = None # if set, we have a script to load with a src attribute
self.inline = None # if set, we have the contents of a script to write inline in a script
def un_src(self):
"""Use this if you want to modify the script and need it to be inline."""
if self.src is None:
return
self.inline = '''
var script = document.createElement('script');
script.src = "%s";
document.body.appendChild(script);
''' % self.src
self.src = None
def replacement(self):
"""Returns the script tag to replace the {{{ SCRIPT }}} tag in the target"""
assert (self.src or self.inline) and not (self.src and self.inline)
if self.src:
return '<script async type="text/javascript" src="%s"></script>' % quote(self.src)
else:
return '<script>\n%s\n</script>' % self.inline
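# Minimal usage sketch of ScriptSource (illustration only; this helper is not
# called by the driver):
def _example_script_source_usage():
  script = ScriptSource()
  script.src = 'a.out.js'
  src_tag = script.replacement()   # <script async ... src="a.out.js"></script>
  script.un_src()                  # switch to an inline loader that appends a script element
  inline_tag = script.replacement()
  return src_tag, inline_tag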
def is_valid_abspath(options, path_name):
# Any path that is underneath the emscripten repository root must be ok.
if shared.path_from_root().replace('\\', '/') in path_name.replace('\\', '/'):
return True
def in_directory(root, child):
# make both path absolute
root = os.path.realpath(root)
child = os.path.realpath(child)
# return true, if the common prefix of both is equal to directory
# e.g. /a/b/c/d.rst and directory is /a/b, the common prefix is /a/b
return os.path.commonprefix([root, child]) == root
for valid_abspath in options.valid_abspaths:
if in_directory(valid_abspath, path_name):
return True
return False
def parse_value(text, expect_list):
# Note that using response files can introduce whitespace, if the file
# has a newline at the end. For that reason, we rstrip() in relevant
# places here.
def parse_string_value(text):
first = text[0]
if first == "'" or first == '"':
text = text.rstrip()
assert text[-1] == text[0] and len(text) > 1, 'unclosed opened quoted string. expected final character to be "%s" and length to be greater than 1 in "%s"' % (text[0], text)
return text[1:-1]
return text
def parse_string_list_members(text):
sep = ','
values = text.split(sep)
result = []
index = 0
while True:
current = values[index].lstrip() # Cannot safely rstrip for cases like: "HERE-> ,"
if not len(current):
exit_with_error('string array should not contain an empty value')
first = current[0]
if not(first == "'" or first == '"'):
result.append(current.rstrip())
else:
start = index
while True: # Continue until closing quote found
if index >= len(values):
exit_with_error("unclosed quoted string. expected final character to be '%s' in '%s'" % (first, values[start]))
new = values[index].rstrip()
if new and new[-1] == first:
if start == index:
result.append(current.rstrip()[1:-1])
else:
result.append((current + sep + new)[1:-1])
break
else:
current += sep + values[index]
index += 1
index += 1
if index >= len(values):
break
return result
def parse_string_list(text):
text = text.rstrip()
if text and text[0] == '[':
if text[-1] != ']':
exit_with_error('unclosed opened string list. expected final character to be "]" in "%s"' % (text))
text = text[1:-1]
if text.strip() == "":
return []
return parse_string_list_members(text)
if expect_list or (text and text[0] == '['):
# if json parsing fails, we fall back to our own parser, which can handle a few
# simpler syntaxes
try:
return json.loads(text)
except ValueError:
return parse_string_list(text)
try:
return int(text)
except ValueError:
return parse_string_value(text)
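# Rough behaviour of parse_value (comment sketch only), following the helpers above:
#   parse_value('3', expect_list=False)            -> 3
#   parse_value('foo', expect_list=False)          -> 'foo'
#   parse_value('["a", "b,c"]', expect_list=True)  -> ['a', 'b,c']   (valid JSON path)
#   parse_value('a, b', expect_list=True)          -> ['a', 'b']     (fallback parser)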
def validate_arg_level(level_string, max_level, err_msg, clamp=False):
try:
level = int(level_string)
except ValueError:
raise Exception(err_msg)
if clamp:
if level > max_level:
logger.warning("optimization level '-O" + level_string + "' is not supported; using '-O" + str(max_level) + "' instead")
level = max_level
if not 0 <= level <= max_level:
raise Exception(err_msg)
return level
def is_int(s):
try:
int(s)
return True
except ValueError:
return False
def main(args):
start_time = time.time()
ret = run(args)
logger.debug('total time: %.2f seconds', (time.time() - start_time))
return ret
if __name__ == '__main__':
try:
sys.exit(main(sys.argv))
except KeyboardInterrupt:
logger.warning('KeyboardInterrupt')
sys.exit(1)
| 40.788358 | 379 | 0.68778 |
d0ffe439a5100948d7cf1c382079496b47f642cf | 9,047 | py | Python | examples/imagenet_resnet.py | DMALab/TSplit | 8f86f987163aa06521bfeeb174616eb4a0a81b47 | [
"Apache-2.0"
] | 2 | 2021-05-29T11:18:14.000Z | 2021-09-09T14:29:21.000Z | examples/imagenet_resnet.py | DMALab/TSplit | 8f86f987163aa06521bfeeb174616eb4a0a81b47 | [
"Apache-2.0"
] | null | null | null | examples/imagenet_resnet.py | DMALab/TSplit | 8f86f987163aa06521bfeeb174616eb4a0a81b47 | [
"Apache-2.0"
] | 1 | 2021-05-01T16:34:37.000Z | 2021-05-01T16:34:37.000Z | import numpy as np
from athena import ndarray
from athena import gpu_ops as ad
from athena.microopOptimizer import microopOptimizer
from athena.microopPlanner import microopPlanner
import time
import argparse
executor_ctx = ndarray.gpu(0)
variable_list = []
val_list = []
rand = np.random.RandomState(seed=123)
def get_variable(name, size):
global variable_list, val_list
x = ad.Variable(name=name)
x_val = rand.normal(scale=0.1, size=size)
x_val = ndarray.array(x_val, ctx=executor_ctx)
variable_list.append(x)
val_list.append(x_val)
return x
def conv2d_1_1(x, in_channel, out_channel, stride=1, padding=1, name=''):
x = ad.conv2d_op(x, get_variable(name + '_weight', (out_channel, in_channel, 1, 1)), stride=stride, padding=padding, For_ResNet=True)
return x
def conv2d_3_3(x, in_channel, out_channel, stride=1, padding=1, name=''):
x = ad.conv2d_op(x, get_variable(name + '_weight', (out_channel, in_channel, 3, 3)), stride=stride, padding=padding, For_ResNet=True)
return x
def conv2d_7_7(x, in_channel, out_channel, stride=1, padding=1, name=''):
x = ad.conv2d_op(x, get_variable(name + '_weight', (out_channel, in_channel, 7, 7)), stride=stride, padding=padding, For_ResNet=True)
return x
def batch_norm_with_relu(x, hidden, name):
x = ad.batch_normalization_op(x, get_variable(name + '_scale', (1, hidden, 1, 1)),
get_variable(name + '_bias', (1, hidden, 1, 1)))
x = ad.relu_op(x)
return x
def resnet_block_large(x, in_channel, out_channel, num_blocks, is_first=False, name=''):
if is_first:
        identity = conv2d_1_1(x, in_channel, out_channel, stride=1, padding=0, name=name + '_conv0')
        identity = batch_norm_with_relu(identity, out_channel, name + '_bn0')
        x = conv2d_1_1(x, in_channel, out_channel / 4, stride=1, padding=0, name=name + '_conv1')
        x = batch_norm_with_relu(x, out_channel / 4, name + '_bn1')
        x = conv2d_3_3(x, out_channel / 4, out_channel / 4, stride=1, padding=1, name=name + '_conv2')
        x = batch_norm_with_relu(x, out_channel / 4, name + '_bn2')
        x = conv2d_1_1(x, out_channel / 4, out_channel, stride=1, padding=0, name=name + '_conv3')
        x = batch_norm_with_relu(x, out_channel, name + 'bn_3')
        x = x + identity
else:
identity = conv2d_1_1(x, in_channel, out_channel, stride=2, padding=0, name=name + '_conv0')
identity = batch_norm_with_relu(identity, out_channel, name + '_bn0')
x = conv2d_1_1(x, in_channel, out_channel / 4, stride=1, padding=0, name=name + '_conv1')
x = batch_norm_with_relu(x, out_channel / 4, name + '_bn1')
x = conv2d_3_3(x, out_channel / 4 , out_channel / 4, stride=2, padding=1, name=name + '_conv2')
x = batch_norm_with_relu(x, out_channel / 4, name + '_bn2')
x = conv2d_1_1(x, out_channel / 4, out_channel, stride=1, padding=0, name=name + '_conv3')
x = batch_norm_with_relu(x, out_channel, name + '_bn3')
x = x + identity
for i in range(1, num_blocks):
identity = x
x = conv2d_1_1(x, out_channel, out_channel / 4, stride=1, padding=0, name=name + '_conv%d' % (3 * i + 1))
x = batch_norm_with_relu(x, out_channel / 4, name + '_bn%d' % (3 * i + 1))
x = conv2d_3_3(x, out_channel / 4, out_channel / 4, stride=1, padding=1, name=name + '_conv%d' % (3 * i + 2))
x = batch_norm_with_relu(x, out_channel / 4, name + '_bn%d' % (3 * i + 2))
x = conv2d_1_1(x, out_channel / 4, out_channel, stride=1, padding=0, name=name + '_conv%d' % (3 * i + 3))
x = batch_norm_with_relu(x, out_channel, name + '_bn%d' % (3 * i + 3))
x = x + identity
return x
def fc(x, shape, name):
x = ad.matmul_op(x, get_variable(name + '_weight', shape))
return x
def resnet_model(x, y_, num_layers=18):
'''
    ResNet model, for the ImageNet dataset.
Parameters:
x: Variable(athena.gpu_ops.Node.Node), shape (N, C, H, W)
y_: Variable(athena.gpu_ops.Node.Node), shape (N, num_classes)
        num_layers: 50 or 101
Return:
loss: Variable(athena.gpu_ops.Node.Node), shape (1,)
y: Variable(athena.gpu_ops.Node.Node), shape (N, num_classes)
'''
base_size = 64
x = conv2d_7_7(x, 3, base_size, stride=2, padding=3, name='resnet_initial_conv')
x = batch_norm_with_relu(x, base_size, 'resnet_initial_bn')
x = ad.max_pool2d_op(x, 3, 3, stride=2, padding=1)
if num_layers == 50:
# print("Building ResNet-50 model...")
x = resnet_block_large(x, base_size, 4 * 64, num_blocks=3, is_first=True, name='resnet_block1')
x = resnet_block_large(x, 4 * 64, 4 * 128, num_blocks=4, is_first=False, name='resnet_block2')
x = resnet_block_large(x, 4 * 128, 4 * 256, num_blocks=6, is_first=False, name='resnet_block3')
x = resnet_block_large(x, 4 * 256, 4 * 512, num_blocks=3, is_first=False, name='resnet_block4')
elif num_layers == 101:
# print("Building ResNet-101 model...")
x = resnet_block_large(x, base_size, 4 * 64, num_blocks=3, is_first=True, name='resnet_block1')
x = resnet_block_large(x, 4 * 64, 4 * 128, num_blocks=4, is_first=False, name='resnet_block2')
x = resnet_block_large(x, 4 * 128, 4 * 256, num_blocks=23, is_first=False, name='resnet_block3')
x = resnet_block_large(x, 4 * 256, 4 * 512, num_blocks=3, is_first=False, name='resnet_block4')
else:
assert False, "Number of layers should be 18, 34, 50 or 101 !"
x = ad.avg_pool2d_op(x, 7, 7, padding=0, stride=7)
x = ad.array_reshape_op(x, (batch_size, -1))
y = fc(x, (512 * 4, 1000), name='resnet_final_fc')
# here we don't use cudnn for softmax crossentropy to avoid overflows
loss = ad.softmaxcrossentropy_op(y, y_)
return loss, y
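# Shape walk-through for num_layers=50 (reference only; N is the batch size):
#   (N, 3, 224, 224) -> 7x7 conv, stride 2 -> (N, 64, 112, 112)
#   -> 3x3 max pool, stride 2 -> (N, 64, 56, 56)
#   -> block1..block4 -> (N, 2048, 7, 7)
#   -> 7x7 avg pool -> (N, 2048, 1, 1) -> reshape -> (N, 2048) -> fc -> (N, 1000)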
def resnet(batch_size, num_layers, policy = "None"):
global variable_list, val_list
variable_list = []
val_list = []
X = ad.Variable(name='X')
X_val = np.empty(shape=(batch_size, 3, 224, 224), dtype=np.float32)
# X_val = ndarray.array(X_val, ctx=executor_ctx)
y_ = ad.Variable(name='y_')
y_val = np.empty(shape=(batch_size, 1000), dtype=np.float32)
# y_val = ndarray.array(y_val, ctx=executor_ctx)
loss, y = resnet_model(X, y_, num_layers)
grad_list = ad.gradients(loss, variable_list)
if policy == "None" or policy == "base":
athena_exec = ad.Executor
elif policy == "vdnnconv" or policy == "vdnnall":
athena_exec = ad.vdnnExecutor
elif policy == "superneurons":
athena_exec = ad.superNeuronsExecutor
elif policy == "recompute_memory" or policy == "recompute_speed":
athena_exec = ad.recomputeExecutor
elif policy == "simulator":
athena_exec = microopOptimizer
elif policy == "profiler":
athena_exec = ad.profileExecutor
elif policy == "planner":
athena_exec = microopPlanner
elif policy == "tsplit":
athena_exec = ad.microopExecutor
else:
raise NotImplementedError
if policy == "vdnnconv":
executor = athena_exec([loss] + grad_list + [y], ctx=executor_ctx, policy = "conv")
elif policy == "vdnnall":
executor = athena_exec([loss] + grad_list + [y], ctx=executor_ctx, policy = "all")
elif policy == "recompute_memory":
executor = athena_exec([loss] + grad_list + [y], ctx=executor_ctx, policy = "memory")
elif policy == "recompute_speed":
executor = athena_exec([loss] + grad_list + [y], ctx=executor_ctx, policy = "speed")
else:
executor = athena_exec([loss] + grad_list + [y], ctx=executor_ctx)
feed_dict = dict()
feed_dict[X] = X_val
feed_dict[y_] = y_val
for i in range(len(variable_list)):
feed_dict[variable_list[i]] = val_list[i]
for i in range(3):
if i == 1:
start = time.time()
grad_val_list = executor.run(feed_dict)
end = time.time()
return (end - start) / 2
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Demo of argparse")
parser.add_argument('-l','--layer', type=int, default=0)
parser.add_argument('-p','--policy', default='None')
args = parser.parse_args()
policy = args.policy
layer = args.layer
# batch_size = 590
# execution_time = resnet(batch_size, layer, policy = policy)
# print("Batch size: {} , time: {} s\n".format(batch_size, execution_time))
# output_file_name = "/home/xiaonan/microop/Athena/exp/" + "resnet" + str(layer) + "/" + policy + "_batchsize_with_time.txt"
# output_file = open(output_file_name, "a+", buffering=1)
# output_file.write("Policy: {}, on ResNet{}\n".format(policy, layer))
# for batch_size in range(32, 2000, 32):
# execution_time = resnet(batch_size, layer, policy = policy)
# print("Batch size: {} , time: {} s\n".format(batch_size, execution_time))
# output_file.write("Batch size: {} , time: {} s\n".format(batch_size, execution_time))
# output_file.close()
| 43.917476 | 137 | 0.644965 |
c561c60814830df21204d975ebd5334913b04625 | 452 | py | Python | app/main/model/example.py | Eliotdoesprogramming/python.flask.sqlalchemy.Rest_Api_Template | 3f0a98ae4676aef9ecdf0df70eb9d1990fee6182 | [
"MIT"
] | null | null | null | app/main/model/example.py | Eliotdoesprogramming/python.flask.sqlalchemy.Rest_Api_Template | 3f0a98ae4676aef9ecdf0df70eb9d1990fee6182 | [
"MIT"
] | null | null | null | app/main/model/example.py | Eliotdoesprogramming/python.flask.sqlalchemy.Rest_Api_Template | 3f0a98ae4676aef9ecdf0df70eb9d1990fee6182 | [
"MIT"
] | null | null | null | #Example model
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
#single instance of SQLAlchemy and Marshmallow
from model import db,ma
class Example(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(100), unique=True)
def __init__(self,name) -> None:
self.name=name
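# Minimal usage sketch (illustration only; assumes `db` is initialised against a
# Flask app elsewhere and an application context is active):
#   db.session.add(Example('first entry'))
#   db.session.commit()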
#Example Schema
class ExampleSchema(ma.Schema):
class Meta:
fields = ('id','name') | 22.6 | 49 | 0.721239 |
381009228a8252bd40e6701ceb78dc83ac57ba99 | 2,413 | py | Python | src/bilder/components/hashicorp/consul/steps.py | mitodl/ol-infrastructure | f09912e39ff280575964a4df7c004fde58912636 | [
"BSD-3-Clause"
] | 25 | 2020-07-10T21:05:43.000Z | 2022-03-09T03:55:30.000Z | src/bilder/components/hashicorp/consul/steps.py | mitodl/ol-infrastructure | f09912e39ff280575964a4df7c004fde58912636 | [
"BSD-3-Clause"
] | 423 | 2020-06-23T18:00:43.000Z | 2022-03-31T17:44:08.000Z | src/bilder/components/hashicorp/consul/steps.py | mitodl/ol-infrastructure | f09912e39ff280575964a4df7c004fde58912636 | [
"BSD-3-Clause"
] | null | null | null | import tempfile
from pathlib import Path
from pyinfra.api import deploy
from pyinfra.operations import apt, files, systemd
from bilder.facts import has_systemd # noqa: F401
@deploy("Set up DNS proxy")
def proxy_consul_dns(state=None, host=None):
apt.packages(
name="Install Unbound for DNS proxying",
packages=["unbound"],
present=True,
update=True,
state=state,
host=host,
)
with tempfile.NamedTemporaryFile(delete=False, mode="w") as dhclient_config:
dhclient_config.write(
r'make_resolv_conf\necho "nameserver 127.0.0.1\\n$(cat /etc/resolv.conf)" '
"> /etc/resolv.conf"
)
files.put(
name="Configure dhclient to use local DNS",
dest="/etc/dhcp/dhclient-enter-hooks.d/consul",
src=dhclient_config.name,
create_remote_dir=True,
mode="0755",
state=state,
host=host,
)
# Allow hosts that default to using systemd-resolved to properly resolve Consul
# domains
if host.fact.has_systemd and host.fact.systemd_enabled["systemd-resolved.service"]:
with tempfile.NamedTemporaryFile(delete=False, mode="w") as resolved_conf:
resolved_conf.write("[Resolve]\nDNS=127.0.0.1\nDomains=~consul")
consul_resolved_config = files.put(
name="Configure systemd-resolved to resolve .consul domains locally",
dest="/etc/systemd/resolved.conf.d/consul.conf",
src=resolved_conf.name,
create_remote_dir=True,
state=state,
host=host,
)
systemd.service(
name="Enable systemd-resolved",
service="systemd-resolved",
enabled=True,
running=True,
restarted=consul_resolved_config.changed,
state=state,
host=host,
)
files.put(
name="Configure Unbound to resolve .consul domains locally",
dest="/etc/unbound/unbound.conf.d/consul.conf",
src=Path(__file__).parent.joinpath("files", "unbound_config.conf"),
create_remote_dir=True,
state=state,
host=host,
)
systemd.service(
name="Enable Unbound DNS proxy",
service="unbound",
enabled=True,
running=True,
state=state,
host=host,
)
| 33.513889 | 87 | 0.598425 |
ea5dd6948b112404c7951bc7039840c85579f74c | 19,352 | py | Python | tests/python/relay/test_op_level1.py | YSHsieh7777/tvm | b51973fb48deb34ff725bf1206f1b683f8bc2773 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 1 | 2021-12-29T00:04:56.000Z | 2021-12-29T00:04:56.000Z | tests/python/relay/test_op_level1.py | YSHsieh7777/tvm | b51973fb48deb34ff725bf1206f1b683f8bc2773 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 1 | 2017-12-09T06:30:45.000Z | 2017-12-09T22:53:23.000Z | tests/python/relay/test_op_level1.py | YSHsieh7777/tvm | b51973fb48deb34ff725bf1206f1b683f8bc2773 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 1 | 2021-02-06T01:56:20.000Z | 2021-02-06T01:56:20.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
import tvm
from tvm import te
import scipy
from tvm import relay
from tvm.relay import transform
from tvm.relay.testing import run_infer_type
import tvm.topi.testing
from tvm.contrib.nvcc import have_fp16
import tvm.testing
def sigmoid(x):
one = np.ones_like(x)
return one / (one + np.exp(-x))
def relu(x):
x_copy = np.copy(x)
np.maximum(x_copy, 0, x_copy)
return x_copy
def rsqrt(x):
one = np.ones_like(x)
return one / np.sqrt(x)
@tvm.testing.uses_gpu
def test_unary_op():
def check_single_op(opfunc, ref, dtype):
shape = (10, 4)
dtype = dtype
tp = relay.TensorType(shape)
x = relay.var("x", tp, dtype=dtype)
y = opfunc(x)
# test printer
assert ("{}(%x)".format(y.op.name)) in y.astext()
# test type inference
yy = run_infer_type(y)
assert yy.checked_type == tp
if ref is not None:
data = np.random.rand(*shape).astype(dtype)
ref_res = ref(data)
func = relay.Function([x], y)
for target, ctx in tvm.testing.enabled_targets():
# use graph by execuor default for testing, as we need
# create function explicitly to avoid constant-folding.
if (
dtype == "float16"
and target == "cuda"
and not have_fp16(tvm.gpu(0).compute_version)
):
continue
intrp = relay.create_executor("graph", ctx=ctx, target=target)
op_res = intrp.evaluate(func)(data)
np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
for opfunc, ref in [
(tvm.relay.log, np.log),
(tvm.relay.exp, np.exp),
(tvm.relay.erf, scipy.special.erf),
(tvm.relay.sqrt, np.sqrt),
(tvm.relay.rsqrt, rsqrt),
(tvm.relay.sigmoid, sigmoid),
(tvm.relay.tanh, np.tanh),
(relay.nn.relu, relu),
(tvm.relay.cos, np.cos),
(tvm.relay.sin, np.sin),
(tvm.relay.tan, np.tan),
(tvm.relay.atan, np.arctan),
]:
for dtype in ["float16", "float32"]:
check_single_op(opfunc, ref, dtype)
@tvm.testing.uses_gpu
def test_binary_op():
def inst(vars, sh):
return [vars.get(s, s) for s in sh]
def check_binary_op(opfunc, ref, dtype):
# TODO(@jroesch): this piece of code improperly uses type variables.
n = te.var("n")
s1 = (5, n, 5)
s2 = (n, 1)
t1 = relay.TensorType(s1)
t2 = relay.TensorType(s2)
x = relay.var("x", t1, dtype=dtype)
y = relay.var("y", t2, dtype=dtype)
z = opfunc(x, y)
# test printer
assert ("{}(%x, %y)".format(z.op.name)) in z.astext()
zz = run_infer_type(z)
assert zz.checked_type == t1
if ref is not None:
t1 = relay.TensorType((5, 10, 5))
t2 = relay.TensorType((5, 10, 5))
x = relay.var("x", t1, dtype=dtype)
y = relay.var("y", t2, dtype=dtype)
z = opfunc(x, y)
x_data = np.random.rand(5, 10, 5).astype(dtype)
y_data = np.random.rand(5, 10, 5).astype(dtype)
ref_res = ref(x_data, y_data)
func = relay.Function([x, y], z)
for target, ctx in tvm.testing.enabled_targets():
# use graph by execuor default for testing, as we need
# create function explicitly to avoid constant-folding.
if (
dtype == "float16"
and target == "cuda"
and not have_fp16(tvm.gpu(0).compute_version)
):
continue
intrp = relay.create_executor("graph", ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data, y_data)
np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01, atol=1e-3)
for opfunc, ref in [
(relay.add, np.add),
(relay.subtract, np.subtract),
(relay.multiply, np.multiply),
(relay.divide, np.divide),
(relay.floor_divide, np.floor_divide),
(relay.floor_mod, np.fmod),
]:
for dtype in ["float16", "float32"]:
check_binary_op(opfunc, ref, dtype)
@tvm.testing.uses_gpu
def test_expand_dims():
# based on topi test
def verify_expand_dims(dshape, dtype, oshape, axis, num_newaxis):
x = relay.Var("x", relay.TensorType(dshape, dtype))
func = relay.Function([x], relay.expand_dims(x, axis, num_newaxis))
for target, ctx in tvm.testing.enabled_targets():
if (
dtype == "float16"
and target == "cuda"
and not have_fp16(tvm.gpu(0).compute_version)
):
continue
data = np.random.uniform(size=dshape).astype(dtype)
ref_res = data.reshape(oshape)
intrp = relay.create_executor("graph", ctx=ctx, target=target)
op_res = intrp.evaluate(func)(data)
np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
for dtype in ["float16", "float32"]:
verify_expand_dims((3, 10), dtype, (3, 10, 1, 1), 2, 2)
verify_expand_dims((3, 10), dtype, (1, 3, 10), -3, 1)
@tvm.testing.uses_gpu
def test_bias_add():
for dtype in ["float16", "float32"]:
xshape = (10, 2, 3, 4)
bshape = (2,)
rtol = 1e-2 if dtype == "float16" else 1e-5
x = relay.var("x", shape=xshape, dtype=dtype)
bias = relay.var("bias", dtype=dtype)
z = relay.nn.bias_add(x, bias)
zz = run_infer_type(z)
assert "axis=" not in zz.astext()
assert zz.args[1].checked_type == relay.TensorType(bshape, dtype)
func = relay.Function([x, bias], z)
x_data = np.random.uniform(size=xshape).astype(dtype)
y_data = np.random.uniform(size=bshape).astype(dtype)
ref_res = x_data + y_data.reshape((2, 1, 1))
for target, ctx in tvm.testing.enabled_targets():
if (
dtype == "float16"
and target == "cuda"
and not have_fp16(tvm.gpu(0).compute_version)
):
continue
intrp = relay.create_executor("graph", ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data, y_data)
np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=rtol)
def test_bias_add_type_failure():
# the axis is out of range
try:
b_add = relay.nn.bias_add(relay.const(1), relay.const(2), axis=0)
run_infer_type(b_add)
except tvm._ffi.base.TVMError:
pass
else:
assert False
def test_expand_dims_infer_type():
for dtype in ["float16", "float32"]:
n, t, d = te.size_var("n"), te.size_var("t"), 100
x = relay.var("x", shape=(n, t, d), dtype=dtype)
y = relay.expand_dims(x, axis=2)
assert "axis=2" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, t, 1, 100), dtype)
@tvm.testing.uses_gpu
def test_softmax():
for dtype in ["float16", "float32"]:
# Softmax accuracy for float16 is poor
if dtype == "float16":
return
shape = (10, 4)
x = relay.var("x", shape=shape, dtype=dtype)
y = relay.nn.softmax(x, axis=1)
assert "nn.softmax" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(shape, dtype)
func = relay.Function([x], y)
x_data = np.random.uniform(size=shape).astype(dtype)
ref_res = tvm.topi.testing.softmax_python(x_data)
for target, ctx in tvm.testing.enabled_targets():
intrp = relay.create_executor("graph", ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data)
np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
@tvm.testing.uses_gpu
def test_log_softmax():
for dtype in ["float16", "float32"]:
# Softmax accuracy for float16 is poor
if dtype == "float16":
return
shape = (10, 4)
x = relay.var("x", shape=shape, dtype=dtype)
y = relay.nn.log_softmax(x, axis=1)
assert "nn.log_softmax" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(shape, dtype)
func = relay.Function([x], y)
x_data = np.random.uniform(size=shape).astype(dtype)
ref_res = tvm.topi.testing.log_softmax_python(x_data)
for target, ctx in tvm.testing.enabled_targets():
intrp = relay.create_executor("graph", ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data)
np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
@tvm.testing.uses_gpu
def test_concatenate():
for dtype in ["float16", "float32"]:
n, t, d = te.size_var("n"), te.size_var("t"), 100
x = relay.var("x", shape=(n, t, d))
y = relay.var("y", shape=(n, t, d))
z = relay.concatenate((x, y), axis=-1)
assert "axis=" in z.astext()
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((n, t, 200))
x = relay.exp(x)
z = relay.concatenate((x, y), axis=2)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((n, t, 200))
z = relay.concatenate((x, y), axis=1)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((n, t + t, 100))
        # check shape mismatches (the following case is expected to raise tvm._ffi.base.TVMError).
try:
x = relay.var("p1", shape=(2, 5))
y = relay.var("p2", shape=(2, 3))
c = relay.concatenate([x, y], axis=0)
func = relay.Function([x, y], c)
zz = run_infer_type(func)
except tvm._ffi.base.TVMError:
pass
else:
assert False
x = relay.var("x", shape=(10, 5), dtype=dtype)
y = relay.var("y", shape=(10, 5), dtype=dtype)
t = relay.var("z", shape=(), dtype=dtype)
z = relay.concatenate((x, y), axis=1)
z = relay.add(z, t)
# Check result.
func = relay.Function([x, y, t], z)
x_data = np.random.rand(10, 5).astype(dtype)
y_data = np.random.rand(10, 5).astype(dtype)
t_data = np.random.uniform(size=()).astype(dtype)
ref_res = np.concatenate((x_data, y_data), axis=1) + t_data
for target, ctx in tvm.testing.enabled_targets():
if (
dtype == "float16"
and target == "cuda"
and not have_fp16(tvm.gpu(0).compute_version)
):
continue
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(x_data, y_data, t_data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=0.01)
op_res2 = intrp2.evaluate(func)(x_data, y_data, t_data)
tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=0.01)
def test_dropout():
for dtype in ["float16", "float32"]:
n, t, d = te.size_var("n"), te.size_var("t"), te.size_var("d")
input_ty = relay.TensorType((n, t, d), dtype)
x = relay.var("x", input_ty)
y = relay.nn.dropout(x, rate=0.75)
assert "rate=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == input_ty
in_np = np.random.random([4, 5, 6]).astype("float32")
x = relay.const(in_np)
y = relay.nn.dropout(x, rate=0.5)
func = relay.Function([], y)
for target, ctx in tvm.testing.enabled_targets():
for backend in ["debug", "graph"]:
intrp = relay.create_executor("debug", ctx=ctx, target=target)
op_res = intrp.evaluate(func)()
tvm.testing.assert_allclose(op_res.asnumpy(), in_np, rtol=0.01)
def test_batch_norm():
for dtype in ["float16", "float32"]:
# beta and gamma ignored
data = relay.var("data", relay.TensorType((3, 2, 1), dtype))
beta = relay.var("beta", relay.TensorType((2,), dtype))
gamma = relay.var("gamma", relay.TensorType((2,), dtype))
moving_mean = relay.var("moving_mean", relay.TensorType((2,), dtype))
moving_var = relay.var("moving_var", relay.TensorType((2,), dtype))
y = relay.nn.batch_norm(
data, gamma, beta, moving_mean, moving_var, center=False, scale=False
)
yy = run_infer_type(y.astuple())
assert "center=" in yy.astext()
assert yy.checked_type == relay.ty.TupleType(
tvm.runtime.convert(
[
relay.TensorType((3, 2, 1), dtype),
relay.TensorType((2,), dtype),
relay.TensorType((2,), dtype),
]
)
)
beta = relay.var("beta", relay.TensorType((3,), dtype))
gamma = relay.var("gamma", relay.TensorType((3,), dtype))
moving_mean = relay.var("moving_mean", relay.TensorType((3,), dtype))
moving_var = relay.var("moving_var", relay.TensorType((3,), dtype))
y = relay.nn.batch_norm(
data, gamma, beta, moving_mean, moving_var, axis=0, center=False, scale=False
)
yy = run_infer_type(y.astuple())
assert yy.checked_type == relay.ty.TupleType(
tvm.runtime.convert(
[
relay.ty.TensorType((3, 2, 1), dtype),
relay.ty.TensorType((3,), dtype),
relay.ty.TensorType((3,), dtype),
]
)
)
# axis=-1
data = relay.var("data", relay.TensorType((1, 2, 3), dtype))
beta = relay.var("beta", relay.TensorType((3,), dtype))
gamma = relay.var("gamma", relay.TensorType((3,), dtype))
moving_mean = relay.var("moving_mean", relay.TensorType((3,), dtype))
moving_var = relay.var("moving_var", relay.TensorType((3,), dtype))
y = relay.nn.batch_norm(
data, gamma, beta, moving_mean, moving_var, axis=-1, center=False, scale=False
)
yy = run_infer_type(y.astuple())
assert yy.checked_type == relay.ty.TupleType(
tvm.runtime.convert(
[
relay.ty.TensorType((1, 2, 3), dtype),
relay.ty.TensorType((3,), dtype),
relay.ty.TensorType((3,), dtype),
]
)
)
@pytest.mark.xfail
def test_dense_type_check():
dtype = "float16"
n, c, h, w = 2, 2, 2, 2
x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
# it should fail since it does not match with m(2)
mismatch_w = 3
w = relay.var("w", relay.TensorType((2, mismatch_w), dtype))
y = relay.nn.dense(x, w)
yy = run_infer_type(y)
@tvm.testing.uses_gpu
def test_dense():
for dtype in ["float16", "float32"]:
# Dense accuracy for float16 is poor
if dtype == "float16":
return
n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
w = relay.var("w", relay.TensorType((2, w), dtype))
y = relay.nn.dense(x, w, units=2)
assert "units=2" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h, 2), dtype)
n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), 2
x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
wh, ww = te.size_var("wh"), te.size_var("ww")
w = relay.var("w", relay.TensorType((ww, wh), dtype))
y = relay.nn.dense(x, w)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h, ww), dtype)
n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), 2
x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
w = relay.var("w", relay.IncompleteType())
y = relay.nn.dense(x, w, units=2)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h, 2), dtype)
x = relay.var("x", shape=(10, 5), dtype=dtype)
w = relay.var("w", shape=(2, 5), dtype=dtype)
z = relay.nn.dense(x, w)
# Check result.
func = relay.Function([x, w], z)
x_data = np.random.rand(10, 5).astype(dtype)
w_data = np.random.rand(2, 5).astype(dtype)
ref_res = np.dot(x_data, w_data.T)
for target, ctx in tvm.testing.enabled_targets():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(x_data, w_data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
op_res2 = intrp2.evaluate(func)(x_data, w_data)
tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
def test_dense_dtype():
data_dtype = "uint8"
weight_dtype = "int8"
out_dtype = "uint8"
n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
x = relay.var("x", relay.TensorType((n, c, h, w), data_dtype))
w = relay.var("w", relay.TensorType((2, w), weight_dtype))
y = relay.nn.dense(x, w, units=2, out_dtype=out_dtype)
assert "units=2" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h, 2), out_dtype)
assert run_infer_type(yy.args[0]).checked_type.dtype == "uint8"
assert run_infer_type(yy.args[1]).checked_type.dtype == "int8"
def test_bitserial_dense():
m, k = te.size_var("m"), te.size_var("k")
x = relay.var("x", relay.TensorType((m, k), "int16"))
w = relay.var("w", relay.TensorType((k, 32), "int16"))
y = relay.nn.bitserial_dense(x, w, units=32)
"units=8" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((m, 32), "int16")
if __name__ == "__main__":
test_concatenate()
test_bias_add()
test_bias_add_type_failure()
test_unary_op()
test_binary_op()
test_expand_dims_infer_type()
test_expand_dims()
test_softmax()
test_log_softmax()
test_dropout()
test_batch_norm()
test_dense()
test_bitserial_dense()
test_dense_dtype()
| 37.945098 | 97 | 0.574204 |
ab8068a772476b1a9925a4766fde1c9646f25d9f | 8,467 | py | Python | projects/DensePose/query_db.py | aminekechaou/detectron2 | 3772b9316f8a2e6bf55cf5868dd64214d7f7c49a | [
"Apache-2.0"
] | null | null | null | projects/DensePose/query_db.py | aminekechaou/detectron2 | 3772b9316f8a2e6bf55cf5868dd64214d7f7c49a | [
"Apache-2.0"
] | null | null | null | projects/DensePose/query_db.py | aminekechaou/detectron2 | 3772b9316f8a2e6bf55cf5868dd64214d7f7c49a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import logging
import os
import sys
from timeit import default_timer as timer
from typing import Any, ClassVar, Dict, List
import torch
from detectron2.data.catalog import DatasetCatalog
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import setup_logger
from densepose.data.structures import DensePoseDataRelative
from densepose.utils.dbhelper import EntrySelector
from densepose.utils.logger import verbosity_to_level
from densepose.vis.base import CompoundVisualizer
from densepose.vis.bounding_box import BoundingBoxVisualizer
from densepose.vis.densepose_data_points import (
DensePoseDataCoarseSegmentationVisualizer,
DensePoseDataPointsIVisualizer,
DensePoseDataPointsUVisualizer,
DensePoseDataPointsVisualizer,
DensePoseDataPointsVVisualizer,
)
DOC = """Query DB - a tool to print / visualize data from a database
"""
LOGGER_NAME = "query_db"
logger = logging.getLogger(LOGGER_NAME)
_ACTION_REGISTRY: Dict[str, "Action"] = {}
class Action(object):
@classmethod
def add_arguments(cls: type, parser: argparse.ArgumentParser):
parser.add_argument(
"-v",
"--verbosity",
action="count",
help="Verbose mode. Multiple -v options increase the verbosity.",
)
def register_action(cls: type):
"""
Decorator for action classes to automate action registration
"""
global _ACTION_REGISTRY
_ACTION_REGISTRY[cls.COMMAND] = cls
return cls
class EntrywiseAction(Action):
@classmethod
def add_arguments(cls: type, parser: argparse.ArgumentParser):
super(EntrywiseAction, cls).add_arguments(parser)
parser.add_argument(
"dataset", metavar="<dataset>", help="Dataset name (e.g. densepose_coco_2014_train)"
)
parser.add_argument(
"selector",
metavar="<selector>",
help="Dataset entry selector in the form field1[:type]=value1[,"
"field2[:type]=value_min-value_max...] which selects all "
"entries from the dataset that satisfy the constraints",
)
parser.add_argument(
"--max-entries", metavar="N", help="Maximum number of entries to process", type=int
)
@classmethod
def execute(cls: type, args: argparse.Namespace):
dataset = setup_dataset(args.dataset)
entry_selector = EntrySelector.from_string(args.selector)
context = cls.create_context(args)
if args.max_entries is not None:
for _, entry in zip(range(args.max_entries), dataset):
if entry_selector(entry):
cls.execute_on_entry(entry, context)
else:
for entry in dataset:
if entry_selector(entry):
cls.execute_on_entry(entry, context)
@classmethod
def create_context(cls: type, args: argparse.Namespace) -> Dict[str, Any]:
context = {}
return context
@register_action
class PrintAction(EntrywiseAction):
"""
Print action that outputs selected entries to stdout
"""
COMMAND: ClassVar[str] = "print"
@classmethod
def add_parser(cls: type, subparsers: argparse._SubParsersAction):
parser = subparsers.add_parser(cls.COMMAND, help="Output selected entries to stdout. ")
cls.add_arguments(parser)
parser.set_defaults(func=cls.execute)
@classmethod
def add_arguments(cls: type, parser: argparse.ArgumentParser):
super(PrintAction, cls).add_arguments(parser)
@classmethod
def execute_on_entry(cls: type, entry: Dict[str, Any], context: Dict[str, Any]):
import pprint
printer = pprint.PrettyPrinter(indent=2, width=200, compact=True)
printer.pprint(entry)
@register_action
class ShowAction(EntrywiseAction):
"""
Show action that visualizes selected entries on an image
"""
COMMAND: ClassVar[str] = "show"
VISUALIZERS: ClassVar[Dict[str, object]] = {
"dp_segm": DensePoseDataCoarseSegmentationVisualizer(),
"dp_i": DensePoseDataPointsIVisualizer(),
"dp_u": DensePoseDataPointsUVisualizer(),
"dp_v": DensePoseDataPointsVVisualizer(),
"dp_pts": DensePoseDataPointsVisualizer(),
"bbox": BoundingBoxVisualizer(),
}
@classmethod
def add_parser(cls: type, subparsers: argparse._SubParsersAction):
parser = subparsers.add_parser(cls.COMMAND, help="Visualize selected entries")
cls.add_arguments(parser)
parser.set_defaults(func=cls.execute)
@classmethod
def add_arguments(cls: type, parser: argparse.ArgumentParser):
super(ShowAction, cls).add_arguments(parser)
parser.add_argument(
"visualizations",
metavar="<visualizations>",
help="Comma separated list of visualizations, possible values: "
"[{}]".format(",".join(sorted(cls.VISUALIZERS.keys()))),
)
parser.add_argument(
"--output",
metavar="<image_file>",
default="output.png",
help="File name to save output to",
)
@classmethod
def execute_on_entry(cls: type, entry: Dict[str, Any], context: Dict[str, Any]):
import cv2
import numpy as np
image_fpath = PathManager.get_local_path(entry["file_name"])
image = cv2.imread(image_fpath, cv2.IMREAD_GRAYSCALE)
image = np.tile(image[:, :, np.newaxis], [1, 1, 3])
datas = cls._extract_data_for_visualizers_from_entry(context["vis_specs"], entry)
visualizer = context["visualizer"]
image_vis = visualizer.visualize(image, datas)
entry_idx = context["entry_idx"] + 1
out_fname = cls._get_out_fname(entry_idx, context["out_fname"])
cv2.imwrite(out_fname, image_vis)
logger.info(f"Output saved to {out_fname}")
context["entry_idx"] += 1
@classmethod
def _get_out_fname(cls: type, entry_idx: int, fname_base: str):
base, ext = os.path.splitext(fname_base)
return base + ".{0:04d}".format(entry_idx) + ext
@classmethod
def create_context(cls: type, args: argparse.Namespace) -> Dict[str, Any]:
vis_specs = args.visualizations.split(",")
visualizers = []
for vis_spec in vis_specs:
vis = cls.VISUALIZERS[vis_spec]
visualizers.append(vis)
context = {
"vis_specs": vis_specs,
"visualizer": CompoundVisualizer(visualizers),
"out_fname": args.output,
"entry_idx": 0,
}
return context
@classmethod
def _extract_data_for_visualizers_from_entry(
cls: type, vis_specs: List[str], entry: Dict[str, Any]
):
dp_list = []
bbox_list = []
for annotation in entry["annotations"]:
is_valid, _ = DensePoseDataRelative.validate_annotation(annotation)
if not is_valid:
continue
bbox = torch.as_tensor(annotation["bbox"])
bbox_list.append(bbox)
dp_data = DensePoseDataRelative(annotation)
dp_list.append(dp_data)
datas = []
for vis_spec in vis_specs:
datas.append(bbox_list if "bbox" == vis_spec else (bbox_list, dp_list))
return datas
def setup_dataset(dataset_name):
logger.info("Loading dataset {}".format(dataset_name))
start = timer()
dataset = DatasetCatalog.get(dataset_name)
stop = timer()
logger.info("Loaded dataset {} in {:.3f}s".format(dataset_name, stop - start))
return dataset
def create_argument_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(
description=DOC,
formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=120),
)
parser.set_defaults(func=lambda _: parser.print_help(sys.stdout))
subparsers = parser.add_subparsers(title="Actions")
for _, action in _ACTION_REGISTRY.items():
action.add_parser(subparsers)
return parser
def main():
parser = create_argument_parser()
args = parser.parse_args()
verbosity = args.verbosity if hasattr(args, "verbosity") else None
global logger
logger = setup_logger(name=LOGGER_NAME)
logger.setLevel(verbosity_to_level(verbosity))
args.func(args)
if __name__ == "__main__":
main()
| 33.733068 | 96 | 0.662218 |
2ba83d342e2b8d9bc1ecc6d29d0f179c4a0d0f5f | 718 | py | Python | tests/__init__.py | py-graphit/py-graphit | 533ef47e279fc07d9a88f86cc9d19f09d56176f9 | [
"Apache-2.0"
] | 1 | 2018-12-02T18:56:34.000Z | 2018-12-02T18:56:34.000Z | tests/__init__.py | py-graphit/py-graphit | 533ef47e279fc07d9a88f86cc9d19f09d56176f9 | [
"Apache-2.0"
] | null | null | null | tests/__init__.py | py-graphit/py-graphit | 533ef47e279fc07d9a88f86cc9d19f09d56176f9 | [
"Apache-2.0"
] | 1 | 2018-12-02T15:29:41.000Z | 2018-12-02T15:29:41.000Z | # -*- coding: utf-8 -*-
"""
Unit test suite for the graphit module, run as:
::
test = module_test_suite()
runner = unittest.TextTestRunner(verbosity=2)
runner.run(test)
"""
import os
import sys
import unittest
import logging
# Init basic logging
logging.basicConfig(level=logging.DEBUG)
# Add modules in package to path so we can import them
modulepath = os.path.abspath(os.path.join(os.path.dirname(__file__), '../'))
sys.path.insert(0, modulepath)
def module_test_suite():
"""
Run graphit module unit tests
"""
testpath = os.path.join(os.path.dirname(__file__), 'module')
loader = unittest.TestLoader()
suite = loader.discover(testpath, pattern='module_*.py')
return suite
| 21.757576 | 76 | 0.696379 |
de9ee1bd198feb492e486c02fdbd28913d5b3d76 | 72,409 | py | Python | ibeis/init/filter_annots.py | holmbergius/ibeisold | da3a1480057a6a5d5c68304760642edaae680502 | [
"Apache-2.0"
] | 1 | 2019-01-17T22:59:14.000Z | 2019-01-17T22:59:14.000Z | ibeis/init/filter_annots.py | holmbergius/ibeisold | da3a1480057a6a5d5c68304760642edaae680502 | [
"Apache-2.0"
] | null | null | null | ibeis/init/filter_annots.py | holmbergius/ibeisold | da3a1480057a6a5d5c68304760642edaae680502 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
TODO:
* cross validation
* encounter vs database (time filtering)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import functools
import copy
import utool as ut
import numpy as np
import six
from ibeis.control import controller_inject
(print, rrr, profile) = ut.inject2(__name__)
VERB_TESTDATA, VERYVERB_TESTDATA = ut.get_verbflag('testdata', 'td', 'acfg')
SEED1 = 0
SEED2 = 42
if False and ut.is_developer():
USE_ACFG_CACHE = not ut.get_argflag(('--nocache-annot', '--nocache-aid',
'--nocache')) and ut.USE_CACHE
USE_ACFG_CACHE = False
else:
USE_ACFG_CACHE = False
_tup = controller_inject.make_ibs_register_decorator(__name__)
CLASS_INJECT_KEY, register_ibs_method = _tup
@profile
def time_filter_annots():
"""
python -m ibeis.init.filter_annots time_filter_annots --db PZ_Master1 -a ctrl:qmingt=2 --profile
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.init.filter_annots import * # NOQA
>>> result = time_filter_annots()
"""
import ibeis
ibeis.testdata_expanded_aids()
@register_ibs_method
def filter_annots_general(ibs, aid_list=None, filter_kw={}, verbose=False, **kwargs):
r"""
Args:
ibs (IBEISController): ibeis controller object
aid_list (list): list of annotation rowids
        filter_kw (dict): dictionary of filter parameters (see KWargs below)
KWargs::
has_none_annotmatch, any_match_annotmatch, has_all, is_known,
any_match_annot, logic_annot, none_match_annotmatch,
max_num_annotmatch, any_startswith_annot, has_any, require_quality,
species, any_match, view_ext, has_any_annotmatch, view_pername,
max_num_annot, min_timedelta, any_startswith, max_numfeat,
any_startswith_annotmatch, been_adjusted, any_endswith_annot,
require_viewpoint, logic, has_any_annot, min_num_annotmatch, min_num,
min_num_annot, has_all_annot, has_none, min_pername,
any_endswith_annotmatch, any_endswith, require_timestamp, none_match,
contrib_contains, has_all_annotmatch, logic_annotmatch, min_numfeat,
none_match_annot, view_ext1, view_ext2, max_num, has_none_annot,
minqual, view
CommandLine:
python -m ibeis --tf filter_annots_general
python -m ibeis --tf filter_annots_general --db PZ_Master1 \
--has_any=[needswork,correctable,mildviewpoint] \
--has_none=[viewpoint,photobomb,error:viewpoint,quality] --show
python -m ibeis --tf filter_annots_general --db=GZ_Master1 \
--max-numfeat=300 --show --minqual=junk --species=None
python -m ibeis --tf filter_annots_general --db=lynx \
--been_adjusted=True
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.init.filter_annots import * # NOQA
>>> import ibeis
>>> filter_kw = ut.argparse_dict(get_default_annot_filter_form(),
>>> type_hint=ut.ddict(list, has_any=list,
>>> has_none=list,
>>> logic=str))
>>> print('filter_kw = %s' % (ut.dict_str(filter_kw),))
>>> ibs = ibeis.opendb(defaultdb='testdb1')
>>> aid_list = ibs.get_valid_aids()
>>> #filter_kw = dict(is_known=True, min_num=1, has_any='viewpoint')
>>> #filter_kw = dict(is_known=True, min_num=1, any_match='.*error.*')
>>> aid_list_ = filter_annots_general(ibs, aid_list, filter_kw)
>>> print('len(aid_list_) = %r' % (len(aid_list_),))
>>> all_tags = ut.flatten(ibs.get_annot_all_tags(aid_list_))
>>> filtered_tag_hist = ut.dict_hist(all_tags)
>>> ut.print_dict(filtered_tag_hist, key_order_metric='val')
>>> ut.print_dict(ibs.get_annot_stats_dict(aid_list_), 'annot_stats')
>>> ut.quit_if_noshow()
>>> import ibeis.viz.interact
>>> ibeis.viz.interact.interact_chip.interact_multichips(ibs, aid_list_)
>>> ut.show_if_requested()
"""
if aid_list is None:
aid_list = ibs.get_valid_aids()
filter_kw_ = get_default_annot_filter_form()
ut.update_existing(filter_kw_, filter_kw, iswarning=True, assert_exists=True)
ut.update_existing(filter_kw_, kwargs, iswarning=True, assert_exists=True)
aid_list_ = aid_list
#filter_kw = ut.merge_dicts(get_default_annot_filter_form(), filter_kw)
# TODO MERGE FILTERFLAGS BY TAGS AND FILTERFLAGS INDEPENDANT
#aid_list_ = ibs.filterannots_by_tags(aid_list_, filter_kw)
aid_list_ = ibs.filter_annots_independent(aid_list_, filter_kw_, verbose=verbose)
aid_list_ = filter_annots_intragroup(ibs, aid_list_, filter_kw_, verbose=verbose)
return aid_list_
@register_ibs_method
def sample_annots_general(ibs, aid_list=None, filter_kw={}, verbose=False, **kwargs):
""" filter + sampling """
# hack
from ibeis.expt import annotation_configs
if aid_list is None:
aid_list = ibs.get_valid_aids()
filter_kw_ = annotation_configs.INDEPENDENT_DEFAULTS.copy()
filter_kw_.update(annotation_configs.SUBINDEX_DEFAULTS.copy())
filter_kw_.update(annotation_configs.SAMPLE_DEFAULTS.copy())
ut.update_existing(filter_kw_, filter_kw, iswarning=True, assert_exists=True)
ut.update_existing(filter_kw_, kwargs, iswarning=True, assert_exists=True)
aid_list_ = aid_list
#filter_kw = ut.merge_dicts(get_default_annot_filter_form(), filter_kw)
# TODO MERGE FILTERFLAGS BY TAGS AND FILTERFLAGS INDEPENDANT
#aid_list_ = ibs.filterannots_by_tags(aid_list_, filter_kw)
aid_list_ = ibs.filter_annots_independent(aid_list_, filter_kw_, verbose=verbose)
aid_list_ = filter_annots_intragroup(ibs, aid_list_, filter_kw_, verbose=verbose)
aid_list_ = sample_annots(ibs, aid_list_, filter_kw_, verbose=verbose)
aid_list_ = subindex_annots(ibs, aid_list_, filter_kw_, verbose=verbose)
return aid_list_
@profile
def get_default_annot_filter_form():
r"""
Returns dictionary containing defaults for all valid filter parameters
CommandLine:
python -m ibeis --tf get_default_annot_filter_form
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.init.filter_annots import * # NOQA
>>> filter_kw = get_default_annot_filter_form()
>>> print(ut.dict_str(filter_kw, align=True))
>>> print(', '.join(filter_kw.keys()))
"""
from ibeis.expt import annotation_configs
iden_defaults = annotation_configs.INDEPENDENT_DEFAULTS.copy()
filter_kw = iden_defaults
#tag_defaults = get_annot_tag_filterflags(
# None, None, {}, request_defaultkw=True)
#filter_kw = ut.dict_union3(iden_defaults, tag_defaults, combine_op=None)
return filter_kw
@register_ibs_method
def get_annot_tag_filterflags(ibs, aid_list, filter_kw,
request_defaultkw=False):
r"""
    Filters annotations by tags, including tags on the annotation pairs they belong to
"""
from ibeis import tag_funcs
# Build Filters
filter_keys = ut.get_func_kwargs(tag_funcs.filterflags_general_tags)
annotmatch_filterkw = {}
annot_filterkw = {}
both_filterkw = {}
kwreg = ut.KWReg(enabled=request_defaultkw)
for key in filter_keys:
annotmatch_filterkw[key] = filter_kw.get(*kwreg(key + '_annotmatch', None))
annot_filterkw[key] = filter_kw.get(*kwreg(key + '_annot', None))
both_filterkw[key] = filter_kw.get(*kwreg(key, None))
if request_defaultkw:
return kwreg.defaultkw
# Grab Data
need_annot_tags = any([var is not None for var in annot_filterkw.values()])
need_annotmatch_tags = any([
var is not None for var in annotmatch_filterkw.values()])
need_both_tags = any([var is not None for var in both_filterkw.values()])
if need_annot_tags or need_both_tags:
annot_tags_list = ibs.get_annot_case_tags(aid_list)
if need_annotmatch_tags or need_both_tags:
annotmatch_tags_list = ibs.get_annot_annotmatch_tags(aid_list)
if need_both_tags:
both_tags_list = list(map(ut.unique_ordered,
map(ut.flatten, zip(annot_tags_list,
annotmatch_tags_list))))
# Filter Data
    flags = np.ones(len(aid_list), dtype=bool)
if need_annot_tags:
flags_ = tag_funcs.filterflags_general_tags(
annot_tags_list, **annot_filterkw)
np.logical_and(flags_, flags, out=flags)
if need_annotmatch_tags:
flags_ = tag_funcs.filterflags_general_tags(
annotmatch_tags_list, **annotmatch_filterkw)
np.logical_and(flags_, flags, out=flags)
if need_both_tags:
flags_ = tag_funcs.filterflags_general_tags(
both_tags_list, **both_filterkw)
np.logical_and(flags_, flags, out=flags)
return flags
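# The routing above relies on a simple key-suffix convention: a flat filter_kw
# can target annot tags ('<key>_annot'), annotmatch tags ('<key>_annotmatch'),
# or both ('<key>'), and the resulting boolean masks are AND-reduced.  The
# following is a minimal standalone sketch of that pattern; it does not use
# the ibeis API and the helper names are made up for illustration.
def _sketch_route_tag_filters(filter_kw, filter_keys):
    """Split one flat filter dict into annot / annotmatch / shared sub-dicts."""
    annot_filterkw = {key: filter_kw.get(key + '_annot') for key in filter_keys}
    annotmatch_filterkw = {key: filter_kw.get(key + '_annotmatch') for key in filter_keys}
    both_filterkw = {key: filter_kw.get(key) for key in filter_keys}
    return annot_filterkw, annotmatch_filterkw, both_filterkw
def _sketch_and_reduce_flags(flag_lists):
    """AND together several boolean masks, mirroring the np.logical_and calls above."""
    import numpy as np
    flags = np.ones(len(flag_lists[0]), dtype=bool)
    for flags_ in flag_lists:
        np.logical_and(np.asarray(flags_, dtype=bool), flags, out=flags)
    return flags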
@register_ibs_method
def filterannots_by_tags(ibs, aid_list, filter_kw):
r"""
Args:
ibs (IBEISController): ibeis controller object
aid_list (list): list of annotation rowids
CommandLine:
python -m ibeis --tf filterannots_by_tags
utprof.py -m ibeis --tf filterannots_by_tags
SeeAlso:
filter_annotmatch_by_tags
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.init.filter_annots import * # NOQA
>>> import ibeis
>>> ibs = ibeis.opendb(defaultdb='PZ_Master1')
>>> aid_list = ibs.get_valid_aids()
>>> has_any = ut.get_argval('--tags', type_=list,
>>> default=['SceneryMatch', 'Photobomb'])
>>> min_num = ut.get_argval('--min_num', type_=int, default=1)
>>> filter_kw = dict(has_any=has_any, min_num=1)
>>> aid_list_ = filterannots_by_tags(ibs, aid_list, filter_kw)
>>> print('aid_list_ = %r' % (aid_list_,))
>>> ut.quit_if_noshow()
>>> pass
>>> # TODO: show special annot group in GUI
"""
flags = get_annot_tag_filterflags(ibs, aid_list, filter_kw)
aid_list_ = ut.compress(aid_list, flags)
return aid_list_
def get_acfg_cacheinfo(ibs, aidcfg):
"""
    Returns the location and name of the annot-config (acfg) data cache
"""
from os.path import dirname, join
    # Make loading aids a bit faster for experiments
if ut.is_developer():
import ibeis
repodir = dirname(ut.get_module_dir(ibeis))
acfg_cachedir = join(repodir, 'ACFG_CACHE')
else:
#acfg_cachedir = './localdata/ACFG_CACHE'
acfg_cachedir = join(ibs.get_cachedir(), 'ACFG_CACHE')
ut.ensuredir(acfg_cachedir)
acfg_cachename = 'ACFG_CACHE'
RESPECT_INTERNAL_CFGS = False
if RESPECT_INTERNAL_CFGS:
aid_cachestr = ibs.get_dbname() + '_' + ut.hashstr27(ut.to_json(aidcfg))
else:
relevant_aidcfg = copy.deepcopy(aidcfg)
ut.delete_dict_keys(relevant_aidcfg['qcfg'], ut.INTERNAL_CFGKEYS)
ut.delete_dict_keys(relevant_aidcfg['dcfg'], ut.INTERNAL_CFGKEYS)
aid_cachestr = (
ibs.get_dbname() + '_' + ut.hashstr27(ut.to_json(relevant_aidcfg)))
acfg_cacheinfo = (acfg_cachedir, acfg_cachename, aid_cachestr)
return acfg_cacheinfo
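# The cache key above is essentially "<dbname>_<hash of the JSON-serialized,
# pruned config>".  Below is a rough stdlib-only sketch of that construction;
# ut.to_json / ut.hashstr27 are utool helpers, so the hashlib/json calls here
# are stand-ins and will not reproduce the exact same cache strings.
def _sketch_config_cache_key(dbname, aidcfg, exclude_keys=()):
    import copy
    import hashlib
    import json
    cfg = copy.deepcopy(aidcfg)
    for subcfg in cfg.values():
        # drop keys (e.g. internal bookkeeping keys) that should not affect the hash
        if isinstance(subcfg, dict):
            for key in exclude_keys:
                subcfg.pop(key, None)
    text = json.dumps(cfg, sort_keys=True, default=str)
    return dbname + '_' + hashlib.sha1(text.encode('utf-8')).hexdigest()[:16]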
@profile
def expand_single_acfg(ibs, aidcfg, verbose=None):
"""
for main_helpers """
from ibeis.expt import annotation_configs
if verbose is None:
verbose = VERB_TESTDATA
if verbose:
print('+=== EXPAND_SINGLE_ACFG ===')
print(' * acfg = %s' %
(ut.dict_str(annotation_configs.compress_aidcfg(aidcfg),
align=True),))
print('+---------------------')
avail_aids = ibs._get_all_aids()
avail_aids = filter_annots_independent(ibs, avail_aids, aidcfg, verbose=verbose)
avail_aids = filter_annots_intragroup(ibs, avail_aids, aidcfg, verbose=verbose)
avail_aids = sample_annots(ibs, avail_aids, aidcfg, verbose=verbose)
avail_aids = subindex_annots(ibs, avail_aids, aidcfg, verbose=verbose)
aids = avail_aids
if verbose:
print('L___ EXPAND_SINGLE_ACFG ___')
return aids
@profile
def hack_remove_label_errors(ibs, expanded_aids, verbose=None):
qaids_, daids_ = expanded_aids
partitioned_sets = ibs.partition_annots_into_corresponding_groups(
qaids_, daids_)
tup = partitioned_sets
query_group, data_group, unknown_group, distract_group = tup
unknown_flags = ibs.unflat_map(
ibs.get_annot_tag_filterflags, unknown_group,
filter_kw=dict(none_match=['.*error.*']))
#data_flags = ibs.unflat_map(
# ibs.get_annot_tag_filterflags, data_group,
# filter_kw=dict(none_match=['.*error.*']))
query_flags = ibs.unflat_map(
ibs.get_annot_tag_filterflags, query_group,
filter_kw=dict(none_match=['.*error.*']))
query_noterror_flags = list(map(all, ut.list_zipflatten(
query_flags,
#data_flags,
)))
unknown_noterror_flags = list(map(all, unknown_flags))
filtered_queries = ut.flatten(
ut.compress(query_group, query_noterror_flags))
filtered_unknown = ut.flatten(
ut.compress(unknown_group, unknown_noterror_flags))
filtered_qaids_ = sorted(filtered_queries + filtered_unknown)
expanded_aids = (filtered_qaids_, daids_)
if verbose:
ut.colorprint('+---------------------', 'red')
ibs.print_annotconfig_stats(filtered_qaids_, daids_)
ut.colorprint('L___ HACKED_EXPAND_ACFGS ___', 'red')
return expanded_aids
@profile
def hack_extra(ibs, expanded_aids):
# SUCH HACK to get a larger database
from ibeis.expt import annotation_configs
_aidcfg = annotation_configs.default['dcfg']
_aidcfg['sample_per_name'] = 1
_aidcfg['sample_size'] = 500
_aidcfg['min_pername'] = 1
_aidcfg['require_viewpoint'] = True
_aidcfg['exclude_reference'] = True
_aidcfg['view'] = 'right'
prefix = 'hack'
qaids = expanded_aids[0]
daids = expanded_aids[1]
_extra_aids = ibs.get_valid_aids()
_extra_aids = ibs.remove_groundtrue_aids(
_extra_aids, (qaids + daids))
_extra_aids = filter_annots_independent(
ibs, _extra_aids, _aidcfg, prefix)
_extra_aids = sample_annots(
ibs, _extra_aids, _aidcfg, prefix)
daids = sorted(daids + _extra_aids)
expanded_aids = (qaids, daids)
return expanded_aids
def expand_acfgs_consistently(ibs, acfg_combo, initial_aids=None,
use_cache=None, verbose=None):
"""
Expands a set of configurations such that they are comparable
CommandLine:
python -m ibeis --tf parse_acfg_combo_list \
-a varysize
ibeis --tf get_annotcfg_list --db PZ_Master1 -a varysize
#ibeis --tf get_annotcfg_list --db lynx -a default:hack_imageset=True
ibeis --tf get_annotcfg_list --db PZ_Master1 -a varysize:qsize=None
ibeis --tf get_annotcfg_list --db PZ_Master0 --nofilter-dups -a varysize
ibeis --tf get_annotcfg_list --db PZ_MTEST -a varysize --nofilter-dups
ibeis --tf get_annotcfg_list --db PZ_Master0 --verbtd \
--nofilter-dups -a varysize
ibeis --tf get_annotcfg_list --db PZ_Master1 -a viewpoint_compare \
--verbtd --nofilter-dups
ibeis --tf get_annotcfg_list -a timectrl --db GZ_Master1 --verbtd \
--nofilter-dups
"""
from ibeis.expt import annotation_configs
if verbose is None:
verbose = VERB_TESTDATA
# Edit configs so the sample sizes are consistent
    # FIXME: requires that smallest configs are specified first
def tmpmin(a, b):
if a is None:
return b
elif b is None:
return a
return min(a, b)
expanded_aids_list = []
# Keep track of seen samples
min_qsize = None
min_dsize = None
# HACK: Find out the params being varied and disallow those from being
    # prefiltered due to the lack of hierarchical filters
nonvaried_dict, varied_acfg_list = annotation_configs.partition_acfg_list(
acfg_combo)
hack_exclude_keys = list(set(ut.flatten(
[list(ut.merge_dicts(*acfg.values()).keys())
for acfg in varied_acfg_list])))
# HACK: determine unconstrained min / max nannots
if False:
import copy
acfg_combo2 = copy.deepcopy(acfg_combo)
unconstrained_expansions = []
for combox, acfg in enumerate(acfg_combo2):
qcfg = acfg['qcfg']
dcfg = acfg['dcfg']
with ut.Indenter('[PRE %d] ' % (combox,)):
expanded_aids = expand_acfgs(ibs, acfg, initial_aids=initial_aids,
use_cache=use_cache,
hack_exclude_keys=hack_exclude_keys,
verbose=verbose)
unconstrained_expansions.append(expanded_aids)
if any(ut.take_column(ut.take_column(acfg_combo, 'dcfg'), 'force_const_size')):
unconstrained_lens = np.array([(len(q), len(d)) for q, d in unconstrained_expansions])
#max_dlen = unconstrained_lens.T[1].max()
min_dlen = unconstrained_lens.T[1].min()
for acfg in acfg_combo:
dcfg = acfg['dcfg']
# TODO: make sample size annot_sample_size
# sample size is #annots
if dcfg['sample_size'] is None:
dcfg['_orig_sample_size'] = dcfg['sample_size']
dcfg['sample_size'] = min_dlen
for combox, acfg in enumerate(acfg_combo):
qcfg = acfg['qcfg']
dcfg = acfg['dcfg']
# In some cases we may want to clamp these, but others we do not
if qcfg['force_const_size']:
qcfg['_orig_sample_size'] = qcfg['sample_size']
qcfg['sample_size'] = tmpmin(qcfg['sample_size'] , min_qsize)
if dcfg['force_const_size']:
dcfg['_orig_sample_size'] = dcfg['sample_size']
dcfg['sample_size'] = tmpmin(dcfg['sample_size'] , min_dsize)
# Expand modified acfgdict
with ut.Indenter('[%d] ' % (combox,)):
expanded_aids = expand_acfgs(ibs, acfg, initial_aids=initial_aids,
use_cache=use_cache,
hack_exclude_keys=hack_exclude_keys,
verbose=verbose)
#if dcfg.get('hack_extra', None):
# assert False
# expanded_aids = hack_extra(ibs, expanded_aids)
qsize = len(expanded_aids[0])
dsize = len(expanded_aids[1])
# <hack for float that should not interfere with other hacks
if qcfg['sample_size'] != qsize:
qcfg['_orig_sample_size'] = qcfg['sample_size']
if dcfg['sample_size'] != dsize:
dcfg['_orig_sample_size'] = dcfg['sample_size']
# /-->
if min_qsize is None:
qcfg['sample_size'] = qsize
if min_dsize is None: # UNSURE
dcfg['sample_size'] = dsize
if qcfg['sample_size'] != qsize:
qcfg['_true_sample_size'] = qsize
if dcfg['sample_size'] != dsize:
dcfg['_true_sample_size'] = dsize
if qcfg['force_const_size']:
min_qsize = tmpmin(min_qsize, qsize)
if dcfg['force_const_size']: # UNSURE
min_dsize = tmpmin(min_dsize, dsize)
# so hacky
# this has to be after sample_size assignment, otherwise the filtering
        # is unstable. Remove queries that have labeling errors in them.
# TODO: fix errors AND remove labels
#remove_label_errors = ut.is_developer() or ut.get_argflag('--noerrors')
#ut.is_developer() or ut.get_argflag('--noerrors')
remove_label_errors = qcfg.get('hackerrors', False)
if remove_label_errors:
expanded_aids = hack_remove_label_errors(ibs, expanded_aids, verbose)
#ibs.print_annotconfig_stats(*expanded_aids)
expanded_aids_list.append(expanded_aids)
# Sample afterwords
return list(zip(acfg_combo, expanded_aids_list))
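# The consistency logic above reduces to: walk the configs in order, remember
# the smallest query/database sizes produced so far, and clamp any config that
# requested force_const_size to that running minimum (None meaning "no limit
# seen yet").  A stripped-down sketch of just that bookkeeping, with plain
# dicts standing in for qcfg/dcfg and precomputed expansion lengths standing
# in for the real expand_acfgs call:
def _sketch_clamp_sample_sizes(cfgs, expanded_lens):
    def tmpmin(a, b):
        if a is None:
            return b
        if b is None:
            return a
        return min(a, b)
    running_min = None
    for cfg, true_len in zip(cfgs, expanded_lens):
        if cfg.get('force_const_size'):
            cfg['sample_size'] = tmpmin(cfg.get('sample_size'), running_min)
            running_min = tmpmin(running_min, true_len)
    return cfgs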
@profile
def expand_acfgs(ibs, aidcfg, verbose=None, use_cache=None,
hack_exclude_keys=None, initial_aids=None, save_cache=True):
r"""
Main multi-expansion function. Expands an annot config dict into qaids and
daids. New version of this function based on a configuration dictionary
    built from command line arguments.
Args:
ibs (IBEISController): ibeis controller object
aidcfg (dict): configuration of the annotation filter
verbose (bool): verbosity flag(default = False)
use_cache (bool): turns on disk based caching(default = None)
hack_exclude_keys (None): (default = None)
initial_aids (None): (default = None)
Returns:
tuple: expanded_aids=(qaid_list, daid_list) - expanded list of aids
that meet the criteria of the aidcfg filter
TODO:
The database should be created first in most circumstances, then
the queries should be filtered to meet the database restrictions?
        I'm not sure. Sometimes you need to set the query aids constant, but
sometimes you need to set the data aids constant. Seems to depend.
This function very much needs the idea of filter chains
OkNewIdea:
3 filters:
* Common sampling - takes care of things like min time delta,
* species, quality viewpoint etc.
* query sampling
* database sampling
Basic idea is
* Sample large pool
* Partition pool into query and database
Requires:
* base sampling params
* partition1 params
* partition2 params
* inter partition params?
CommandLine:
python -m ibeis.dev -e print_acfg -a timectrl:qsize=10,dsize=10 --db PZ_MTEST --veryverbtd --nocache-aid
python -m ibeis.dev -e print_acfg -a timectrl:qminqual=good,qsize=10,dsize=10 --db PZ_MTEST --veryverbtd --nocache-aid
python -m ibeis.dev -e print_acfg -a timectrl --db PZ_MTEST --verbtd --nocache-aid
python -m ibeis.dev -e print_acfg -a timectrl --db PZ_Master1 --verbtd --nocache-aid
python -m ibeis.dev -e print_acfg -a timequalctrl --db PZ_Master1 --verbtd --nocache-aid
python -m ibeis.dev -e rank_cdf -a controlled:qsize=10,dsize=10,dper_name=2 -t default --db PZ_MTEST
python -m ibeis.dev -e rank_cdf -a controlled:qsize=10,dsize=20,dper_name=2 -t default --db PZ_MTEST
python -m ibeis.dev -e print -a controlled:qsize=10,dsize=10 -t default --db PZ_MTEST --verbtd --nocache-aid
python -m ibeis.dev -e latexsum -t candinvar -a viewpoint_compare --db NNP_Master3 --acfginfo
utprof.py -m ibeis.dev -e print -t candk -a varysize --db PZ_MTEST --acfginfo
utprof.py -m ibeis.dev -e latexsum -t candk -a controlled --db PZ_Master0 --acfginfo
python -m ibeis --tf get_annotcfg_list:0 --db NNP_Master3 -a viewpoint_compare --nocache-aid --verbtd
python -m ibeis --tf get_annotcfg_list --db PZ_Master1 \
-a timectrl:qhas_any=\(needswork,correctable,mildviewpoint\),qhas_none=\(viewpoint,photobomb,error:viewpoint,quality\) \
--acfginfo --veryverbtd --veryverbtd
python -m ibeis --tf draw_rank_cdf --db PZ_Master1 --show -t best \
-a timectrl:qhas_any=\(needswork,correctable,mildviewpoint\),qhas_none=\(viewpoint,photobomb,error:viewpoint,quality\) \
--acfginfo --veryverbtd
python -m ibeis --tf get_annotcfg_list --db Oxford -a default:qhas_any=\(query,\),dpername=2,exclude_reference=True --acfginfo --verbtd --veryverbtd --nocache-aid
CommandLine:
python -m ibeis.init.filter_annots --exec-expand_acfgs --show
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.init.filter_annots import * # NOQA
>>> import ibeis
>>> from ibeis.expt import annotation_configs
>>> ibs = ibeis.opendb(defaultdb='testdb1')
>>> aidcfg = copy.deepcopy(annotation_configs.default)
>>> aidcfg['qcfg']['species'] = 'primary'
>>> initial_aids = None
>>> expanded_aids = expand_acfgs(ibs, aidcfg, initial_aids=initial_aids)
>>> result = ut.repr3(expanded_aids, nl=1, nobr=True)
>>> print(result)
[1, 2, 3, 4, 5, 6],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13],
"""
from ibeis.expt import annotation_configs
if verbose is None:
verbose = VERB_TESTDATA
assert isinstance(aidcfg, dict), 'type(aidcfg)=%r' % (type(aidcfg),)
aidcfg = copy.deepcopy(aidcfg)
# Check if this filter has been cached
# TODO: keep a database state config that augments the cachestr?
if use_cache is None:
use_cache = USE_ACFG_CACHE
# save_cache = True
if use_cache and save_cache:
acfg_cacheinfo = get_acfg_cacheinfo(ibs, aidcfg)
acfg_cachedir, acfg_cachename, aid_cachestr = acfg_cacheinfo
if use_cache:
try:
(qaid_list, daid_list) = ut.load_cache(
acfg_cachedir, acfg_cachename, aid_cachestr)
except IOError:
pass
else:
return qaid_list, daid_list
comp_acfg = annotation_configs.compress_aidcfg(aidcfg)
if verbose:
ut.colorprint('+=== EXPAND_ACFGS ===', 'yellow')
print(' * acfg = %s' % (ut.dict_str(comp_acfg, align=True),))
ut.colorprint('+---------------------', 'yellow')
# Breakup into common, query, and database configs
qcfg = aidcfg['qcfg']
dcfg = aidcfg['dcfg']
common_cfg = comp_acfg['common']
# Extract the common independent filtering params
idenfilt_cfg_default = annotation_configs.INDEPENDENT_DEFAULTS
idenfilt_cfg_empty = {key: None for key in idenfilt_cfg_default.keys()}
idenfilt_cfg_common = ut.update_existing(idenfilt_cfg_empty,
common_cfg, copy=True)
if hack_exclude_keys:
for key in hack_exclude_keys:
if key in idenfilt_cfg_common:
idenfilt_cfg_common[key] = None
# Find the q/d specific filtering flags that were already taken care of in
# common filtering. Set them all to None, so we dont rerun that filter
qpredone_iden_keys = ut.dict_isect(qcfg, idenfilt_cfg_common).keys()
for key in qpredone_iden_keys:
qcfg[key] = None
dpredone_iden_keys = ut.dict_isect(dcfg, idenfilt_cfg_common).keys()
for key in dpredone_iden_keys:
dcfg[key] = None
#if aidcfg['qcfg']['hack_imageset'] is True:
# return ibs.get_imageset_expanded_aids()
# Hack: Make hierarchical filters to supersede this
if initial_aids is None:
initial_aids = ibs._get_all_aids()
verbflags = dict(verbose=verbose)
qfiltflags = dict(prefix='q', **verbflags)
dfiltflags = dict(prefix='d', **verbflags)
default_aids = initial_aids
# A chain of filters on all of the aids
global_filter_chain = [
(filter_annots_independent, idenfilt_cfg_common),
(filter_annots_intragroup, idenfilt_cfg_common),
]
# Chains of filters individually for each partition
partition_chains = [
[
# Query partition chain
(filter_annots_independent, qcfg),
(filter_annots_intragroup, qcfg),
(sample_annots, qcfg),
],
[
# Database partition chain
(filter_annots_independent, dcfg),
(filter_annots_intragroup, dcfg),
(sample_annots_wrt_ref, dcfg, 0),
]
]
try:
# GLOBAL FILTER CHAIN
# applies filtering to all available aids
for filtfn, filtcfg in global_filter_chain:
default_aids = filtfn(ibs, default_aids, filtcfg, prefix='',
withpre=True, **verbflags)
# PARTITION FILTER CHAIN
# chain of filters for query / database annots
default_qaids = default_daids = default_aids
partition_avail_aids = [default_qaids, default_daids]
partion_kwargs = [qfiltflags, dfiltflags]
for index in range(len(partition_chains)):
filter_chain = partition_chains[index]
avail_aids = partition_avail_aids[index]
_partkw = partion_kwargs[index].copy()
for filter_tup in filter_chain:
filtfn, filtcfg = filter_tup[0:2]
if len(filter_tup) == 3:
# handle filters that take reference sets
refindex = filter_tup[2]
ref_aids = partition_avail_aids[refindex]
_partkw['ref_aids'] = ref_aids
# Execute filtering
avail_aids = filtfn(ibs, avail_aids, filtcfg, **_partkw)
partition_avail_aids[index] = avail_aids
# SUBINDEX EACH PARTITIONED CHAIN
subindex_cfgs = [qcfg, dcfg]
for index in range(len(partition_avail_aids)):
avail_aids = partition_avail_aids[index]
_partkw = partion_kwargs[index]
filtcfg = subindex_cfgs[index]
avail_aids = subindex_annots(
ibs, avail_aids, filtcfg, **_partkw)
partition_avail_aids[index] = avail_aids
# UNPACK FILTER RESULTS
avail_qaids, avail_daids = partition_avail_aids
except Exception as ex:
print('PRINTING ERROR INFO')
print(' * acfg = %s' % (ut.dict_str(comp_acfg, align=True),))
ut.printex(ex, 'Error executing filter chains')
raise
qaid_list = sorted(avail_qaids)
daid_list = sorted(avail_daids)
if verbose:
ut.colorprint('+---------------------', 'yellow')
ibs.print_annotconfig_stats(qaid_list, daid_list)
ut.colorprint('L___ EXPAND_ACFGS ___', 'yellow')
# Save filter to cache
if use_cache and save_cache:
ut.ensuredir(acfg_cachedir)
try:
ut.save_cache(acfg_cachedir, acfg_cachename, aid_cachestr,
(qaid_list, daid_list))
except IOError:
pass
return qaid_list, daid_list
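# expand_acfgs is structured as small filter chains: one global chain over all
# aids, then a chain per partition where a step may need the other partition
# as a reference set (the integer in a 3-tuple step).  The generic runner
# below sketches only that control flow with plain callables; it is
# illustrative and not part of the ibeis API.
def _sketch_run_filter_chains(all_items, global_chain, partition_chains):
    # global chain: every step maps (items, cfg) -> items
    for filt_fn, cfg in global_chain:
        all_items = filt_fn(all_items, cfg)
    partitions = [list(all_items) for _ in partition_chains]
    for index, chain in enumerate(partition_chains):
        avail = partitions[index]
        for step in chain:
            filt_fn, cfg = step[0:2]
            kwargs = {}
            if len(step) == 3:
                # the third element names the partition used as a reference set
                kwargs['ref_items'] = partitions[step[2]]
            avail = filt_fn(avail, cfg, **kwargs)
        partitions[index] = avail
    return partitions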
def expand_species(ibs, species, avail_aids=None):
if species == 'primary':
species = ibs.get_primary_database_species()
if species is None and avail_aids is not None:
species = ibs.get_dominant_species(avail_aids)
return species
@profile
@register_ibs_method
def filter_annots_independent(ibs, avail_aids, aidcfg, prefix='',
verbose=VERB_TESTDATA, withpre=False):
r"""
Filtering that doesn't have to do with a reference set of aids
TODO make filterflags version
Args:
ibs (IBEISController): ibeis controller object
avail_aids (list):
aidcfg (dict):
prefix (str): (default = '')
verbose (bool): verbosity flag(default = False)
Returns:
list: avail_aids
CommandLine:
python -m ibeis --tf filter_annots_independent --veryverbtd
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.init.filter_annots import * # NOQA
>>> import ibeis
>>> from ibeis.expt import annotation_configs
>>> ibs = ibeis.opendb(defaultdb='PZ_MTEST')
>>> avail_aids = input_aids = ibs.get_valid_aids()
>>> aidcfg = annotation_configs.default['dcfg']
>>> aidcfg['require_timestamp'] = True
>>> aidcfg['require_quality'] = False
>>> aidcfg['is_known'] = True
>>> prefix = ''
>>> verbose = True
>>> avail_aids = filter_annots_independent(ibs, avail_aids, aidcfg,
>>> prefix, verbose)
>>> result = ('avail_aids = %s' % (str(avail_aids),))
>>> print(result)
Ignore:
# Testing tag features
python -m ibeis --tf draw_rank_cdf --db PZ_Master1 --show -t best \
-a timectrl:qhas_any=\(needswork,correctable,mildviewpoint\),qhas_none=\(viewpoint,photobomb,error:viewpoint,quality\) \
---acfginfo --veryverbtd
"""
from ibeis.other import ibsfuncs
if aidcfg is None:
if verbose:
print('No annot filter returning')
return avail_aids
VerbosityContext = verb_context('FILTER_INDEPENDENT', aidcfg, verbose)
VerbosityContext.startfilter(withpre=withpre)
if aidcfg.get('is_known') is True:
with VerbosityContext('is_known'):
avail_aids = ibs.filter_aids_without_name(
avail_aids, invert=not aidcfg['is_known'])
#avail_aids = sorted(avail_aids)
if aidcfg.get('is_exemplar') is not None:
flags = ibs.get_annot_exemplar_flags(avail_aids)
is_valid = [flag == aidcfg['is_exemplar'] for flag in flags]
with VerbosityContext('is_exemplar'):
avail_aids = ut.compress(avail_aids, is_valid)
#avail_aids = sorted(avail_aids)
if aidcfg.get('reviewed') is not None:
flags = ibs.get_annot_reviewed(avail_aids)
is_valid = [flag == aidcfg['reviewed'] for flag in flags]
with VerbosityContext('reviewed'):
avail_aids = ut.compress(avail_aids, is_valid)
#avail_aids = sorted(avail_aids)
if aidcfg.get('multiple') is not None:
flags = ibs.get_annot_multiple(avail_aids)
is_valid = [flag == aidcfg['multiple'] for flag in flags]
with VerbosityContext('multiple'):
avail_aids = ut.compress(avail_aids, is_valid)
#avail_aids = sorted(avail_aids)
if aidcfg.get('require_timestamp') is True:
with VerbosityContext('require_timestamp'):
avail_aids = ibs.filter_aids_without_timestamps(avail_aids)
#avail_aids = sorted(avail_aids)
cfg_species = aidcfg.get('species')
if isinstance(cfg_species, six.string_types) and cfg_species.lower() == 'none':
cfg_species = None
metadata = ut.LazyDict(
species=lambda: expand_species(ibs, cfg_species, None))
if cfg_species is not None:
species = metadata['species']
with VerbosityContext('species', species=species):
avail_aids = ibs.filter_aids_to_species(avail_aids, species)
#avail_aids = sorted(avail_aids)
if aidcfg.get('been_adjusted', None):
# HACK to see if the annotation has been adjusted from the default
# value set by dbio.ingest_database
flag_list = ibs.get_annot_been_adjusted(avail_aids)
with VerbosityContext('been_adjusted'):
avail_aids = ut.compress(avail_aids, flag_list)
if aidcfg.get('contrib_contains', None):
contrib_contains = aidcfg['contrib_contains']
gid_list = ibs.get_annot_gids(avail_aids)
tag_list = ibs.get_image_contributor_tag(gid_list)
flag_list = [contrib_contains in tag for tag in tag_list]
with VerbosityContext('contrib_contains'):
avail_aids = ut.compress(avail_aids, flag_list)
if aidcfg.get('minqual') is not None or aidcfg.get('require_quality'):
minqual = 'junk' if aidcfg['minqual'] is None else aidcfg['minqual']
with VerbosityContext('minqual', 'require_quality'):
# Filter quality
avail_aids = ibs.filter_aids_to_quality(
avail_aids, minqual, unknown_ok=not aidcfg['require_quality'])
#avail_aids = sorted(avail_aids)
if aidcfg.get('max_unixtime', None) is not None:
max_unixtime = aidcfg.get('max_unixtime', None)
unixtimes = np.array(ibs.get_annot_image_unixtimes_asfloat(avail_aids))
flags = unixtimes <= max_unixtime
with VerbosityContext('max_unixtime'):
avail_aids = ut.compress(avail_aids, flags)
#avail_aids = sorted(avail_aids)
if aidcfg.get('min_unixtime', None) is not None:
min_unixtime = aidcfg.get('min_unixtime', None)
unixtimes = np.array(ibs.get_annot_image_unixtimes_asfloat(avail_aids))
flags = unixtimes >= min_unixtime
with VerbosityContext('min_unixtime'):
avail_aids = ut.compress(avail_aids, flags)
#avail_aids = sorted(avail_aids)
if aidcfg.get('max_numfeat') is not None or aidcfg.get('min_numfeat') is not None:
max_numfeat = aidcfg['max_numfeat']
min_numfeat = aidcfg['min_numfeat']
if max_numfeat is None:
max_numfeat = np.inf
if min_numfeat is None:
min_numfeat = 0
numfeat_list = np.array(ibs.get_annot_num_feats(avail_aids))
flags_list = np.logical_and(
numfeat_list >= min_numfeat,
numfeat_list <= max_numfeat)
with VerbosityContext('max_numfeat', 'min_numfeat'):
avail_aids = ut.compress(avail_aids, flags_list)
if aidcfg.get('view') is not None or aidcfg.get('require_viewpoint'):
# Resolve base viewpoint
if aidcfg['view'] == 'primary':
view = ibsfuncs.get_primary_species_viewpoint(metadata['species'])
elif aidcfg['view'] == 'primary1':
view = ibsfuncs.get_primary_species_viewpoint(metadata['species'], 1)
else:
view = aidcfg['view']
if isinstance(view, six.string_types) and view.lower() == 'none':
view = None
OLD = False
if OLD:
view_ext1 = (aidcfg['view_ext']
if aidcfg['view_ext1'] is None else
aidcfg['view_ext1'])
view_ext2 = (aidcfg['view_ext']
if aidcfg['view_ext2'] is None else
aidcfg['view_ext2'])
valid_yaws = ibsfuncs.get_extended_viewpoints(
view, num1=view_ext1, num2=view_ext2)
unknown_ok = not aidcfg['require_viewpoint']
with VerbosityContext('view', 'require_viewpoint', 'view_ext',
'view_ext1', 'view_ext2', valid_yaws=valid_yaws):
avail_aids = ibs.filter_aids_to_viewpoint(
avail_aids, valid_yaws, unknown_ok=unknown_ok)
avail_aids = sorted(avail_aids)
else:
def rectify_view(vstr):
# FIXME: I stopped implementing the += stuff
vstr_num = vstr.lower()
num = 0
if not vstr_num.endswith('1'):
vstr = vstr_num
else:
if '+' in vstr:
vstr, numstr = vstr_num.split('+')
num = int(numstr)
if '-' in vstr:
                    vstr, numstr = vstr_num.split('-')
num = -int(numstr)
assert num == 0, 'cant do += yet'
if vstr == 'primary':
return ibsfuncs.get_primary_species_viewpoint(metadata['species'])
for yawtxt, other_yawtxt in ibs.const.YAWALIAS.items():
other_yawtxt = ut.ensure_iterable(other_yawtxt)
if vstr == yawtxt.lower():
return yawtxt
for x in other_yawtxt:
if vstr == x.lower():
return yawtxt
raise ValueError('unknown viewpoint vstr=%r' % (vstr,))
if view is None:
valid_yaw_txts = None
else:
valid_yaw_txts = [
rectify_view(vstr)
for vstr in ut.smart_cast(view, list)
]
unknown_ok = not aidcfg['require_viewpoint']
yaw_flags = ibs.get_viewpoint_filterflags(
avail_aids, valid_yaw_txts, unknown_ok=unknown_ok, assume_unique=True)
yaw_flags = list(yaw_flags)
with VerbosityContext('view', 'require_viewpoint', 'view_ext',
'view_ext1', 'view_ext2', valid_yaws=valid_yaw_txts):
avail_aids = ut.compress(avail_aids, yaw_flags)
#if aidcfg.get('exclude_view') is not None:
# raise NotImplementedError('view tag resolution of exclude_view')
# # Filter viewpoint
# # TODO need to resolve viewpoints
# exclude_view = aidcfg.get('exclude_view')
# with VerbosityContext('exclude_view', hack=True):
# avail_aids = ibs.remove_aids_of_viewpoint(
# avail_aids, exclude_view)
if aidcfg.get('min_pername_global') is not None:
# Keep annots with at least this many groundtruths in the database
min_pername_global = aidcfg.get('min_pername_global')
num_gt_global_list = ibs.get_annot_num_groundtruth(avail_aids, noself=False)
flag_list = np.array(num_gt_global_list) >= min_pername_global
with VerbosityContext('exclude_view'):
avail_aids = ut.compress(avail_aids, flag_list)
#avail_aids = sorted(avail_aids)
if aidcfg.get('max_pername_global') is not None:
max_pername_global = aidcfg.get('max_pername_global')
num_gt_global_list = ibs.get_annot_num_groundtruth(avail_aids, noself=False)
flag_list = np.array(num_gt_global_list) <= max_pername_global
with VerbosityContext('exclude_view'):
avail_aids = ut.compress(avail_aids, flag_list)
#avail_aids = sorted(avail_aids)
# FILTER HACK integrating some notion of tag functions
# TODO: further integrate
if aidcfg.get('has_any', None) or aidcfg.get('has_none', None):
filterkw = ut.dict_subset(aidcfg, ['has_any', 'has_none'], None)
flags = get_annot_tag_filterflags(ibs, avail_aids, filterkw)
with VerbosityContext('has_any', 'has_none'):
avail_aids = ut.compress(avail_aids, flags)
#avail_aids = sorted(avail_aids)
avail_aids = sorted(avail_aids)
VerbosityContext.endfilter()
return avail_aids
@profile
def filter_annots_intragroup(ibs, avail_aids, aidcfg, prefix='',
verbose=VERB_TESTDATA, withpre=False):
r"""
This filters annots using information about the relationships
between the annotations in the ``avail_aids`` group. This function is not
independent and a second consecutive call may yield new results.
Thus, the order in which this filter is applied matters.
CommandLine:
ibeis --tf get_annotcfg_list \
-a default:qsame_imageset=True,been_adjusted=True,excluderef=True \
--db lynx --veryverbtd --nocache-aid
Example:
>>> aidcfg['min_timedelta'] = 60 * 60 * 24
>>> aidcfg['min_pername'] = 3
"""
from ibeis.other import ibsfuncs
if aidcfg is None:
if verbose:
print('No annot filter returning')
return avail_aids
VerbosityContext = verb_context('FILTER_INTRAGROUP', aidcfg, verbose)
VerbosityContext.startfilter(withpre=withpre)
metadata = ut.LazyDict(species=lambda: expand_species(ibs, aidcfg['species'], avail_aids))
if aidcfg['same_imageset'] is not None:
same_imageset = aidcfg['same_imageset']
assert same_imageset is True
imgsetid_list = ibs.get_annot_primary_imageset(avail_aids)
nid_list = ibs.get_annot_nids(avail_aids)
multiprop2_aids = ut.hierarchical_group_items(avail_aids, [nid_list, imgsetid_list])
qaid_list = []
        # TODO: sampling using different encounters
        for imgsetid, nid2_aids in multiprop2_aids.items():
if len(nid2_aids) == 1:
pass
else:
aids_list = list(nid2_aids.values())
idx = ut.list_argmax(list(map(len, aids_list)))
qaids = aids_list[idx]
qaid_list.extend(qaids)
with VerbosityContext('same_imageset'):
avail_aids = qaid_list
avail_aids = sorted(avail_aids)
# TODO:
# Filter via GPS distance
#try:
# if aidcfg['min_spacedelta'] is not None:
# pass
# if aidcfg['min_spacetimedelta'] is not None:
# pass
#except KeyError:
# pass
# FIXME: This is NOT an independent filter because it depends on pairwise
# interactions
if aidcfg['view_pername'] is not None:
species = metadata['species']
        # This filter removes entire names. The available aids must be from
# names with certain viewpoint frequency properties
prop2_nid2_aids = ibs.group_annots_by_prop_and_name(
avail_aids, ibs.get_annot_yaw_texts)
countstr = aidcfg['view_pername']
primary_viewpoint = ibsfuncs.get_primary_species_viewpoint(species)
lhs_dict = {
'primary': primary_viewpoint,
'primary1': ibsfuncs.get_extended_viewpoints(
primary_viewpoint, num1=1, num2=0, include_base=False)[0]
}
self = ut.CountstrParser(lhs_dict, prop2_nid2_aids)
nid2_flag = self.parse_countstr_expr(countstr)
nid2_aids = ibs.group_annots_by_name_dict(avail_aids)
valid_nids = [nid for nid, flag in nid2_flag.items() if flag]
with VerbosityContext('view_pername', countstr=countstr):
avail_aids = ut.flatten(ut.dict_take(nid2_aids, valid_nids))
#avail_aids = sorted(avail_aids)
if aidcfg['min_timedelta'] is not None:
min_timedelta = ut.ensure_timedelta(aidcfg['min_timedelta'])
with VerbosityContext('min_timedelta', min_timedelta=min_timedelta):
avail_aids = ibs.filter_annots_using_minimum_timedelta(
avail_aids, min_timedelta)
#avail_aids = sorted(avail_aids)
# Each aid must have at least this number of other groundtruth aids
min_pername = aidcfg['min_pername']
if min_pername is not None:
grouped_aids_ = ibs.group_annots_by_name(avail_aids,
distinguish_unknowns=True,
assume_unique=True)[0]
with VerbosityContext('min_pername'):
flags = np.array(ut.lmap(len, grouped_aids_)) >= min_pername
avail_aids = ut.flatten(ut.compress(grouped_aids_, flags))
#avail_aids = ut.flatten([
# aids for aids in grouped_aids_ if len(aids) >= min_pername])
#avail_aids = sorted(avail_aids)
max_pername = aidcfg['max_pername']
if max_pername is not None:
grouped_aids_ = ibs.group_annots_by_name(avail_aids,
distinguish_unknowns=True,
assume_unique=True)[0]
with VerbosityContext('max_pername'):
avail_aids = ut.flatten([
aids for aids in grouped_aids_ if len(aids) <= max_pername])
#avail_aids = sorted(avail_aids)
avail_aids = sorted(avail_aids)
VerbosityContext.endfilter()
return avail_aids
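# The min_pername / max_pername steps above amount to "group by name, keep the
# groups whose size falls in range".  A dependency-free sketch of that filter;
# name_of is any aid -> name mapping (in ibeis it would come from
# ibs.get_annot_nids / group_annots_by_name):
def _sketch_filter_by_name_count(aids, name_of, min_pername=None, max_pername=None):
    from collections import defaultdict
    groups = defaultdict(list)
    for aid in aids:
        groups[name_of(aid)].append(aid)
    keep = []
    for name_aids in groups.values():
        if min_pername is not None and len(name_aids) < min_pername:
            continue
        if max_pername is not None and len(name_aids) > max_pername:
            continue
        keep.extend(name_aids)
    return sorted(keep)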
@profile
def get_reference_preference_order(ibs, gt_ref_grouped_aids,
gt_avl_grouped_aids, prop_getter, cmp_func,
aggfn, rng, verbose=VERB_TESTDATA):
r"""
Orders preference for sampling based on some metric
"""
import vtool as vt
grouped_reference_unixtimes = ibs.unflat_map(
prop_getter, gt_ref_grouped_aids)
grouped_available_gt_unixtimes = ibs.unflat_map(
prop_getter, gt_avl_grouped_aids)
grouped_reference_props = grouped_reference_unixtimes
grouped_available_gt_props = grouped_available_gt_unixtimes
# Order the available aids by some aggregation over some metric
preference_scores = [
aggfn(cmp_func(ref_prop, avl_prop[:, None]), axis=1)
for ref_prop, avl_prop in
zip(grouped_reference_props, grouped_available_gt_props)
]
    # Order within each group by decreasing score (largest aggregated timedelta first)
gt_preference_idx_list = vt.argsort_groups(
preference_scores, reverse=True, rng=rng)
return gt_preference_idx_list
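# Concretely, with cmp_func=absdiff and aggfn=np.mean the preference score of
# each available annot is its mean absolute time difference to the reference
# annots of the same name, and annots are taken in descending score order.
# A numpy-only sketch for a single name group (timestamps are made up):
def _sketch_maxtimedelta_preference(ref_times, avail_times):
    import numpy as np
    ref = np.asarray(ref_times, dtype=float)
    avail = np.asarray(avail_times, dtype=float)
    # score[i] = mean |avail[i] - ref[j]| over the reference annots
    scores = np.abs(avail[:, None] - ref[None, :]).mean(axis=1)
    return np.argsort(-scores)  # largest aggregated timedelta first
# e.g. _sketch_maxtimedelta_preference([0.0], [10.0, 3600.0, 60.0]) -> [1, 2, 0],
# preferring the annot taken an hour away from the reference.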
@profile
def sample_annots_wrt_ref(ibs, avail_aids, aidcfg, ref_aids, prefix='',
verbose=VERB_TESTDATA):
"""
Sampling when a reference set is given
"""
sample_per_name = aidcfg.get('sample_per_name')
sample_per_ref_name = aidcfg.get('sample_per_ref_name')
exclude_reference = aidcfg.get('exclude_reference')
sample_size = aidcfg.get('sample_size')
offset = aidcfg.get('sample_offset')
sample_rule_ref = aidcfg.get('sample_rule_ref')
sample_rule = aidcfg.get('sample_rule')
sample_occur = aidcfg.get('sample_occur')
avail_aids = sorted(avail_aids)
ref_aids = sorted(ref_aids)
VerbosityContext = verb_context('SAMPLE (REF)', aidcfg, verbose)
VerbosityContext.startfilter()
if sample_per_ref_name is None:
sample_per_ref_name = sample_per_name
if offset is None:
offset = 0
if exclude_reference:
assert ref_aids is not None, (
'ref_aids=%r' % (ref_aids,))
# VerbosityContext.report_annot_stats(ibs, avail_aids, prefix, '')
# VerbosityContext.report_annot_stats(ibs, ref_aids, prefix, '')
with VerbosityContext('exclude_reference',
num_ref_aids=len(ref_aids)):
import utool
with utool.embed_on_exception_context:
avail_aids = ut.setdiff_ordered(avail_aids, ref_aids)
avail_aids = sorted(avail_aids)
# HACK:
#also_exclude_overlaps = ibs.get_dbname() == 'Oxford'
also_exclude_overlaps = True
if also_exclude_overlaps:
contact_aids_list = ibs.get_annot_contact_aids(ref_aids, daid_list=avail_aids, assume_unique=True)
# Disallow the same name in the same image
x = ibs.unflat_map(ibs.get_annot_nids, contact_aids_list)
y = ibs.get_annot_nids(ref_aids)
sameimg_samename_aids = ut.flatten(
[ut.compress(aids, np.array(x0) == y0)
for aids, x0, y0 in zip(contact_aids_list, x, y)])
#contact_aids = ut.flatten(contact_aids_list)
avail_aids = ut.setdiff_ordered(avail_aids, sameimg_samename_aids)
    with VerbosityContext('sample_occur',
num_ref_aids=len(ref_aids)):
also_exclude_ref_encounters = sample_occur is True
if also_exclude_ref_encounters:
# Get other aids from the references' encounters
ref_enc_texts = ibs.get_annot_encounter_text(ref_aids)
avail_enc_texts = ibs.get_annot_encounter_text(avail_aids)
flags = ut.setdiff_flags(avail_enc_texts, ref_enc_texts)
avail_aids = ut.compress(avail_aids, flags)
if not (sample_per_ref_name is not None or sample_size is not None):
VerbosityContext.endfilter()
return avail_aids
if ut.is_float(sample_size):
        # A float sample size is an interpolation between the full data and a
        # reference-sized sample (see the worked example below)
sample_size = int(round((len(avail_aids) * sample_size +
(1 - sample_size) * len(ref_aids))))
if verbose:
print('Expanding sample size to: %r' % (sample_size,))
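    # Worked example of the interpolation above (illustrative numbers): with
    # len(avail_aids)=1000, len(ref_aids)=100 and sample_size=0.25 the expanded
    # size is round(1000 * 0.25 + 0.75 * 100) = 325; a float of 0.0 yields a
    # reference-sized sample and 1.0 keeps every available annot.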
    # This function first partitions aids into one set that corresponds with
# the reference set and another that does not correspond with the reference
# set. The rest of the filters operate on these sets independently
partitioned_sets = ibs.partition_annots_into_corresponding_groups(
ref_aids, avail_aids)
# items
# [0], and [1] are corresponding lists of annot groups
    # [2], and [3] are non-corresponding annot groups
(gt_ref_grouped_aids, gt_avl_grouped_aids,
gf_ref_grouped_aids, gf_avl_grouped_aids) = partitioned_sets
if sample_per_ref_name is not None:
rng = np.random.RandomState(SEED2)
if sample_rule_ref == 'maxtimedelta':
# Maximize time delta between query and corresponding database
# annotations
cmp_func = ut.absdiff
aggfn = np.mean
prop_getter = ibs.get_annot_image_unixtimes_asfloat
gt_preference_idx_list = get_reference_preference_order(
ibs, gt_ref_grouped_aids, gt_avl_grouped_aids, prop_getter,
cmp_func, aggfn, rng)
elif sample_rule_ref == 'random':
gt_preference_idx_list = [ut.random_indexes(len(aids), rng=rng)
for aids in gt_avl_grouped_aids]
else:
raise ValueError('Unknown sample_rule_ref = %r' % (
sample_rule_ref,))
gt_sample_idxs_list = ut.get_list_column_slice(
gt_preference_idx_list, offset, offset + sample_per_ref_name)
gt_sample_aids = ut.list_ziptake(gt_avl_grouped_aids,
gt_sample_idxs_list)
gt_avl_grouped_aids = gt_sample_aids
with VerbosityContext('sample_per_ref_name', 'sample_rule_ref',
'sample_offset',
sample_per_ref_name=sample_per_ref_name):
avail_aids = (ut.flatten(gt_avl_grouped_aids) +
ut.flatten(gf_avl_grouped_aids))
if sample_per_name is not None:
# sample rule is always random for gf right now
rng = np.random.RandomState(SEED2)
if sample_rule == 'random':
gf_preference_idx_list = [ut.random_indexes(len(aids), rng=rng)
for aids in gf_avl_grouped_aids]
else:
raise ValueError('Unknown sample_rule=%r' % (sample_rule,))
gf_sample_idxs_list = ut.get_list_column_slice(
gf_preference_idx_list, offset, offset + sample_per_name)
gf_sample_aids = ut.list_ziptake(gf_avl_grouped_aids,
gf_sample_idxs_list)
gf_avl_grouped_aids = gf_sample_aids
with VerbosityContext('sample_per_name', 'sample_rule',
'sample_offset'):
avail_aids = (ut.flatten(gt_avl_grouped_aids) +
ut.flatten(gf_avl_grouped_aids))
gt_avl_aids = ut.flatten(gt_avl_grouped_aids)
gf_avl_aids = ut.flatten(gf_avl_grouped_aids)
if sample_size is not None:
# Keep all correct matches to the reference set
# We have the option of keeping ground false
num_gt = len(gt_avl_aids)
num_gf = len(gf_avl_aids)
num_keep_gf = sample_size - num_gt
num_remove_gf = num_gf - num_keep_gf
if num_remove_gf < 0:
# Too few ground false
print(('Warning: Cannot meet sample_size=%r. available_%saids '
'will be undersized by at least %d')
% (sample_size, prefix, -num_remove_gf,))
if num_keep_gf < 0:
# Too many multitons; Can never remove a multiton
print('Warning: Cannot meet sample_size=%r. available_%saids '
'will be oversized by at least %d'
% (sample_size, prefix, -num_keep_gf,))
rng = np.random.RandomState(SEED2)
gf_avl_aids = ut.random_sample(gf_avl_aids, num_keep_gf, rng=rng)
# random ordering makes for bad hashes
with VerbosityContext('sample_size', sample_size=sample_size,
num_remove_gf=num_remove_gf,
num_keep_gf=num_keep_gf):
avail_aids = gt_avl_aids + gf_avl_aids
avail_aids = sorted(gt_avl_aids + gf_avl_aids)
VerbosityContext.endfilter()
return avail_aids
@profile
def multi_sampled_seaturtle_queries():
import ibeis
from ibeis.expt import annotation_configs
from ibeis.expt import experiment_helpers
from ibeis.init.filter_annots import expand_acfgs
import copy
aidcfg = copy.deepcopy(annotation_configs.default)
db = 'seaturtles' # 'testdb1'
ibs = ibeis.opendb(defaultdb=db)
a = ['default:sample_occur=True,occur_offset=0,exclude_reference=True,qhas_any=(left,right),num_names=1']
acfg_combo_list = experiment_helpers.parse_acfg_combo_list(a)
aidcfg = acfg_combo_list[0][0]
if False:
# Do each name individually. A bit slower, but more correct
qaids_list = []
daids_list = []
aidcfg['qcfg']['name_offset'] = 0
aidcfg['qcfg']['occur_offset'] = 0
prev = -1
while True:
aidcfg['qcfg']['occur_offset'] = 0
while True:
qaids, daids = expand_acfgs(ibs, aidcfg, use_cache=False, save_cache=False)
aidcfg['qcfg']['occur_offset'] += 1
if len(qaids) == 0:
break
qaids_list.append(qaids)
daids_list.append(daids)
print(qaids)
if len(qaids_list) == prev:
break
prev = len(qaids_list)
aidcfg['qcfg']['name_offset'] += 1
for qaids, daids in zip(qaids_list, daids_list):
ibs.print_annotconfig_stats(qaids, daids, enc_per_name=True, per_enc=True)
else:
# A bit faster because we can do multiple names at the same time
qaids_list = []
daids_list = []
aidcfg['qcfg']['num_names'] = None
aidcfg['dcfg']['num_names'] = None
aidcfg['qcfg']['name_offset'] = 0
aidcfg['qcfg']['occur_offset'] = 0
while True:
qaids, daids = expand_acfgs(ibs, aidcfg, use_cache=False, save_cache=False)
aidcfg['qcfg']['occur_offset'] += 1
if len(qaids) == 0:
break
qaids_list.append(qaids)
daids_list.append(daids)
print(qaids)
for qaids, daids in zip(qaids_list, daids_list):
ibs.print_annotconfig_stats(qaids, daids, enc_per_name=True, per_enc=True)
@profile
def sample_annots(ibs, avail_aids, aidcfg, prefix='', verbose=VERB_TESTDATA):
"""
    Sampling preserves input sample structure and thus does not always return
exact values
CommandLine:
python -m ibeis --tf sample_annots --veryverbtd
python -m ibeis --tf get_annotcfg_list --db seaturtles \
-a default:qhas_any=\(left,right\),sample_occur=True,exclude_reference=True,sample_offset=0,num_names=1 --acfginfo
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.init.filter_annots import * # NOQA
>>> import ibeis
>>> from ibeis.expt import annotation_configs
>>> ibs = ibeis.opendb(defaultdb='PZ_MTEST')
>>> avail_aids = input_aids = ibs.get_valid_aids()
>>> aidcfg = copy.deepcopy(annotation_configs.default['dcfg'])
>>> aidcfg['sample_per_name'] = 3
>>> aidcfg['sample_size'] = 10
>>> aidcfg['min_pername'] = 2
>>> prefix = ''
>>> verbose = True
>>> avail_aids = filter_annots_independent(ibs, avail_aids, aidcfg,
>>> prefix, verbose)
>>> avail_aids = sample_annots(ibs, avail_aids, aidcfg,
        >>>                            prefix, verbose)
>>> result = ('avail_aids = %s' % (str(avail_aids),))
>>> print(result)
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.init.filter_annots import * # NOQA
>>> import ibeis
>>> from ibeis.expt import annotation_configs
>>> db = 'seaturtles' # 'testdb1'
>>> ibs = ibeis.opendb(defaultdb=db)
>>> aidcfg = copy.deepcopy(annotation_configs.default)['qcfg']
>>> aidcfg['sample_occur'] = True
>>> initial_aids = ibs.get_valid_aids()
>>> withpre, verbose, prefix = True, 2, ''
>>> avail_aids = filter_annots_independent(
>>> ibs, initial_aids, {'has_any': ['left', 'right']}, prefix, verbose)
>>> qaids = sample_annots(ibs, avail_aids, aidcfg, prefix, verbose)
>>> avail_aids = initial_aids
>>> ref_aids = qaids
>>> dcfg = dict(exclude_reference=True, sample_occur=True)
>>> daids = sample_annots_wrt_ref(ibs, initial_aids, dcfg, qaids, prefix, verbose)
>>> ibs.print_annotconfig_stats(qaids, daids, enc_per_name=True, per_enc=True)
"""
import vtool as vt
from ibeis.expt import annotation_configs
def get_cfg(key):
default_dict = annotation_configs.SAMPLE_DEFAULTS
return aidcfg.get(key, default_dict[key])
VerbosityContext = verb_context('SAMPLE (NOREF)', aidcfg, verbose)
VerbosityContext.startfilter()
sample_rule = get_cfg('sample_rule')
sample_per_name = get_cfg('sample_per_name')
sample_size = get_cfg('sample_size')
offset = get_cfg('sample_offset')
occur_offset = get_cfg('occur_offset')
name_offset = get_cfg('name_offset')
num_names = get_cfg('num_names')
sample_occur = get_cfg('sample_occur')
unflat_get_annot_unixtimes = functools.partial(
ibs.unflat_map, ibs.get_annot_image_unixtimes_asfloat)
if offset is None:
offset = 0
if occur_offset is None:
occur_offset = 0
if name_offset is None:
name_offset = 0
if num_names is not None:
grouped_aids = ibs.group_annots_by_name(avail_aids, assume_unique=True)[0]
with VerbosityContext('num_names'):
name_slice = slice(name_offset, name_offset + num_names)
avail_aids = ut.flatten(grouped_aids[name_slice])
if sample_occur is True:
# Occurrence / Encounter sampling
occur_texts = ibs.get_annot_occurrence_text(avail_aids)
names = ibs.get_annot_names(avail_aids)
grouped_ = ut.hierarchical_group_items(avail_aids, [names, occur_texts])
# ensure dictionary ordering for offset consistency
sgrouped_ = ut.sort_dict(ut.hmap_vals(ut.sort_dict, grouped_, max_depth=0))
occur_slice = slice(occur_offset, occur_offset + 1)
chosen = [ut.flatten(list(sub.values())[occur_slice]) for sub in sgrouped_.values()]
with VerbosityContext('sample_offset'):
            # TODO: num occurrences to sample
# TODO: num annots per encounter to sample
avail_aids = ut.flatten(chosen)
# now find which groups of annotations share those tags
if sample_per_name is not None:
# For the query we just choose a single annot per name
# For the database we have to do something different
grouped_aids = ibs.group_annots_by_name(avail_aids, assume_unique=True)[0]
# Order based on some preference (like random)
sample_seed = get_cfg('sample_seed')
rng = np.random.RandomState(sample_seed)
# + --- Get nested sample indicies ---
if sample_rule == 'random':
preference_idxs_list = [
ut.random_indexes(len(aids), rng=rng) for aids in grouped_aids]
elif sample_rule == 'mintime':
unixtime_list = unflat_get_annot_unixtimes(grouped_aids)
preference_idxs_list = vt.argsort_groups(unixtime_list,
reverse=False, rng=rng)
elif sample_rule == 'maxtime':
unixtime_list = unflat_get_annot_unixtimes(grouped_aids)
preference_idxs_list = vt.argsort_groups(unixtime_list,
reverse=True, rng=rng)
        elif sample_rule == 'qual_and_view':
            # Hacked in
            with VerbosityContext('sample_per_name', 'sample_rule',
                                  'sample_offset'):
                flags = ibs.get_annot_quality_viewpoint_subset(
                    avail_aids, annots_per_view=sample_per_name)
                avail_aids = ut.compress(avail_aids, flags)
else:
raise ValueError('Unknown sample_rule=%r' % (sample_rule,))
# L ___
if sample_rule != 'qual_and_view':
sample_idxs_list = list(ut.iget_list_column_slice(
preference_idxs_list, offset, offset + sample_per_name))
sample_aids = ut.list_ziptake(grouped_aids, sample_idxs_list)
with VerbosityContext('sample_per_name', 'sample_rule',
'sample_offset'):
avail_aids = ut.flatten(sample_aids)
avail_aids = sorted(avail_aids)
if sample_size is not None:
# BUG: Should sample annots while preserving name size
        if sample_size > len(avail_aids):
print('Warning sample size too large')
rng = np.random.RandomState(SEED2)
        # Randomly sample names rather than annotations; this makes sampling a
# knapsack problem. Use a random greedy solution
grouped_aids = ibs.group_annots_by_name(avail_aids, assume_unique=True)[0]
        # knapsack item values and weights are the num annots per name
knapsack_items = [(len(aids), len(aids), count)
for count, aids in enumerate(grouped_aids)]
ut.deterministic_shuffle(knapsack_items, rng=rng)
total_value, items_subset = ut.knapsack_greedy(knapsack_items,
sample_size)
group_idx_sample = ut.get_list_column(items_subset, 2)
subgroup_aids = ut.take(grouped_aids, group_idx_sample)
with VerbosityContext('sample_size'):
avail_aids = ut.flatten(subgroup_aids)
#avail_aids = ut.random_sample(avail_aids, sample_size, rng=rng)
if total_value != sample_size:
            print('Sampling could not get exactly the right sample size')
avail_aids = sorted(avail_aids)
VerbosityContext.endfilter()
return avail_aids
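# The sample_size branch above treats "pick whole names until roughly
# sample_size annots" as a knapsack in which each name's value and weight are
# its annot count, solved greedily after a seeded shuffle.  A self-contained
# sketch of that idea (grouped_aids is a list of per-name aid lists; the
# helper is illustrative, not the ut.knapsack_greedy implementation):
def _sketch_greedy_name_knapsack(grouped_aids, sample_size, seed=0):
    import random
    rng = random.Random(seed)
    order = list(range(len(grouped_aids)))
    rng.shuffle(order)
    chosen, total = [], 0
    for idx in order:
        weight = len(grouped_aids[idx])
        if total + weight <= sample_size:
            chosen.extend(grouped_aids[idx])
            total += weight
    if total != sample_size:
        # like the real code, a greedy solution may land under the target
        print('greedy sample has %d annots, requested %d' % (total, sample_size))
    return sorted(chosen)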
@profile
def subindex_annots(ibs, avail_aids, aidcfg, ref_aids=None,
prefix='', verbose=VERB_TESTDATA):
"""
Returns exact subindex of annotations
"""
VerbosityContext = verb_context('SUBINDEX', aidcfg, verbose)
VerbosityContext.startfilter(withpre=False)
if aidcfg['shuffle']:
rand_idx = ut.random_indexes(len(avail_aids), seed=SEED2)
with VerbosityContext('shuffle', SEED2=SEED2):
avail_aids = ut.take(avail_aids, rand_idx)
if aidcfg['index'] is not None:
indicies = ensure_flatlistlike(aidcfg['index'])
_indexed_aids = [avail_aids[ix]
for ix in indicies if ix < len(avail_aids)]
with VerbosityContext('index', subset_size=len(_indexed_aids)):
avail_aids = _indexed_aids
# Always sort aids to preserve hashes? (Maybe sort the vuuids instead)
avail_aids = sorted(avail_aids)
VerbosityContext.endfilter(withpost=False)
return avail_aids
@profile
def ensure_flatiterable(input_):
if isinstance(input_, six.string_types):
input_ = ut.fuzzy_int(input_)
if isinstance(input_, int) or not ut.isiterable(input_):
return [input_]
elif isinstance(input_, (list, tuple)):
#print(input_)
if len(input_) > 0 and ut.isiterable(input_[0]):
return ut.flatten(input_)
return input_
else:
        raise TypeError('cannot ensure %r input_=%r is iterable' % (
            type(input_), input_))
def ensure_flatlistlike(input_):
#if isinstance(input_, slice):
# pass
iter_ = ensure_flatiterable(input_)
return list(iter_)
def verb_context(filtertype, aidcfg, verbose):
""" closure helper """
class VerbosityContext(object):
"""
Printing filter info in a way that avoids polluting the function
namespace. This is a hack.
        This is a with-statement context class that expects a variable avail_aids
to be modified inside the context. It prints the state of the variable
before and after filtering. Several static methods can be used
at the start and end of larger filtering functions.
"""
def __init__(self, *keys, **filterextra):
self.prefix = ut.get_var_from_stack('prefix', verbose=False)
if verbose:
dictkw = dict(nl=False, explicit=True, nobraces=True)
infostr = ''
if len(keys) > 0:
subdict = ut.dict_subset(aidcfg, keys, None)
infostr += '' + ut.dict_str(subdict, **dictkw)
print('[%s] * Filter by %s' % (
self.prefix.upper(), infostr.strip()))
if verbose > 1 and len(filterextra) > 0:
infostr2 = ut.dict_str(filterextra, nl=False, explicit=False)
print('[%s] %s' % (
self.prefix.upper(), infostr2))
def __enter__(self):
aids = ut.get_var_from_stack('avail_aids', verbose=False)
self.num_before = len(aids)
def __exit__(self, exc_type, exc_value, exc_traceback):
if verbose:
aids = ut.get_var_from_stack('avail_aids', verbose=False)
num_after = len(aids)
num_removed = self.num_before - num_after
if num_removed > 0 or verbose > 1:
print('[%s] ... removed %d annots. %d remain' %
(self.prefix.upper(), num_removed, num_after))
@staticmethod
def report_annot_stats(ibs, aids, prefix, name_suffix, statskw={}):
if verbose > 1:
with ut.Indenter('[%s] ' % (prefix.upper(),)):
# TODO: helpx on statskw
#statskw = dict(per_name_vpedge=None, per_name=None)
dict_name = prefix + 'aid_stats' + name_suffix
#hashid, per_name, per_qual, per_vp, per_name_vpedge,
#per_image, min_name_hourdist
ibs.print_annot_stats(aids, prefix=prefix, label=dict_name,
**statskw)
#def report_annotconfig_stats(ref_aids, aids):
# with ut.Indenter(' '):
# ibs.print_annotconfig_stats(ref_aids, avail_aids)
@staticmethod
def startfilter(withpre=True):
"""
Args:
withpre (bool): if True reports stats before filtering
"""
if verbose:
prefix = ut.get_var_from_stack('prefix', verbose=False)
print('[%s] * [%s] %sAIDS' % (prefix.upper(), filtertype,
prefix))
if verbose > 1 and withpre:
ibs = ut.get_var_from_stack('ibs', verbose=False)
aids = ut.get_var_from_stack('avail_aids', verbose=False)
VerbosityContext.report_annot_stats(ibs, aids, prefix,
'_pre')
@staticmethod
def endfilter(withpost=True):
if verbose:
ibs = ut.get_var_from_stack('ibs', verbose=False)
aids = ut.get_var_from_stack('avail_aids', verbose=False)
prefix = ut.get_var_from_stack('prefix', verbose=False)
hashid = ibs.get_annot_hashid_semantic_uuid(
aids, prefix=prefix.upper())
if withpost:
if verbose > 1:
VerbosityContext.report_annot_stats(ibs, aids, prefix,
'_post')
print('[%s] * HAHID: %s' % (prefix.upper(), hashid))
print('[%s] * [%s]: len(avail_%saids) = %r\n' % (
prefix.upper(), filtertype, prefix, len(aids)))
return VerbosityContext
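# VerbosityContext reads `avail_aids` off the caller's stack frame, which its
# own docstring calls a hack.  The same before/after reporting can be done
# explicitly by passing the container through the context manager; a minimal
# sketch of that alternative design (not used by the code above):
class _SketchFilterReport(object):
    """Report how many items a filtering step removed, without frame inspection."""
    def __init__(self, label, items):
        self.label = label
        self.items = items  # a list the caller mutates in place
    def __enter__(self):
        self.num_before = len(self.items)
        return self
    def __exit__(self, exc_type, exc_value, exc_traceback):
        removed = self.num_before - len(self.items)
        print('[%s] removed %d annots. %d remain' % (self.label, removed, len(self.items)))
# usage sketch:
#     aids = list(range(10))
#     with _SketchFilterReport('is_known', aids):
#         aids[:] = [aid for aid in aids if aid % 2 == 0]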
if __name__ == '__main__':
"""
CommandLine:
python -m ibeis.init.filter_annots
python -m ibeis.init.filter_annots --allexamples
python -m ibeis.init.filter_annots --allexamples --noface --nosrc
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
| 41.734294 | 172 | 0.624108 |
b6095822319babe263c7cf53357d80cd643fe795 | 21,059 | py | Python | tests/unit/test_control_connection.py | fatelei/python-driver | 3bddef6185f2691e1713dfe51d1fa26d1555724c | [
"Apache-2.0"
] | null | null | null | tests/unit/test_control_connection.py | fatelei/python-driver | 3bddef6185f2691e1713dfe51d1fa26d1555724c | [
"Apache-2.0"
] | null | null | null | tests/unit/test_control_connection.py | fatelei/python-driver | 3bddef6185f2691e1713dfe51d1fa26d1555724c | [
"Apache-2.0"
] | null | null | null | # Copyright 2013-2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from concurrent.futures import ThreadPoolExecutor
from mock import Mock, ANY, call
from cassandra import OperationTimedOut, SchemaTargetType, SchemaChangeType
from cassandra.protocol import ResultMessage, RESULT_KIND_ROWS
from cassandra.cluster import ControlConnection, _Scheduler
from cassandra.pool import Host
from cassandra.policies import (SimpleConvictionPolicy, RoundRobinPolicy,
ConstantReconnectionPolicy)
PEER_IP = "foobar"
class MockMetadata(object):
def __init__(self):
self.hosts = {
"192.168.1.0": Host("192.168.1.0", SimpleConvictionPolicy),
"192.168.1.1": Host("192.168.1.1", SimpleConvictionPolicy),
"192.168.1.2": Host("192.168.1.2", SimpleConvictionPolicy)
}
for host in self.hosts.values():
host.set_up()
self.cluster_name = None
self.partitioner = None
self.token_map = {}
def get_host(self, rpc_address):
return self.hosts.get(rpc_address)
def all_hosts(self):
return self.hosts.values()
def rebuild_token_map(self, partitioner, token_map):
self.partitioner = partitioner
self.token_map = token_map
class MockCluster(object):
max_schema_agreement_wait = 5
load_balancing_policy = RoundRobinPolicy()
reconnection_policy = ConstantReconnectionPolicy(2)
down_host = None
contact_points = []
is_shutdown = False
def __init__(self):
self.metadata = MockMetadata()
self.added_hosts = []
self.removed_hosts = []
self.scheduler = Mock(spec=_Scheduler)
self.executor = Mock(spec=ThreadPoolExecutor)
def add_host(self, address, datacenter, rack, signal=False, refresh_nodes=True):
host = Host(address, SimpleConvictionPolicy, datacenter, rack)
self.added_hosts.append(host)
return host
def remove_host(self, host):
self.removed_hosts.append(host)
def on_up(self, host):
pass
def on_down(self, host, is_host_addition):
self.down_host = host
class MockConnection(object):
is_defunct = False
def __init__(self):
self.host = "192.168.1.0"
self.local_results = [
["schema_version", "cluster_name", "data_center", "rack", "partitioner", "release_version", "tokens"],
[["a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", "2.2.0", ["0", "100", "200"]]]
]
self.peer_results = [
["rpc_address", "peer", "schema_version", "data_center", "rack", "tokens"],
[["192.168.1.1", "10.0.0.1", "a", "dc1", "rack1", ["1", "101", "201"]],
["192.168.1.2", "10.0.0.2", "a", "dc1", "rack1", ["2", "102", "202"]]]
]
local_response = ResultMessage(
kind=RESULT_KIND_ROWS, results=self.local_results)
peer_response = ResultMessage(
kind=RESULT_KIND_ROWS, results=self.peer_results)
self.wait_for_responses = Mock(return_value=(peer_response, local_response))
class FakeTime(object):
def __init__(self):
self.clock = 0
def time(self):
return self.clock
def sleep(self, amount):
self.clock += amount
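# FakeTime is injected into the ControlConnection below so that "sleeping"
# only advances a counter, letting tests assert how long a retry loop would
# have waited without real delays.  A tiny usage sketch (illustrative only,
# not part of the driver API):
def _sketch_wait_with_fake_time(fake_time, condition, timeout, interval=0.2):
    start = fake_time.time()
    while not condition() and fake_time.time() - start < timeout:
        fake_time.sleep(interval)
    return condition()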
class ControlConnectionTest(unittest.TestCase):
def setUp(self):
self.cluster = MockCluster()
self.connection = MockConnection()
self.time = FakeTime()
self.control_connection = ControlConnection(self.cluster, 1, 0, 0)
self.control_connection._connection = self.connection
self.control_connection._time = self.time
def _get_matching_schema_preloaded_results(self):
local_results = [
["schema_version", "cluster_name", "data_center", "rack", "partitioner", "release_version", "tokens"],
[["a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", "2.2.0", ["0", "100", "200"]]]
]
local_response = ResultMessage(kind=RESULT_KIND_ROWS, results=local_results)
peer_results = [
["rpc_address", "peer", "schema_version", "data_center", "rack", "tokens"],
[["192.168.1.1", "10.0.0.1", "a", "dc1", "rack1", ["1", "101", "201"]],
["192.168.1.2", "10.0.0.2", "a", "dc1", "rack1", ["2", "102", "202"]]]
]
peer_response = ResultMessage(kind=RESULT_KIND_ROWS, results=peer_results)
return (peer_response, local_response)
def _get_nonmatching_schema_preloaded_results(self):
local_results = [
["schema_version", "cluster_name", "data_center", "rack", "partitioner", "release_version", "tokens"],
[["a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", "2.2.0", ["0", "100", "200"]]]
]
local_response = ResultMessage(kind=RESULT_KIND_ROWS, results=local_results)
peer_results = [
["rpc_address", "peer", "schema_version", "data_center", "rack", "tokens"],
[["192.168.1.1", "10.0.0.1", "a", "dc1", "rack1", ["1", "101", "201"]],
["192.168.1.2", "10.0.0.2", "b", "dc1", "rack1", ["2", "102", "202"]]]
]
peer_response = ResultMessage(kind=RESULT_KIND_ROWS, results=peer_results)
return (peer_response, local_response)
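    # The two builders above return canned (peer_response, local_response) pairs for the
    # preloaded-results tests; they differ only in that the "nonmatching" variant reports
    # schema_version 'b' for the second peer, which should force wait_for_schema_agreement
    # to fall back to querying the connection directly.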
def test_wait_for_schema_agreement(self):
"""
Basic test with all schema versions agreeing
"""
self.assertTrue(self.control_connection.wait_for_schema_agreement())
# the control connection should not have slept at all
self.assertEqual(self.time.clock, 0)
def test_wait_for_schema_agreement_uses_preloaded_results_if_given(self):
"""
wait_for_schema_agreement uses preloaded results if given for shared table queries
"""
preloaded_results = self._get_matching_schema_preloaded_results()
self.assertTrue(self.control_connection.wait_for_schema_agreement(preloaded_results=preloaded_results))
# the control connection should not have slept at all
self.assertEqual(self.time.clock, 0)
# the connection should not have made any queries if given preloaded results
self.assertEqual(self.connection.wait_for_responses.call_count, 0)
def test_wait_for_schema_agreement_falls_back_to_querying_if_schemas_dont_match_preloaded_result(self):
"""
        wait_for_schema_agreement requeries the connection when the schemas in the preloaded results do not match
"""
preloaded_results = self._get_nonmatching_schema_preloaded_results()
self.assertTrue(self.control_connection.wait_for_schema_agreement(preloaded_results=preloaded_results))
# the control connection should not have slept at all
self.assertEqual(self.time.clock, 0)
self.assertEqual(self.connection.wait_for_responses.call_count, 1)
def test_wait_for_schema_agreement_fails(self):
"""
Make sure the control connection sleeps and retries
"""
# change the schema version on one node
self.connection.peer_results[1][1][2] = 'b'
self.assertFalse(self.control_connection.wait_for_schema_agreement())
# the control connection should have slept until it hit the limit
self.assertGreaterEqual(self.time.clock, self.cluster.max_schema_agreement_wait)
def test_wait_for_schema_agreement_skipping(self):
"""
If rpc_address or schema_version isn't set, the host should be skipped
"""
# an entry with no schema_version
self.connection.peer_results[1].append(
["192.168.1.3", "10.0.0.3", None, "dc1", "rack1", ["3", "103", "203"]]
)
# an entry with a different schema_version and no rpc_address
self.connection.peer_results[1].append(
[None, None, "b", "dc1", "rack1", ["4", "104", "204"]]
)
# change the schema version on one of the existing entries
self.connection.peer_results[1][1][3] = 'c'
self.cluster.metadata.get_host('192.168.1.1').is_up = False
self.assertTrue(self.control_connection.wait_for_schema_agreement())
self.assertEqual(self.time.clock, 0)
def test_wait_for_schema_agreement_rpc_lookup(self):
"""
If the rpc_address is 0.0.0.0, the "peer" column should be used instead.
"""
self.connection.peer_results[1].append(
["0.0.0.0", PEER_IP, "b", "dc1", "rack1", ["3", "103", "203"]]
)
host = Host("0.0.0.0", SimpleConvictionPolicy)
self.cluster.metadata.hosts[PEER_IP] = host
host.is_up = False
# even though the new host has a different schema version, it's
# marked as down, so the control connection shouldn't care
self.assertTrue(self.control_connection.wait_for_schema_agreement())
self.assertEqual(self.time.clock, 0)
# but once we mark it up, the control connection will care
host.is_up = True
self.assertFalse(self.control_connection.wait_for_schema_agreement())
self.assertGreaterEqual(self.time.clock, self.cluster.max_schema_agreement_wait)
def test_refresh_nodes_and_tokens(self):
self.control_connection.refresh_node_list_and_token_map()
meta = self.cluster.metadata
self.assertEqual(meta.partitioner, 'Murmur3Partitioner')
self.assertEqual(meta.cluster_name, 'foocluster')
# check token map
self.assertEqual(sorted(meta.all_hosts()), sorted(meta.token_map.keys()))
for token_list in meta.token_map.values():
self.assertEqual(3, len(token_list))
# check datacenter/rack
for host in meta.all_hosts():
self.assertEqual(host.datacenter, "dc1")
self.assertEqual(host.rack, "rack1")
self.assertEqual(self.connection.wait_for_responses.call_count, 1)
def test_refresh_nodes_and_tokens_uses_preloaded_results_if_given(self):
"""
refresh_nodes_and_tokens uses preloaded results if given for shared table queries
"""
preloaded_results = self._get_matching_schema_preloaded_results()
self.control_connection._refresh_node_list_and_token_map(self.connection, preloaded_results=preloaded_results)
meta = self.cluster.metadata
self.assertEqual(meta.partitioner, 'Murmur3Partitioner')
self.assertEqual(meta.cluster_name, 'foocluster')
# check token map
self.assertEqual(sorted(meta.all_hosts()), sorted(meta.token_map.keys()))
for token_list in meta.token_map.values():
self.assertEqual(3, len(token_list))
# check datacenter/rack
for host in meta.all_hosts():
self.assertEqual(host.datacenter, "dc1")
self.assertEqual(host.rack, "rack1")
# the connection should not have made any queries if given preloaded results
self.assertEqual(self.connection.wait_for_responses.call_count, 0)
def test_refresh_nodes_and_tokens_no_partitioner(self):
"""
Test handling of an unknown partitioner.
"""
# set the partitioner column to None
self.connection.local_results[1][0][4] = None
self.control_connection.refresh_node_list_and_token_map()
meta = self.cluster.metadata
self.assertEqual(meta.partitioner, None)
self.assertEqual(meta.token_map, {})
def test_refresh_nodes_and_tokens_add_host(self):
self.connection.peer_results[1].append(
["192.168.1.3", "10.0.0.3", "a", "dc1", "rack1", ["3", "103", "203"]]
)
self.cluster.scheduler.schedule = lambda delay, f, *args, **kwargs: f(*args, **kwargs)
self.control_connection.refresh_node_list_and_token_map()
self.assertEqual(1, len(self.cluster.added_hosts))
self.assertEqual(self.cluster.added_hosts[0].address, "192.168.1.3")
self.assertEqual(self.cluster.added_hosts[0].datacenter, "dc1")
self.assertEqual(self.cluster.added_hosts[0].rack, "rack1")
def test_refresh_nodes_and_tokens_remove_host(self):
del self.connection.peer_results[1][1]
self.control_connection.refresh_node_list_and_token_map()
self.assertEqual(1, len(self.cluster.removed_hosts))
self.assertEqual(self.cluster.removed_hosts[0].address, "192.168.1.2")
def test_refresh_nodes_and_tokens_timeout(self):
def bad_wait_for_responses(*args, **kwargs):
self.assertEqual(kwargs['timeout'], self.control_connection._timeout)
raise OperationTimedOut()
self.connection.wait_for_responses = bad_wait_for_responses
self.control_connection.refresh_node_list_and_token_map()
self.cluster.executor.submit.assert_called_with(self.control_connection._reconnect)
def test_refresh_schema_timeout(self):
def bad_wait_for_responses(*args, **kwargs):
self.time.sleep(kwargs['timeout'])
raise OperationTimedOut()
self.connection.wait_for_responses = Mock(side_effect=bad_wait_for_responses)
self.control_connection.refresh_schema()
self.assertEqual(self.connection.wait_for_responses.call_count, self.cluster.max_schema_agreement_wait / self.control_connection._timeout)
self.assertEqual(self.connection.wait_for_responses.call_args[1]['timeout'], self.control_connection._timeout)
def test_handle_topology_change(self):
event = {
'change_type': 'NEW_NODE',
'address': ('1.2.3.4', 9000)
}
self.cluster.scheduler.reset_mock()
self.control_connection._handle_topology_change(event)
self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.control_connection.refresh_node_list_and_token_map)
event = {
'change_type': 'REMOVED_NODE',
'address': ('1.2.3.4', 9000)
}
self.cluster.scheduler.reset_mock()
self.control_connection._handle_topology_change(event)
self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.cluster.remove_host, None)
event = {
'change_type': 'MOVED_NODE',
'address': ('1.2.3.4', 9000)
}
self.cluster.scheduler.reset_mock()
self.control_connection._handle_topology_change(event)
self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.control_connection.refresh_node_list_and_token_map)
def test_handle_status_change(self):
event = {
'change_type': 'UP',
'address': ('1.2.3.4', 9000)
}
self.cluster.scheduler.reset_mock()
self.control_connection._handle_status_change(event)
self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.control_connection.refresh_node_list_and_token_map)
# do the same with a known Host
event = {
'change_type': 'UP',
'address': ('192.168.1.0', 9000)
}
self.cluster.scheduler.reset_mock()
self.control_connection._handle_status_change(event)
host = self.cluster.metadata.hosts['192.168.1.0']
self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.cluster.on_up, host)
self.cluster.scheduler.schedule.reset_mock()
event = {
'change_type': 'DOWN',
'address': ('1.2.3.4', 9000)
}
self.control_connection._handle_status_change(event)
self.assertFalse(self.cluster.scheduler.schedule.called)
# do the same with a known Host
event = {
'change_type': 'DOWN',
'address': ('192.168.1.0', 9000)
}
self.control_connection._handle_status_change(event)
host = self.cluster.metadata.hosts['192.168.1.0']
self.assertIs(host, self.cluster.down_host)
def test_handle_schema_change(self):
change_types = [getattr(SchemaChangeType, attr) for attr in vars(SchemaChangeType) if attr[0] != '_']
for change_type in change_types:
event = {
'target_type': SchemaTargetType.TABLE,
'change_type': change_type,
'keyspace': 'ks1',
'table': 'table1'
}
self.cluster.scheduler.reset_mock()
self.control_connection._handle_schema_change(event)
self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.control_connection.refresh_schema, **event)
self.cluster.scheduler.reset_mock()
event['target_type'] = SchemaTargetType.KEYSPACE
del event['table']
self.control_connection._handle_schema_change(event)
self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.control_connection.refresh_schema, **event)
def test_refresh_disabled(self):
cluster = MockCluster()
schema_event = {
'target_type': SchemaTargetType.TABLE,
'change_type': SchemaChangeType.CREATED,
'keyspace': 'ks1',
'table': 'table1'
}
status_event = {
'change_type': 'UP',
'address': ('1.2.3.4', 9000)
}
topo_event = {
'change_type': 'MOVED_NODE',
'address': ('1.2.3.4', 9000)
}
cc_no_schema_refresh = ControlConnection(cluster, 1, -1, 0)
cluster.scheduler.reset_mock()
# no call on schema refresh
cc_no_schema_refresh._handle_schema_change(schema_event)
self.assertFalse(cluster.scheduler.schedule.called)
self.assertFalse(cluster.scheduler.schedule_unique.called)
# topo and status changes as normal
cc_no_schema_refresh._handle_status_change(status_event)
cc_no_schema_refresh._handle_topology_change(topo_event)
cluster.scheduler.schedule_unique.assert_has_calls([call(ANY, cc_no_schema_refresh.refresh_node_list_and_token_map),
call(ANY, cc_no_schema_refresh.refresh_node_list_and_token_map)])
cc_no_topo_refresh = ControlConnection(cluster, 1, 0, -1)
cluster.scheduler.reset_mock()
# no call on topo refresh
cc_no_topo_refresh._handle_topology_change(topo_event)
self.assertFalse(cluster.scheduler.schedule.called)
self.assertFalse(cluster.scheduler.schedule_unique.called)
# schema and status change refresh as normal
cc_no_topo_refresh._handle_status_change(status_event)
cc_no_topo_refresh._handle_schema_change(schema_event)
cluster.scheduler.schedule_unique.assert_has_calls([call(ANY, cc_no_topo_refresh.refresh_node_list_and_token_map),
call(0.0, cc_no_topo_refresh.refresh_schema,
**schema_event)])
class EventTimingTest(unittest.TestCase):
"""
A simple test to validate that event scheduling happens in order
Added for PYTHON-358
"""
def setUp(self):
self.cluster = MockCluster()
self.connection = MockConnection()
self.time = FakeTime()
# Use 2 for the schema_event_refresh_window which is what we would normally default to.
self.control_connection = ControlConnection(self.cluster, 1, 2, 0)
self.control_connection._connection = self.connection
self.control_connection._time = self.time
def test_event_delay_timing(self):
"""
        Submits a wide array of events and makes sure that each is scheduled to occur in the order it was received
"""
prior_delay = 0
for _ in range(100):
for change_type in ('CREATED', 'DROPPED', 'UPDATED'):
event = {
'change_type': change_type,
'keyspace': '1',
'table': 'table1'
}
# This is to increment the fake time, we don't actually sleep here.
self.time.sleep(.001)
self.cluster.scheduler.reset_mock()
self.control_connection._handle_schema_change(event)
self.cluster.scheduler.mock_calls
# Grabs the delay parameter from the scheduler invocation
current_delay = self.cluster.scheduler.mock_calls[0][1][0]
self.assertLess(prior_delay, current_delay)
prior_delay = current_delay
| 41.373281 | 146 | 0.654779 |
531d49468644796d0b5e5c03e19af41595f70d3d | 3,063 | py | Python | ml-app/entities/learn/classification/model.py | janove51/ml-app | 0d66aa4c25648f2059eb645b7f8081f028fac703 | [
"MIT"
] | null | null | null | ml-app/entities/learn/classification/model.py | janove51/ml-app | 0d66aa4c25648f2059eb645b7f8081f028fac703 | [
"MIT"
] | null | null | null | ml-app/entities/learn/classification/model.py | janove51/ml-app | 0d66aa4c25648f2059eb645b7f8081f028fac703 | [
"MIT"
] | null | null | null | import os, sys
sys.path.append(os.path.abspath('../'))
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
import numpy as np
import classification.utils
def assemble_param_grid_rfc(nr_trees_min=200, nr_trees_max=2000, nr_trees_options=10,
max_features_min=None, max_features_max=None, max_features_options=None,
max_depth_min=1, max_depth_max=32, max_depth_options=6,
bootstrap_options = [True, False], min_samples_split = [2, 5, 10], min_samples_leaf = [1, 2, 4]):
'''
    Generates a grid of candidate hyperparameter values for tuning a RandomForestClassifier
    :return: dictionary mapping parameter names to lists of candidate values
'''
# Number of trees = size of the ensemble itself
n_estimators = [int(x) for x in np.linspace(start=nr_trees_min, stop=nr_trees_max, num=nr_trees_options)]
# Number of features: limit it to increase variance within ensemble
if max_features_min is None and max_features_max is None and max_features_options is None:
max_features = ['sqrt', 'log2'] # default value
else:
max_features = [int(x) for x in np.linspace(start=max_features_min, stop=max_features_max, num=max_features_options)]
# Max Depth = controls model complexity
if max_depth_min is None and max_depth_max is None and max_depth_options is None:
max_depth = None # default value
else:
max_depth = [int(x) for x in np.linspace(start=max_depth_min, stop=max_depth_max, num=max_depth_options)]
nr_combos = len(n_estimators) * len(min_samples_split) * len(max_features) * len(max_depth) * len(min_samples_leaf) * len(bootstrap_options)
print('Grid contains {} possible combinations'.format(nr_combos))
grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf,
'bootstrap': bootstrap_options}
print('Hyperparameter Grid assembled:', grid)
return grid
def train_rfc(X_train, y_train, grid, grid_search_type = 'random'):
'''
    Train Random Forest Classifier using the selected grid search method ('random' or 'all')
:param X_train: nd-array
:param y_train: nd-array
:param grid: dictionary with grid values
    :param grid_search_type: string, either 'random' or 'all'
:return: scikit model object
'''
rfc = RandomForestClassifier(n_jobs=-1, bootstrap=True)
if grid_search_type == 'random':
grid_search = RandomizedSearchCV(estimator=rfc, param_distributions=grid,
n_iter=2, cv=2, verbose=0, random_state=0, n_jobs=1)
    elif grid_search_type == 'all':
        grid_search = GridSearchCV(estimator=rfc, param_grid=grid, cv=2, n_jobs=1, verbose=0)
    else:
        raise ValueError("grid_search_type must be 'random' or 'all', got %r" % grid_search_type)
grid_search.fit(X_train, y_train)
print('Best Parameter grid:', grid_search.best_params_)
model = grid_search.best_estimator_
return model
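
# A minimal usage sketch for the two helpers above, assuming scikit-learn's make_classification
# is available for synthetic data; the demo_* names and the deliberately tiny grid are
# illustrative only, not part of the application's real training pipeline.
if __name__ == '__main__':
    from sklearn.datasets import make_classification

    # small synthetic classification problem so the randomized search finishes quickly
    X_demo, y_demo = make_classification(n_samples=200, n_features=10, random_state=0)
    demo_grid = assemble_param_grid_rfc(nr_trees_min=10, nr_trees_max=50, nr_trees_options=2,
                                        max_depth_min=2, max_depth_max=8, max_depth_options=2)
    demo_model = train_rfc(X_demo, y_demo, demo_grid, grid_search_type='random')
    print('Training accuracy of best estimator:', demo_model.score(X_demo, y_demo))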
| 38.772152 | 144 | 0.69507 |
802d18b8c7aead93622047d9327b29be53f9a315 | 1,417 | py | Python | metadata_replace/test_mrepr.py | Preocts/python_play_carton | 071b19a6b5a6420192cd262195f95acfd787b476 | [
"MIT"
] | null | null | null | metadata_replace/test_mrepr.py | Preocts/python_play_carton | 071b19a6b5a6420192cd262195f95acfd787b476 | [
"MIT"
] | null | null | null | metadata_replace/test_mrepr.py | Preocts/python_play_carton | 071b19a6b5a6420192cd262195f95acfd787b476 | [
"MIT"
] | null | null | null | from typing import Dict
import pytest
import mrepr
@pytest.mark.parametrize(
("in_", "keypairs", "expected"),
(
("{{metatag}}", {"metatag": "replaced"}, "replaced"),
("{{metaTag }}", {"metatag": "replaced"}, "replaced"),
("{{ metaTag}}", {"metatag": "replaced"}, "replaced"),
("{{ metaTag }}", {"metatag": "replaced"}, "replaced"),
("{{metatag}} ", {"metatag": "replaced"}, "replaced "),
(" {{Metatag}}", {"metatag": "replaced"}, " replaced"),
(" {{ metatag }} ", {"metatag": "replaced"}, " replaced "),
("This{{metatag}}sentence", {"metatag": "replaced"}, "Thisreplacedsentence"),
(
"This {{ metatag }} sentence",
{"metatag": "replaced"},
"This replaced sentence",
),
(
"This {{ metatag }} sentence",
{"metatag": "replaced"},
"This replaced sentence",
),
(
"This {{ newtag }} sentence",
{"metatag": "replaced"},
"This {{ newtag }} sentence",
),
(
"This {{ newtag }}{{metatag}} sentence",
{"metatag": "replaced", "newtag": "swapped"},
"This swappedreplaced sentence",
),
),
)
def test_mrepr(in_: str, keypairs: Dict[str, str], expected: str) -> None:
"""Test metatag repr"""
assert mrepr.mrepr(in_, keypairs) == expected
| 32.204545 | 85 | 0.485533 |
aba541e9ddbfb81741a63ed6f9bfc25859cd943a | 7,358 | py | Python | blog/migrations/0001_initial.py | mayankchauhan96/Travel-Blogging-website | c1aa425e961fe1158159dea4f2d97df79a0ee917 | [
"Apache-2.0"
] | null | null | null | blog/migrations/0001_initial.py | mayankchauhan96/Travel-Blogging-website | c1aa425e961fe1158159dea4f2d97df79a0ee917 | [
"Apache-2.0"
] | 12 | 2021-03-19T09:05:16.000Z | 2022-03-12T00:39:11.000Z | blog/migrations/0001_initial.py | mayankchauhan96/Travel-Blogging-website | c1aa425e961fe1158159dea4f2d97df79a0ee917 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.0.5 on 2021-01-07 18:40
import autoslug.fields
import ckeditor_uploader.fields
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import sorl.thumbnail.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('category', models.CharField(blank=True, choices=[('Beaches', 'Beaches'), ('Treks', 'Treks'), ('Glaciers', 'Glaciers'), ('Summit', 'Summit'), ('Islands', 'Islands'), ('Hiking', 'Hiking'), ('Camping', 'Camping'), ('Mountains', 'Mountains'), ('Deserts', 'Deserts'), ('Forests', 'Forests'), ('Historic', 'Historic'), ('Monuments', 'Monuments'), ('Temples', 'Temples'), ('Museums', 'Museums'), ('Zoos', 'Zoos'), ('ThemeParks', 'ThemeParks'), ('Gardens', 'Gardens'), ('Aquaria', 'Aquaria'), ('Winter', 'Winter'), ('Market', 'Market'), ('Urban', 'Urban'), ('Rural', 'Rural'), ('Rivers', 'Rivers'), ('Lakes', 'Lakes'), ('Couple', 'Couple'), ('Sports', 'Sports'), ('Food', 'Food '), ('Resorts', 'Resorts'), ('Culture', 'Culture'), ('Adventure', 'Adventure'), ('MotoBlogs', 'MotoBlogs'), ('Solo', 'Solo'), ('Summer', 'Summer'), ('TravelTips', 'TravelTips'), ('Photography', 'Photography'), ('WFM', 'WFM')], max_length=100, null=True)),
],
),
migrations.CreateModel(
name='ContactUs',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=80)),
('email', models.EmailField(max_length=100)),
('mobile', models.CharField(blank=True, max_length=100)),
('created_on', models.DateTimeField(auto_now_add=True)),
('content', models.TextField(max_length=500)),
],
options={
'ordering': ['-created_on'],
},
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200, unique=True)),
('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='title')),
('cover', sorl.thumbnail.fields.ImageField(null=True, upload_to='images/')),
('updated_on', models.DateTimeField(auto_now=True)),
('content', ckeditor_uploader.fields.RichTextUploadingField(blank=True, null=True)),
('created_on', models.DateTimeField(auto_now_add=True)),
('status', models.IntegerField(choices=[(0, 'Draft'), (1, 'Publish')], default=0)),
('state', models.CharField(choices=[('Somewhere In India', 'Somewhere In India'), ('Out Of India', 'Out Of India'), ('Andhra Pradesh', 'Andhra Pradesh'), ('Arunachal Pradesh', 'Arunachal Pradesh'), ('Assam', 'Assam'), ('Bihar', 'Bihar'), ('Chhattisgarh', 'Chhattisgarh'), ('Chandigarh', 'Chandigarh'), ('Dadra and Nagar Haveli', 'Dadra and Nagar Haveli'), ('Daman and Diu', 'Daman and Diu'), ('Delhi', 'Delhi'), ('Goa', 'Goa'), ('Gujarat', 'Gujarat'), ('Haryana', 'Haryana'), ('Himachal Pradesh', 'Himachal Pradesh'), ('Jammu and Kashmir', 'Jammu and Kashmir'), ('Jharkhand', 'Jharkhand'), ('Karnataka', 'Karnataka'), ('Kerala', 'Kerala'), ('Madhya Pradesh', 'Madhya Pradesh'), ('Maharashtra', 'Maharashtra'), ('Manipur', 'Manipur'), ('Meghalaya', 'Meghalaya'), ('Mizoram', 'Mizoram'), ('Nagaland', 'Nagaland'), ('Orissa', 'Orissa'), ('Punjab', 'Punjab'), ('Pondicherry', 'Pondicherry'), ('Rajasthan', 'Rajasthan'), ('Sikkim', 'Sikkim'), ('Tamil Nadu', 'Tamil Nadu'), ('Tripura', 'Tripura'), ('Uttar Pradesh', 'Uttar Pradesh'), ('Uttarakhand', 'Uttarakhand'), ('West Bengal', 'West Bengal')], default='Somewhere In India', max_length=80)),
('slug_st', autoslug.fields.AutoSlugField(editable=False, populate_from='state')),
('location', models.CharField(max_length=200)),
('slug_lc', autoslug.fields.AutoSlugField(editable=False, populate_from='location')),
('views', models.IntegerField(default=0)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='posts', to=settings.AUTH_USER_MODEL)),
('category', models.ManyToManyField(blank=True, related_name='posts', to='blog.Category')),
('like', models.ManyToManyField(blank=True, related_name='post_liked', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-created_on'],
},
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(max_length=150)),
('signup_confirmation', models.BooleanField(default=False)),
('facebook_link', models.CharField(blank=True, max_length=100, null=True)),
('instagram_link', models.CharField(blank=True, max_length=100, null=True)),
('bio', models.CharField(blank=True, max_length=100, null=True)),
('city', models.CharField(blank=True, max_length=100, null=True)),
('Website', models.CharField(blank=True, max_length=100, null=True)),
('youtube_channel', models.CharField(blank=True, max_length=100, null=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='PostView',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ip', models.CharField(max_length=40)),
('session', models.CharField(max_length=40)),
('created', models.DateTimeField(default=datetime.datetime(2021, 1, 8, 0, 10, 20, 691644))),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='post_views', to='blog.Post')),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=80)),
('email', models.EmailField(max_length=100)),
('body', models.TextField(max_length=80)),
('created_on', models.DateTimeField(auto_now_add=True)),
('active', models.BooleanField(default=True)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='blog.Post')),
],
options={
'ordering': ['-created_on'],
},
),
]
| 68.766355 | 1,155 | 0.593368 |
0751e5b7b2e7d7149747c66d079210a6c122013a | 6,189 | py | Python | learnedevolution/bins/evaluate_sweep.py | realtwister/LearnedEvolution | 2ec49b50a49acae9693cfb05ac114dfbcc4aa337 | [
"MIT"
] | null | null | null | learnedevolution/bins/evaluate_sweep.py | realtwister/LearnedEvolution | 2ec49b50a49acae9693cfb05ac114dfbcc4aa337 | [
"MIT"
] | null | null | null | learnedevolution/bins/evaluate_sweep.py | realtwister/LearnedEvolution | 2ec49b50a49acae9693cfb05ac114dfbcc4aa337 | [
"MIT"
] | null | null | null | from .utils import confirm
import re
import ast
from tempfile import TemporaryDirectory
from time import sleep
VAR_REGEX = re.compile("<<VARIABLE:[a-zA-Z0-9_.,{} ()\+\"\'\[\]\-]+>>");
SPACE_REGEX = re.compile("{{[a-zA-Z0-9_., ()\"\'\[\]\+\-]+}}");
SPEC_VAR_REGEX = lambda var: "<<VARIABLE:"+var+"(|{{[a-zA-Z0-9_., \"\'{}()\[\]\+\-]+}})>>";
parsers = [];
def register_evaluate_sweep(subparsers):
parser = subparsers.add_parser('evaluate_sweep')
parser.set_defaults(func = main)
parser.add_argument("log_dir", help="The directory to save/ log to")
parser.add_argument("config_file", help="path to config file")
    parser.add_argument("variable_dir", help="directory containing the .var variable files")
    parser.add_argument("--session_name", help = "The session name to use (will be created if it doesn't exist) (DEFAULT:learnedevolution)", default="learnedevolution")
parser.add_argument("-y","--yes",dest="should_confirm", action="store_false", default=True)
parser.add_argument("--workers", help="Number of workers", default = 4)
parsers.append(parser)
def search_variable(line):
found_vars = [];
for var_found in VAR_REGEX.finditer(line):
var = var_found.group()[11:-2];
space_found = SPACE_REGEX.search(var);
if space_found:
var = var[:space_found.start()]
space = space_found.group()[2:-2];
found_vars.append((var,space));
else:
found_vars.append((var, None))
return found_vars;
def find_variables_in_file(file_path):
variables = dict();
with open(file_path) as f:
for line in f:
for var, space in search_variable(line):
if space is not None:
if var in variables and variables[var] is not None:
print("Space for variable {} defined multiple times".format(var));
else:
variables[var] = create_space(var, space);
elif var not in variables:
variables[var] = None;
return variables;
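# Illustration of the template syntax handled above (a hedged example, derived from the regexes):
# a config line such as
#     lr = <<VARIABLE:learning_rate{{[0.001, 0.01]}}>> + <<VARIABLE:offset>>
# should make search_variable return [('learning_rate', '[0.001, 0.01]'), ('offset', None)];
# the optional {{...}} part is taken as the variable's value space (handed to create_space)
# and stripped from the variable name.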
def get_inactive_windows(windows):
res = []
for window in windows:
if window.name == "bash":
res.append(window)
return res
def main(args):
import libtmux
import os
parser = parsers[0]
# Check arguments
if os.path.exists(args.log_dir):
if not confirm("Are you sure you want to overwrite it?", args.should_confirm):
parser.error("The experiment dir already exists.");
if args.config_file is not None:
config = args.config_file;
else:
config = os.path.join(args.log_dir, "config.py")
if not os.path.exists(config):
parser.error("Configuration file not found")
if not os.path.isfile(config):
parser.error("Configuration is not a file")
if not os.path.exists(args.variable_dir):
parser.error("variable_dir should exist")
# Find the variables in the config file
variables = find_variables_in_file(config)
# Select variable files with appropriate variables
variable_files = [];
for f in os.listdir(args.variable_dir):
f_path = os.path.join(args.variable_dir, f)
if os.path.isfile(f_path) and f[-4:] == ".var":
with open(f_path,'r') as of:
contents = eval(of.read())
for v in variables:
if v not in contents:
break;
else:
variable_files.append(f)
workers = min(int(args.workers), len(variable_files))
print("-------- Summary --------")
print("Variables: ({})".format( len(variables)))
for v in variables:
print(" -",v)
print("Configurations: ({})".format(len(variable_files)))
for f in variable_files:
print(" -",f)
print("Workers:", workers)
print("Logging to:", os.path.abspath(args.log_dir))
print("-------------------------")
if not confirm("Run the experiment?", args.should_confirm):
exit()
# Create configs in temporary directory
tempdir =TemporaryDirectory()
for f_name in variable_files:
f_path = os.path.join(args.variable_dir, f_name)
with open(f_path,'r') as of:
values = eval(of.read())
new_config_path = os.path.join(tempdir.name, f_name[:-4]+".py")
with open(config) as original:
with open(new_config_path,'a') as new:
for line in original:
for var in variables:
line = re.sub(SPEC_VAR_REGEX(var), str(values[var]), line);
new.write(line)
# run the sessions
tmux = libtmux.Server()
# Select tmux session
if tmux.has_session(args.session_name):
session = tmux.find_where({ "session_name": args.session_name })
if not confirm("Session already exists. Should I continue?", args.should_confirm):
exit()
else:
session = tmux.new_session(args.session_name)
# clean idle windows
for window in get_inactive_windows(session.windows)[:-1]:
window.kill_window()
# Setup windows
windows = []
for i in range(workers- len(session.windows)):
window = session.new_window()
windows.append(window)
queue = list(variable_files)
while True:
for window in get_inactive_windows(session.windows):
if len(queue) == 0:
if len(session.windows) > 1:
window.kill_window()
break;
f_name = queue.pop()
f_name = f_name[:-4] #Remove .var extension
config_path = os.path.join(tempdir.name, f_name+".py")
current_dir = os.path.join(args.log_dir, f_name)
window.attached_pane.send_keys("python3 -m learnedevolution evaluate_static {} {}".format(
current_dir,
config_path
))
print("Running configuration", f_name, "on", window.id)
if len(queue) == 0:
break;
sleep(1)
print("All experiments have started or finished on session", session.name)
print("Waiting before clearing temporary directory")
sleep(100)
tempdir.cleanup()
| 34.966102 | 165 | 0.59832 |
7b15209c18723fbcf9e43587da32c91117223168 | 4,755 | py | Python | utils/utils.py | HerrYu123/deeplabv3 | 4f29d2bfb725a77b22fb04e1e0006ae742fa5d47 | [
"MIT"
] | null | null | null | utils/utils.py | HerrYu123/deeplabv3 | 4f29d2bfb725a77b22fb04e1e0006ae742fa5d47 | [
"MIT"
] | null | null | null | utils/utils.py | HerrYu123/deeplabv3 | 4f29d2bfb725a77b22fb04e1e0006ae742fa5d47 | [
"MIT"
] | null | null | null | # camera-ready
import torch
import torch.nn as nn
import os
import numpy as np
def add_weight_decay(net, l2_value, skip_list=()):
# https://raberrytv.wordpress.com/2017/10/29/pytorch-weight-decay-made-easy/
decay, no_decay = [], []
for name, param in net.named_parameters():
if not param.requires_grad:
continue # frozen weights
if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list:
no_decay.append(param)
else:
decay.append(param)
return [{'params': no_decay, 'weight_decay': 0.0}, {'params': decay, 'weight_decay': l2_value}]
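# Hedged usage note for add_weight_decay: the returned parameter groups already carry their own
# 'weight_decay' settings, so they can be handed straight to a torch optimizer, e.g.
#     optimizer = torch.optim.SGD(add_weight_decay(net, 1e-4), lr=0.01, momentum=0.9)
# (the learning rate and momentum values here are illustrative only).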
# function for colorizing a label image:
def label_img_to_color(img, num_classes):
# for num_classes=8
if num_classes == 8:
label_to_color = {
0: [0, 196, 121], #ego
1: [255, 210, 37], #left
2: [240, 127, 0], #right
3: [246, 22, 70], #opposite
4: [255,255,255],
5: [224, 74, 209],
6: [230, 150, 140],
7: [0, 0, 0]
}
# for num_classes=24'
elif num_classes == 24:
label_to_color = {
0: [0, 196, 121],
1: [255, 210, 37],
2: [240, 127, 0],
3: [246, 22, 70],
4: [255,255,255],
5: [224, 74, 209],
6: [230, 150, 140],
7: [70, 70, 70],
8: [102,102, 156],
9: [190,153,153],
10: [180,165,180],
11: [150, 100, 100],
12: [220, 220, 0],
13: [107, 142, 35],
14: [220, 20, 60],
15: [255, 0, 0],
16: [ 0, 0, 70],
17: [ 0, 60, 100],
18: [0, 0, 90],
19: [140, 0, 160],
20: [255, 0, 200],
21: [255, 140, 230],
22: [221, 147, 255],
23: [0, 0 , 0]
}
# for num_classes=20'
elif num_classes == 20:
# migration model
label_to_color = {
0: [0, 196, 121],
1: [255,210, 37],
2: [246, 22, 70],
3: [224, 74,209],
4: [230,150,140],
5: [230,150,140],
6: [250,170, 30],
7: [220,220, 0],
8: [107,142, 35],
9: [152,251,152],
10: [70,130,180],
11: [220, 20, 60],
12: [255, 0, 0],
13: [0, 0,142],
14: [0, 0, 70],
15: [0, 60,100],
16: [ 0, 80,100],
17: [0, 0,230],
18: [119, 11, 32],
19: [0, 0, 90],
#255: [0, 0, 0]
}
# label_to_color = {
# 0: [128, 64,128],
# 1: [244, 35,232],
# 2: [70, 70, 70],
# 3: [102,102,156],
# 4: [190,153,153],
# 5: [153,153,153],
# 6: [250,170, 30],
# 7: [220,220, 0],
# 8: [107,142, 35],
# 9: [152,251,152],
# 10: [70,130,180],
# 11: [220, 20, 60],
# 12: [255, 0, 0],
# 13: [0, 0,142],
# 14: [0, 0, 70],
# 15: [0, 60,100],
# 16: [ 0, 80,100],
# 17: [0, 0,230],
# 18: [119, 11, 32],
# 19: [0, 0, 90],
# }
else:
        raise ValueError("labels numbers error: num_classes must be 8, 20 or 24, got %s" % num_classes)
# label_to_color = {
# 0: [128, 64,128],
# 1: [244, 35,232],
# 2: [ 70, 70, 70],
# 3: [102,102,156],
# 4: [190,153,153],
# 5: [153,153,153],
# 6: [250,170, 30],
# 7: [220,220, 0],
# 8: [107,142, 35],
# 9: [152,251,152],
# 10: [ 70,130,180],
# 11: [220, 20, 60],
# 12: [255, 0, 0],
# 13: [ 0, 0,142],
# 14: [ 0, 0, 70],
# 15: [ 0, 60,100],
# 16: [ 0, 80,100],
# 17: [ 0, 0,230],
# 18: [119, 11, 32],
# 19: [81, 0, 81]
# }
img_height, img_width = img.shape
img_color = np.zeros((img_height, img_width, 3))
for row in range(img_height):
for col in range(img_width):
label = img[row, col]
img_color[row, col] = np.array(label_to_color[label])
return img_color
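# Hedged usage note: label_img_to_color expects a 2-D array of class indices and returns an
# H x W x 3 float array of RGB values in the 0-255 range, so a caller would typically do
#     color_img = label_img_to_color(pred_label_img, num_classes=20).astype(np.uint8)
# before saving or visualising it (the uint8 cast is an assumption about the caller's needs).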
class AverageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
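# Typical AverageMeter usage (sketch): feed update() a per-batch statistic plus the batch size,
# then read .avg for the running mean, e.g.
#     meter = AverageMeter()
#     meter.update(loss.item(), n=images.size(0))  # assumes a torch loss tensor and image batch
#     print(meter.avg)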
def check_mkdir(dir_name):
if not os.path.exists(dir_name):
os.mkdir(dir_name)
| 27.485549 | 99 | 0.403575 |
f1b8969bc44b62acdc8fb81804d7a5852739b22a | 444 | py | Python | 01 - Basics/39-datatypes-numerics.py | python-demo-codes/basics | 2a151bbff4b528cefd52978829c632fd087c8f20 | [
"DOC"
] | 2 | 2019-08-23T06:05:55.000Z | 2019-08-26T03:56:07.000Z | 01 - Basics/39-datatypes-numerics.py | python-lang-codes/basics | 2a151bbff4b528cefd52978829c632fd087c8f20 | [
"DOC"
] | null | null | null | 01 - Basics/39-datatypes-numerics.py | python-lang-codes/basics | 2a151bbff4b528cefd52978829c632fd087c8f20 | [
"DOC"
] | 4 | 2020-10-01T07:16:07.000Z | 2021-07-17T07:55:08.000Z | # HEAD
# Python Basics - Numeric Data Type
# DESCRIPTION
# Describes
# - how numerics are assigned to variables
# - how string like numerics are converted to numerics (type conversion)
#
# RESOURCES
#
# CORE PYTHON DATA TYPES
# # Integer
# # INTEGER
# Integer like Numeric
var = 1
# Convert a Integer like string into Integer/Numeric
var = int("1")
# # Following fails to convert to Integer or Numeric Type
# var = int("1x")
| 18.5 | 78 | 0.693694 |
b221045328dbd3509906cd2ad24fc337afa2668a | 107,316 | py | Python | Lib/test/test_enum.py | ekgus9701/python_practice | b01fa0924752d55e9b2651745c1422d1045bd7a6 | [
"bzip2-1.0.6"
] | 18 | 2016-03-04T15:44:24.000Z | 2021-12-31T11:06:25.000Z | Software/Python-3.7.2/mybuild/lib/python3.7/test/test_enum.py | KHsu2/gelsightmini_tracking | 8f06a8f5e8e376305584d4b3db8c7b47fea90b39 | [
"MIT"
] | 49 | 2016-02-29T17:59:52.000Z | 2019-05-05T04:59:26.000Z | Software/Python-3.7.2/mybuild/lib/python3.7/test/test_enum.py | KHsu2/gelsightmini_tracking | 8f06a8f5e8e376305584d4b3db8c7b47fea90b39 | [
"MIT"
] | 5 | 2018-02-21T02:13:36.000Z | 2019-10-07T02:01:32.000Z | import enum
import inspect
import pydoc
import unittest
import threading
from collections import OrderedDict
from enum import Enum, IntEnum, EnumMeta, Flag, IntFlag, unique, auto
from io import StringIO
from pickle import dumps, loads, PicklingError, HIGHEST_PROTOCOL
from test import support
from datetime import timedelta
try:
import threading
except ImportError:
threading = None
# for pickle tests
try:
class Stooges(Enum):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
Stooges = exc
try:
class IntStooges(int, Enum):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
IntStooges = exc
try:
class FloatStooges(float, Enum):
LARRY = 1.39
CURLY = 2.72
MOE = 3.142596
except Exception as exc:
FloatStooges = exc
try:
class FlagStooges(Flag):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
FlagStooges = exc
# for pickle test and subclass tests
try:
class StrEnum(str, Enum):
'accepts only string values'
class Name(StrEnum):
BDFL = 'Guido van Rossum'
FLUFL = 'Barry Warsaw'
except Exception as exc:
Name = exc
try:
Question = Enum('Question', 'who what when where why', module=__name__)
except Exception as exc:
Question = exc
try:
Answer = Enum('Answer', 'him this then there because')
except Exception as exc:
Answer = exc
try:
Theory = Enum('Theory', 'rule law supposition', qualname='spanish_inquisition')
except Exception as exc:
Theory = exc
# for doctests
try:
class Fruit(Enum):
TOMATO = 1
BANANA = 2
CHERRY = 3
except Exception:
pass
def test_pickle_dump_load(assertion, source, target=None):
if target is None:
target = source
for protocol in range(HIGHEST_PROTOCOL + 1):
assertion(loads(dumps(source, protocol=protocol)), target)
def test_pickle_exception(assertion, exception, obj):
for protocol in range(HIGHEST_PROTOCOL + 1):
with assertion(exception):
dumps(obj, protocol=protocol)
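# The two helpers above run their assertion under every pickle protocol from 0 through
# HIGHEST_PROTOCOL, so a single call exercises all supported protocols.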
class TestHelpers(unittest.TestCase):
# _is_descriptor, _is_sunder, _is_dunder
def test_is_descriptor(self):
class foo:
pass
for attr in ('__get__','__set__','__delete__'):
obj = foo()
self.assertFalse(enum._is_descriptor(obj))
setattr(obj, attr, 1)
self.assertTrue(enum._is_descriptor(obj))
def test_is_sunder(self):
for s in ('_a_', '_aa_'):
self.assertTrue(enum._is_sunder(s))
for s in ('a', 'a_', '_a', '__a', 'a__', '__a__', '_a__', '__a_', '_',
'__', '___', '____', '_____',):
self.assertFalse(enum._is_sunder(s))
def test_is_dunder(self):
for s in ('__a__', '__aa__'):
self.assertTrue(enum._is_dunder(s))
for s in ('a', 'a_', '_a', '__a', 'a__', '_a_', '_a__', '__a_', '_',
'__', '___', '____', '_____',):
self.assertFalse(enum._is_dunder(s))
# for subclassing tests
class classproperty:
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
self.fget = fget
self.fset = fset
self.fdel = fdel
if doc is None and fget is not None:
doc = fget.__doc__
self.__doc__ = doc
def __get__(self, instance, ownerclass):
return self.fget(ownerclass)
# tests
class TestEnum(unittest.TestCase):
def setUp(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
self.Season = Season
class Konstants(float, Enum):
E = 2.7182818
PI = 3.1415926
TAU = 2 * PI
self.Konstants = Konstants
class Grades(IntEnum):
A = 5
B = 4
C = 3
D = 2
F = 0
self.Grades = Grades
class Directional(str, Enum):
EAST = 'east'
WEST = 'west'
NORTH = 'north'
SOUTH = 'south'
self.Directional = Directional
from datetime import date
class Holiday(date, Enum):
NEW_YEAR = 2013, 1, 1
IDES_OF_MARCH = 2013, 3, 15
self.Holiday = Holiday
def test_dir_on_class(self):
Season = self.Season
self.assertEqual(
set(dir(Season)),
set(['__class__', '__doc__', '__members__', '__module__',
'SPRING', 'SUMMER', 'AUTUMN', 'WINTER']),
)
def test_dir_on_item(self):
Season = self.Season
self.assertEqual(
set(dir(Season.WINTER)),
set(['__class__', '__doc__', '__module__', 'name', 'value']),
)
def test_dir_with_added_behavior(self):
class Test(Enum):
this = 'that'
these = 'those'
def wowser(self):
return ("Wowser! I'm %s!" % self.name)
self.assertEqual(
set(dir(Test)),
set(['__class__', '__doc__', '__members__', '__module__', 'this', 'these']),
)
self.assertEqual(
set(dir(Test.this)),
set(['__class__', '__doc__', '__module__', 'name', 'value', 'wowser']),
)
def test_dir_on_sub_with_behavior_on_super(self):
# see issue22506
class SuperEnum(Enum):
def invisible(self):
return "did you see me?"
class SubEnum(SuperEnum):
sample = 5
self.assertEqual(
set(dir(SubEnum.sample)),
set(['__class__', '__doc__', '__module__', 'name', 'value', 'invisible']),
)
def test_enum_in_enum_out(self):
Season = self.Season
self.assertIs(Season(Season.WINTER), Season.WINTER)
def test_enum_value(self):
Season = self.Season
self.assertEqual(Season.SPRING.value, 1)
def test_intenum_value(self):
self.assertEqual(IntStooges.CURLY.value, 2)
def test_enum(self):
Season = self.Season
lst = list(Season)
self.assertEqual(len(lst), len(Season))
self.assertEqual(len(Season), 4, Season)
self.assertEqual(
[Season.SPRING, Season.SUMMER, Season.AUTUMN, Season.WINTER], lst)
for i, season in enumerate('SPRING SUMMER AUTUMN WINTER'.split(), 1):
e = Season(i)
self.assertEqual(e, getattr(Season, season))
self.assertEqual(e.value, i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, season)
self.assertIn(e, Season)
self.assertIs(type(e), Season)
self.assertIsInstance(e, Season)
self.assertEqual(str(e), 'Season.' + season)
self.assertEqual(
repr(e),
'<Season.{0}: {1}>'.format(season, i),
)
def test_value_name(self):
Season = self.Season
self.assertEqual(Season.SPRING.name, 'SPRING')
self.assertEqual(Season.SPRING.value, 1)
with self.assertRaises(AttributeError):
Season.SPRING.name = 'invierno'
with self.assertRaises(AttributeError):
Season.SPRING.value = 2
def test_changing_member(self):
Season = self.Season
with self.assertRaises(AttributeError):
Season.WINTER = 'really cold'
def test_attribute_deletion(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
def spam(cls):
pass
self.assertTrue(hasattr(Season, 'spam'))
del Season.spam
self.assertFalse(hasattr(Season, 'spam'))
with self.assertRaises(AttributeError):
del Season.SPRING
with self.assertRaises(AttributeError):
del Season.DRY
with self.assertRaises(AttributeError):
del Season.SPRING.name
def test_bool_of_class(self):
class Empty(Enum):
pass
self.assertTrue(bool(Empty))
def test_bool_of_member(self):
class Count(Enum):
zero = 0
one = 1
two = 2
for member in Count:
self.assertTrue(bool(member))
def test_invalid_names(self):
with self.assertRaises(ValueError):
class Wrong(Enum):
mro = 9
with self.assertRaises(ValueError):
class Wrong(Enum):
_create_= 11
with self.assertRaises(ValueError):
class Wrong(Enum):
_get_mixins_ = 9
with self.assertRaises(ValueError):
class Wrong(Enum):
_find_new_ = 1
with self.assertRaises(ValueError):
class Wrong(Enum):
_any_name_ = 9
def test_bool(self):
# plain Enum members are always True
class Logic(Enum):
true = True
false = False
self.assertTrue(Logic.true)
self.assertTrue(Logic.false)
# unless overridden
class RealLogic(Enum):
true = True
false = False
def __bool__(self):
return bool(self._value_)
self.assertTrue(RealLogic.true)
self.assertFalse(RealLogic.false)
# mixed Enums depend on mixed-in type
class IntLogic(int, Enum):
true = 1
false = 0
self.assertTrue(IntLogic.true)
self.assertFalse(IntLogic.false)
def test_contains(self):
Season = self.Season
self.assertIn(Season.AUTUMN, Season)
with self.assertWarns(DeprecationWarning):
self.assertNotIn(3, Season)
with self.assertWarns(DeprecationWarning):
self.assertNotIn('AUTUMN', Season)
val = Season(3)
self.assertIn(val, Season)
class OtherEnum(Enum):
one = 1; two = 2
self.assertNotIn(OtherEnum.two, Season)
def test_member_contains(self):
self.assertRaises(TypeError, lambda: 'test' in self.Season.AUTUMN)
self.assertRaises(TypeError, lambda: 3 in self.Season.AUTUMN)
self.assertRaises(TypeError, lambda: 'AUTUMN' in self.Season.AUTUMN)
def test_comparisons(self):
Season = self.Season
with self.assertRaises(TypeError):
Season.SPRING < Season.WINTER
with self.assertRaises(TypeError):
Season.SPRING > 4
self.assertNotEqual(Season.SPRING, 1)
class Part(Enum):
SPRING = 1
CLIP = 2
BARREL = 3
self.assertNotEqual(Season.SPRING, Part.SPRING)
with self.assertRaises(TypeError):
Season.SPRING < Part.CLIP
def test_enum_duplicates(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = FALL = 3
WINTER = 4
ANOTHER_SPRING = 1
lst = list(Season)
self.assertEqual(
lst,
[Season.SPRING, Season.SUMMER,
Season.AUTUMN, Season.WINTER,
])
self.assertIs(Season.FALL, Season.AUTUMN)
self.assertEqual(Season.FALL.value, 3)
self.assertEqual(Season.AUTUMN.value, 3)
self.assertIs(Season(3), Season.AUTUMN)
self.assertIs(Season(1), Season.SPRING)
self.assertEqual(Season.FALL.name, 'AUTUMN')
self.assertEqual(
[k for k,v in Season.__members__.items() if v.name != k],
['FALL', 'ANOTHER_SPRING'],
)
def test_duplicate_name(self):
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
red = 4
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
def red(self):
return 'red'
with self.assertRaises(TypeError):
class Color(Enum):
@property
def red(self):
return 'redder'
red = 1
green = 2
blue = 3
def test_enum_with_value_name(self):
class Huh(Enum):
name = 1
value = 2
self.assertEqual(
list(Huh),
[Huh.name, Huh.value],
)
self.assertIs(type(Huh.name), Huh)
self.assertEqual(Huh.name.name, 'name')
self.assertEqual(Huh.name.value, 1)
def test_format_enum(self):
Season = self.Season
self.assertEqual('{}'.format(Season.SPRING),
'{}'.format(str(Season.SPRING)))
self.assertEqual( '{:}'.format(Season.SPRING),
'{:}'.format(str(Season.SPRING)))
self.assertEqual('{:20}'.format(Season.SPRING),
'{:20}'.format(str(Season.SPRING)))
self.assertEqual('{:^20}'.format(Season.SPRING),
'{:^20}'.format(str(Season.SPRING)))
self.assertEqual('{:>20}'.format(Season.SPRING),
'{:>20}'.format(str(Season.SPRING)))
self.assertEqual('{:<20}'.format(Season.SPRING),
'{:<20}'.format(str(Season.SPRING)))
def test_format_enum_custom(self):
class TestFloat(float, Enum):
one = 1.0
two = 2.0
def __format__(self, spec):
return 'TestFloat success!'
self.assertEqual('{}'.format(TestFloat.one), 'TestFloat success!')
def assertFormatIsValue(self, spec, member):
self.assertEqual(spec.format(member), spec.format(member.value))
def test_format_enum_date(self):
Holiday = self.Holiday
self.assertFormatIsValue('{}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:^20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:>20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:<20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:%Y %m}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:%Y %m %M:00}', Holiday.IDES_OF_MARCH)
def test_format_enum_float(self):
Konstants = self.Konstants
self.assertFormatIsValue('{}', Konstants.TAU)
self.assertFormatIsValue('{:}', Konstants.TAU)
self.assertFormatIsValue('{:20}', Konstants.TAU)
self.assertFormatIsValue('{:^20}', Konstants.TAU)
self.assertFormatIsValue('{:>20}', Konstants.TAU)
self.assertFormatIsValue('{:<20}', Konstants.TAU)
self.assertFormatIsValue('{:n}', Konstants.TAU)
self.assertFormatIsValue('{:5.2}', Konstants.TAU)
self.assertFormatIsValue('{:f}', Konstants.TAU)
def test_format_enum_int(self):
Grades = self.Grades
self.assertFormatIsValue('{}', Grades.C)
self.assertFormatIsValue('{:}', Grades.C)
self.assertFormatIsValue('{:20}', Grades.C)
self.assertFormatIsValue('{:^20}', Grades.C)
self.assertFormatIsValue('{:>20}', Grades.C)
self.assertFormatIsValue('{:<20}', Grades.C)
self.assertFormatIsValue('{:+}', Grades.C)
self.assertFormatIsValue('{:08X}', Grades.C)
self.assertFormatIsValue('{:b}', Grades.C)
def test_format_enum_str(self):
Directional = self.Directional
self.assertFormatIsValue('{}', Directional.WEST)
self.assertFormatIsValue('{:}', Directional.WEST)
self.assertFormatIsValue('{:20}', Directional.WEST)
self.assertFormatIsValue('{:^20}', Directional.WEST)
self.assertFormatIsValue('{:>20}', Directional.WEST)
self.assertFormatIsValue('{:<20}', Directional.WEST)
def test_hash(self):
Season = self.Season
dates = {}
dates[Season.WINTER] = '1225'
dates[Season.SPRING] = '0315'
dates[Season.SUMMER] = '0704'
dates[Season.AUTUMN] = '1031'
self.assertEqual(dates[Season.AUTUMN], '1031')
def test_intenum_from_scratch(self):
class phy(int, Enum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_intenum_inherited(self):
class IntEnum(int, Enum):
pass
class phy(IntEnum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_from_scratch(self):
class phy(float, Enum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_inherited(self):
class FloatEnum(float, Enum):
pass
class phy(FloatEnum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_strenum_from_scratch(self):
class phy(str, Enum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
def test_strenum_inherited(self):
class StrEnum(str, Enum):
pass
class phy(StrEnum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
def test_intenum(self):
class WeekDay(IntEnum):
SUNDAY = 1
MONDAY = 2
TUESDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertEqual(['a', 'b', 'c'][WeekDay.MONDAY], 'c')
self.assertEqual([i for i in range(WeekDay.TUESDAY)], [0, 1, 2])
lst = list(WeekDay)
self.assertEqual(len(lst), len(WeekDay))
self.assertEqual(len(WeekDay), 7)
target = 'SUNDAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY'
target = target.split()
for i, weekday in enumerate(target, 1):
e = WeekDay(i)
self.assertEqual(e, i)
self.assertEqual(int(e), i)
self.assertEqual(e.name, weekday)
self.assertIn(e, WeekDay)
self.assertEqual(lst.index(e)+1, i)
self.assertTrue(0 < e < 8)
self.assertIs(type(e), WeekDay)
self.assertIsInstance(e, int)
self.assertIsInstance(e, Enum)
def test_intenum_duplicates(self):
class WeekDay(IntEnum):
SUNDAY = 1
MONDAY = 2
TUESDAY = TEUSDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertIs(WeekDay.TEUSDAY, WeekDay.TUESDAY)
self.assertEqual(WeekDay(3).name, 'TUESDAY')
self.assertEqual([k for k,v in WeekDay.__members__.items()
if v.name != k], ['TEUSDAY', ])
def test_intenum_from_bytes(self):
self.assertIs(IntStooges.from_bytes(b'\x00\x03', 'big'), IntStooges.MOE)
with self.assertRaises(ValueError):
IntStooges.from_bytes(b'\x00\x05', 'big')
def test_floatenum_fromhex(self):
h = float.hex(FloatStooges.MOE.value)
self.assertIs(FloatStooges.fromhex(h), FloatStooges.MOE)
h = float.hex(FloatStooges.MOE.value + 0.01)
with self.assertRaises(ValueError):
FloatStooges.fromhex(h)
def test_pickle_enum(self):
if isinstance(Stooges, Exception):
raise Stooges
test_pickle_dump_load(self.assertIs, Stooges.CURLY)
test_pickle_dump_load(self.assertIs, Stooges)
def test_pickle_int(self):
if isinstance(IntStooges, Exception):
raise IntStooges
test_pickle_dump_load(self.assertIs, IntStooges.CURLY)
test_pickle_dump_load(self.assertIs, IntStooges)
def test_pickle_float(self):
if isinstance(FloatStooges, Exception):
raise FloatStooges
test_pickle_dump_load(self.assertIs, FloatStooges.CURLY)
test_pickle_dump_load(self.assertIs, FloatStooges)
def test_pickle_enum_function(self):
if isinstance(Answer, Exception):
raise Answer
test_pickle_dump_load(self.assertIs, Answer.him)
test_pickle_dump_load(self.assertIs, Answer)
def test_pickle_enum_function_with_module(self):
if isinstance(Question, Exception):
raise Question
test_pickle_dump_load(self.assertIs, Question.who)
test_pickle_dump_load(self.assertIs, Question)
def test_enum_function_with_qualname(self):
if isinstance(Theory, Exception):
raise Theory
self.assertEqual(Theory.__qualname__, 'spanish_inquisition')
def test_class_nested_enum_and_pickle_protocol_four(self):
# would normally just have this directly in the class namespace
class NestedEnum(Enum):
twigs = 'common'
shiny = 'rare'
self.__class__.NestedEnum = NestedEnum
self.NestedEnum.__qualname__ = '%s.NestedEnum' % self.__class__.__name__
test_pickle_dump_load(self.assertIs, self.NestedEnum.twigs)
def test_pickle_by_name(self):
class ReplaceGlobalInt(IntEnum):
ONE = 1
TWO = 2
ReplaceGlobalInt.__reduce_ex__ = enum._reduce_ex_by_name
for proto in range(HIGHEST_PROTOCOL):
self.assertEqual(ReplaceGlobalInt.TWO.__reduce_ex__(proto), 'TWO')
def test_exploding_pickle(self):
BadPickle = Enum(
'BadPickle', 'dill sweet bread-n-butter', module=__name__)
globals()['BadPickle'] = BadPickle
# now break BadPickle to test exception raising
enum._make_class_unpicklable(BadPickle)
test_pickle_exception(self.assertRaises, TypeError, BadPickle.dill)
test_pickle_exception(self.assertRaises, PicklingError, BadPickle)
def test_string_enum(self):
class SkillLevel(str, Enum):
master = 'what is the sound of one hand clapping?'
journeyman = 'why did the chicken cross the road?'
apprentice = 'knock, knock!'
self.assertEqual(SkillLevel.apprentice, 'knock, knock!')
def test_getattr_getitem(self):
class Period(Enum):
morning = 1
noon = 2
evening = 3
night = 4
self.assertIs(Period(2), Period.noon)
self.assertIs(getattr(Period, 'night'), Period.night)
self.assertIs(Period['morning'], Period.morning)
def test_getattr_dunder(self):
Season = self.Season
self.assertTrue(getattr(Season, '__eq__'))
def test_iteration_order(self):
class Season(Enum):
SUMMER = 2
WINTER = 4
AUTUMN = 3
SPRING = 1
self.assertEqual(
list(Season),
[Season.SUMMER, Season.WINTER, Season.AUTUMN, Season.SPRING],
)
def test_reversed_iteration_order(self):
self.assertEqual(
list(reversed(self.Season)),
[self.Season.WINTER, self.Season.AUTUMN, self.Season.SUMMER,
self.Season.SPRING]
)
def test_programmatic_function_string(self):
SummerMonth = Enum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_with_start(self):
SummerMonth = Enum('SummerMonth', 'june july august', start=10)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 10):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_list(self):
SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'])
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_list_with_start(self):
SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'], start=20)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 20):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_iterable(self):
SummerMonth = Enum(
'SummerMonth',
(('june', 1), ('july', 2), ('august', 3))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_from_dict(self):
SummerMonth = Enum(
'SummerMonth',
OrderedDict((('june', 1), ('july', 2), ('august', 3)))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type(self):
SummerMonth = Enum('SummerMonth', 'june july august', type=int)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_with_start(self):
SummerMonth = Enum('SummerMonth', 'june july august', type=int, start=30)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 30):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_from_subclass(self):
SummerMonth = IntEnum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_from_subclass_with_start(self):
SummerMonth = IntEnum('SummerMonth', 'june july august', start=40)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 40):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_subclassing(self):
if isinstance(Name, Exception):
raise Name
self.assertEqual(Name.BDFL, 'Guido van Rossum')
        self.assertEqual(Name.BDFL, Name('Guido van Rossum'))
self.assertIs(Name.BDFL, getattr(Name, 'BDFL'))
test_pickle_dump_load(self.assertIs, Name.BDFL)
def test_extending(self):
class Color(Enum):
red = 1
green = 2
blue = 3
with self.assertRaises(TypeError):
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
def test_exclude_methods(self):
class whatever(Enum):
this = 'that'
these = 'those'
def really(self):
return 'no, not %s' % self.value
self.assertIsNot(type(whatever.really), whatever)
self.assertEqual(whatever.this.really(), 'no, not that')
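    # Illustrative addition (not part of the original suite): because regular
    # methods are not turned into members, iterating the enum only yields the
    # two value assignments; the class is repeated locally so the sketch is
    # self-contained.
    def test_exclude_methods_iteration_sketch(self):
        class whatever(Enum):
            this = 'that'
            these = 'those'
            def really(self):
                return 'no, not %s' % self.value
        self.assertEqual(list(whatever), [whatever.this, whatever.these])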
def test_wrong_inheritance_order(self):
with self.assertRaises(TypeError):
class Wrong(Enum, str):
NotHere = 'error before this point'
def test_intenum_transitivity(self):
class number(IntEnum):
one = 1
two = 2
three = 3
class numero(IntEnum):
uno = 1
dos = 2
tres = 3
self.assertEqual(number.one, numero.uno)
self.assertEqual(number.two, numero.dos)
self.assertEqual(number.three, numero.tres)
def test_wrong_enum_in_call(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_wrong_enum_in_mixed_call(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_mixed_enum_in_call_1(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertIs(Monochrome(Gender.female), Monochrome.white)
def test_mixed_enum_in_call_2(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertIs(Monochrome(Gender.male), Monochrome.black)
def test_flufl_enum(self):
class Fluflnum(Enum):
def __int__(self):
return int(self.value)
class MailManOptions(Fluflnum):
option1 = 1
option2 = 2
option3 = 3
self.assertEqual(int(MailManOptions.option1), 1)
def test_introspection(self):
class Number(IntEnum):
one = 100
two = 200
self.assertIs(Number.one._member_type_, int)
self.assertIs(Number._member_type_, int)
class String(str, Enum):
yarn = 'soft'
rope = 'rough'
wire = 'hard'
self.assertIs(String.yarn._member_type_, str)
self.assertIs(String._member_type_, str)
class Plain(Enum):
vanilla = 'white'
one = 1
self.assertIs(Plain.vanilla._member_type_, object)
self.assertIs(Plain._member_type_, object)
def test_no_such_enum_member(self):
class Color(Enum):
red = 1
green = 2
blue = 3
with self.assertRaises(ValueError):
Color(4)
with self.assertRaises(KeyError):
Color['chartreuse']
def test_new_repr(self):
class Color(Enum):
red = 1
green = 2
blue = 3
def __repr__(self):
return "don't you just love shades of %s?" % self.name
self.assertEqual(
repr(Color.blue),
"don't you just love shades of blue?",
)
def test_inherited_repr(self):
class MyEnum(Enum):
def __repr__(self):
return "My name is %s." % self.name
class MyIntEnum(int, MyEnum):
this = 1
that = 2
theother = 3
self.assertEqual(repr(MyIntEnum.that), "My name is that.")
def test_multiple_mixin_mro(self):
class auto_enum(type(Enum)):
def __new__(metacls, cls, bases, classdict):
temp = type(classdict)()
names = set(classdict._member_names)
i = 0
for k in classdict._member_names:
v = classdict[k]
if v is Ellipsis:
v = i
else:
i = v
i += 1
temp[k] = v
for k, v in classdict.items():
if k not in names:
temp[k] = v
return super(auto_enum, metacls).__new__(
metacls, cls, bases, temp)
class AutoNumberedEnum(Enum, metaclass=auto_enum):
pass
class AutoIntEnum(IntEnum, metaclass=auto_enum):
pass
class TestAutoNumber(AutoNumberedEnum):
a = ...
b = 3
c = ...
class TestAutoInt(AutoIntEnum):
a = ...
b = 3
c = ...
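    # For reference (an illustrative note, not an original comment): with the
    # auto_enum metaclass above, an Ellipsis value is replaced by a running
    # counter that resumes after any explicit value, so TestAutoNumber and
    # TestAutoInt both end up with a=0, b=3, c=4.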
def test_subclasses_with_getnewargs(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __getnewargs__(self):
return self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
                temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
                        temp)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_getnewargs_ex(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __getnewargs_ex__(self):
return self._args, {}
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
                temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
                        temp)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_reduce(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __reduce__(self):
return self.__class__, self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
                temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
                        temp)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_reduce_ex(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __reduce_ex__(self, proto):
return self.__class__, self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
                temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
                        temp)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_without_direct_pickle_support(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
                temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
                        temp)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_exception(self.assertRaises, TypeError, NEI.x)
test_pickle_exception(self.assertRaises, PicklingError, NEI)
def test_subclasses_without_direct_pickle_support_using_name(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
                temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
                        temp)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
def __reduce_ex__(self, proto):
return getattr, (self.__class__, self._name_)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_tuple_subclass(self):
class SomeTuple(tuple, Enum):
__qualname__ = 'SomeTuple' # needed for pickle protocol 4
first = (1, 'for the money')
second = (2, 'for the show')
third = (3, 'for the music')
self.assertIs(type(SomeTuple.first), SomeTuple)
self.assertIsInstance(SomeTuple.second, tuple)
self.assertEqual(SomeTuple.third, (3, 'for the music'))
globals()['SomeTuple'] = SomeTuple
test_pickle_dump_load(self.assertIs, SomeTuple.first)
def test_duplicate_values_give_unique_enum_items(self):
class AutoNumber(Enum):
first = ()
second = ()
third = ()
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __int__(self):
return int(self._value_)
self.assertEqual(
list(AutoNumber),
[AutoNumber.first, AutoNumber.second, AutoNumber.third],
)
self.assertEqual(int(AutoNumber.second), 2)
self.assertEqual(AutoNumber.third.value, 3)
self.assertIs(AutoNumber(1), AutoNumber.first)
def test_inherited_new_from_enhanced_enum(self):
class AutoNumber(Enum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __int__(self):
return int(self._value_)
class Color(AutoNumber):
red = ()
green = ()
blue = ()
self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
self.assertEqual(list(map(int, Color)), [1, 2, 3])
def test_inherited_new_from_mixed_enum(self):
class AutoNumber(IntEnum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = int.__new__(cls, value)
obj._value_ = value
return obj
class Color(AutoNumber):
red = ()
green = ()
blue = ()
self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
self.assertEqual(list(map(int, Color)), [1, 2, 3])
def test_equality(self):
class AlwaysEqual:
def __eq__(self, other):
return True
class OrdinaryEnum(Enum):
a = 1
self.assertEqual(AlwaysEqual(), OrdinaryEnum.a)
self.assertEqual(OrdinaryEnum.a, AlwaysEqual())
def test_ordered_mixin(self):
class OrderedEnum(Enum):
def __ge__(self, other):
if self.__class__ is other.__class__:
return self._value_ >= other._value_
return NotImplemented
def __gt__(self, other):
if self.__class__ is other.__class__:
return self._value_ > other._value_
return NotImplemented
def __le__(self, other):
if self.__class__ is other.__class__:
return self._value_ <= other._value_
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
return self._value_ < other._value_
return NotImplemented
class Grade(OrderedEnum):
A = 5
B = 4
C = 3
D = 2
F = 1
self.assertGreater(Grade.A, Grade.B)
self.assertLessEqual(Grade.F, Grade.C)
self.assertLess(Grade.D, Grade.A)
self.assertGreaterEqual(Grade.B, Grade.B)
self.assertEqual(Grade.B, Grade.B)
self.assertNotEqual(Grade.C, Grade.D)
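    # Illustrative addition (not part of the original suite): since the
    # OrderedEnum recipe supplies __lt__, its members also work with the
    # builtin sorted(); the classes are repeated locally so the sketch stands
    # on its own.
    def test_ordered_mixin_sorting_sketch(self):
        class OrderedEnum(Enum):
            def __lt__(self, other):
                if self.__class__ is other.__class__:
                    return self._value_ < other._value_
                return NotImplemented
        class Grade(OrderedEnum):
            A = 5
            B = 4
            C = 3
            D = 2
            F = 1
        self.assertEqual(
                sorted(Grade),
                [Grade.F, Grade.D, Grade.C, Grade.B, Grade.A],
                )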
def test_extending2(self):
class Shade(Enum):
def shade(self):
print(self.name)
class Color(Shade):
red = 1
green = 2
blue = 3
with self.assertRaises(TypeError):
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
def test_extending3(self):
class Shade(Enum):
def shade(self):
return self.name
class Color(Shade):
def hex(self):
return '%s hexlified!' % self.value
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
self.assertEqual(MoreColor.magenta.hex(), '5 hexlified!')
def test_subclass_duplicate_name(self):
class Base(Enum):
def test(self):
pass
class Test(Base):
test = 1
self.assertIs(type(Test.test), Test)
def test_subclass_duplicate_name_dynamic(self):
from types import DynamicClassAttribute
class Base(Enum):
@DynamicClassAttribute
def test(self):
return 'dynamic'
class Test(Base):
test = 1
self.assertEqual(Test.test.test, 'dynamic')
def test_no_duplicates(self):
class UniqueEnum(Enum):
def __init__(self, *args):
cls = self.__class__
if any(self.value == e.value for e in cls):
a = self.name
e = cls(self.value).name
raise ValueError(
"aliases not allowed in UniqueEnum: %r --> %r"
% (a, e)
)
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
with self.assertRaises(ValueError):
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
grene = 2
def test_init(self):
class Planet(Enum):
MERCURY = (3.303e+23, 2.4397e6)
VENUS = (4.869e+24, 6.0518e6)
EARTH = (5.976e+24, 6.37814e6)
MARS = (6.421e+23, 3.3972e6)
JUPITER = (1.9e+27, 7.1492e7)
SATURN = (5.688e+26, 6.0268e7)
URANUS = (8.686e+25, 2.5559e7)
NEPTUNE = (1.024e+26, 2.4746e7)
def __init__(self, mass, radius):
self.mass = mass # in kilograms
self.radius = radius # in meters
@property
def surface_gravity(self):
# universal gravitational constant (m3 kg-1 s-2)
G = 6.67300E-11
return G * self.mass / (self.radius * self.radius)
self.assertEqual(round(Planet.EARTH.surface_gravity, 2), 9.80)
self.assertEqual(Planet.EARTH.value, (5.976e+24, 6.37814e6))
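    # Worked example (illustrative, not from the original suite): plugging the
    # EARTH constants into g = G * mass / radius**2 gives
    # 6.673e-11 * 5.976e+24 / (6.37814e6 ** 2) ~= 9.80 m/s**2, which is the
    # value the assertion above rounds to.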
def test_ignore(self):
class Period(timedelta, Enum):
'''
different lengths of time
'''
def __new__(cls, value, period):
obj = timedelta.__new__(cls, value)
obj._value_ = value
obj.period = period
return obj
_ignore_ = 'Period i'
Period = vars()
for i in range(13):
Period['month_%d' % i] = i*30, 'month'
for i in range(53):
Period['week_%d' % i] = i*7, 'week'
for i in range(32):
Period['day_%d' % i] = i, 'day'
OneDay = day_1
OneWeek = week_1
OneMonth = month_1
self.assertFalse(hasattr(Period, '_ignore_'))
self.assertFalse(hasattr(Period, 'Period'))
self.assertFalse(hasattr(Period, 'i'))
self.assertTrue(isinstance(Period.day_1, timedelta))
self.assertTrue(Period.month_1 is Period.day_30)
self.assertTrue(Period.week_4 is Period.day_28)
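    # Note (illustrative, not an original comment): names listed in _ignore_
    # ('Period' and 'i' here) can be used as scratch variables while the class
    # body executes, but they are stripped before the members are created,
    # which is why the hasattr() checks above all return False.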
def test_nonhash_value(self):
class AutoNumberInAList(Enum):
def __new__(cls):
value = [len(cls.__members__) + 1]
obj = object.__new__(cls)
obj._value_ = value
return obj
class ColorInAList(AutoNumberInAList):
red = ()
green = ()
blue = ()
self.assertEqual(list(ColorInAList), [ColorInAList.red, ColorInAList.green, ColorInAList.blue])
for enum, value in zip(ColorInAList, range(3)):
value += 1
self.assertEqual(enum.value, [value])
self.assertIs(ColorInAList([value]), enum)
def test_conflicting_types_resolved_in_new(self):
class LabelledIntEnum(int, Enum):
def __new__(cls, *args):
value, label = args
obj = int.__new__(cls, value)
obj.label = label
obj._value_ = value
return obj
class LabelledList(LabelledIntEnum):
unprocessed = (1, "Unprocessed")
payment_complete = (2, "Payment Complete")
self.assertEqual(list(LabelledList), [LabelledList.unprocessed, LabelledList.payment_complete])
self.assertEqual(LabelledList.unprocessed, 1)
self.assertEqual(LabelledList(1), LabelledList.unprocessed)
def test_auto_number(self):
class Color(Enum):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 1)
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 3)
def test_auto_name(self):
class Color(Enum):
def _generate_next_value_(name, start, count, last):
return name
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 'blue')
self.assertEqual(Color.green.value, 'green')
def test_auto_name_inherit(self):
class AutoNameEnum(Enum):
def _generate_next_value_(name, start, count, last):
return name
class Color(AutoNameEnum):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 'blue')
self.assertEqual(Color.green.value, 'green')
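    # Illustrative addition (not part of the original suite): because
    # _generate_next_value_ receives the member name as its first argument,
    # auto() can derive values from names in other ways as well; here the
    # upper-case names are mapped to lower-case string values.
    def test_auto_name_transform_sketch(self):
        class Color(Enum):
            def _generate_next_value_(name, start, count, last_values):
                return name.lower()
            RED = auto()
            BLUE = auto()
        self.assertEqual(Color.RED.value, 'red')
        self.assertEqual(Color.BLUE.value, 'blue')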
def test_auto_garbage(self):
class Color(Enum):
red = 'red'
blue = auto()
self.assertEqual(Color.blue.value, 1)
def test_auto_garbage_corrected(self):
class Color(Enum):
red = 'red'
blue = 2
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 3)
def test_duplicate_auto(self):
class Dupes(Enum):
first = primero = auto()
second = auto()
third = auto()
self.assertEqual([Dupes.first, Dupes.second, Dupes.third], list(Dupes))
def test_missing(self):
class Color(Enum):
red = 1
green = 2
blue = 3
@classmethod
def _missing_(cls, item):
if item == 'three':
return cls.blue
elif item == 'bad return':
# trigger internal error
return 5
elif item == 'error out':
raise ZeroDivisionError
else:
# trigger not found
return None
self.assertIs(Color('three'), Color.blue)
self.assertRaises(ValueError, Color, 7)
try:
Color('bad return')
except TypeError as exc:
self.assertTrue(isinstance(exc.__context__, ValueError))
else:
raise Exception('Exception not raised.')
try:
Color('error out')
except ZeroDivisionError as exc:
self.assertTrue(isinstance(exc.__context__, ValueError))
else:
raise Exception('Exception not raised.')
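    # Illustrative addition (not part of the original suite): a more typical
    # _missing_ hook normalises lookups -- here a case-insensitive match on
    # member names -- and still returns None for anything unknown, so the
    # usual ValueError is raised.
    def test_missing_normalising_sketch(self):
        class Label(Enum):
            RED = 1
            GREEN = 2
            @classmethod
            def _missing_(cls, value):
                if isinstance(value, str):
                    for member in cls:
                        if member.name.lower() == value.lower():
                            return member
                return None
        self.assertIs(Label('red'), Label.RED)
        self.assertRaises(ValueError, Label, 'mauve')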
def test_multiple_mixin(self):
class MaxMixin:
@classproperty
def MAX(cls):
max = len(cls)
cls.MAX = max
return max
class StrMixin:
def __str__(self):
return self._name_.lower()
class SomeEnum(Enum):
def behavior(self):
return 'booyah'
class AnotherEnum(Enum):
def behavior(self):
return 'nuhuh!'
def social(self):
return "what's up?"
class Color(MaxMixin, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'Color.BLUE')
class Color(MaxMixin, StrMixin, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'blue')
class Color(StrMixin, MaxMixin, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'blue')
class CoolColor(StrMixin, SomeEnum, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(CoolColor.RED.value, 1)
self.assertEqual(CoolColor.GREEN.value, 2)
self.assertEqual(CoolColor.BLUE.value, 3)
self.assertEqual(str(CoolColor.BLUE), 'blue')
self.assertEqual(CoolColor.RED.behavior(), 'booyah')
class CoolerColor(StrMixin, AnotherEnum, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(CoolerColor.RED.value, 1)
self.assertEqual(CoolerColor.GREEN.value, 2)
self.assertEqual(CoolerColor.BLUE.value, 3)
self.assertEqual(str(CoolerColor.BLUE), 'blue')
self.assertEqual(CoolerColor.RED.behavior(), 'nuhuh!')
self.assertEqual(CoolerColor.RED.social(), "what's up?")
class CoolestColor(StrMixin, SomeEnum, AnotherEnum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(CoolestColor.RED.value, 1)
self.assertEqual(CoolestColor.GREEN.value, 2)
self.assertEqual(CoolestColor.BLUE.value, 3)
self.assertEqual(str(CoolestColor.BLUE), 'blue')
self.assertEqual(CoolestColor.RED.behavior(), 'booyah')
self.assertEqual(CoolestColor.RED.social(), "what's up?")
class ConfusedColor(StrMixin, AnotherEnum, SomeEnum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(ConfusedColor.RED.value, 1)
self.assertEqual(ConfusedColor.GREEN.value, 2)
self.assertEqual(ConfusedColor.BLUE.value, 3)
self.assertEqual(str(ConfusedColor.BLUE), 'blue')
self.assertEqual(ConfusedColor.RED.behavior(), 'nuhuh!')
self.assertEqual(ConfusedColor.RED.social(), "what's up?")
class ReformedColor(StrMixin, IntEnum, SomeEnum, AnotherEnum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(ReformedColor.RED.value, 1)
self.assertEqual(ReformedColor.GREEN.value, 2)
self.assertEqual(ReformedColor.BLUE.value, 3)
self.assertEqual(str(ReformedColor.BLUE), 'blue')
self.assertEqual(ReformedColor.RED.behavior(), 'booyah')
self.assertEqual(ConfusedColor.RED.social(), "what's up?")
self.assertTrue(issubclass(ReformedColor, int))
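    # Note (illustrative, not an original comment): which __str__ wins above is
    # decided purely by the MRO -- StrMixin takes effect whenever it appears
    # before Enum (or IntEnum) in the bases, no matter where the value-only
    # mixins sit, which is why every variant listing StrMixin prints 'blue'
    # while plain MaxMixin keeps the default 'Color.BLUE' form.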
def test_multiple_inherited_mixin(self):
class StrEnum(str, Enum):
def __new__(cls, *args, **kwargs):
for a in args:
if not isinstance(a, str):
raise TypeError("Enumeration '%s' (%s) is not"
" a string" % (a, type(a).__name__))
return str.__new__(cls, *args, **kwargs)
@unique
class Decision1(StrEnum):
REVERT = "REVERT"
REVERT_ALL = "REVERT_ALL"
RETRY = "RETRY"
class MyEnum(StrEnum):
pass
@unique
class Decision2(MyEnum):
REVERT = "REVERT"
REVERT_ALL = "REVERT_ALL"
RETRY = "RETRY"
class TestOrder(unittest.TestCase):
def test_same_members(self):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
def test_same_members_with_aliases(self):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
verde = green
def test_same_members_wrong_order(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
blue = 3
green = 2
def test_order_has_extra_members(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue purple'
red = 1
green = 2
blue = 3
def test_order_has_extra_members_with_aliases(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue purple'
red = 1
green = 2
blue = 3
verde = green
def test_enum_has_extra_members(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
purple = 4
def test_enum_has_extra_members_with_aliases(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
purple = 4
verde = green
class TestFlag(unittest.TestCase):
"""Tests of the Flags."""
class Perm(Flag):
R, W, X = 4, 2, 1
class Color(Flag):
BLACK = 0
RED = 1
GREEN = 2
BLUE = 4
PURPLE = RED|BLUE
class Open(Flag):
RO = 0
WO = 1
RW = 2
AC = 3
CE = 1<<19
def test_str(self):
Perm = self.Perm
self.assertEqual(str(Perm.R), 'Perm.R')
self.assertEqual(str(Perm.W), 'Perm.W')
self.assertEqual(str(Perm.X), 'Perm.X')
self.assertEqual(str(Perm.R | Perm.W), 'Perm.R|W')
self.assertEqual(str(Perm.R | Perm.W | Perm.X), 'Perm.R|W|X')
self.assertEqual(str(Perm(0)), 'Perm.0')
self.assertEqual(str(~Perm.R), 'Perm.W|X')
self.assertEqual(str(~Perm.W), 'Perm.R|X')
self.assertEqual(str(~Perm.X), 'Perm.R|W')
self.assertEqual(str(~(Perm.R | Perm.W)), 'Perm.X')
self.assertEqual(str(~(Perm.R | Perm.W | Perm.X)), 'Perm.0')
self.assertEqual(str(Perm(~0)), 'Perm.R|W|X')
Open = self.Open
self.assertEqual(str(Open.RO), 'Open.RO')
self.assertEqual(str(Open.WO), 'Open.WO')
self.assertEqual(str(Open.AC), 'Open.AC')
self.assertEqual(str(Open.RO | Open.CE), 'Open.CE')
self.assertEqual(str(Open.WO | Open.CE), 'Open.CE|WO')
self.assertEqual(str(~Open.RO), 'Open.CE|AC|RW|WO')
self.assertEqual(str(~Open.WO), 'Open.CE|RW')
self.assertEqual(str(~Open.AC), 'Open.CE')
self.assertEqual(str(~(Open.RO | Open.CE)), 'Open.AC')
self.assertEqual(str(~(Open.WO | Open.CE)), 'Open.RW')
def test_repr(self):
Perm = self.Perm
self.assertEqual(repr(Perm.R), '<Perm.R: 4>')
self.assertEqual(repr(Perm.W), '<Perm.W: 2>')
self.assertEqual(repr(Perm.X), '<Perm.X: 1>')
self.assertEqual(repr(Perm.R | Perm.W), '<Perm.R|W: 6>')
self.assertEqual(repr(Perm.R | Perm.W | Perm.X), '<Perm.R|W|X: 7>')
self.assertEqual(repr(Perm(0)), '<Perm.0: 0>')
self.assertEqual(repr(~Perm.R), '<Perm.W|X: 3>')
self.assertEqual(repr(~Perm.W), '<Perm.R|X: 5>')
self.assertEqual(repr(~Perm.X), '<Perm.R|W: 6>')
self.assertEqual(repr(~(Perm.R | Perm.W)), '<Perm.X: 1>')
self.assertEqual(repr(~(Perm.R | Perm.W | Perm.X)), '<Perm.0: 0>')
self.assertEqual(repr(Perm(~0)), '<Perm.R|W|X: 7>')
Open = self.Open
self.assertEqual(repr(Open.RO), '<Open.RO: 0>')
self.assertEqual(repr(Open.WO), '<Open.WO: 1>')
self.assertEqual(repr(Open.AC), '<Open.AC: 3>')
self.assertEqual(repr(Open.RO | Open.CE), '<Open.CE: 524288>')
self.assertEqual(repr(Open.WO | Open.CE), '<Open.CE|WO: 524289>')
self.assertEqual(repr(~Open.RO), '<Open.CE|AC|RW|WO: 524291>')
self.assertEqual(repr(~Open.WO), '<Open.CE|RW: 524290>')
self.assertEqual(repr(~Open.AC), '<Open.CE: 524288>')
self.assertEqual(repr(~(Open.RO | Open.CE)), '<Open.AC: 3>')
self.assertEqual(repr(~(Open.WO | Open.CE)), '<Open.RW: 2>')
def test_or(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual((i | j), Perm(i.value | j.value))
self.assertEqual((i | j).value, i.value | j.value)
self.assertIs(type(i | j), Perm)
for i in Perm:
self.assertIs(i | i, i)
Open = self.Open
self.assertIs(Open.RO | Open.CE, Open.CE)
def test_and(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
for j in values:
self.assertEqual((i & j).value, i.value & j.value)
self.assertIs(type(i & j), Perm)
for i in Perm:
self.assertIs(i & i, i)
self.assertIs(i & RWX, i)
self.assertIs(RWX & i, i)
Open = self.Open
self.assertIs(Open.RO & Open.CE, Open.RO)
def test_xor(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual((i ^ j).value, i.value ^ j.value)
self.assertIs(type(i ^ j), Perm)
for i in Perm:
self.assertIs(i ^ Perm(0), i)
self.assertIs(Perm(0) ^ i, i)
Open = self.Open
self.assertIs(Open.RO ^ Open.CE, Open.CE)
self.assertIs(Open.CE ^ Open.CE, Open.RO)
def test_invert(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
self.assertIs(type(~i), Perm)
self.assertEqual(~~i, i)
for i in Perm:
self.assertIs(~~i, i)
Open = self.Open
self.assertIs(Open.WO & ~Open.WO, Open.RO)
self.assertIs((Open.WO|Open.CE) & ~Open.WO, Open.CE)
def test_bool(self):
Perm = self.Perm
for f in Perm:
self.assertTrue(f)
Open = self.Open
for f in Open:
self.assertEqual(bool(f.value), bool(f))
    def test_programmatic_function_string(self):
Perm = Flag('Perm', 'R W X')
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
    def test_programmatic_function_string_with_start(self):
Perm = Flag('Perm', 'R W X', start=8)
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 8<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
    def test_programmatic_function_string_list(self):
Perm = Flag('Perm', ['R', 'W', 'X'])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
    def test_programmatic_function_iterable(self):
Perm = Flag('Perm', (('R', 2), ('W', 8), ('X', 32)))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
    def test_programmatic_function_from_dict(self):
Perm = Flag('Perm', OrderedDict((('R', 2), ('W', 8), ('X', 32))))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_pickle(self):
if isinstance(FlagStooges, Exception):
raise FlagStooges
test_pickle_dump_load(self.assertIs, FlagStooges.CURLY|FlagStooges.MOE)
test_pickle_dump_load(self.assertIs, FlagStooges)
def test_contains(self):
Open = self.Open
Color = self.Color
self.assertFalse(Color.BLACK in Open)
self.assertFalse(Open.RO in Color)
with self.assertWarns(DeprecationWarning):
self.assertFalse('BLACK' in Color)
with self.assertWarns(DeprecationWarning):
self.assertFalse('RO' in Open)
with self.assertWarns(DeprecationWarning):
self.assertFalse(1 in Color)
with self.assertWarns(DeprecationWarning):
self.assertFalse(1 in Open)
def test_member_contains(self):
Perm = self.Perm
R, W, X = Perm
RW = R | W
RX = R | X
WX = W | X
RWX = R | W | X
self.assertTrue(R in RW)
self.assertTrue(R in RX)
self.assertTrue(R in RWX)
self.assertTrue(W in RW)
self.assertTrue(W in WX)
self.assertTrue(W in RWX)
self.assertTrue(X in RX)
self.assertTrue(X in WX)
self.assertTrue(X in RWX)
self.assertFalse(R in WX)
self.assertFalse(W in RX)
self.assertFalse(X in RW)
def test_auto_number(self):
class Color(Flag):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 1)
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 4)
def test_auto_number_garbage(self):
with self.assertRaisesRegex(TypeError, 'Invalid Flag value: .not an int.'):
class Color(Flag):
red = 'not an int'
blue = auto()
def test_cascading_failure(self):
class Bizarre(Flag):
c = 3
d = 4
f = 6
# Bizarre.c | Bizarre.d
self.assertRaisesRegex(ValueError, "5 is not a valid Bizarre", Bizarre, 5)
self.assertRaisesRegex(ValueError, "5 is not a valid Bizarre", Bizarre, 5)
self.assertRaisesRegex(ValueError, "2 is not a valid Bizarre", Bizarre, 2)
self.assertRaisesRegex(ValueError, "2 is not a valid Bizarre", Bizarre, 2)
self.assertRaisesRegex(ValueError, "1 is not a valid Bizarre", Bizarre, 1)
self.assertRaisesRegex(ValueError, "1 is not a valid Bizarre", Bizarre, 1)
def test_duplicate_auto(self):
class Dupes(Enum):
first = primero = auto()
second = auto()
third = auto()
self.assertEqual([Dupes.first, Dupes.second, Dupes.third], list(Dupes))
def test_bizarre(self):
class Bizarre(Flag):
b = 3
c = 4
d = 6
self.assertEqual(repr(Bizarre(7)), '<Bizarre.d|c|b: 7>')
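    # Note (illustrative, not an original comment): 7 has no named member, so
    # Flag decomposes it into every member whose bits are contained in the
    # value -- d (6), c (4) and b (3) -- which is where the
    # '<Bizarre.d|c|b: 7>' repr comes from.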
def test_multiple_mixin(self):
class AllMixin:
@classproperty
def ALL(cls):
members = list(cls)
all_value = None
if members:
all_value = members[0]
for member in members[1:]:
all_value |= member
cls.ALL = all_value
return all_value
class StrMixin:
def __str__(self):
return self._name_.lower()
class Color(AllMixin, Flag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'Color.BLUE')
class Color(AllMixin, StrMixin, Flag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
class Color(StrMixin, AllMixin, Flag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
@support.reap_threads
def test_unique_composite(self):
# override __eq__ to be identity only
class TestFlag(Flag):
one = auto()
two = auto()
three = auto()
four = auto()
five = auto()
six = auto()
seven = auto()
eight = auto()
def __eq__(self, other):
return self is other
def __hash__(self):
return hash(self._value_)
# have multiple threads competing to complete the composite members
seen = set()
failed = False
def cycle_enum():
nonlocal failed
try:
for i in range(256):
seen.add(TestFlag(i))
except Exception:
failed = True
threads = [
threading.Thread(target=cycle_enum)
for _ in range(8)
]
with support.start_threads(threads):
pass
        # check that no duplicate composite members were created: the 8 named
        # members plus 248 composites give exactly 256 distinct values
self.assertFalse(
failed,
'at least one thread failed while creating composite members')
self.assertEqual(256, len(seen), 'too many composite members created')
class TestIntFlag(unittest.TestCase):
"""Tests of the IntFlags."""
class Perm(IntFlag):
X = 1 << 0
W = 1 << 1
R = 1 << 2
class Color(IntFlag):
BLACK = 0
RED = 1
GREEN = 2
BLUE = 4
PURPLE = RED|BLUE
class Open(IntFlag):
RO = 0
WO = 1
RW = 2
AC = 3
CE = 1<<19
def test_type(self):
Perm = self.Perm
Open = self.Open
for f in Perm:
self.assertTrue(isinstance(f, Perm))
self.assertEqual(f, f.value)
self.assertTrue(isinstance(Perm.W | Perm.X, Perm))
self.assertEqual(Perm.W | Perm.X, 3)
for f in Open:
self.assertTrue(isinstance(f, Open))
self.assertEqual(f, f.value)
self.assertTrue(isinstance(Open.WO | Open.RW, Open))
self.assertEqual(Open.WO | Open.RW, 3)
def test_str(self):
Perm = self.Perm
self.assertEqual(str(Perm.R), 'Perm.R')
self.assertEqual(str(Perm.W), 'Perm.W')
self.assertEqual(str(Perm.X), 'Perm.X')
self.assertEqual(str(Perm.R | Perm.W), 'Perm.R|W')
self.assertEqual(str(Perm.R | Perm.W | Perm.X), 'Perm.R|W|X')
self.assertEqual(str(Perm.R | 8), 'Perm.8|R')
self.assertEqual(str(Perm(0)), 'Perm.0')
self.assertEqual(str(Perm(8)), 'Perm.8')
self.assertEqual(str(~Perm.R), 'Perm.W|X')
self.assertEqual(str(~Perm.W), 'Perm.R|X')
self.assertEqual(str(~Perm.X), 'Perm.R|W')
self.assertEqual(str(~(Perm.R | Perm.W)), 'Perm.X')
self.assertEqual(str(~(Perm.R | Perm.W | Perm.X)), 'Perm.-8')
self.assertEqual(str(~(Perm.R | 8)), 'Perm.W|X')
self.assertEqual(str(Perm(~0)), 'Perm.R|W|X')
self.assertEqual(str(Perm(~8)), 'Perm.R|W|X')
Open = self.Open
self.assertEqual(str(Open.RO), 'Open.RO')
self.assertEqual(str(Open.WO), 'Open.WO')
self.assertEqual(str(Open.AC), 'Open.AC')
self.assertEqual(str(Open.RO | Open.CE), 'Open.CE')
self.assertEqual(str(Open.WO | Open.CE), 'Open.CE|WO')
self.assertEqual(str(Open(4)), 'Open.4')
self.assertEqual(str(~Open.RO), 'Open.CE|AC|RW|WO')
self.assertEqual(str(~Open.WO), 'Open.CE|RW')
self.assertEqual(str(~Open.AC), 'Open.CE')
self.assertEqual(str(~(Open.RO | Open.CE)), 'Open.AC|RW|WO')
self.assertEqual(str(~(Open.WO | Open.CE)), 'Open.RW')
self.assertEqual(str(Open(~4)), 'Open.CE|AC|RW|WO')
def test_repr(self):
Perm = self.Perm
self.assertEqual(repr(Perm.R), '<Perm.R: 4>')
self.assertEqual(repr(Perm.W), '<Perm.W: 2>')
self.assertEqual(repr(Perm.X), '<Perm.X: 1>')
self.assertEqual(repr(Perm.R | Perm.W), '<Perm.R|W: 6>')
self.assertEqual(repr(Perm.R | Perm.W | Perm.X), '<Perm.R|W|X: 7>')
self.assertEqual(repr(Perm.R | 8), '<Perm.8|R: 12>')
self.assertEqual(repr(Perm(0)), '<Perm.0: 0>')
self.assertEqual(repr(Perm(8)), '<Perm.8: 8>')
self.assertEqual(repr(~Perm.R), '<Perm.W|X: -5>')
self.assertEqual(repr(~Perm.W), '<Perm.R|X: -3>')
self.assertEqual(repr(~Perm.X), '<Perm.R|W: -2>')
self.assertEqual(repr(~(Perm.R | Perm.W)), '<Perm.X: -7>')
self.assertEqual(repr(~(Perm.R | Perm.W | Perm.X)), '<Perm.-8: -8>')
self.assertEqual(repr(~(Perm.R | 8)), '<Perm.W|X: -13>')
self.assertEqual(repr(Perm(~0)), '<Perm.R|W|X: -1>')
self.assertEqual(repr(Perm(~8)), '<Perm.R|W|X: -9>')
Open = self.Open
self.assertEqual(repr(Open.RO), '<Open.RO: 0>')
self.assertEqual(repr(Open.WO), '<Open.WO: 1>')
self.assertEqual(repr(Open.AC), '<Open.AC: 3>')
self.assertEqual(repr(Open.RO | Open.CE), '<Open.CE: 524288>')
self.assertEqual(repr(Open.WO | Open.CE), '<Open.CE|WO: 524289>')
self.assertEqual(repr(Open(4)), '<Open.4: 4>')
self.assertEqual(repr(~Open.RO), '<Open.CE|AC|RW|WO: -1>')
self.assertEqual(repr(~Open.WO), '<Open.CE|RW: -2>')
self.assertEqual(repr(~Open.AC), '<Open.CE: -4>')
self.assertEqual(repr(~(Open.RO | Open.CE)), '<Open.AC|RW|WO: -524289>')
self.assertEqual(repr(~(Open.WO | Open.CE)), '<Open.RW: -524290>')
self.assertEqual(repr(Open(~4)), '<Open.CE|AC|RW|WO: -5>')
def test_or(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual(i | j, i.value | j.value)
self.assertEqual((i | j).value, i.value | j.value)
self.assertIs(type(i | j), Perm)
for j in range(8):
self.assertEqual(i | j, i.value | j)
self.assertEqual((i | j).value, i.value | j)
self.assertIs(type(i | j), Perm)
self.assertEqual(j | i, j | i.value)
self.assertEqual((j | i).value, j | i.value)
self.assertIs(type(j | i), Perm)
for i in Perm:
self.assertIs(i | i, i)
self.assertIs(i | 0, i)
self.assertIs(0 | i, i)
Open = self.Open
self.assertIs(Open.RO | Open.CE, Open.CE)
def test_and(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
for j in values:
self.assertEqual(i & j, i.value & j.value, 'i is %r, j is %r' % (i, j))
self.assertEqual((i & j).value, i.value & j.value, 'i is %r, j is %r' % (i, j))
self.assertIs(type(i & j), Perm, 'i is %r, j is %r' % (i, j))
for j in range(8):
self.assertEqual(i & j, i.value & j)
self.assertEqual((i & j).value, i.value & j)
self.assertIs(type(i & j), Perm)
self.assertEqual(j & i, j & i.value)
self.assertEqual((j & i).value, j & i.value)
self.assertIs(type(j & i), Perm)
for i in Perm:
self.assertIs(i & i, i)
self.assertIs(i & 7, i)
self.assertIs(7 & i, i)
Open = self.Open
self.assertIs(Open.RO & Open.CE, Open.RO)
def test_xor(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual(i ^ j, i.value ^ j.value)
self.assertEqual((i ^ j).value, i.value ^ j.value)
self.assertIs(type(i ^ j), Perm)
for j in range(8):
self.assertEqual(i ^ j, i.value ^ j)
self.assertEqual((i ^ j).value, i.value ^ j)
self.assertIs(type(i ^ j), Perm)
self.assertEqual(j ^ i, j ^ i.value)
self.assertEqual((j ^ i).value, j ^ i.value)
self.assertIs(type(j ^ i), Perm)
for i in Perm:
self.assertIs(i ^ 0, i)
self.assertIs(0 ^ i, i)
Open = self.Open
self.assertIs(Open.RO ^ Open.CE, Open.CE)
self.assertIs(Open.CE ^ Open.CE, Open.RO)
def test_invert(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
self.assertEqual(~i, ~i.value)
self.assertEqual((~i).value, ~i.value)
self.assertIs(type(~i), Perm)
self.assertEqual(~~i, i)
for i in Perm:
self.assertIs(~~i, i)
Open = self.Open
self.assertIs(Open.WO & ~Open.WO, Open.RO)
self.assertIs((Open.WO|Open.CE) & ~Open.WO, Open.CE)
    def test_programmatic_function_string(self):
Perm = IntFlag('Perm', 'R W X')
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
    def test_programmatic_function_string_with_start(self):
Perm = IntFlag('Perm', 'R W X', start=8)
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 8<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
    def test_programmatic_function_string_list(self):
Perm = IntFlag('Perm', ['R', 'W', 'X'])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
    def test_programmatic_function_iterable(self):
Perm = IntFlag('Perm', (('R', 2), ('W', 8), ('X', 32)))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
    def test_programmatic_function_from_dict(self):
Perm = IntFlag('Perm', OrderedDict((('R', 2), ('W', 8), ('X', 32))))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
    def test_programmatic_function_from_empty_list(self):
Perm = enum.IntFlag('Perm', [])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 0, Perm)
Thing = enum.Enum('Thing', [])
lst = list(Thing)
self.assertEqual(len(lst), len(Thing))
self.assertEqual(len(Thing), 0, Thing)
    def test_programmatic_function_from_empty_tuple(self):
Perm = enum.IntFlag('Perm', ())
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 0, Perm)
        Thing = enum.Enum('Thing', ())
        lst = list(Thing)
        self.assertEqual(len(lst), len(Thing))
self.assertEqual(len(Thing), 0, Thing)
def test_contains(self):
Color = self.Color
Open = self.Open
self.assertTrue(Color.GREEN in Color)
self.assertTrue(Open.RW in Open)
self.assertFalse(Color.GREEN in Open)
self.assertFalse(Open.RW in Color)
with self.assertWarns(DeprecationWarning):
self.assertFalse('GREEN' in Color)
with self.assertWarns(DeprecationWarning):
self.assertFalse('RW' in Open)
with self.assertWarns(DeprecationWarning):
self.assertFalse(2 in Color)
with self.assertWarns(DeprecationWarning):
self.assertFalse(2 in Open)
def test_member_contains(self):
Perm = self.Perm
R, W, X = Perm
RW = R | W
RX = R | X
WX = W | X
RWX = R | W | X
self.assertTrue(R in RW)
self.assertTrue(R in RX)
self.assertTrue(R in RWX)
self.assertTrue(W in RW)
self.assertTrue(W in WX)
self.assertTrue(W in RWX)
self.assertTrue(X in RX)
self.assertTrue(X in WX)
self.assertTrue(X in RWX)
self.assertFalse(R in WX)
self.assertFalse(W in RX)
self.assertFalse(X in RW)
with self.assertWarns(DeprecationWarning):
self.assertFalse('swallow' in RW)
def test_bool(self):
Perm = self.Perm
for f in Perm:
self.assertTrue(f)
Open = self.Open
for f in Open:
self.assertEqual(bool(f.value), bool(f))
def test_multiple_mixin(self):
class AllMixin:
@classproperty
def ALL(cls):
members = list(cls)
all_value = None
if members:
all_value = members[0]
for member in members[1:]:
all_value |= member
cls.ALL = all_value
return all_value
class StrMixin:
def __str__(self):
return self._name_.lower()
class Color(AllMixin, IntFlag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'Color.BLUE')
class Color(AllMixin, StrMixin, IntFlag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
class Color(StrMixin, AllMixin, IntFlag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
@support.reap_threads
def test_unique_composite(self):
# override __eq__ to be identity only
class TestFlag(IntFlag):
one = auto()
two = auto()
three = auto()
four = auto()
five = auto()
six = auto()
seven = auto()
eight = auto()
def __eq__(self, other):
return self is other
def __hash__(self):
return hash(self._value_)
# have multiple threads competing to complete the composite members
seen = set()
failed = False
def cycle_enum():
nonlocal failed
try:
for i in range(256):
seen.add(TestFlag(i))
except Exception:
failed = True
threads = [
threading.Thread(target=cycle_enum)
for _ in range(8)
]
with support.start_threads(threads):
pass
        # check that no duplicate composite members were created: the 8 named
        # members plus 248 composites give exactly 256 distinct values
self.assertFalse(
failed,
'at least one thread failed while creating composite members')
self.assertEqual(256, len(seen), 'too many composite members created')
class TestUnique(unittest.TestCase):
def test_unique_clean(self):
@unique
class Clean(Enum):
one = 1
two = 'dos'
tres = 4.0
@unique
class Cleaner(IntEnum):
single = 1
double = 2
triple = 3
def test_unique_dirty(self):
with self.assertRaisesRegex(ValueError, 'tres.*one'):
@unique
class Dirty(Enum):
one = 1
two = 'dos'
tres = 1
with self.assertRaisesRegex(
ValueError,
'double.*single.*turkey.*triple',
):
@unique
class Dirtier(IntEnum):
single = 1
double = 1
triple = 3
turkey = 3
def test_unique_with_name(self):
@unique
class Silly(Enum):
one = 1
two = 'dos'
name = 3
@unique
class Sillier(IntEnum):
single = 1
name = 2
triple = 3
value = 4
expected_help_output_with_docs = """\
Help on class Color in module %s:
class Color(enum.Enum)
| Color(value, names=None, *, module=None, qualname=None, type=None, start=1)
|\x20\x20
| An enumeration.
|\x20\x20
| Method resolution order:
| Color
| enum.Enum
| builtins.object
|\x20\x20
| Data and other attributes defined here:
|\x20\x20
| blue = <Color.blue: 3>
|\x20\x20
| green = <Color.green: 2>
|\x20\x20
| red = <Color.red: 1>
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.Enum:
|\x20\x20
| name
| The name of the Enum member.
|\x20\x20
| value
| The value of the Enum member.
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.EnumMeta:
|\x20\x20
| __members__
| Returns a mapping of member name->value.
|\x20\x20\x20\x20\x20\x20
| This mapping lists all enum members, including aliases. Note that this
| is a read-only view of the internal mapping."""
expected_help_output_without_docs = """\
Help on class Color in module %s:
class Color(enum.Enum)
| Color(value, names=None, *, module=None, qualname=None, type=None, start=1)
|\x20\x20
| Method resolution order:
| Color
| enum.Enum
| builtins.object
|\x20\x20
| Data and other attributes defined here:
|\x20\x20
| blue = <Color.blue: 3>
|\x20\x20
| green = <Color.green: 2>
|\x20\x20
| red = <Color.red: 1>
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.Enum:
|\x20\x20
| name
|\x20\x20
| value
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.EnumMeta:
|\x20\x20
| __members__"""
class TestStdLib(unittest.TestCase):
maxDiff = None
class Color(Enum):
red = 1
green = 2
blue = 3
def test_pydoc(self):
# indirectly test __objclass__
if StrEnum.__doc__ is None:
expected_text = expected_help_output_without_docs % __name__
else:
expected_text = expected_help_output_with_docs % __name__
output = StringIO()
helper = pydoc.Helper(output=output)
helper(self.Color)
result = output.getvalue().strip()
self.assertEqual(result, expected_text)
def test_inspect_getmembers(self):
values = dict((
('__class__', EnumMeta),
('__doc__', 'An enumeration.'),
('__members__', self.Color.__members__),
('__module__', __name__),
('blue', self.Color.blue),
('green', self.Color.green),
('name', Enum.__dict__['name']),
('red', self.Color.red),
('value', Enum.__dict__['value']),
))
result = dict(inspect.getmembers(self.Color))
self.assertEqual(values.keys(), result.keys())
failed = False
for k in values.keys():
if result[k] != values[k]:
print()
print('\n%s\n key: %s\n result: %s\nexpected: %s\n%s\n' %
('=' * 75, k, result[k], values[k], '=' * 75), sep='')
failed = True
if failed:
self.fail("result does not equal expected, see print above")
def test_inspect_classify_class_attrs(self):
# indirectly test __objclass__
from inspect import Attribute
values = [
Attribute(name='__class__', kind='data',
defining_class=object, object=EnumMeta),
Attribute(name='__doc__', kind='data',
defining_class=self.Color, object='An enumeration.'),
Attribute(name='__members__', kind='property',
defining_class=EnumMeta, object=EnumMeta.__members__),
Attribute(name='__module__', kind='data',
defining_class=self.Color, object=__name__),
Attribute(name='blue', kind='data',
defining_class=self.Color, object=self.Color.blue),
Attribute(name='green', kind='data',
defining_class=self.Color, object=self.Color.green),
Attribute(name='red', kind='data',
defining_class=self.Color, object=self.Color.red),
Attribute(name='name', kind='data',
defining_class=Enum, object=Enum.__dict__['name']),
Attribute(name='value', kind='data',
defining_class=Enum, object=Enum.__dict__['value']),
]
values.sort(key=lambda item: item.name)
result = list(inspect.classify_class_attrs(self.Color))
result.sort(key=lambda item: item.name)
failed = False
for v, r in zip(values, result):
if r != v:
print('\n%s\n%s\n%s\n%s\n' % ('=' * 75, r, v, '=' * 75), sep='')
failed = True
if failed:
self.fail("result does not equal expected, see print above")
class MiscTestCase(unittest.TestCase):
def test__all__(self):
support.check__all__(self, enum)
# These are unordered here on purpose to ensure that declaration order
# makes no difference.
CONVERT_TEST_NAME_D = 5
CONVERT_TEST_NAME_C = 5
CONVERT_TEST_NAME_B = 5
CONVERT_TEST_NAME_A = 5 # This one should sort first.
CONVERT_TEST_NAME_E = 5
CONVERT_TEST_NAME_F = 5
class TestIntEnumConvert(unittest.TestCase):
def test_convert_value_lookup_priority(self):
test_type = enum.IntEnum._convert(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_TEST_'))
# We don't want the reverse lookup value to vary when there are
# multiple possible names for a given value. It should always
        # report the first lexicographical name in that case.
self.assertEqual(test_type(5).name, 'CONVERT_TEST_NAME_A')
def test_convert(self):
test_type = enum.IntEnum._convert(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_TEST_'))
# Ensure that test_type has all of the desired names and values.
self.assertEqual(test_type.CONVERT_TEST_NAME_F,
test_type.CONVERT_TEST_NAME_A)
self.assertEqual(test_type.CONVERT_TEST_NAME_B, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_C, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_D, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_E, 5)
# Ensure that test_type only picked up names matching the filter.
self.assertEqual([name for name in dir(test_type)
if name[0:2] not in ('CO', '__')],
[], msg='Names other than CONVERT_TEST_* found.')
if __name__ == '__main__':
unittest.main()
| 36.084734 | 103 | 0.539603 |
198be4e3a3527accfaa4fd967fee6bb8c87c538c | 1,030 | py | Python | sclrecommender/matrix/oneClassMatrix.py | wezteoh/Bandit_Recommendation | a326e4d1d082e1a2113fe739bc343fb45b0b8a4a | [
"MIT"
] | null | null | null | sclrecommender/matrix/oneClassMatrix.py | wezteoh/Bandit_Recommendation | a326e4d1d082e1a2113fe739bc343fb45b0b8a4a | [
"MIT"
] | null | null | null | sclrecommender/matrix/oneClassMatrix.py | wezteoh/Bandit_Recommendation | a326e4d1d082e1a2113fe739bc343fb45b0b8a4a | [
"MIT"
] | null | null | null | import numpy as np
from .recommenderMatrix import RecommenderMatrix
class OneClassMatrix(RecommenderMatrix):
def __init__(self, ratingMatrix, positiveThreshold):
'''
        Generates a matrix of 1s and 0s
        where:
            1 => this is a known positive item
            0 => nothing is known about this entry; it may be negative or simply unseen
'''
ratingMatrix = ratingMatrix.copy()
        # Note: no need to round the reconstruction matrix, since it is binarized by the threshold check below
super().__init__(ratingMatrix)
# Convert to one class
self.oneClassMatrix= np.ones(self.ratingMatrix.shape)
self.oneClassMatrix[np.where(self.ratingMatrix == 0)] = 0.0
self.oneClassMatrix[np.where(self.ratingMatrix < positiveThreshold)] = 0.0
# Override
def applyMask(self, mask):
super().applyMask(mask) # Checks for mask shape
self.oneClassMatrix *= mask
def getOneClassMatrix(self):
return self.oneClassMatrix.copy()
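# --- Illustrative usage (editorial sketch, not part of the original module) ---
# Assuming a small rating matrix and a positive threshold of 4, only known
# ratings at or above the threshold survive as 1s; unrated (0) or sub-threshold
# entries collapse to 0:
#
#     ratings = np.array([[5., 0.],
#                         [2., 4.]])
#     one_class = OneClassMatrix(ratings, positiveThreshold=4)
#     one_class.getOneClassMatrix()   # -> [[1., 0.], [0., 1.]]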
| 36.785714 | 125 | 0.673786 |
9e753ed97bec391dcf4076bfbdc6088dae8aa092 | 8,591 | py | Python | official/r1/transformer/translate.py | zcdzcdzcd/models | a31b526a7617a152a138a865b5689bf5b59f655d | [
"Apache-2.0"
] | 15 | 2019-11-06T17:23:27.000Z | 2021-07-17T16:03:01.000Z | official/r1/transformer/translate.py | zcdzcdzcd/models | a31b526a7617a152a138a865b5689bf5b59f655d | [
"Apache-2.0"
] | 16 | 2020-01-28T22:22:10.000Z | 2022-03-12T00:10:37.000Z | official/r1/transformer/translate.py | zcdzcdzcd/models | a31b526a7617a152a138a865b5689bf5b59f655d | [
"Apache-2.0"
] | 13 | 2019-11-06T17:23:29.000Z | 2019-11-29T13:03:07.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Translate text or files using trained transformer model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
# pylint: disable=g-bad-import-order
from absl import app as absl_app
from absl import flags
import tensorflow as tf
# pylint: enable=g-bad-import-order
from official.transformer.utils import tokenizer
from official.utils.flags import core as flags_core
_DECODE_BATCH_SIZE = 32
_EXTRA_DECODE_LENGTH = 100
_BEAM_SIZE = 4
_ALPHA = 0.6
def _get_sorted_inputs(filename):
"""Read and sort lines from the file sorted by decreasing length.
Args:
filename: String name of file to read inputs from.
Returns:
Sorted list of inputs, and dictionary mapping original index->sorted index
of each element.
"""
with tf.io.gfile.GFile(filename) as f:
records = f.read().split("\n")
inputs = [record.strip() for record in records]
if not inputs[-1]:
inputs.pop()
input_lens = [(i, len(line.split())) for i, line in enumerate(inputs)]
sorted_input_lens = sorted(input_lens, key=lambda x: x[1], reverse=True)
sorted_inputs = [None] * len(sorted_input_lens)
sorted_keys = [0] * len(sorted_input_lens)
for i, (index, _) in enumerate(sorted_input_lens):
sorted_inputs[i] = inputs[index]
sorted_keys[index] = i
return sorted_inputs, sorted_keys
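# Illustrative sketch (editorial addition): for inputs ["d", "a b c"] the
# function returns sorted_inputs == ["a b c", "d"] (longest first) and
# sorted_keys == [1, 0]; i.e. sorted_keys[original_index] is the position of
# that line in the sorted list, which is how translations are written back in
# the original order by translate_file below.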
def _encode_and_add_eos(line, subtokenizer):
"""Encode line with subtokenizer, and add EOS id to the end."""
return subtokenizer.encode(line) + [tokenizer.EOS_ID]
def _trim_and_decode(ids, subtokenizer):
"""Trim EOS and PAD tokens from ids, and decode to return a string."""
try:
index = list(ids).index(tokenizer.EOS_ID)
return subtokenizer.decode(ids[:index])
except ValueError: # No EOS found in sequence
return subtokenizer.decode(ids)
def translate_file(
estimator, subtokenizer, input_file, output_file=None,
print_all_translations=True):
"""Translate lines in file, and save to output file if specified.
Args:
estimator: tf.Estimator used to generate the translations.
subtokenizer: Subtokenizer object for encoding and decoding source and
translated lines.
input_file: file containing lines to translate
output_file: file that stores the generated translations.
print_all_translations: If true, all translations are printed to stdout.
Raises:
ValueError: if output file is invalid.
"""
batch_size = _DECODE_BATCH_SIZE
# Read and sort inputs by length. Keep dictionary (original index-->new index
# in sorted list) to write translations in the original order.
sorted_inputs, sorted_keys = _get_sorted_inputs(input_file)
num_decode_batches = (len(sorted_inputs) - 1) // batch_size + 1
def input_generator():
"""Yield encoded strings from sorted_inputs."""
for i, line in enumerate(sorted_inputs):
if i % batch_size == 0:
batch_num = (i // batch_size) + 1
tf.logging.info("Decoding batch %d out of %d." %
(batch_num, num_decode_batches))
yield _encode_and_add_eos(line, subtokenizer)
def input_fn():
"""Created batched dataset of encoded inputs."""
ds = tf.data.Dataset.from_generator(
input_generator, tf.int64, tf.TensorShape([None]))
ds = ds.padded_batch(batch_size, [None])
return ds
translations = []
for i, prediction in enumerate(estimator.predict(input_fn)):
translation = _trim_and_decode(prediction["outputs"], subtokenizer)
translations.append(translation)
if print_all_translations:
tf.logging.info("Translating:\n\tInput: %s\n\tOutput: %s" %
(sorted_inputs[i], translation))
# Write translations in the order they appeared in the original file.
if output_file is not None:
if tf.io.gfile.isdir(output_file):
raise ValueError("File output is a directory, will not save outputs to "
"file.")
tf.logging.info("Writing to file %s" % output_file)
with tf.io.gfile.GFile(output_file, "w") as f:
for i in sorted_keys:
f.write("%s\n" % translations[i])
def translate_text(estimator, subtokenizer, txt):
"""Translate a single string."""
encoded_txt = _encode_and_add_eos(txt, subtokenizer)
def input_fn():
ds = tf.data.Dataset.from_tensors(encoded_txt)
ds = ds.batch(_DECODE_BATCH_SIZE)
return ds
predictions = estimator.predict(input_fn)
translation = next(predictions)["outputs"]
translation = _trim_and_decode(translation, subtokenizer)
tf.logging.info("Translation of \"%s\": \"%s\"" % (txt, translation))
def main(unused_argv):
from official.transformer import transformer_main
tf.logging.set_verbosity(tf.logging.INFO)
if FLAGS.text is None and FLAGS.file is None:
tf.logging.warn("Nothing to translate. Make sure to call this script using "
"flags --text or --file.")
return
subtokenizer = tokenizer.Subtokenizer(FLAGS.vocab_file)
# Set up estimator and params
params = transformer_main.PARAMS_MAP[FLAGS.param_set]
params["beam_size"] = _BEAM_SIZE
params["alpha"] = _ALPHA
params["extra_decode_length"] = _EXTRA_DECODE_LENGTH
params["batch_size"] = _DECODE_BATCH_SIZE
estimator = tf.estimator.Estimator(
model_fn=transformer_main.model_fn, model_dir=FLAGS.model_dir,
params=params)
if FLAGS.text is not None:
tf.logging.info("Translating text: %s" % FLAGS.text)
translate_text(estimator, subtokenizer, FLAGS.text)
if FLAGS.file is not None:
input_file = os.path.abspath(FLAGS.file)
tf.logging.info("Translating file: %s" % input_file)
    if not tf.io.gfile.exists(FLAGS.file):
raise ValueError("File does not exist: %s" % input_file)
output_file = None
if FLAGS.file_out is not None:
output_file = os.path.abspath(FLAGS.file_out)
tf.logging.info("File output specified: %s" % output_file)
translate_file(estimator, subtokenizer, input_file, output_file)
def define_translate_flags():
"""Define flags used for translation script."""
# Model flags
flags.DEFINE_string(
name="model_dir", short_name="md", default="/tmp/transformer_model",
help=flags_core.help_wrap(
"Directory containing Transformer model checkpoints."))
flags.DEFINE_enum(
name="param_set", short_name="mp", default="big",
enum_values=["base", "big"],
help=flags_core.help_wrap(
"Parameter set to use when creating and training the model. The "
"parameters define the input shape (batch size and max length), "
"model configuration (size of embedding, # of hidden layers, etc.), "
"and various other settings. The big parameter set increases the "
"default batch size, embedding/hidden size, and filter size. For a "
"complete list of parameters, please see model/model_params.py."))
flags.DEFINE_string(
name="vocab_file", short_name="vf", default=None,
help=flags_core.help_wrap(
"Path to subtoken vocabulary file. If data_download.py was used to "
"download and encode the training data, look in the data_dir to find "
"the vocab file."))
flags.mark_flag_as_required("vocab_file")
flags.DEFINE_string(
name="text", default=None,
help=flags_core.help_wrap(
"Text to translate. Output will be printed to console."))
flags.DEFINE_string(
name="file", default=None,
help=flags_core.help_wrap(
"File containing text to translate. Translation will be printed to "
"console and, if --file_out is provided, saved to an output file."))
flags.DEFINE_string(
name="file_out", default=None,
help=flags_core.help_wrap(
"If --file flag is specified, save translation to this file."))
if __name__ == "__main__":
define_translate_flags()
FLAGS = flags.FLAGS
absl_app.run(main)
| 36.096639 | 80 | 0.701315 |
719f7961b45149cc58fcb7c8e3106d03315c73bd | 1,560 | py | Python | test_autolens/integration/tests/features/model_mapper/link_variable_float_to_next_phase.py | PyJedi/PyAutoLens | bcfb2e7b447aa24508fc648d60b6fd9b4fd852e7 | [
"MIT"
] | null | null | null | test_autolens/integration/tests/features/model_mapper/link_variable_float_to_next_phase.py | PyJedi/PyAutoLens | bcfb2e7b447aa24508fc648d60b6fd9b4fd852e7 | [
"MIT"
] | null | null | null | test_autolens/integration/tests/features/model_mapper/link_variable_float_to_next_phase.py | PyJedi/PyAutoLens | bcfb2e7b447aa24508fc648d60b6fd9b4fd852e7 | [
"MIT"
] | null | null | null | import autofit as af
import autolens as al
from test_autolens.integration.tests.imaging import runner
test_type = "model_mapper"
test_name = "link_model_float_to_next_phase"
data_type = "lens_light_dev_vaucouleurs"
data_resolution = "lsst"
def make_pipeline(name, phase_folders, non_linear_class=af.MultiNest):
phase1 = al.PhaseImaging(
phase_name="phase_1",
phase_folders=phase_folders,
galaxies=dict(lens=al.GalaxyModel(redshift=0.5, light=al.lp.EllipticalSersic)),
non_linear_class=non_linear_class,
)
phase1.optimizer.const_efficiency_mode = True
phase1.optimizer.n_live_points = 20
phase1.optimizer.sampling_efficiency = 0.8
class MMPhase2(al.PhaseImaging):
def customize_priors(self, results):
self.galaxies.lens.light.centre = results.from_phase(
"phase_1"
).model.galaxies.lens.light.centre
self.galaxies.lens.light.axis_ratio = results.from_phase(
"phase_1"
).model.galaxies.lens.light.axis_ratio
phase2 = MMPhase2(
phase_name="phase_2",
phase_folders=phase_folders,
galaxies=dict(lens=al.GalaxyModel(redshift=0.5, light=al.lp.EllipticalSersic)),
non_linear_class=non_linear_class,
)
phase2.optimizer.const_efficiency_mode = True
phase2.optimizer.n_live_points = 20
phase2.optimizer.sampling_efficiency = 0.8
return al.PipelineDataset(name, phase1, phase2)
if __name__ == "__main__":
import sys
runner.run(sys.modules[__name__])
| 29.433962 | 87 | 0.705769 |
553b06e0a446030f7c5e2713c926ccf0d4a76673 | 179 | py | Python | tests/IT/fixtures/test_class_inheritance_1.py | testandconquer/pytest-conquer | da600c7f5bcd06aa62c5cca9b75370bf1a6ebf05 | [
"MIT"
] | null | null | null | tests/IT/fixtures/test_class_inheritance_1.py | testandconquer/pytest-conquer | da600c7f5bcd06aa62c5cca9b75370bf1a6ebf05 | [
"MIT"
] | 5 | 2018-12-27T02:52:01.000Z | 2019-01-02T01:52:55.000Z | tests/IT/fixtures/test_class_inheritance_1.py | testandconquer/pytest-conquer | da600c7f5bcd06aa62c5cca9b75370bf1a6ebf05 | [
"MIT"
] | null | null | null | class TestObject1(object):
@classmethod
def setup_class(cls):
pass
@classmethod
def teardown_class(cls):
pass
def test1(self):
pass
| 13.769231 | 28 | 0.586592 |
4c9f21b5189e0437fdf3b93c2ffb700c8fa68fd3 | 1,697 | py | Python | register/admin.py | yashiki-takajin/sfa-next | 049058a37b9ee45b58be5f4393a0b3191362043c | [
"MIT"
] | 19 | 2018-11-23T10:13:14.000Z | 2022-03-26T11:57:55.000Z | register/admin.py | yashiki-takajin/sfa-next | 049058a37b9ee45b58be5f4393a0b3191362043c | [
"MIT"
] | 3 | 2020-06-05T19:25:20.000Z | 2021-06-10T20:59:30.000Z | register/admin.py | yashiki-takajin/sfa-next | 049058a37b9ee45b58be5f4393a0b3191362043c | [
"MIT"
] | 8 | 2019-04-21T11:08:22.000Z | 2021-12-08T09:38:30.000Z | from django.conf import settings
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import UserChangeForm, UserCreationForm
from django.utils.translation import ugettext_lazy as _
from .models import MyGroup, User, Workspace
class MyUserChangeForm(UserChangeForm):
class Meta:
model = User
fields = '__all__'
class MyUserCreationForm(UserCreationForm):
class Meta:
model = User
fields = ('email', )
class MyGroupAdmin(admin.ModelAdmin):
pass
class WorkspaceAdmin(admin.ModelAdmin):
pass
class MyUserAdmin(UserAdmin):
fieldsets = (
(None, {
'fields': ('email', 'password')
}),
(_('Personal info'), {
'fields': ('first_name', 'last_name', 'workspace',
'is_workspace_active', 'workspace_role', 'my_group')
}),
(_('Permissions'), {
'fields': ('is_active', 'is_staff', 'is_superuser', 'groups',
'user_permissions')
}),
(_('Important dates'), {
'fields': ('last_login', 'date_joined')
}),
)
filter_horizontal = ('groups', 'user_permissions')
add_fieldsets = ((None, {
'classes': ('wide', ),
'fields': ('email', 'password1', 'password2'),
}), )
form = MyUserChangeForm
add_form = MyUserCreationForm
list_display = ('email', 'first_name', 'last_name', 'is_staff')
search_fields = ('email', 'first_name', 'last_name')
ordering = ('email', )
admin.site.register(User, MyUserAdmin)
admin.site.register(Workspace, WorkspaceAdmin)
admin.site.register(MyGroup, MyGroupAdmin)
| 27.370968 | 75 | 0.619328 |
e90383e6a38a73dae3c92637ab5519b03942ed6b | 2,618 | py | Python | src/squirrel/repo/setuprepolist.py | bvz2000/squirrel | 5d3ba00825aaa5337d8972a0edc6530230a8a754 | [
"Unlicense"
] | null | null | null | src/squirrel/repo/setuprepolist.py | bvz2000/squirrel | 5d3ba00825aaa5337d8972a0edc6530230a8a754 | [
"Unlicense"
] | null | null | null | src/squirrel/repo/setuprepolist.py | bvz2000/squirrel | 5d3ba00825aaa5337d8972a0edc6530230a8a754 | [
"Unlicense"
] | null | null | null | import inspect
import os
from bvzconfig import Config
from squirrel.shared.constants import *
from squirrel.shared.squirrelerror import SquirrelError
# ----------------------------------------------------------------------------------------------------------------------
def validate_repo_list(repo_list_obj,
localized_resource_obj):
"""
    Makes sure the repo list file is valid. Raises a SquirrelError if not.
:param repo_list_obj:
The repo list object responsible for managing the list of repos.
:param localized_resource_obj:
The localization object responsible for managing localized strings.
:return:
Nothing.
"""
sections = dict()
sections["repos"] = None
sections["defaults"] = [("default_repo", "str")]
failures = repo_list_obj.validate(sections)
if failures:
if failures[1] is None:
err_msg = localized_resource_obj.get_error_msg(601)
err_msg = err_msg.format(repo_list_p=repo_list_obj.config_path,
section=failures[0])
raise SquirrelError(err_msg, 601)
# ----------------------------------------------------------------------------------------------------------------------
def create_repo_list_object(localized_resource_obj,
repo_list_p=None):
"""
Create a repo list object.
:param localized_resource_obj:
The localization object responsible for managing localized strings.
:param repo_list_p:
If provided, this path will be used instead of any provided by an env variable or the default repo list
file location. If None, then the repo list file will be read from the path given by the env variable or,
if that is not set, from the default location. Defaults to None.
:return:
A repo list object.
"""
assert repo_list_p is None or type(repo_list_p) is str
if repo_list_p is None:
if REPO_LIST_PATH_ENV_VAR in os.environ.keys():
repo_list_p = os.environ[REPO_LIST_PATH_ENV_VAR]
else:
module_d = os.path.split(inspect.stack()[0][1])[0]
repo_list_p = os.path.join(module_d, "..", "..", "..", "config", "repos")
if not os.path.exists(repo_list_p):
err_msg = localized_resource_obj.get_error_msg(603)
err_msg = err_msg.format(repo_list_file=repo_list_p)
raise SquirrelError(err_msg, 603)
repo_list_obj = Config(repo_list_p)
validate_repo_list(repo_list_obj, localized_resource_obj)
return repo_list_obj
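# Illustrative usage sketch (editorial addition): a typical caller would do
#
#     repo_list_obj = create_repo_list_object(localized_resource_obj)
#
# which resolves the repo list path from the environment variable named by
# REPO_LIST_PATH_ENV_VAR (falling back to the bundled config/repos file),
# validates the "repos" and "defaults" sections, and raises a SquirrelError
# if the file is missing or malformed.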
| 35.863014 | 120 | 0.599312 |
940f817959ca8cffa6953eacdb0ba9f983f03d21 | 1,600 | py | Python | src/vsc/model/expr_fieldref_model.py | edcote/pyvsc | 18261852ca502291e0ac3266d1c0d2dd91317b01 | [
"Apache-2.0"
] | null | null | null | src/vsc/model/expr_fieldref_model.py | edcote/pyvsc | 18261852ca502291e0ac3266d1c0d2dd91317b01 | [
"Apache-2.0"
] | null | null | null | src/vsc/model/expr_fieldref_model.py | edcote/pyvsc | 18261852ca502291e0ac3266d1c0d2dd91317b01 | [
"Apache-2.0"
] | 1 | 2021-09-12T23:39:58.000Z | 2021-09-12T23:39:58.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Created on Jul 26, 2019
#
# @author: ballance
from vsc.model.expr_model import ExprModel
class ExprFieldRefModel(ExprModel):
def __init__(self, fm):
super().__init__()
self.fm = fm
if fm is None:
raise Exception("Field Model None specified")
def build(self, btor):
if self.fm.var is None:
raise Exception("Field " + str(self.fm) + " (" + self.fm.name + ") has not been built")
return self.fm.var
def is_signed(self):
return self.fm.is_signed
def width(self):
return self.fm.width
def accept(self, visitor):
visitor.visit_expr_fieldref(self)
def val(self):
return self.fm.val
def __str__(self):
return "Field: " + self.fm.name | 30.188679 | 99 | 0.668125 |
924161ab6ff3e212d92fcdb31f6aa1f77453385b | 11,340 | py | Python | host/greatfet/utils.py | grvvy/greatfet | e8098307960a60e34c27ed2903f7abc2252b4cce | [
"BSD-3-Clause"
] | 328 | 2015-08-30T03:10:50.000Z | 2022-03-31T12:47:48.000Z | host/greatfet/utils.py | grvvy/greatfet | e8098307960a60e34c27ed2903f7abc2252b4cce | [
"BSD-3-Clause"
] | 231 | 2017-02-11T23:21:31.000Z | 2022-03-27T23:07:43.000Z | host/greatfet/utils.py | grvvy/greatfet | e8098307960a60e34c27ed2903f7abc2252b4cce | [
"BSD-3-Clause"
] | 94 | 2015-09-27T15:01:04.000Z | 2022-02-26T15:41:20.000Z | #
# This file is part of GreatFET
#
"""
Utilities that help in writing simple scripts for GreatFET.
"""
from __future__ import print_function
import sys
import ast
import time
import errno
import argparse
from decimal import Decimal
from . import GreatFET, _GreatFETSingletonWrapper
from .boards.flash_stub import GreatFETFlashStub
from pygreat.errors import DeviceNotFoundError
SI_PREFIXES = {
'E-12': 'p',
'E-9': 'n',
'E-6': 'u',
'E-3': 'm',
'E+3': 'k',
'E+6': 'M',
'E+9': 'G',
'E+12': 'T',
}
def log_silent(string, end=None):
"""Silently discards all log data, but provides our logging interface."""
pass
def log_verbose(string, end="\n"):
"""Prints all logging data to the screen."""
print(string, end=end)
sys.stdout.flush()
def log_error(string, end="\n"):
""" Prints errors to stderr. """
sys.stdout.flush()
print(string, end=end, file=sys.stderr)
sys.stderr.flush()
def eng_notation(number, unit=None, separator=' '):
""" Converts a given number to a nicely-formatted engineering number; so 10e6 would become 10 M."""
# Grab the raw engineering notation from python's decimal class...
string = Decimal(number).normalize().to_eng_string()
# ... and replace the normalized engineering suffix with the relevant SI prefix.
for normalized, prefix in SI_PREFIXES.items():
string = string.replace(normalized, separator + prefix)
if unit is not None:
string += unit
return string
def from_eng_notation(string, unit=None, units=None, to_type=None):
""" Converts a string accepted on the command line (potentially in engineering notation) into a
python number. """
# Ensure we have a new list of units accessible to us.
if units is None:
units = []
else:
units = units[:]
# If we have a single unit specified, absorb it into our units list.
if unit is not None:
units.append(unit)
# If we have an acceptable unit, strip it off before we process things.
for unit in units:
string = string.replace(unit, '')
string = string.replace(unit.upper(), '')
string = string.replace(unit.lower(), '')
# Strip off any unnecessary whitespace.
string = string.strip()
# Replace each SI prefix with its normalized value.
for normalized, prefix in SI_PREFIXES.items():
if string.endswith(prefix):
string = string.replace(prefix, '').strip()
string += normalized
break
# Finally, try to parse the string as a python literal.
result = ast.literal_eval(string)
# If we have a post-processing function, apply it.
if callable(to_type):
result = to_type(result)
return result
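# Illustrative sketch (editorial addition): the two helpers above are intended
# to be rough inverses of one another, e.g.
#
#     eng_notation(2000000, unit='Hz')        # -> '2 MHz'
#     from_eng_notation('2 MHz', unit='Hz')   # -> 2000000.0
#
# (exact spacing of the output depends on the `separator` argument).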
def human_readable_size(byte_count, unit="B", binary_marker='i'):
""" Converts a number of bytes into a human-readable size string. """
SUFFIXES = {
0: "",
1: "k" + binary_marker,
2: "M" + binary_marker,
3: "G" + binary_marker,
4: "T" + binary_marker,
5: "P" + binary_marker
}
if byte_count is None:
return 0
    suffix_order = 0
while byte_count >= 1024:
suffix_order += 1
byte_count /= 1024
return "{} {}{}".format(byte_count, SUFFIXES[suffix_order], unit)
class GreatFETArgumentParser(argparse.ArgumentParser):
""" Convenience-extended argument parser for GreatFET. """
""" Serial number expected from a device in DFU. """
DFU_STUB_SERIAL = "dfu_flash_stub"
def __init__(self, *args, **kwargs):
""" Sets up a GreatFET-specialized argument parser.
Additional keyword arguments:
            dfu -- If set to True, DFU-related arguments will be provided.
raise_device_find_failures -- If set to True, this will throw a DeviceNotFoundError
instead of quitting if no device is present.
"""
# Determine if we should provide DFU arguments.
if 'dfu' in kwargs:
self.supports_dfu = kwargs['dfu']
del kwargs['dfu']
else:
self.supports_dfu = False
        # Determine if we should be verbose by default.
if 'verbose_by_default' in kwargs:
verbose_by_default = kwargs['verbose_by_default']
del kwargs['verbose_by_default']
else:
verbose_by_default = False
# If set, this will throw DeviceNotFound errors instead of killing the process.
if 'raise_device_find_failures' in kwargs:
self.raise_device_find_failures = kwargs['raise_device_find_failures']
del kwargs['raise_device_find_failures']
else:
self.raise_device_find_failures = False
# Invoke the core function.
super(GreatFETArgumentParser, self).__init__(*args, **kwargs)
# Start off with no memoized arguments.
self.memoized_args = None
        # By default, log quietly.
# Add the standard arguments used to find a GreatFET.
self.add_argument('-s', '--serial', dest='serial', metavar='<serialnumber>', type=str,
help="Serial number of device to look for", default=None)
self.add_argument('-i', '--index', dest='index', metavar='<i>', type=int,
help="number of the attached device (default: 0)", default=0)
self.add_argument('--wait', dest='wait', action='store_true',
help="Wait for a GreatFET device to come online if none is found.")
if verbose_by_default:
self.add_argument('-q', '--quiet', dest='verbose', action='store_false',
help="Don't log details to the console unless an error occurs.")
else:
self.add_argument('-v', '--verbose', dest='verbose', action='store_true',
help="Log more details to the console.")
# TODO: specify protocol?
# TODO: accept comms URI
# If we're accepting devices from DFU mode, accept the relevant arguments, as well.
# Note that you must put the device into DFU mode and load the stub from the caller.
if self.supports_dfu:
self.add_argument('-d', '--dfu', dest='dfu', action='store_true',
help="Access a device from in DFU mode by first loading a stub. Always resets.")
self.add_argument('--dfu-stub', dest='dfu_stub', metavar='<stub.dfu>', type=str,
help="The stub to use for DFU programming. If not provided, the utility will attempt to automtaically find one.")
def find_specified_device(self):
""" Connects to the GreatFET specified by the user's command line arguments. """
device = None
args = self.parse_args()
# Loop until we have a device.
# Conditions where we should abort are presented below.
while device is None:
try:
device = self._find_greatfet(args)
except DeviceNotFoundError:
# If we're not in wait mode (or waiting for a DFU flash stub to come up), bail out.
if not (args.wait or (self.supports_dfu and args.dfu)):
# If we're not handling location failures, re-raise the exception.
if self.raise_device_find_failures:
raise
# Otherwise, print a message and bail out.
if args.serial:
print("No GreatFET board found matching serial '{}'.".format(args.serial), file=sys.stderr)
elif args.index:
print("No GreatFET board found with index '{}'.".format(args.index), file=sys.stderr)
else:
print("No GreatFET board found!", file=sys.stderr)
sys.exit(errno.ENODEV)
else:
time.sleep(1)
return device
def get_singleton_for_specified_device(self):
"""
Connects to the GreatFET specified by the user's command line arguments, but gets a singleton that persists
across reconnects.
"""
# Grab the device itself, and find its serial number.
device = self.find_specified_device()
serial = device.serial_number()
device.close()
# Create an equivalent singleton wrapper.
return _GreatFETSingletonWrapper(serial)
def get_log_function(self):
""" Returns a function that can be used for logging, but which respects verbosity. """
return log_verbose if self.parse_args().verbose else log_silent
def get_log_functions(self):
""" Returns a 2-tuple of a function that can be used for logging data and errors, attempting to repsect -v/-q."""
return self.get_log_function(), log_error
def parse_args(self):
""" Specialized version of parse_args that memoizes, for GreatFET. """
# If we haven't called parse_args yet, let the base class handle the parsing,
# first.
if self.memoized_args is None:
self.memoized_args = super(GreatFETArgumentParser, self).parse_args()
# Always return our memoized version.
return self.memoized_args
def _find_greatfet(self, args):
""" Finds a GreatFET matching the relevant arguments."""
# If we're programming via DFU mode, look for a device that sports the DFU stub.
# Note that we only support a single DFU-mode device for now, and thus always
# grab the first one.
if self.supports_dfu and args.dfu:
devices = GreatFET(find_all=True)
for device in devices:
if isinstance(device, GreatFETFlashStub):
return device
raise DeviceNotFoundError
# If we have an index argument, grab _all_ greatFETs and select by index.
elif args.index:
# Find _all_ GreatFETs...
devices = GreatFET(find_all=True)
# ... and then select the one with the provided index.
if len(devices) <= args.index:
raise DeviceNotFoundError
return devices[args.index]
# If we have a serial number, look only for a single device. Theoretically,
# we should never have more than one GreatFET with the same serial number.
# Technically, this is violable, but libusb doesn't properly handle searching
# by serial number if there are multiple devices with the same one, so we
# enforce this.
else:
return GreatFET(serial_number=args.serial)
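# Illustrative usage sketch (editorial addition): a GreatFET script built on
# this parser typically looks roughly like
#
#     parser = GreatFETArgumentParser(description="example tool", dfu=True)
#     device = parser.find_specified_device()
#     log_function, log_error = parser.get_log_functions()
#
# which provides the -s/--serial, -i/--index, --wait and verbosity flags
# uniformly across tools ("example tool" is a made-up description).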
def greatfet_assets_directory():
""" Provide a quick function that helps us get at our assets directory. """
import os
# Find the path to the module, and then find its assets folder.
module_path = os.path.dirname(__file__)
return os.path.join(module_path, 'assets')
def find_greatfet_asset(filename):
""" Returns the path to a given GreatFET asset, if it exists, or None if the GreatFET asset isn't provided."""
import os
asset_path = os.path.join(greatfet_assets_directory(), filename)
if os.path.isfile(asset_path):
return asset_path
else:
return None
| 33.850746 | 145 | 0.620106 |
022a4ac73afffd134ee4c35bf3dbba7ed2214bff | 3,260 | py | Python | tests/app/states/test_states.py | jerjohste/exopy | 0fe3eb94f440ead88c396a1abccf7c22dd633a61 | [
"BSD-3-Clause"
] | 16 | 2018-03-20T09:06:23.000Z | 2021-09-08T18:46:15.000Z | tests/app/states/test_states.py | jerjohste/exopy | 0fe3eb94f440ead88c396a1abccf7c22dd633a61 | [
"BSD-3-Clause"
] | 118 | 2015-05-13T07:50:04.000Z | 2018-02-14T17:37:20.000Z | tests/app/states/test_states.py | jerjohste/exopy | 0fe3eb94f440ead88c396a1abccf7c22dd633a61 | [
"BSD-3-Clause"
] | 11 | 2018-03-02T11:17:26.000Z | 2021-06-23T22:25:40.000Z | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015-2018 by Exopy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Test state plugin system.
"""
import enaml
from enaml.workbench.api import Workbench
from pytest import raises
with enaml.imports():
from enaml.workbench.core.core_manifest import CoreManifest
from exopy.app.states.manifest import StateManifest
from .states_utils import StateContributor
CORE_PLUGIN = 'enaml.workbench.core'
GET_STATE = 'exopy.app.states.get'
STATE_ID = 'test.states.state'
class TestState(object):
"""Test the handling os states by the state plugin.
"""
def setup(self):
self.workbench = Workbench()
self.workbench.register(CoreManifest())
self.workbench.register(StateManifest())
self.workbench.register(StateContributor())
def test_get_state(self):
"""Test accessing to a state object through the command.
"""
core = self.workbench.get_plugin(CORE_PLUGIN)
par = {'state_id': STATE_ID}
state = core.invoke_command(GET_STATE,
par, trigger=self)
assert hasattr(state, 'string')
assert state.string == 'init'
with raises(AttributeError):
state.string = 1
self.workbench.unregister('exopy.app.states')
def test_state_unicity(self):
"""Test that asking twice the same state return the same object.
"""
core = self.workbench.get_plugin(CORE_PLUGIN)
par = {'state_id': STATE_ID}
state1 = core.invoke_command(GET_STATE,
par, trigger=self)
state2 = core.invoke_command(GET_STATE,
par, trigger=self)
assert state1 is state2
def test_member_sync(self):
"""Test that the state is correctly synchronised with the plugin.
"""
core = self.workbench.get_plugin(CORE_PLUGIN)
par = {'state_id': STATE_ID}
state = core.invoke_command(GET_STATE,
par, trigger=self)
plugin = self.workbench.get_plugin('test.states')
plugin.string = 'test'
assert state.string == 'test'
def test_death_notif(self):
"""Test that a state whose plugin is unregistered is marked as dead.
"""
core = self.workbench.get_plugin(CORE_PLUGIN)
par = {'state_id': STATE_ID}
state = core.invoke_command(GET_STATE,
par, trigger=self)
self.workbench.unregister(u'test.states')
assert not state.alive
# =============================================================================
# --- API import --------------------------------------------------------------
# =============================================================================
def test_api_import():
"""Test importing the api module.
"""
from exopy.app.states import api
assert api.__all__
| 31.650485 | 79 | 0.55 |
92dfc564442a9268d1dd23dcff29aae550306c0f | 5,583 | py | Python | tempest/api/network/test_allowed_address_pair.py | gamado/ds_tempest_rm_me_please | 3f5d149b3a32e713c60c59a054035ac2e5c73c28 | [
"Apache-2.0"
] | null | null | null | tempest/api/network/test_allowed_address_pair.py | gamado/ds_tempest_rm_me_please | 3f5d149b3a32e713c60c59a054035ac2e5c73c28 | [
"Apache-2.0"
] | null | null | null | tempest/api/network/test_allowed_address_pair.py | gamado/ds_tempest_rm_me_please | 3f5d149b3a32e713c60c59a054035ac2e5c73c28 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import six
from tempest.api.network import base
from tempest import config
from tempest import test
CONF = config.CONF
class AllowedAddressPairTestJSON(base.BaseNetworkTest):
"""Tests the Neutron Allowed Address Pair API extension
The following API operations are tested with this extension:
create port
list ports
update port
show port
v2.0 of the Neutron API is assumed. It is also assumed that the following
options are defined in the [network-feature-enabled] section of
etc/tempest.conf
api_extensions
"""
@classmethod
def skip_checks(cls):
super(AllowedAddressPairTestJSON, cls).skip_checks()
if not test.is_extension_enabled('allowed-address-pairs', 'network'):
msg = "Allowed Address Pairs extension not enabled."
raise cls.skipException(msg)
@classmethod
def resource_setup(cls):
super(AllowedAddressPairTestJSON, cls).resource_setup()
cls.network = cls.create_network()
cls.create_subnet(cls.network)
port = cls.create_port(cls.network)
cls.ip_address = port['fixed_ips'][0]['ip_address']
cls.mac_address = port['mac_address']
@test.idempotent_id('86c3529b-1231-40de-803c-00e40882f043')
def test_create_list_port_with_address_pair(self):
# Create port with allowed address pair attribute
allowed_address_pairs = [{'ip_address': self.ip_address,
'mac_address': self.mac_address}]
body = self.ports_client.create_port(
network_id=self.network['id'],
allowed_address_pairs=allowed_address_pairs)
port_id = body['port']['id']
self.addCleanup(self.ports_client.delete_port, port_id)
# Confirm port was created with allowed address pair attribute
body = self.ports_client.list_ports()
ports = body['ports']
port = [p for p in ports if p['id'] == port_id]
msg = 'Created port not found in list of ports returned by Neutron'
self.assertTrue(port, msg)
self._confirm_allowed_address_pair(port[0], self.ip_address)
def _update_port_with_address(self, address, mac_address=None, **kwargs):
# Create a port without allowed address pair
body = self.ports_client.create_port(network_id=self.network['id'])
port_id = body['port']['id']
self.addCleanup(self.ports_client.delete_port, port_id)
if mac_address is None:
mac_address = self.mac_address
# Update allowed address pair attribute of port
allowed_address_pairs = [{'ip_address': address,
'mac_address': mac_address}]
if kwargs:
allowed_address_pairs.append(kwargs['allowed_address_pairs'])
body = self.ports_client.update_port(
port_id, allowed_address_pairs=allowed_address_pairs)
allowed_address_pair = body['port']['allowed_address_pairs']
six.assertCountEqual(self, allowed_address_pair,
allowed_address_pairs)
@test.idempotent_id('9599b337-272c-47fd-b3cf-509414414ac4')
def test_update_port_with_address_pair(self):
# Update port with allowed address pair
self._update_port_with_address(self.ip_address)
@test.idempotent_id('4d6d178f-34f6-4bff-a01c-0a2f8fe909e4')
def test_update_port_with_cidr_address_pair(self):
# Update allowed address pair with cidr
cidr = str(netaddr.IPNetwork(CONF.network.project_network_cidr))
self._update_port_with_address(cidr)
@test.idempotent_id('b3f20091-6cd5-472b-8487-3516137df933')
def test_update_port_with_multiple_ip_mac_address_pair(self):
        # Create an ip_address and mac_address through port create
resp = self.ports_client.create_port(network_id=self.network['id'])
newportid = resp['port']['id']
self.addCleanup(self.ports_client.delete_port, newportid)
ipaddress = resp['port']['fixed_ips'][0]['ip_address']
macaddress = resp['port']['mac_address']
# Update allowed address pair port with multiple ip and mac
allowed_address_pairs = {'ip_address': ipaddress,
'mac_address': macaddress}
self._update_port_with_address(
self.ip_address, self.mac_address,
allowed_address_pairs=allowed_address_pairs)
def _confirm_allowed_address_pair(self, port, ip):
msg = 'Port allowed address pairs should not be empty'
self.assertTrue(port['allowed_address_pairs'], msg)
ip_address = port['allowed_address_pairs'][0]['ip_address']
mac_address = port['allowed_address_pairs'][0]['mac_address']
self.assertEqual(ip_address, ip)
self.assertEqual(mac_address, self.mac_address)
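# Illustrative sketch (editorial addition): the allowed-address-pairs attribute
# exercised by these tests is carried in the port body roughly as
#
#     {"port": {"allowed_address_pairs": [
#         {"ip_address": "10.0.0.5", "mac_address": "fa:16:3e:00:00:01"}]}}
#
# (the address values here are made-up examples, not values used by the tests).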
class AllowedAddressPairIpV6TestJSON(AllowedAddressPairTestJSON):
_ip_version = 6
| 41.355556 | 78 | 0.687802 |
17587c72b4d27646a4c02786287b2feeccaaa19e | 2,172 | py | Python | nfmanagementapi/resources/ServiceGroupObjectCollectionResource.py | nfirewall/nfmapi | 7232975711ad01b031ed50d7f26936afcfe5312a | [
"MIT"
] | null | null | null | nfmanagementapi/resources/ServiceGroupObjectCollectionResource.py | nfirewall/nfmapi | 7232975711ad01b031ed50d7f26936afcfe5312a | [
"MIT"
] | null | null | null | nfmanagementapi/resources/ServiceGroupObjectCollectionResource.py | nfirewall/nfmapi | 7232975711ad01b031ed50d7f26936afcfe5312a | [
"MIT"
] | null | null | null | from nfmanagementapi.models import ServiceGroupObject
from nfmanagementapi.schemata import ServiceGroupObjectSchema
from marshmallow.exceptions import ValidationError
from .BaseResource import BaseResource
from flask import request
from app import db
from uuid import uuid4
path = 'service_groups'
endpoint = 'service_groups'
class ServiceGroupObjectCollectionResource(BaseResource):
def get(self):
"""List service groups
---
description: List all service groups
tags:
- Service Groups
responses:
200:
content:
application/json:
schema:
type: array
items: ServiceGroupObjectSchema
"""
objects = ServiceGroupObject.query.all()
schema = ServiceGroupObjectSchema(many = True)
return schema.dump(objects)
def post(self):
"""Create service group
---
description: Create a service group
tags:
- Service Groups
requestBody:
content:
application/json:
schema: ServiceGroupObjectSchema
responses:
201:
description: Created
content:
application/json:
schema: ServiceGroupObjectSchema
422:
description: Unprocessable Entity
content:
application/json:
schema: MessageSchema
"""
json_data = request.get_json()
try:
data = ServiceGroupObjectSchema().load(json_data)
except ValidationError as err:
return err.messages, 422
object = ServiceGroupObject()
error = False
messages = []
for key in data:
try:
setattr(object, key, data[key])
except ValueError as e:
error = True
messages.append(e.args[0])
if error:
return {"messages": messages}, 422
db.session.add(object)
db.session.commit()
db.session.refresh(object)
return ServiceGroupObjectSchema().dump(object) | 28.96 | 61 | 0.573665 |
65eadbf7310ec6c0e373b6428d5da182bb6f92e7 | 1,294 | py | Python | domain/src/entity/profile_entity.py | python-jacksonsr45/web_services | 6e37d4f00e9e59a35f06f05ce955ba53242ed9ee | [
"MIT"
] | null | null | null | domain/src/entity/profile_entity.py | python-jacksonsr45/web_services | 6e37d4f00e9e59a35f06f05ce955ba53242ed9ee | [
"MIT"
] | null | null | null | domain/src/entity/profile_entity.py | python-jacksonsr45/web_services | 6e37d4f00e9e59a35f06f05ce955ba53242ed9ee | [
"MIT"
] | null | null | null | import uuid
from datetime import datetime
class ProfileEntity:
def __init__(
self,
profile_id: str = None,
name: str = None,
last_name: str = None,
document_id: str = None,
phone: str = None,
mobile_phone: str = None,
created_at: str = None,
):
if not profile_id:
self.id = str(uuid.uuid4())
else:
self.id = profile_id
self.name = name
self.last_name = last_name
self.document_id = document_id
self.phone = phone
self.mobile_phone = mobile_phone
if not created_at:
self.created_at = datetime.now()
else:
self.created_at = created_at
self.updated_at = datetime.now()
def get_id(self) -> str:
return self.id
def get_name(self) -> str:
return self.name
def get_last_name(self) -> str:
return self.last_name
def get_document_id(self) -> str:
        return self.document_id
def get_phone(self) -> str:
return self.phone
def get_mobile_phone(self) -> str:
return self.mobile_phone
def get_created_at(self) -> datetime:
return self.created_at
def get_updated_at(self) -> datetime:
return self.updated_at
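if __name__ == "__main__":
    # Illustrative sketch (editorial addition): constructing an entity without
    # an explicit profile_id or created_at generates both automatically.
    demo = ProfileEntity(name="Ada", last_name="Lovelace")
    print(demo.get_id())          # a freshly generated UUID4 string
    print(demo.get_created_at())  # datetime captured at construction time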
| 23.962963 | 44 | 0.581917 |
074ce6df69a6b4d417be3161cf6e84eba92c6b21 | 6,039 | py | Python | plotly/graph_objs/scattergl/_line.py | omridanan/plotly.py | a8d26670cba49ce15ce9b7639ae0f55a6088a825 | [
"MIT"
] | null | null | null | plotly/graph_objs/scattergl/_line.py | omridanan/plotly.py | a8d26670cba49ce15ce9b7639ae0f55a6088a825 | [
"MIT"
] | null | null | null | plotly/graph_objs/scattergl/_line.py | omridanan/plotly.py | a8d26670cba49ce15ce9b7639ae0f55a6088a825 | [
"MIT"
] | 1 | 2019-02-18T04:12:56.000Z | 2019-02-18T04:12:56.000Z | from plotly.basedatatypes import BaseTraceHierarchyType
import copy
class Line(BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
Sets the line color.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
Returns
-------
str
"""
return self['color']
@color.setter
def color(self, val):
self['color'] = val
# dash
# ----
@property
def dash(self):
"""
Sets the style of the lines.
The 'dash' property is an enumeration that may be specified as:
- One of the following enumeration values:
['solid', 'dot', 'dash', 'longdash', 'dashdot',
'longdashdot']
Returns
-------
Any
"""
return self['dash']
@dash.setter
def dash(self, val):
self['dash'] = val
# width
# -----
@property
def width(self):
"""
Sets the line width (in px).
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self['width']
@width.setter
def width(self, val):
self['width'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'scattergl'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
Sets the line color.
dash
Sets the style of the lines.
width
Sets the line width (in px).
"""
def __init__(self, arg=None, color=None, dash=None, width=None, **kwargs):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.scattergl.Line
color
Sets the line color.
dash
Sets the style of the lines.
width
Sets the line width (in px).
Returns
-------
Line
"""
super(Line, self).__init__('line')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattergl.Line
constructor must be a dict or
an instance of plotly.graph_objs.scattergl.Line"""
)
# Import validators
# -----------------
from plotly.validators.scattergl import (line as v_line)
# Initialize validators
# ---------------------
self._validators['color'] = v_line.ColorValidator()
self._validators['dash'] = v_line.DashValidator()
self._validators['width'] = v_line.WidthValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('color', None)
self.color = color if color is not None else _v
_v = arg.pop('dash', None)
self.dash = dash if dash is not None else _v
_v = arg.pop('width', None)
self.width = width if width is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
| 32.294118 | 78 | 0.536181 |
033760337b8304a4ccb5609b7cb5f461908a1b8d | 2,146 | py | Python | netmiko-interface-example/device_info.py | vabmalikusa/python_code_samples_network | 441e6202a69ab94102d9f392e7fea87968f8d09b | [
"MIT"
] | 522 | 2017-02-09T15:28:23.000Z | 2022-03-29T18:22:24.000Z | netmiko-interface-example/device_info.py | vabmalikusa/python_code_samples_network | 441e6202a69ab94102d9f392e7fea87968f8d09b | [
"MIT"
] | 10 | 2018-03-12T14:47:09.000Z | 2021-07-15T15:53:48.000Z | netmiko-interface-example/device_info.py | vabmalikusa/python_code_samples_network | 441e6202a69ab94102d9f392e7fea87968f8d09b | [
"MIT"
] | 360 | 2017-02-14T17:41:00.000Z | 2022-03-07T07:29:18.000Z | #! /usr/bin/env python
"""Device Details for DevNet Sandboxes
This script is imported into other code.
Copyright (c) 2018 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
__author__ = "Hank Preston"
__author_email__ = "hapresto@cisco.com"
__copyright__ = "Copyright (c) 2016 Cisco Systems, Inc."
__license__ = "MIT"
# DevNet Always-On NETCONF/YANG & RESTCONF Sandbox Device
# https://devnetsandbox.cisco.com/RM/Diagram/Index/27d9747a-db48-4565-8d44-df318fce37ad?diagramType=Topology
ios_xe1 = {
"address": "ios-xe-mgmt.cisco.com",
"netconf_port": 10000,
"restconf_port": 9443,
"ssh_port": 8181,
"username": "root",
"password": "D_Vay!_10&",
"device_type": "cisco_ios"
}
# DevNet Always-On Sandbox NX-OS
#
nxos1 = {
"address": "sbx-nxos-mgmt.cisco.com",
"netconf_port": 10000,
"restconf_port": 443,
"ssh_port": 818122,
"username": "admin",
"password": "Admin_1234!",
"device_type": "cisco_nxos"
}
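# Illustrative sketch (editorial addition): companion scripts typically unpack
# these dictionaries into a netmiko connection, roughly
#
#     from netmiko import ConnectHandler
#     conn = ConnectHandler(device_type=ios_xe1["device_type"],
#                           host=ios_xe1["address"],
#                           port=ios_xe1["ssh_port"],
#                           username=ios_xe1["username"],
#                           password=ios_xe1["password"])
#
# (the exact consuming script is not shown here, so treat this as an assumption).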
| 37.649123 | 108 | 0.695713 |
55cd50b3406fdee633923b952c0960f3234b70dd | 1,768 | py | Python | bg.py | FrasSmith/backgrads | 65cc952e72575f3a4c1d9ad68cd942706229a811 | [
"Apache-2.0"
] | null | null | null | bg.py | FrasSmith/backgrads | 65cc952e72575f3a4c1d9ad68cd942706229a811 | [
"Apache-2.0"
] | null | null | null | bg.py | FrasSmith/backgrads | 65cc952e72575f3a4c1d9ad68cd942706229a811 | [
"Apache-2.0"
] | null | null | null | # from .image import render_image
import PIL
from PIL import ImageFont
from PIL import Image
from PIL import ImageDraw
import numpy as np
width = 1024
depth = 1024
colourRange = 'full' # ('dark', 'light', 'full')
filename = 'output.png'
def render_image(backWidth, backDepth, filename, colours='full'):
    if colours == 'dark':
startColour = list(np.random.choice(range(128), size=3))
endColour = list(np.random.choice(range(128), size=3))
    elif colours == 'light':
startColourTemp = list(np.random.choice(range(128), size=3))
endColourTemp = list(np.random.choice(range(128), size=3))
startColour = [x+127 for x in startColourTemp]
endColour = [x+127 for x in endColourTemp]
else:
startColour = list(np.random.choice(range(256), size=3))
endColour = list(np.random.choice(range(256), size=3))
hlist = list(np.random.choice([True, False], size=3))
colourArray = get_gradient_3d(backWidth, backDepth, startColour, endColour, hlist)
im = Image.fromarray(np.uint8(colourArray))
draw = ImageDraw.Draw(im)
im.save(filename)
def get_gradient_2d(start, stop, width, height, is_horizontal):
if is_horizontal:
return np.tile(np.linspace(start, stop, width), (height, 1))
else:
return np.tile(np.linspace(start, stop, height), (width, 1)).T
def get_gradient_3d(width, height, start_list, stop_list, is_horizontal_list):
result = np.zeros((height, width, len(start_list)), dtype=np.float64)
for i, (start, stop, is_horizontal) in enumerate(zip(start_list, stop_list, is_horizontal_list)):
result[:, :, i] = get_gradient_2d(start, stop, width, height, is_horizontal)
return result
render_image(width, depth, filename, 'full') | 36.081633 | 101 | 0.687783 |
3f0b8fb85ba3433adc4a19f44936ea1d24c04740 | 1,274 | py | Python | main.py | lee15253/edl_bk | 6777f5803138e6a64dabb096fe18a495728aabe3 | [
"MIT"
] | null | null | null | main.py | lee15253/edl_bk | 6777f5803138e6a64dabb096fe18a495728aabe3 | [
"MIT"
] | null | null | null | main.py | lee15253/edl_bk | 6777f5803138e6a64dabb096fe18a495728aabe3 | [
"MIT"
] | null | null | null | # Copyright (c) 2019, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: MIT
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/MIT
import ipdb
if __name__ == "__main__":
import torch.multiprocessing as mp
# https://github.com/pytorch/pytorch/issues/3492#issuecomment-392977006
try:
mp.set_start_method('spawn')
except RuntimeError:
pass
import os
os.environ["OMP_NUM_THREADS"] = "1"
import time
from dist_train.utils.experiment_bookend import open_experiment
from dist_train.workers import synchronous_worker
if __name__ == '__main__':
# Interpret the arguments. Load the shared model/optimizer. Fetch the config file.
model, _, config, args = open_experiment(apply_time_machine=True)
print(' ', flush=True)
model.reset()
print(' ', flush=True)
# Create a group of workers
print('Launching the individual workers...', flush=True)
processes = []
for rank in range(args.N):
# The workers perform roll-outs and synchronize gradients
p = mp.Process(target=synchronous_worker, args=(int(rank), config, args))
p.start()
time.sleep(0.25)
processes.append(p)
for p in processes:
p.join()
| 31.073171 | 101 | 0.694662 |
e9118d1ffe9715cecd0e9e336a3bc85ce92829db | 4,455 | py | Python | opendeep/log/logger.py | vitruvianscience/OpenDeep | e96efc449101094354b615cf15afe6d03644fc36 | [
"Apache-2.0"
] | 252 | 2015-03-13T21:55:22.000Z | 2021-09-06T21:37:38.000Z | opendeep/log/logger.py | afcarl/OpenDeep | e96efc449101094354b615cf15afe6d03644fc36 | [
"Apache-2.0"
] | 16 | 2015-03-14T06:47:04.000Z | 2016-09-23T19:13:35.000Z | opendeep/log/logger.py | afcarl/OpenDeep | e96efc449101094354b615cf15afe6d03644fc36 | [
"Apache-2.0"
] | 68 | 2015-03-14T00:05:53.000Z | 2020-06-04T13:36:13.000Z | """
Configuring the logger for our example needs. By default in the logging_config.json file,
this will print logging levels of info and higher to log files in the logs/ directory. Debug goes to console.
"""
# standard libraries
import os
import logging
import logging.config
import json
# internal references
from opendeep.utils.file_ops import mkdir_p
def get_root_logger():
"""
Grabs the logger instance for the root of the OpenDeep package.
Returns
-------
logger
The logger for the root of the OpenDeep package.
"""
return logging.getLogger(__name__.split('.')[0])
def config_root_logger(config_file='logging_config.json'):
"""
Configures the root logger (returned from get_root_logger()) to the specifications in the JSON file `config_file`.
Parameters
----------
config_file : str
The string path to the configuration JSON file to use.
"""
# this could be called from scripts anywhere, but we want to keep the log-related items in this directory.
# therefore, change the cwd to this file's directory and then change back at the end.
prevdir = os.path.realpath(os.getcwd())
os.chdir(os.path.split(os.path.realpath(__file__))[0])
# load the basic parameters from the JSON configuration file
# config_file = os.path.join(os.path.split(os.path.realpath(__file__))[0], config_file)
path = config_file
env_key = 'LOG_CFG'
value = os.getenv(env_key, None)
if value:
path = value
# if the configuration exists
init = True
if os.path.exists(path):
with open(path, 'rt') as f:
try:
config = json.load(f)
except:
logging.basicConfig(level=logging.DEBUG)
logger = get_root_logger()
logger.exception('Exception in reading the JSON logging config file!')
logger.warning('Anyway, loading the basicConfig for the logger instead.')
init = False
if init:
# make the file paths to the log files
for handler in config.get('handlers', None):
if handler is not None:
path = config.get('handlers').get(handler).get('filename')
if path is not None:
path = os.path.normpath(path)
(dirs, _) = os.path.split(path)
                    if len(dirs) != 0:
# dirs = os.path.join(os.path.split(os.path.realpath(__file__))[0], dirs)
try:
mkdir_p(dirs)
except:
logging.basicConfig(level=logging.DEBUG)
logger = get_root_logger()
logger.exception('Exception in creating the directory for a logging handler! '
'Path was {0!s}'.format(os.path.realpath(dirs)))
logger.warning('Anyway, loading the basicConfig for the logger instead.')
init = False
# load the configuration into the logging module
if init:
try:
logging.config.dictConfig(config)
except:
logging.basicConfig(level=logging.DEBUG)
logger = get_root_logger()
logger.exception('Exception in loading the JSON logging config file to the logging module!')
logger.warning('Anyway, loading the basicConfig for the logger instead.')
# otherwise, couldn't find the configuration file
else:
logging.basicConfig(level=logging.DEBUG)
logger = get_root_logger()
logger.warning("Could not find configuration file for logger! Was looking for {0!s}. "
"Using basicConfig instead...".format(os.path.realpath(path)))
# change the directory to the calling file's working directory
os.chdir(prevdir)
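# Minimal usage sketch (illustrative only; assumes the import path opendeep.log.logger
# and a logging_config.json next to this file, or a config path exported via LOG_CFG):
#
#   >>> from opendeep.log.logger import config_root_logger, get_root_logger
#   >>> config_root_logger()            # falls back to basicConfig on any error
#   >>> log = get_root_logger()
#   >>> log.info("logging configured")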
def delete_root_logger():
"""
Deletes the root logger (returned from get_root_logger()). This removes all existing handlers for the logger,
which effectively renders it useless.
"""
# get rid of all the existing handlers - effectively renders the logger useless
root_logger = get_root_logger()
while root_logger.handlers:
root_logger.handlers.pop()
| 40.135135 | 118 | 0.595511 |
eb6a5261872eeb42706a34a1204ed8449fc7e138 | 877 | py | Python | COLAB-GOOGLE-Practices/colab google/data/L3/l3.py | ailabteam/Daily-Working | 0a36b5b6e92941e2e101a151eda202cb57567f4a | [
"MIT"
] | 1 | 2019-10-24T04:19:00.000Z | 2019-10-24T04:19:00.000Z | COLAB-GOOGLE-Practices/colab google/data/L3/l3.py | ailabteam/Daily-Working | 0a36b5b6e92941e2e101a151eda202cb57567f4a | [
"MIT"
] | null | null | null | COLAB-GOOGLE-Practices/colab google/data/L3/l3.py | ailabteam/Daily-Working | 0a36b5b6e92941e2e101a151eda202cb57567f4a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 5 10:56:14 2019
@author: DELL
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Sigmoid function
def sigmoid(x):
return 1 / (1 + np.exp(-x))
# AND operator
plt.scatter([1], [1], c='red', edgecolors='none', s=30, label='cho vay')
plt.scatter([0, 0, 1], [0, 1, 0], c='blue', edgecolors='none', s=30, label='từ chối')
plt.plot([0, 1.5], [1.5, 0], 'g')
# OR operator
plt.scatter([0, 1, 1], [1, 0, 1], c='red', edgecolors='none', s=30, label='cho vay')
plt.scatter([0], [0], c='blue', edgecolors='none', s=30, label='từ chối')
plt.plot([-0.5, 1.5], [1, -1], 'g')
plt.xlabel('x1')
plt.ylabel('x2')
# XOR operator
plt.scatter([1, 0], [0, 1], c='red', edgecolors='none', s=30, label='cho vay')
plt.scatter([1, 0], [1, 0], c='blue', edgecolors='none', s=30, label='từ chối')
plt.xlabel('x1')
plt.ylabel('x2')
| 26.575758 | 85 | 0.588369 |
c10fc98f6f9cd46663649e015de46f17bd21c926 | 10,918 | py | Python | statsmodels/graphics/tsaplots.py | larsoner/statsmodels | e0b772ed95880e58fd0c089c04ab01eb393c2485 | [
"BSD-3-Clause"
] | 6 | 2017-08-23T12:43:44.000Z | 2021-08-18T08:20:15.000Z | statsmodels/graphics/tsaplots.py | bert9bert/statsmodels | 898ddfc483c45bb0f8e5156dd8506abda84c9b63 | [
"BSD-3-Clause"
] | null | null | null | statsmodels/graphics/tsaplots.py | bert9bert/statsmodels | 898ddfc483c45bb0f8e5156dd8506abda84c9b63 | [
"BSD-3-Clause"
] | 3 | 2017-08-23T12:43:49.000Z | 2018-04-24T02:27:33.000Z | """Correlation plot functions."""
import numpy as np
from statsmodels.compat.pandas import sort_values
from statsmodels.graphics import utils
from statsmodels.tsa.stattools import acf, pacf
def _prepare_data_corr_plot(x, lags, zero):
zero = bool(zero)
irregular = False if zero else True
if lags is None:
lags = np.arange(not zero, len(x))
elif np.isscalar(lags):
lags = np.arange(not zero, int(lags) + 1) # +1 for zero lag
else:
irregular = True
        lags = np.asanyarray(lags).astype(int)  # plain int; np.int was removed in newer NumPy
nlags = lags.max(0)
return lags, nlags, irregular
def _plot_corr(ax, title, acf_x, confint, lags, irregular, use_vlines, **kwargs):
if irregular:
acf_x = acf_x[lags]
if confint is not None:
confint = confint[lags]
if use_vlines:
ax.vlines(lags, [0], acf_x, **kwargs)
ax.axhline(**kwargs)
kwargs.setdefault('marker', 'o')
kwargs.setdefault('markersize', 5)
kwargs.setdefault('linestyle', 'None')
ax.margins(.05)
ax.plot(lags, acf_x, **kwargs)
ax.set_title(title)
if confint is not None:
if lags[0] == 0:
lags = lags[1:]
confint = confint[1:]
acf_x = acf_x[1:]
ax.fill_between(lags, confint[:, 0] - acf_x, confint[:, 1] - acf_x, alpha=.25)
def plot_acf(x, ax=None, lags=None, alpha=.05, use_vlines=True, unbiased=False,
fft=False, title='Autocorrelation', zero=True, **kwargs):
"""Plot the autocorrelation function
Plots lags on the horizontal and the correlations on vertical axis.
Parameters
----------
x : array_like
Array of time-series values
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure being
created.
lags : int or array_like, optional
int or Array of lag values, used on horizontal axis. Uses
np.arange(lags) when lags is an int. If not provided,
``lags=np.arange(len(corr))`` is used.
alpha : scalar, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
Bartlett's formula. If None, no confidence intervals are plotted.
use_vlines : bool, optional
If True, vertical lines and markers are plotted.
If False, only markers are plotted. The default marker is 'o'; it can
be overridden with a ``marker`` kwarg.
unbiased : bool
If True, then denominators for autocovariance are n-k, otherwise n
fft : bool, optional
If True, computes the ACF via FFT.
title : str, optional
Title to place on plot. Default is 'Autocorrelation'
zero : bool, optional
Flag indicating whether to include the 0-lag autocorrelation.
Default is True.
**kwargs : kwargs, optional
Optional keyword arguments that are directly passed on to the
Matplotlib ``plot`` and ``axhline`` functions.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
See Also
--------
matplotlib.pyplot.xcorr
matplotlib.pyplot.acorr
mpl_examples/pylab_examples/xcorr_demo.py
Notes
-----
Adapted from matplotlib's `xcorr`.
Data are plotted as ``plot(lags, corr, **kwargs)``
"""
fig, ax = utils.create_mpl_ax(ax)
lags, nlags, irregular = _prepare_data_corr_plot(x, lags, zero)
confint = None
# acf has different return type based on alpha
if alpha is None:
acf_x = acf(x, nlags=nlags, alpha=alpha, fft=fft,
unbiased=unbiased)
else:
acf_x, confint = acf(x, nlags=nlags, alpha=alpha, fft=fft,
unbiased=unbiased)
_plot_corr(ax, title, acf_x, confint, lags, irregular, use_vlines, **kwargs)
return fig
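# Illustrative example for plot_acf (not part of the original module; the random-walk
# series `x` below is a made-up input used only to show the call signature):
#
#   >>> import numpy as np
#   >>> from statsmodels.graphics.tsaplots import plot_acf
#   >>> x = np.random.randn(200).cumsum()
#   >>> fig = plot_acf(x, lags=40, alpha=0.05)
#   >>> fig.savefig("acf.png")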
def plot_pacf(x, ax=None, lags=None, alpha=.05, method='ywm', use_vlines=True,
title='Partial Autocorrelation', zero=True, **kwargs):
"""Plot the partial autocorrelation function
Plots lags on the horizontal and the correlations on vertical axis.
Parameters
----------
x : array_like
Array of time-series values
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure being
created.
lags : int or array_like, optional
int or Array of lag values, used on horizontal axis. Uses
np.arange(lags) when lags is an int. If not provided,
``lags=np.arange(len(corr))`` is used.
alpha : scalar, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
1/sqrt(len(x))
    method : 'ywm' (default), 'ywunbiased', 'ywmle', 'ols', 'ld' or 'ldb'
specifies which method for the calculations to use:
- yw or ywunbiased : yule walker with bias correction in denominator
for acovf
- ywm or ywmle : yule walker without bias correction
- ols - regression of time series on lags of it and on constant
- ld or ldunbiased : Levinson-Durbin recursion with bias correction
- ldb or ldbiased : Levinson-Durbin recursion without bias correction
use_vlines : bool, optional
If True, vertical lines and markers are plotted.
If False, only markers are plotted. The default marker is 'o'; it can
be overridden with a ``marker`` kwarg.
title : str, optional
Title to place on plot. Default is 'Partial Autocorrelation'
zero : bool, optional
Flag indicating whether to include the 0-lag autocorrelation.
Default is True.
**kwargs : kwargs, optional
Optional keyword arguments that are directly passed on to the
Matplotlib ``plot`` and ``axhline`` functions.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
See Also
--------
matplotlib.pyplot.xcorr
matplotlib.pyplot.acorr
mpl_examples/pylab_examples/xcorr_demo.py
Notes
-----
Adapted from matplotlib's `xcorr`.
Data are plotted as ``plot(lags, corr, **kwargs)``
"""
fig, ax = utils.create_mpl_ax(ax)
lags, nlags, irregular = _prepare_data_corr_plot(x, lags, zero)
confint = None
if alpha is None:
acf_x = pacf(x, nlags=nlags, alpha=alpha, method=method)
else:
acf_x, confint = pacf(x, nlags=nlags, alpha=alpha, method=method)
_plot_corr(ax, title, acf_x, confint, lags, irregular, use_vlines, **kwargs)
return fig
def seasonal_plot(grouped_x, xticklabels, ylabel=None, ax=None):
"""
Consider using one of month_plot or quarter_plot unless you need
irregular plotting.
Parameters
----------
grouped_x : iterable of DataFrames
Should be a GroupBy object (or similar pair of group_names and groups
as DataFrames) with a DatetimeIndex or PeriodIndex
xticklabels : list of str
List of season labels, one for each group.
ylabel : str
        Label for the y axis
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure being
created.
"""
fig, ax = utils.create_mpl_ax(ax)
start = 0
ticks = []
for season, df in grouped_x:
        df = df.copy()  # copy so sorting does not modify the caller's data
        df = df.sort_index()  # sort_index returns a new object, so re-assign it
nobs = len(df)
x_plot = np.arange(start, start + nobs)
ticks.append(x_plot.mean())
ax.plot(x_plot, df.values, 'k')
ax.hlines(df.values.mean(), x_plot[0], x_plot[-1], colors='r',
linewidth=3)
start += nobs
ax.set_xticks(ticks)
ax.set_xticklabels(xticklabels)
ax.set_ylabel(ylabel)
ax.margins(.1, .05)
return fig
def month_plot(x, dates=None, ylabel=None, ax=None):
"""
Seasonal plot of monthly data
Parameters
----------
x : array-like
Seasonal data to plot. If dates is None, x must be a pandas object
with a PeriodIndex or DatetimeIndex with a monthly frequency.
dates : array-like, optional
If `x` is not a pandas object, then dates must be supplied.
ylabel : str, optional
The label for the y-axis. Will attempt to use the `name` attribute
of the Series.
ax : matplotlib.axes, optional
Existing axes instance.
Returns
-------
matplotlib.Figure
Examples
--------
>>> import statsmodels.api as sm
>>> import pandas as pd
>>> dta = sm.datasets.elnino.load_pandas().data
>>> dta['YEAR'] = dta.YEAR.astype(int).astype(str)
>>> dta = dta.set_index('YEAR').T.unstack()
>>> dates = pd.to_datetime(list(map(lambda x : '-'.join(x) + '-1',
... dta.index.values)))
>>> dta.index = pd.DatetimeIndex(dates, freq='MS')
>>> fig = sm.graphics.tsa.month_plot(dta)
.. plot:: plots/graphics_month_plot.py
"""
from pandas import DataFrame
if dates is None:
from statsmodels.tools.data import _check_period_index
_check_period_index(x, freq="M")
else:
from pandas import Series, PeriodIndex
x = Series(x, index=PeriodIndex(dates, freq="M"))
xticklabels = ['j','f','m','a','m','j','j','a','s','o','n','d']
return seasonal_plot(x.groupby(lambda y : y.month), xticklabels,
ylabel=ylabel, ax=ax)
def quarter_plot(x, dates=None, ylabel=None, ax=None):
"""
Seasonal plot of quarterly data
Parameters
----------
x : array-like
Seasonal data to plot. If dates is None, x must be a pandas object
        with a PeriodIndex or DatetimeIndex with a quarterly frequency.
dates : array-like, optional
If `x` is not a pandas object, then dates must be supplied.
ylabel : str, optional
The label for the y-axis. Will attempt to use the `name` attribute
of the Series.
ax : matplotlib.axes, optional
Existing axes instance.
Returns
-------
matplotlib.Figure
"""
from pandas import DataFrame
if dates is None:
from statsmodels.tools.data import _check_period_index
_check_period_index(x, freq="Q")
else:
from pandas import Series, PeriodIndex
x = Series(x, index=PeriodIndex(dates, freq="Q"))
xticklabels = ['q1', 'q2', 'q3', 'q4']
return seasonal_plot(x.groupby(lambda y : y.quarter), xticklabels,
ylabel=ylabel, ax=ax)
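# Illustrative example for quarter_plot, mirroring the month_plot docstring above
# (the quarterly PeriodIndex data below is a hypothetical stand-in):
#
#   >>> import pandas as pd
#   >>> import statsmodels.api as sm
#   >>> idx = pd.period_range("2000Q1", periods=40, freq="Q")
#   >>> data = pd.Series(range(40), index=idx)
#   >>> fig = sm.graphics.tsa.quarter_plot(data)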
| 33.388379 | 86 | 0.632259 |
80b49d0ae4cf3e9c0edaafde8d39e6770890217f | 454 | py | Python | Sorting/problems/two_array_element_swap.py | kimjiwook0129/Coding-Interivew-Cheatsheet | 574e6acecdb617b9c3cef7ec3b154ab183d8b99a | [
"MIT"
] | 3 | 2022-01-09T04:33:04.000Z | 2022-02-04T17:40:43.000Z | Sorting/problems/two_array_element_swap.py | kimjiwook0129/Coding-Interivew-Cheatsheet | 574e6acecdb617b9c3cef7ec3b154ab183d8b99a | [
"MIT"
] | null | null | null | Sorting/problems/two_array_element_swap.py | kimjiwook0129/Coding-Interivew-Cheatsheet | 574e6acecdb617b9c3cef7ec3b154ab183d8b99a | [
"MIT"
] | null | null | null | # 이것이 코딩테스트다 p.182
import sys
if __name__ == "__main__":
N, K = map(int, input().split())
lst_A = list(map(int, sys.stdin.readline().rstrip().split()))
lst_B = list(map(int, sys.stdin.readline().rstrip().split()))
lst_A.sort()
lst_B.sort()
for i in range(K):
a = lst_A[i]
b = lst_B[N - i - 1]
if b > a:
lst_A[i], lst_B[N - i - 1] = b, a
else:
break
print(sum(lst_A)) | 25.222222 | 65 | 0.506608 |
0eaeead28a385652269daff022ab2b305aec76f0 | 902 | py | Python | setup.py | HaxballGym/HaxballGym-tools | ec627801c7eac1ebf71fef75b4c3696fd1baea27 | [
"Apache-2.0"
] | null | null | null | setup.py | HaxballGym/HaxballGym-tools | ec627801c7eac1ebf71fef75b4c3696fd1baea27 | [
"Apache-2.0"
] | null | null | null | setup.py | HaxballGym/HaxballGym-tools | ec627801c7eac1ebf71fef75b4c3696fd1baea27 | [
"Apache-2.0"
] | null | null | null | from setuptools import setup, find_packages
__version__ = None # This will get replaced when reading version.py
exec(open('haxballgym_tools/version.py').read())
with open('README.md', 'r') as readme_file:
long_description = readme_file.read()
setup(
name='haxballgym_tools',
packages=find_packages(),
version=__version__,
description='Extra tools for HaxballGym, like SB3 compatibility',
long_description=long_description,
long_description_content_type='text/markdown',
author='Wazarr',
install_requires=[
'haxballgym>=0.3.0',
],
python_requires='>=3.7',
license='Apache 2.0',
license_file='LICENSE',
keywords=['haxball', 'gym', 'reinforcement-learning'],
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
],
) | 31.103448 | 69 | 0.677384 |
6f8011a6cf74ce96501920266aa080d8474f355a | 3,262 | py | Python | manila_ui/dashboards/admin/share_instances/views.py | mail2nsrajesh/manila-ui | 6c55579d69083525b40ad85a2bd83deebbaa9eeb | [
"Apache-2.0"
] | null | null | null | manila_ui/dashboards/admin/share_instances/views.py | mail2nsrajesh/manila-ui | 6c55579d69083525b40ad85a2bd83deebbaa9eeb | [
"Apache-2.0"
] | null | null | null | manila_ui/dashboards/admin/share_instances/views.py | mail2nsrajesh/manila-ui | 6c55579d69083525b40ad85a2bd83deebbaa9eeb | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Admin views for managing share instances.
"""
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tables
from horizon import tabs
from horizon.utils import memoized
from manila_ui.api import manila
from manila_ui.dashboards.admin.share_instances import tables as si_tables
from manila_ui.dashboards.admin.share_instances import tabs as si_tabs
from manila_ui.dashboards import utils as ui_utils
class ShareInstancesView(tables.MultiTableView):
table_classes = (
si_tables.ShareInstancesTable,
)
template_name = "admin/share_instances/index.html"
page_title = _("Share Instances")
@memoized.memoized_method
def get_share_instances_data(self):
try:
share_instances = manila.share_instance_list(self.request)
except Exception:
share_instances = []
exceptions.handle(
self.request, _("Unable to retrieve share instances."))
return share_instances
class ShareInstanceDetailView(tabs.TabView):
tab_group_class = si_tabs.ShareInstanceDetailTabs
template_name = 'admin/share_instances/detail.html'
def get_context_data(self, **kwargs):
context = super(self.__class__, self).get_context_data(**kwargs)
share_instance = self.get_data()
context["share_instance"] = share_instance
context["page_title"] = (
_("Share Instance Details: %s") % share_instance.id)
return context
@memoized.memoized_method
def get_data(self):
try:
share_instance_id = self.kwargs['share_instance_id']
share_instance = manila.share_instance_get(
self.request, share_instance_id)
share_instance.export_locations = (
manila.share_instance_export_location_list(
self.request, share_instance_id))
export_locations = [
exp['path'] for exp in share_instance.export_locations
]
share_instance.el_size = ui_utils.calculate_longest_str_size(
export_locations)
return share_instance
except Exception:
redirect = reverse('horizon:admin:share_instances:index')
exceptions.handle(
self.request,
_('Unable to retrieve share instance details.'),
redirect=redirect)
def get_tabs(self, request, *args, **kwargs):
share_instance = self.get_data()
return self.tab_group_class(
request, share_instance=share_instance, **kwargs)
| 37.068182 | 78 | 0.685162 |
533bc04a57f367682885026a43c5f20be1049f65 | 1,967 | py | Python | grr/server/grr_response_server/gui/api_plugins/config_regression_test.py | tsehori/grr | 048506f22f74642bfe61749069a45ddf496fdab3 | [
"Apache-2.0"
] | 1 | 2021-07-01T01:43:06.000Z | 2021-07-01T01:43:06.000Z | grr/server/grr_response_server/gui/api_plugins/config_regression_test.py | tsehori/grr | 048506f22f74642bfe61749069a45ddf496fdab3 | [
"Apache-2.0"
] | 44 | 2021-05-14T22:49:24.000Z | 2022-03-13T21:54:02.000Z | grr/server/grr_response_server/gui/api_plugins/config_regression_test.py | tsehori/grr | 048506f22f74642bfe61749069a45ddf496fdab3 | [
"Apache-2.0"
] | 1 | 2020-06-25T14:25:54.000Z | 2020-06-25T14:25:54.000Z | #!/usr/bin/env python
# Lint as: python3
"""This modules contains regression tests for config API handler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from absl import app
from grr_response_server.gui import api_regression_test_lib
from grr_response_server.gui.api_plugins import config as config_plugin
from grr_response_server.gui.api_plugins import config_test as config_plugin_test
class ApiListGrrBinariesHandlerRegressionTest(
config_plugin_test.ApiGrrBinaryTestMixin,
api_regression_test_lib.ApiRegressionTest):
api_method = "ListGrrBinaries"
handler = config_plugin.ApiListGrrBinariesHandler
def Run(self):
self.SetUpBinaries()
self.Check("ListGrrBinaries")
class ApiGetGrrBinaryHandlerRegressionTest(
config_plugin_test.ApiGrrBinaryTestMixin,
api_regression_test_lib.ApiRegressionTest):
api_method = "GetGrrBinary"
handler = config_plugin.ApiGetGrrBinaryHandler
def Run(self):
self.SetUpBinaries()
self.Check(
"GetGrrBinary",
args=config_plugin.ApiGetGrrBinaryArgs(type="PYTHON_HACK", path="test"))
self.Check(
"GetGrrBinary",
args=config_plugin.ApiGetGrrBinaryArgs(
type="EXECUTABLE", path="windows/test.exe"))
class ApiGetGrrBinaryBlobHandlerRegressionTest(
config_plugin_test.ApiGrrBinaryTestMixin,
api_regression_test_lib.ApiRegressionTest):
api_method = "GetGrrBinaryBlob"
handler = config_plugin.ApiGetGrrBinaryBlobHandler
def Run(self):
self.SetUpBinaries()
self.Check(
"GetGrrBinaryBlob",
args=config_plugin.ApiGetGrrBinaryBlobArgs(
type="PYTHON_HACK", path="test"))
self.Check(
"GetGrrBinaryBlob",
args=config_plugin.ApiGetGrrBinaryBlobArgs(
type="EXECUTABLE", path="windows/test.exe"))
def main(argv):
api_regression_test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
| 26.581081 | 81 | 0.758516 |
644d6fd768fbd4fbfa0d82a696d80bff9bc7e645 | 4,453 | gyp | Python | shared_model/packages/javascript/binding.gyp | steephengeorge/iroha | 9e0e19035308c6ebaf706f709c5b7b3ac46e708b | [
"Apache-2.0"
] | null | null | null | shared_model/packages/javascript/binding.gyp | steephengeorge/iroha | 9e0e19035308c6ebaf706f709c5b7b3ac46e708b | [
"Apache-2.0"
] | null | null | null | shared_model/packages/javascript/binding.gyp | steephengeorge/iroha | 9e0e19035308c6ebaf706f709c5b7b3ac46e708b | [
"Apache-2.0"
] | null | null | null | {
'variables': {
'iroha_home_dir': '../../../'
},
'targets': [
{
'target_name': 'shared_model',
'type': 'none',
'actions': [
{
'action_name': 'configure',
'message': 'Generate CMake build configuration for shared_model...',
'inputs': [
'<(iroha_home_dir)/shared_model/bindings/CMakeLists.txt'
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/shared_model/bindings/Makefile',
],
'action': [
'cmake',
'-H<(iroha_home_dir)',
'-B<(SHARED_INTERMEDIATE_DIR)',
'-DSWIG_NODE=ON',
'-DENABLE_LIBS_PACKAGING=OFF',
'-DSHARED_MODEL_DISABLE_COMPATIBILITY=ON',
'-DCMAKE_POSITION_INDEPENDENT_CODE=ON',
'-DCMAKE_BUILD_TYPE=Release'
],
},
{
'action_name': 'build',
'message': 'Build shared_model libraries by CMake...',
'inputs': [
'<(SHARED_INTERMEDIATE_DIR)/shared_model/bindings/Makefile',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/shared_model/bindings/bindingsJAVASCRIPT_wrap.cxx',
'<(SHARED_INTERMEDIATE_DIR)/shared_model/bindings/libirohanode.a',
'<(SHARED_INTERMEDIATE_DIR)/shared_model/bindings/libbindings.a'
],
'action': [
'cmake',
'--build', '<(SHARED_INTERMEDIATE_DIR)',
'--target', 'irohanode',
'--',
'-j<!(echo "$(getconf _NPROCESSORS_ONLN)")'
]
},
],
###
# Copy all necessary static libs to PRODUCT_DIR, so we ensure their existence!
###
'copies': [
{
'files': [
'<(SHARED_INTERMEDIATE_DIR)/shared_model/bindings/libirohanode.a',
'<(SHARED_INTERMEDIATE_DIR)/shared_model/bindings/libbindings.a',
'<(SHARED_INTERMEDIATE_DIR)/schema/libschema.a',
'<(SHARED_INTERMEDIATE_DIR)/libs/generator/libgenerator.a',
'<(SHARED_INTERMEDIATE_DIR)/libs/amount/libiroha_amount.a',
'<(SHARED_INTERMEDIATE_DIR)/shared_model/validators/libshared_model_stateless_validation.a',
# Cryptography libs
'<(SHARED_INTERMEDIATE_DIR)/shared_model/cryptography/ed25519_sha3_impl/libshared_model_cryptography.a',
'<(SHARED_INTERMEDIATE_DIR)/shared_model/cryptography/ed25519_sha3_impl/internal/libhash.a',
'<(SHARED_INTERMEDIATE_DIR)/shared_model/cryptography/ed25519_sha3_impl/internal/libed25519_crypto.a',
'<(SHARED_INTERMEDIATE_DIR)/shared_model/cryptography/model_impl/libshared_model_cryptography_model.a',
# Third-party libraries
'<(iroha_home_dir)/external/src/hyperledger_ed25519-build/libed25519.a'
],
'destination': '<(PRODUCT_DIR)'
}
]
},
{
'target_name': '<(module_name)',
'dependencies': [ 'shared_model' ],
'include_dirs': [
'<(iroha_home_dir)/shared_model',
'<(iroha_home_dir)/libs',
'<(iroha_home_dir)/schema'
],
'sources': [
'<(SHARED_INTERMEDIATE_DIR)/shared_model/bindings/bindingsJAVASCRIPT_wrap.cxx'
],
'cflags_cc': ['-std=c++14', '-fexceptions', '-DDISABLE_BACKWARD'],
'cflags_cc!': ['-fno-rtti'],
'libraries': [
'-L/usr/local/lib',
'-L<(PRODUCT_DIR)',
'-lirohanode', # Library contains SWIG runtime
'-lbindings',
'-lgenerator',
'-liroha_amount',
'-lschema',
'-lshared_model_stateless_validation',
# Cryptography libs
'-lshared_model_cryptography',
'-lhash',
'-led25519_crypto',
'-lshared_model_cryptography_model',
# Third-party libraries
'-led25519',
'-lprotobuf'
],
'conditions': [
[ 'OS == "mac"', {
'xcode_settings': {
'GCC_ENABLE_CPP_RTTI': 'YES',
'GCC_ENABLE_CPP_EXCEPTIONS': 'YES',
'OTHER_CFLAGS': ['-std=c++14', '-DDISABLE_BACKWARD']
}
}
]
]
},
{
'target_name': 'action_after_build',
'type': 'none',
'dependencies': [ '<(module_name)' ],
'copies': [
{
'files': [ '<(PRODUCT_DIR)/<(module_name).node' ],
'destination': '<(module_path)'
}
]
}
]
}
| 33.734848 | 116 | 0.551314 |
36527fe25558e6edecd224b2cea50bc2e0c8b979 | 1,297 | py | Python | vanmongo/connection.py | SatelCreative/vanmongo | 9037c0c0ad56f6fa3fb687c48607c285c4e14a03 | [
"MIT"
] | null | null | null | vanmongo/connection.py | SatelCreative/vanmongo | 9037c0c0ad56f6fa3fb687c48607c285c4e14a03 | [
"MIT"
] | 5 | 2021-06-25T17:49:21.000Z | 2021-09-15T00:15:58.000Z | vanmongo/connection.py | SatelCreative/vanmongo | 9037c0c0ad56f6fa3fb687c48607c285c4e14a03 | [
"MIT"
] | null | null | null | from __future__ import annotations
from base64 import b64decode, b64encode
from typing import Any, Generic, List, Optional, Type, TypeVar
from pydantic import BaseModel
from pydantic.generics import GenericModel
Node = TypeVar("Node")
Model = TypeVar("Model", bound=BaseModel)
def base64_encode_model(model: Model) -> str:
return b64encode(model.json(exclude_none=True).encode()).decode()
def base64_decode_model(Model: Type[Model], value: str) -> Model:
return Model.parse_raw(b64decode(value.encode()))
class MongoCursor(BaseModel):
id: str
sort: Optional[str] = None
value: Optional[Any] = None
def base64_encode(self):
return base64_encode_model(self)
@classmethod
def base64_decode(cls, value: str):
return base64_decode_model(cls, value)
class MeilCursor(BaseModel):
offset: int
query: str
def base64_encode(self):
return base64_encode_model(self)
@classmethod
def base64_decode(cls, value: str):
return base64_decode_model(cls, value)
class Edge(GenericModel, Generic[Node]):
node: Node
cursor: str
class PageInfo(BaseModel):
has_next_page: bool
has_previous_page: bool
class Connection(GenericModel, Generic[Node]):
edges: List[Edge[Node]]
page_info: PageInfo
| 21.983051 | 69 | 0.718581 |
d2adeba08a4794c3c7b4cb4a13be4a8e631bafdc | 20,409 | py | Python | tools/rating_curve_comparison.py | hohe12ly/inundation-mapping | d133addd4d730b5c468dcf1a8f7dfab35c55cbd7 | [
"Info-ZIP"
] | 25 | 2020-10-13T17:45:31.000Z | 2022-01-25T18:35:49.000Z | tools/rating_curve_comparison.py | dhardestylewis/cahaba | dcf414f5655ecafbf8bb62cd219aef405e55f0a2 | [
"Info-ZIP"
] | 422 | 2020-10-06T16:48:38.000Z | 2022-02-03T22:43:23.000Z | tools/rating_curve_comparison.py | dhardestylewis/cahaba | dcf414f5655ecafbf8bb62cd219aef405e55f0a2 | [
"Info-ZIP"
] | 7 | 2020-10-06T16:17:49.000Z | 2021-12-07T23:16:05.000Z | #!/usr/bin/env python3
import os
import sys
import pandas as pd
import numpy as np
import argparse
import matplotlib.pyplot as plt
import seaborn as sns
from functools import reduce
from multiprocessing import Pool
from os.path import isfile, join
import shutil
import warnings
from pathlib import Path
import time
warnings.simplefilter(action='ignore', category=FutureWarning)
"""
Plot Rating Curves and Compare to USGS Gages
Parameters
----------
fim_dir : str
Directory containing FIM output folders.
output_dir : str
Directory containing rating curve plots and tables.
usgs_gages_filename : str
File name of USGS rating curves.
nwm_flow_dir : str
Directory containing NWM recurrence flows files.
number_of_jobs : str
Number of jobs.
stat_groups : str
string of columns to group eval metrics.
"""
def check_file_age(file):
'''
Checks if file exists, determines the file age, and recommends
updating if older than 1 month.
Returns
-------
None.
'''
file = Path(file)
if file.is_file():
modification_time = file.stat().st_mtime
current_time = time.time()
file_age_days = (current_time - modification_time)/86400
if file_age_days > 30:
check = f'{file.name} is {int(file_age_days)} days old, consider updating.\nUpdate with rating_curve_get_usgs_curves.py'
else:
check = f'{file.name} is {int(file_age_days)} days old.'
return check
# recurr_intervals = ['recurr_1_5_cms.csv','recurr_5_0_cms.csv','recurr_10_0_cms.csv']
def generate_rating_curve_metrics(args):
elev_table_filename = args[0]
hydrotable_filename = args[1]
usgs_gages_filename = args[2]
usgs_recurr_stats_filename = args[3]
nwm_recurr_data_filename = args[4]
rc_comparison_plot_filename = args[5]
nwm_flow_dir = args[6]
catfim_flows_filename = args[7]
huc = args[8]
elev_table = pd.read_csv(elev_table_filename,dtype={'location_id': str})
hydrotable = pd.read_csv(hydrotable_filename,dtype={'HUC': str,'feature_id': str})
usgs_gages = pd.read_csv(usgs_gages_filename,dtype={'location_id': str})
# Join rating curves with elevation data
hydrotable = hydrotable.merge(elev_table, on="HydroID")
relevant_gages = list(hydrotable.location_id.unique())
usgs_gages = usgs_gages[usgs_gages['location_id'].isin(relevant_gages)]
usgs_gages = usgs_gages.reset_index(drop=True)
if len(usgs_gages) > 0:
# Adjust rating curve to elevation
hydrotable['elevation_ft'] = (hydrotable.stage + hydrotable.dem_adj_elevation) * 3.28084 # convert from m to ft
# hydrotable['raw_elevation_ft'] = (hydrotable.stage + hydrotable.dem_elevation) * 3.28084 # convert from m to ft
hydrotable['discharge_cfs'] = hydrotable.discharge_cms * 35.3147
usgs_gages = usgs_gages.rename(columns={"flow": "discharge_cfs", "elevation_navd88": "elevation_ft"})
hydrotable['source'] = "FIM"
usgs_gages['source'] = "USGS"
limited_hydrotable = hydrotable.filter(items=['location_id','elevation_ft','discharge_cfs','source'])
select_usgs_gages = usgs_gages.filter(items=['location_id', 'elevation_ft', 'discharge_cfs','source'])
rating_curves = limited_hydrotable.append(select_usgs_gages)
# Add stream order
stream_orders = hydrotable.filter(items=['location_id','str_order']).drop_duplicates()
rating_curves = rating_curves.merge(stream_orders, on='location_id')
rating_curves['str_order'] = rating_curves['str_order'].astype('int')
# plot rating curves
generate_facet_plot(rating_curves, rc_comparison_plot_filename)
# NWM recurr intervals
recurr_1_5_yr_filename = join(nwm_flow_dir,'recurr_1_5_cms.csv')
recurr_5_yr_filename = join(nwm_flow_dir,'recurr_5_0_cms.csv')
recurr_10_yr_filename = join(nwm_flow_dir,'recurr_10_0_cms.csv')
# Update column names
recurr_1_5_yr = pd.read_csv(recurr_1_5_yr_filename,dtype={'feature_id': str})
recurr_1_5_yr = recurr_1_5_yr.rename(columns={"discharge": "1.5"})
recurr_5_yr = pd.read_csv(recurr_5_yr_filename,dtype={'feature_id': str})
recurr_5_yr = recurr_5_yr.rename(columns={"discharge": "5.0"})
recurr_10_yr = pd.read_csv(recurr_10_yr_filename,dtype={'feature_id': str})
recurr_10_yr = recurr_10_yr.rename(columns={"discharge": "10.0"})
# Merge NWM recurr intervals into a single layer
nwm_recurr_intervals_all = reduce(lambda x,y: pd.merge(x,y, on='feature_id', how='outer'), [recurr_1_5_yr, recurr_5_yr, recurr_10_yr])
nwm_recurr_intervals_all = pd.melt(nwm_recurr_intervals_all, id_vars=['feature_id'], value_vars=['1.5','5.0','10.0'], var_name='recurr_interval', value_name='discharge_cms')
# Append catfim data (already set up in format similar to nwm_recurr_intervals_all)
cat_fim = pd.read_csv(catfim_flows_filename, dtype={'feature_id':str})
nwm_recurr_intervals_all = nwm_recurr_intervals_all.append(cat_fim)
# Convert discharge to cfs and filter
nwm_recurr_intervals_all['discharge_cfs'] = nwm_recurr_intervals_all.discharge_cms * 35.3147
nwm_recurr_intervals_all = nwm_recurr_intervals_all.filter(items=['discharge_cfs', 'recurr_interval','feature_id']).drop_duplicates()
# Identify unique gages
usgs_crosswalk = hydrotable.filter(items=['location_id', 'feature_id']).drop_duplicates()
nwm_recurr_data_table = pd.DataFrame()
usgs_recurr_data = pd.DataFrame()
# Interpolate USGS/FIM elevation at each gage
for index, gage in usgs_crosswalk.iterrows():
# Interpolate USGS elevation at NWM recurrence intervals
usgs_rc = rating_curves.loc[(rating_curves.location_id==gage.location_id) & (rating_curves.source=="USGS")]
if len(usgs_rc) <1:
print(f"missing USGS rating curve data for usgs station {gage.location_id} in huc {huc}")
continue
str_order = np.unique(usgs_rc.str_order).item()
feature_id = str(gage.feature_id)
usgs_pred_elev = get_reccur_intervals(usgs_rc, usgs_crosswalk,nwm_recurr_intervals_all)
# Handle sites missing data
if len(usgs_pred_elev) <1:
print(f"missing USGS elevation data for usgs station {gage.location_id} in huc {huc}")
continue
# Clean up data
usgs_pred_elev['location_id'] = gage.location_id
usgs_pred_elev = usgs_pred_elev.filter(items=['location_id','recurr_interval', 'discharge_cfs','pred_elev'])
usgs_pred_elev = usgs_pred_elev.rename(columns={"pred_elev": "USGS"})
# Interpolate FIM elevation at NWM recurrence intervals
fim_rc = rating_curves.loc[(rating_curves.location_id==gage.location_id) & (rating_curves.source=="FIM")]
if len(fim_rc) <1:
print(f"missing FIM rating curve data for usgs station {gage.location_id} in huc {huc}")
continue
fim_pred_elev = get_reccur_intervals(fim_rc, usgs_crosswalk,nwm_recurr_intervals_all)
# Handle sites missing data
if len(fim_pred_elev) <1:
print(f"missing FIM elevation data for usgs station {gage.location_id} in huc {huc}")
continue
# Clean up data
fim_pred_elev = fim_pred_elev.rename(columns={"pred_elev": "FIM"})
fim_pred_elev = fim_pred_elev.filter(items=['recurr_interval', 'discharge_cfs','FIM'])
usgs_pred_elev = usgs_pred_elev.merge(fim_pred_elev, on=['recurr_interval','discharge_cfs'])
# Add attributes
usgs_pred_elev['HUC'] = huc
usgs_pred_elev['HUC4'] = huc[0:4]
usgs_pred_elev['str_order'] = str_order
usgs_pred_elev['feature_id'] = feature_id
# Melt dataframe
usgs_pred_elev = pd.melt(usgs_pred_elev, id_vars=['location_id','feature_id','recurr_interval','discharge_cfs','HUC','HUC4','str_order'], value_vars=['USGS','FIM'], var_name="source", value_name='elevation_ft')
nwm_recurr_data_table = nwm_recurr_data_table.append(usgs_pred_elev)
# Interpolate FIM elevation at USGS observations
# fim_rc = fim_rc.merge(usgs_crosswalk, on="location_id")
# usgs_rc = usgs_rc.rename(columns={"elevation_ft": "USGS"})
#
# # Sort stage in ascending order
# usgs_rc = usgs_rc.sort_values('USGS',ascending=True)
#
# # Interpolate FIM elevation at USGS observations
# usgs_rc['FIM'] = np.interp(usgs_rc.discharge_cfs.values, fim_rc['discharge_cfs'], fim_rc['elevation_ft'], left = np.nan, right = np.nan)
# usgs_rc = usgs_rc[usgs_rc['FIM'].notna()]
# usgs_rc = usgs_rc.drop(columns=["source"])
#
# # Melt dataframe
# usgs_rc = pd.melt(usgs_rc, id_vars=['location_id','discharge_cfs','str_order'], value_vars=['USGS','FIM'], var_name="source", value_name='elevation_ft')
#
# if not usgs_rc.empty:
# usgs_recurr_data = usgs_recurr_data.append(usgs_rc)
# Generate stats for all sites in huc
# if not usgs_recurr_data.empty:
# usgs_recurr_stats_table = calculate_rc_stats_elev(usgs_recurr_data)
# usgs_recurr_stats_table.to_csv(usgs_recurr_stats_filename,index=False)
# # Generate plots (not currently being used)
# fim_elev_at_USGS_rc_plot_filename = join(dirname(rc_comparison_plot_filename),'FIM_elevations_at_USGS_rc_' + str(huc) +'.png')
# generate_facet_plot(usgs_recurr_data, fim_elev_at_USGS_rc_plot_filename)
if not nwm_recurr_data_table.empty:
nwm_recurr_data_table.discharge_cfs = np.round(nwm_recurr_data_table.discharge_cfs,2)
nwm_recurr_data_table.elevation_ft = np.round(nwm_recurr_data_table.elevation_ft,2)
nwm_recurr_data_table.to_csv(nwm_recurr_data_filename,index=False)
else:
print(f"no USGS data for gage(s): {relevant_gages} in huc {huc}")
def aggregate_metrics(output_dir,procs_list,stat_groups):
# agg_usgs_interp_elev_stats = join(output_dir,'agg_usgs_interp_elev_stats.csv')
agg_nwm_recurr_flow_elev = join(output_dir,'agg_nwm_recurr_flow_elevations.csv')
agg_nwm_recurr_flow_elev_stats = join(output_dir,f"agg_nwm_recurr_flow_elev_stats_{'_'.join(stat_groups)}.csv")
# if os.path.isfile(agg_usgs_interp_elev_stats):
# os.remove(agg_usgs_interp_elev_stats)
if os.path.isfile(agg_nwm_recurr_flow_elev):
os.remove(agg_nwm_recurr_flow_elev)
if os.path.isfile(agg_nwm_recurr_flow_elev_stats):
os.remove(agg_nwm_recurr_flow_elev_stats)
for huc in procs_list:
# if os.path.isfile(huc[3]):
# usgs_recurr_stats = pd.read_csv(huc[3])
#
# # Write/append usgs_recurr_stats
# if os.path.isfile(agg_usgs_interp_elev_stats):
# usgs_recurr_stats.to_csv(agg_usgs_interp_elev_stats,index=False, mode='a',header=False)
# else:
# usgs_recurr_stats.to_csv(agg_usgs_interp_elev_stats,index=False)
if os.path.isfile(huc[4]):
nwm_recurr_data = pd.read_csv(huc[4],dtype={'location_id': str,
'feature_id': str})
# Write/append nwm_recurr_data
if os.path.isfile(agg_nwm_recurr_flow_elev):
nwm_recurr_data.to_csv(agg_nwm_recurr_flow_elev,index=False, mode='a',header=False)
else:
nwm_recurr_data.to_csv(agg_nwm_recurr_flow_elev,index=False)
agg_stats = pd.read_csv(agg_nwm_recurr_flow_elev,dtype={'location_id': str,
'feature_id': str})
agg_recurr_stats_table = calculate_rc_stats_elev(agg_stats,stat_groups)
agg_recurr_stats_table.to_csv(agg_nwm_recurr_flow_elev_stats,index=False)
def generate_facet_plot(rc, plot_filename):
# Filter FIM elevation based on USGS data
for gage in rc.location_id.unique():
min_elev = rc.loc[(rc.location_id==gage) & (rc.source=='USGS')].elevation_ft.min()
max_elev = rc.loc[(rc.location_id==gage) & (rc.source=='USGS')].elevation_ft.max()
rc = rc.drop(rc[(rc.location_id==gage) & (rc.source=='FIM') & (rc.elevation_ft > (max_elev + 2))].index)
rc = rc.drop(rc[(rc.location_id==gage) & (rc.source=='FIM') & (rc.elevation_ft < min_elev - 2)].index)
rc = rc.rename(columns={"location_id": "USGS Gage"})
## Generate rating curve plots
num_plots = len(rc["USGS Gage"].unique())
if num_plots > 3:
columns = num_plots // 3
else:
columns = 1
sns.set(style="ticks")
g = sns.FacetGrid(rc, col="USGS Gage", hue="source", hue_order=['USGS','FIM'], sharex=False, sharey=False,col_wrap=columns)
g.map(sns.scatterplot, "discharge_cfs", "elevation_ft", palette="tab20c", marker="o")
g.set_axis_labels(x_var="Discharge (cfs)", y_var="Elevation (ft)")
# Adjust the arrangement of the plots
g.fig.tight_layout(w_pad=1)
g.add_legend()
plt.savefig(plot_filename)
plt.close()
def get_reccur_intervals(site_rc, usgs_crosswalk,nwm_recurr_intervals):
usgs_site = site_rc.merge(usgs_crosswalk, on="location_id")
nwm_ids = len(usgs_site.feature_id.drop_duplicates())
if nwm_ids > 0:
nwm_recurr_intervals = nwm_recurr_intervals.copy().loc[nwm_recurr_intervals.feature_id==usgs_site.feature_id.drop_duplicates().item()]
nwm_recurr_intervals['pred_elev'] = np.interp(nwm_recurr_intervals.discharge_cfs.values, usgs_site['discharge_cfs'], usgs_site['elevation_ft'], left = np.nan, right = np.nan)
return nwm_recurr_intervals
else:
return []
def calculate_rc_stats_elev(rc,stat_groups=None):
usgs_elev = "USGS"
src_elev = "FIM"
# Collect any extra columns not associated with melt
col_index = list(rc.columns)
pivot_vars = ['source','elevation_ft']
col_index = [col for col in col_index if col not in pivot_vars]
# Unmelt elevation/source
rc_unmelt = (rc.set_index(col_index)
.pivot(columns="source")['elevation_ft']
.reset_index()
.rename_axis(None, axis=1)
)
if stat_groups is None:
stat_groups = ['location_id']
# Calculate variables for NRMSE
rc_unmelt["yhat_minus_y"] = rc_unmelt[src_elev] - rc_unmelt[usgs_elev]
rc_unmelt["yhat_minus_y_squared"] = rc_unmelt["yhat_minus_y"] ** 2
# Calculate metrics by group
station_rc = rc_unmelt.groupby(stat_groups)
# Calculate variables for NRMSE
sum_y_diff = station_rc.apply(lambda x: x["yhat_minus_y_squared"].sum())\
.reset_index(stat_groups, drop = False).rename({0: "sum_y_diff"}, axis=1)
# Determine number of events that are modeled
n = station_rc.apply(lambda x: x[usgs_elev].count())\
.reset_index(stat_groups, drop = False).rename({0: "n"}, axis=1)
# Determine the maximum/minimum USGS elevation
y_max = station_rc.apply(lambda x: x[usgs_elev].max())\
.reset_index(stat_groups, drop = False).rename({0: "y_max"}, axis=1)
y_min = station_rc.apply(lambda x: x[usgs_elev].min())\
.reset_index(stat_groups, drop = False).rename({0: "y_min"}, axis=1)
# Collect variables for NRMSE
nrmse_table = reduce(lambda x,y: pd.merge(x,y, on=stat_groups, how='outer'), [sum_y_diff, n, y_max, y_min])
nrmse_table_group = nrmse_table.groupby(stat_groups)
# Calculate nrmse
nrmse = nrmse_table_group.apply(lambda x: ((x['sum_y_diff'] / x['n']) ** 0.5) / (x['y_max'] - x['y_min']))\
.reset_index(stat_groups, drop = False).rename({0: "nrmse"}, axis=1)
# Calculate Mean Absolute Depth Difference
mean_abs_y_diff = station_rc.apply(lambda x: (abs(x["yhat_minus_y"]).mean()))\
.reset_index(stat_groups, drop = False).rename({0: "mean_abs_y_diff_ft"}, axis=1)
# Calculate Percent Bias
percent_bias = station_rc.apply(lambda x: 100 * (x["yhat_minus_y"].sum() / x[usgs_elev].sum()))\
.reset_index(stat_groups, drop = False).rename({0: "percent_bias"}, axis=1)
rc_stat_table = reduce(lambda x,y: pd.merge(x,y, on=stat_groups, how='outer'), [nrmse, mean_abs_y_diff, percent_bias])
return rc_stat_table
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='generate rating curve plots and tables for FIM and USGS gages')
parser.add_argument('-fim_dir','--fim-dir', help='FIM output dir', required=True,type=str)
parser.add_argument('-output_dir','--output-dir', help='rating curves output folder', required=True,type=str)
parser.add_argument('-gages','--usgs-gages-filename',help='USGS rating curves',required=True,type=str)
parser.add_argument('-flows','--nwm-flow-dir',help='NWM recurrence flows dir',required=True,type=str)
parser.add_argument('-catfim', '--catfim-flows-filename', help='Categorical FIM flows file',required = True,type=str)
parser.add_argument('-j','--number-of-jobs',help='number of workers',required=False,default=1,type=int)
parser.add_argument('-group','--stat-groups',help='column(s) to group stats',required=False,type=str)
args = vars(parser.parse_args())
fim_dir = args['fim_dir']
output_dir = args['output_dir']
usgs_gages_filename = args['usgs_gages_filename']
nwm_flow_dir = args['nwm_flow_dir']
catfim_flows_filename = args['catfim_flows_filename']
number_of_jobs = args['number_of_jobs']
stat_groups = args['stat_groups']
stat_groups = stat_groups.split()
procs_list = []
plots_dir = join(output_dir,'plots')
os.makedirs(plots_dir, exist_ok=True)
tables_dir = join(output_dir,'tables')
os.makedirs(tables_dir, exist_ok=True)
#Check age of gages csv and recommend updating if older than 30 days.
print(check_file_age(usgs_gages_filename))
# Open log file
sys.__stdout__ = sys.stdout
log_file = open(join(output_dir,'rating_curve_comparison.log'),"w")
sys.stdout = log_file
merged_elev_table = []
huc_list = os.listdir(fim_dir)
for huc in huc_list:
if huc != 'logs':
elev_table_filename = join(fim_dir,huc,'usgs_elev_table.csv')
hydrotable_filename = join(fim_dir,huc,'hydroTable.csv')
usgs_recurr_stats_filename = join(tables_dir,f"usgs_interpolated_elevation_stats_{huc}.csv")
nwm_recurr_data_filename = join(tables_dir,f"nwm_recurrence_flow_elevations_{huc}.csv")
rc_comparison_plot_filename = join(plots_dir,f"FIM-USGS_rating_curve_comparison_{huc}.png")
if isfile(elev_table_filename):
procs_list.append([elev_table_filename, hydrotable_filename, usgs_gages_filename, usgs_recurr_stats_filename, nwm_recurr_data_filename, rc_comparison_plot_filename,nwm_flow_dir, catfim_flows_filename, huc])
# Aggregate all of the individual huc elev_tables into one aggregate for accessing all data in one csv
read_elev_table = pd.read_csv(elev_table_filename)
read_elev_table['huc'] = huc
merged_elev_table.append(read_elev_table)
# Output a concatenated elev_table to_csv
if merged_elev_table:
print(f"Creating aggregate elev table csv")
concat_elev_table = pd.concat(merged_elev_table)
concat_elev_table['thal_burn_depth_meters'] = concat_elev_table['dem_elevation'] - concat_elev_table['dem_adj_elevation']
concat_elev_table.to_csv(join(output_dir,'agg_usgs_elev_table.csv'),index=False)
# Initiate multiprocessing
print(f"Generating rating curve metrics for {len(procs_list)} hucs using {number_of_jobs} jobs")
with Pool(processes=number_of_jobs) as pool:
pool.map(generate_rating_curve_metrics, procs_list)
print(f"Aggregating rating curve metrics for {len(procs_list)} hucs")
aggregate_metrics(output_dir,procs_list,stat_groups)
print('Delete intermediate tables')
shutil.rmtree(tables_dir, ignore_errors=True)
# Close log file
sys.stdout = sys.__stdout__
log_file.close()
| 44.854945 | 222 | 0.682395 |
7556de256bbca57e64a7b2c8dbdca009598f50a9 | 2,137 | py | Python | tests/tools/test_histogram2d.py | dgorelik/differential-privacy-library | 5a7a267c591320036615a52dfad1918dc3718e62 | [
"MIT"
] | 1 | 2020-05-03T06:06:44.000Z | 2020-05-03T06:06:44.000Z | tests/tools/test_histogram2d.py | dohmatob/differential-privacy-library | 1a17bf0e3bf7d18d5c19258abbf81c27fd9a5e16 | [
"MIT"
] | null | null | null | tests/tools/test_histogram2d.py | dohmatob/differential-privacy-library | 1a17bf0e3bf7d18d5c19258abbf81c27fd9a5e16 | [
"MIT"
] | 1 | 2022-02-23T13:56:19.000Z | 2022-02-23T13:56:19.000Z | import numpy as np
from unittest import TestCase
from diffprivlib.tools.histograms import histogram2d
from diffprivlib.utils import global_seed, PrivacyLeakWarning
class TestHistogram2d(TestCase):
def test_no_params(self):
x = np.array([1, 2, 3, 4, 5])
y = np.array([5, 7, 1, 5, 9])
with self.assertWarns(PrivacyLeakWarning):
res = histogram2d(x, y)
self.assertIsNotNone(res)
def test_no_range(self):
x = np.array([1, 2, 3, 4, 5])
y = np.array([5, 7, 1, 5, 9])
with self.assertWarns(PrivacyLeakWarning):
res = histogram2d(x, y, epsilon=1)
self.assertIsNotNone(res)
def test_missing_range(self):
x = np.array([1, 2, 3, 4, 5])
y = np.array([5, 7, 1, 5, 9])
with self.assertWarns(PrivacyLeakWarning):
res = histogram2d(x, y, epsilon=1, range=[(0, 10), None])
self.assertIsNotNone(res)
def test_same_edges(self):
x = np.array([1, 2, 3, 4, 5])
y = np.array([5, 7, 1, 5, 9])
_, edges_x, edges_y = np.histogram2d(x, y, bins=3, range=[(0, 10), (0, 10)])
_, dp_edges_x, dp_edges_y = histogram2d(x, y, epsilon=1, bins=3, range=[(0, 10), (0, 10)])
self.assertTrue((edges_x == dp_edges_x).all())
self.assertTrue((edges_y == dp_edges_y).all())
def test_different_result(self):
global_seed(3141592653)
x = np.array([1, 2, 3, 4, 5])
y = np.array([5, 7, 1, 5, 9])
hist, _, _ = np.histogram2d(x, y, bins=3, range=[(0, 10), (0, 10)])
dp_hist, _, _ = histogram2d(x, y, epsilon=0.1, bins=3, range=[(0, 10), (0, 10)])
# print("Non-private histogram: %s" % hist)
# print("Private histogram: %s" % dp_hist)
self.assertTrue((hist != dp_hist).any())
def test_density(self):
global_seed(3141592653)
x = np.array([1, 2, 3, 4, 5])
y = np.array([5, 7, 1, 5, 9])
dp_hist, _, _ = histogram2d(x, y, epsilon=1, bins=3, range=[(0, 10), (0, 10)], density=True)
# print(dp_hist.sum())
self.assertAlmostEqual(dp_hist.sum(), 1.0 * (3 / 10) ** 2)
| 35.616667 | 100 | 0.565278 |
7ef5fdda7e9666f209c582eb4bb164701bcdb17f | 517 | py | Python | aprendizado/curso_em_video/desafios/desafio096.py | renatodev95/Python | 2adee4a01de41f8bbb68fce563100c135a5ab549 | [
"MIT"
] | null | null | null | aprendizado/curso_em_video/desafios/desafio096.py | renatodev95/Python | 2adee4a01de41f8bbb68fce563100c135a5ab549 | [
"MIT"
] | null | null | null | aprendizado/curso_em_video/desafios/desafio096.py | renatodev95/Python | 2adee4a01de41f8bbb68fce563100c135a5ab549 | [
"MIT"
] | null | null | null | # Faça um programa que tenha uma função chamada área(), que receba as dimensões
# de um terreno retangular (largura e comprimento) e mostre a área do terreno.
def titulo(txt):
print('-' * 30)
print(f'{txt:^30}')
print('-' * 30, '')
def area(larg, comp):
a = larg * comp
print(f'A área de um terreno {larg}x{comp} é de {a}m².\n')
titulo('CONTROLE DE TERRENO')
largura = float(input('Largura (m): '))
comprimento = float(input('Comprimento (m): '))
area(largura, comprimento)
| 27.210526 | 80 | 0.628627 |
244c6610c050c5f1cc4d8ba8cb574a3eb2d92b2c | 222 | py | Python | users/urls.py | Joaxin/django-welogs | 260a72322cdc5591ecd3ceae1dc99a66da333d2b | [
"MIT"
] | null | null | null | users/urls.py | Joaxin/django-welogs | 260a72322cdc5591ecd3ceae1dc99a66da333d2b | [
"MIT"
] | null | null | null | users/urls.py | Joaxin/django-welogs | 260a72322cdc5591ecd3ceae1dc99a66da333d2b | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
app_name = "users"
urlpatterns = [
path('profile/', views.profile, name='profile'),
path('profile/update/', views.profile_update, name='profile_update'),
] | 24.666667 | 74 | 0.68018 |
6664c3b026665568036c77684726a8d59a1da442 | 232 | py | Python | sams-roku-interface/colors.py | sam-maryland/sams-roku-interface | 5a11588a2054ea46a16851b95ed2c04e3219898f | [
"MIT"
] | 1 | 2019-12-09T20:06:24.000Z | 2019-12-09T20:06:24.000Z | sams-roku-interface/colors.py | sammaryland/sams-roku-interface | 5a11588a2054ea46a16851b95ed2c04e3219898f | [
"MIT"
] | 2 | 2021-03-31T19:19:23.000Z | 2021-06-02T00:45:17.000Z | sams-roku-interface/colors.py | sammaryland/sams-roku-interface | 5a11588a2054ea46a16851b95ed2c04e3219898f | [
"MIT"
] | null | null | null | class Colors:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m' | 21.090909 | 24 | 0.517241 |
83ab6adc271ccd255f0ef30cf97e8c2297197793 | 137 | py | Python | py_tdlib/constructors/delete_chat_reply_markup.py | Mr-TelegramBot/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | [
"MIT"
] | 24 | 2018-10-05T13:04:30.000Z | 2020-05-12T08:45:34.000Z | py_tdlib/constructors/delete_chat_reply_markup.py | MrMahdi313/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | [
"MIT"
] | 3 | 2019-06-26T07:20:20.000Z | 2021-05-24T13:06:56.000Z | py_tdlib/constructors/delete_chat_reply_markup.py | MrMahdi313/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | [
"MIT"
] | 5 | 2018-10-05T14:29:28.000Z | 2020-08-11T15:04:10.000Z | from ..factory import Method
class deleteChatReplyMarkup(Method):
chat_id = None # type: "int53"
message_id = None # type: "int53"
| 19.571429 | 36 | 0.715328 |
ac3f5e817e7f7abe120883218f6f78e5d6cf39ad | 737 | py | Python | auth/admin.py | Junhua9981/WebProjectFinal | 8db619b4196fa3bc684202ddb24a725c15e06d78 | [
"MIT"
] | 120 | 2020-09-04T23:07:58.000Z | 2022-03-22T03:00:39.000Z | auth/admin.py | Junhua9981/WebProjectFinal | 8db619b4196fa3bc684202ddb24a725c15e06d78 | [
"MIT"
] | 10 | 2016-03-25T09:28:36.000Z | 2021-07-26T15:04:41.000Z | auth/admin.py | Junhua9981/WebProjectFinal | 8db619b4196fa3bc684202ddb24a725c15e06d78 | [
"MIT"
] | 38 | 2020-09-16T18:47:09.000Z | 2022-03-25T07:52:57.000Z | from fastapi import HTTPException, Depends, status
from fastapi.security import HTTPBasicCredentials, HTTPBasic
from passlib.context import CryptContext
from database.database import admin_collection
security = HTTPBasic()
hash_helper = CryptContext(schemes=["bcrypt"])
async def validate_login(credentials: HTTPBasicCredentials = Depends(security)):
admin = admin_collection.find_one({"email": credentials.username})
if admin:
password = hash_helper.verify(credentials.password, admin['password'])
if not password:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Incorrect email or password"
)
return True
return False | 36.85 | 80 | 0.723202 |
740c15cf202ff0b94136db22f838060e30832dc6 | 441 | py | Python | demos/Multiscale/uniswap/model/sys_params.py | w-ghub/demos | 6382676fae89bd5a190626612712fcedf17bca6d | [
"MIT"
] | 56 | 2020-07-08T23:23:15.000Z | 2022-03-11T20:43:09.000Z | demos/Multiscale/uniswap/model/sys_params.py | w-ghub/demos | 6382676fae89bd5a190626612712fcedf17bca6d | [
"MIT"
] | 41 | 2020-07-11T23:24:06.000Z | 2022-01-28T13:28:07.000Z | demos/Multiscale/uniswap/model/sys_params.py | w-ghub/demos | 6382676fae89bd5a190626612712fcedf17bca6d | [
"MIT"
] | 39 | 2020-07-15T11:35:04.000Z | 2022-02-01T16:02:51.000Z | import pandas as pd
sys_params = {
'fee_numerator': [997, 997, 997, 997,
995, 995, 995, 995],
'fee_denominator': [1000],
'uniswap_events': [pd.read_pickle('./data/uniswap_events.pickle')],
'fix_cost': [-1], # -1 to deactivate
'retail_precision': [3,3,15,15,
3,3,15,15],
'retail_tolerance': [0.0005, 0.025, 0.0005, 0.025,
0.0005, 0.025, 0.0005, 0.025]
} | 33.923077 | 71 | 0.53288 |
937417375d19864ddf807a520959c783f29cb311 | 2,191 | py | Python | tests/models/test_airplane_model.py | ascii-dev/flight-booking | 3a64951f91d0254402bc5c14e5ef6d1bd2cf372e | [
"MIT"
] | null | null | null | tests/models/test_airplane_model.py | ascii-dev/flight-booking | 3a64951f91d0254402bc5c14e5ef6d1bd2cf372e | [
"MIT"
] | 30 | 2019-05-26T09:39:12.000Z | 2021-06-02T00:16:58.000Z | tests/models/test_airplane_model.py | ascii-dev/flight-booking | 3a64951f91d0254402bc5c14e5ef6d1bd2cf372e | [
"MIT"
] | null | null | null | from api.models.airplane import Airplane
class TestAirplaneModel:
def test_new_airplane_succeeds(self, init_db, new_airplane):
"""
Test that airplane can be created successfully through the
model
:param init_db: initialize the database
:param new_airplane: creates new airplane through the model
:return: assertion
"""
assert new_airplane == new_airplane.save()
def test_get_a_single_airplane_succeeds(self, init_db, new_airplane):
"""
Tests that getting a single airplane from the database
through the model is successful
:param init_db: initialize the database
:param new_airplane: creates new airplane through the model
:return: assertion
"""
new_airplane.save()
assert Airplane.query.get(new_airplane.id) == new_airplane
def test_update_a_airplane_succeeds(self, init_db, new_airplane):
"""
Tests that updating a airplane from the database through
the model is successful
:param init_db: initialize the database
:param new_airplane: creates a new airplane through the model
:return: assertion
"""
new_airplane.save()
new_airplane.update(capacity=321)
assert new_airplane.capacity == 321
def test_delete_a_airplane_succeeds(self, init_db, new_airplane):
"""
Tests that deleting a airplane from the database through the
model is successful
:param init_db: initialize the database
:param new_airplane: creates a new airplane through the model
:return: None
"""
new_airplane.save()
new_airplane.delete()
def test_get_airplane_string_representation(self, new_airplane):
"""
Tests to compute and assert string representation of
a new airplane
:param new_airplane: creates a new airplane through the model
:return: assertion
"""
brand = new_airplane.brand
model = new_airplane.model
capacity = new_airplane.capacity
assert repr(new_airplane) == \
f'<Airplane {brand} {model} {capacity}>'
| 35.918033 | 73 | 0.659516 |
88ec23b4fa04d7e8c9e852e4554762e3afedd2f9 | 289 | py | Python | Rig/Lobby/lobby_room.py | Oulala-Leon/Text-Factory | fbf24221529ccf7a35894090f8595da526c0523d | [
"Apache-2.0"
] | null | null | null | Rig/Lobby/lobby_room.py | Oulala-Leon/Text-Factory | fbf24221529ccf7a35894090f8595da526c0523d | [
"Apache-2.0"
] | null | null | null | Rig/Lobby/lobby_room.py | Oulala-Leon/Text-Factory | fbf24221529ccf7a35894090f8595da526c0523d | [
"Apache-2.0"
] | null | null | null | import tell
import sys
def check_arglen(argv, minimum, maximum=-1):
"Checks arguments length vs expected minimum and maximum."
maximum = minimum if maximum == -1 else maximum
size = len(argv)
return size if size >= minimum and size <= maximum else sys.exit(tell.README())
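# Illustrative call (hypothetical command-line context, not part of the original file):
#   size = check_arglen(sys.argv, 2, 3)  # exits via sys.exit(tell.README()) when the count is out of range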
| 32.111111 | 83 | 0.709343 |
95ea82b753f8dee6f45badf0d3ec324a10f0ed60 | 6,287 | py | Python | tests/tensorflow_cloud/deploy_test.py | gogasca/cloud | 9ad530b64464ba68c65b2cefd12b1e5043486006 | [
"Apache-2.0"
] | null | null | null | tests/tensorflow_cloud/deploy_test.py | gogasca/cloud | 9ad530b64464ba68c65b2cefd12b1e5043486006 | [
"Apache-2.0"
] | null | null | null | tests/tensorflow_cloud/deploy_test.py | gogasca/cloud | 9ad530b64464ba68c65b2cefd12b1e5043486006 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the cloud deploy module."""
import io
import mock
import os
import shutil
import sys
import tarfile
import unittest
from tensorflow_cloud import deploy
from tensorflow_cloud import machine_config
from tensorflow_cloud import package
from mock import call, patch
class TestDeploy(unittest.TestCase):
def setup(self, MockDiscovery):
self.mock_job_id = 'tf-train-abcde'
self.mock_project_name = 'my-gcp-project'
self.entry_point = 'testdata/sample_compile_fit.py'
self.chief_config = machine_config.COMMON_MACHINE_CONFIGS['K80_4X']
self.worker_count = 2
self.worker_config = machine_config.COMMON_MACHINE_CONFIGS['K80_1X']
self.region = 'us-central-a'
self.docker_img = 'custom-image-tag'
self.entry_point_args = ['1000']
self.stream_logs = False
self.expected_request_dict = {
'jobId': self.mock_job_id,
'trainingInput': {
'use_chief_in_tf_config': True,
'scaleTier': 'custom',
'region': self.region,
'args': self.entry_point_args,
'masterType': 'n1-standard-16',
'workerType': 'n1-standard-8',
'workerCount': str(self.worker_count),
'workerConfig': {
'acceleratorConfig': {
'count': '1',
'type': 'NVIDIA_TESLA_K80'
},
'imageUri': self.docker_img,
},
'masterConfig': {
'acceleratorConfig': {
'count': '4',
'type': 'NVIDIA_TESLA_K80'
},
'imageUri': self.docker_img,
}
},
}
        # Verify mocking is correct and set up the method mocks.
assert MockDiscovery is deploy.discovery
def _mock_generate_job_id():
return self.mock_job_id
deploy._generate_job_id = _mock_generate_job_id
def _mock_get_project_name():
return self.mock_project_name
deploy.gcp.get_project_name = _mock_get_project_name
@patch('sys.stdout', new_callable=io.StringIO)
@patch('tensorflow_cloud.deploy.discovery')
def test_deploy_job(self, MockDiscovery, MockStdOut):
self.setup(MockDiscovery)
job_name = deploy.deploy_job(
self.region, self.docker_img, self.chief_config, self.worker_count,
self.worker_config, self.entry_point_args, self.stream_logs)
self.assertEqual(job_name, self.mock_job_id)
# Verify discovery API is invoked as expected.
self.assertEqual(MockDiscovery.build.call_count, 1)
args, _ = MockDiscovery.build.call_args
self.assertListEqual(list(args), ['ml', 'v1'])
# Verify job is created as expected
build_ret_val = MockDiscovery.build.return_value
self.assertEqual(build_ret_val.projects.call_count, 1)
proj_ret_val = build_ret_val.projects.return_value
self.assertEqual(proj_ret_val.jobs.call_count, 1)
jobs_ret_val = proj_ret_val.jobs.return_value
self.assertEqual(jobs_ret_val.create.call_count, 1)
# Verify job creation args
_, kwargs = jobs_ret_val.create.call_args
self.assertDictEqual(kwargs, {
'parent': 'projects/' + self.mock_project_name,
'body': self.expected_request_dict})
# Verify print statement
self.assertEqual(
MockStdOut.getvalue(),
'Job submitted successfully.\nYour job ID is: {}\nPlease access '
'your job logs at the following URL:\nhttps://'
'console.cloud.google.com/mlengine/jobs/{}?project={}\n'.format(
self.mock_job_id,
self.mock_job_id,
self.mock_project_name))
@patch('tensorflow_cloud.deploy.discovery')
def test_request_dict_without_workers(self, MockDiscovery):
self.setup(MockDiscovery)
worker_count = 0
job_name = deploy.deploy_job(
self.region, self.docker_img, self.chief_config, worker_count,
None, self.entry_point_args, self.stream_logs)
build_ret_val = MockDiscovery.build.return_value
proj_ret_val = build_ret_val.projects.return_value
jobs_ret_val = proj_ret_val.jobs.return_value
self.expected_request_dict['trainingInput']['workerCount'] = str(
worker_count)
del self.expected_request_dict['trainingInput']['workerType']
del self.expected_request_dict['trainingInput']['workerConfig']
# Verify job creation args
_, kwargs = jobs_ret_val.create.call_args
self.assertDictEqual(kwargs, {
'parent': 'projects/' + self.mock_project_name,
'body': self.expected_request_dict})
@patch('tensorflow_cloud.deploy.discovery')
def test_request_dict_without_user_args(self, MockDiscovery):
self.setup(MockDiscovery)
job_name = deploy.deploy_job(
self.region, self.docker_img, self.chief_config, self.worker_count,
self.worker_config, None, self.stream_logs)
build_ret_val = MockDiscovery.build.return_value
proj_ret_val = build_ret_val.projects.return_value
jobs_ret_val = proj_ret_val.jobs.return_value
del self.expected_request_dict['trainingInput']['args']
# Verify job creation args
_, kwargs = jobs_ret_val.create.call_args
self.assertDictEqual(kwargs, {
'parent': 'projects/' + self.mock_project_name,
'body': self.expected_request_dict})
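    # Assumed invocation (not part of the original file): these tests are written for
    # the standard unittest runner, e.g.
    #   python -m unittest tests.tensorflow_cloud.deploy_test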
| 38.570552 | 79 | 0.643391 |
c8ae5d88bc7eab1f651138610209ebccb2e82601 | 1,627 | py | Python | setup.py | mgxd/niworkflows | d28857d0be2a63263e4c29af44e84d18fdc44d2f | [
"BSD-3-Clause"
] | null | null | null | setup.py | mgxd/niworkflows | d28857d0be2a63263e4c29af44e84d18fdc44d2f | [
"BSD-3-Clause"
] | 1 | 2020-01-24T02:42:31.000Z | 2020-01-24T02:51:48.000Z | setup.py | mgxd/niworkflows | d28857d0be2a63263e4c29af44e84d18fdc44d2f | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: oesteban
# @Date: 2015-11-19 16:44:27
# @Last Modified by: oesteban
""" niworkflows setup script """
PACKAGE_NAME = 'niworkflows'
def main():
""" Install entry-point """
from os import path as op
from setuptools import setup, find_packages
import runpy
ldict = runpy.run_path(op.join(op.abspath(op.dirname(__file__)),
'niworkflows', 'info.py'))
setup(
name=PACKAGE_NAME,
version=ldict['__version__'],
description=ldict['__description__'],
long_description=ldict['__longdesc__'],
author=ldict['__author__'],
author_email=ldict['__email__'],
maintainer=ldict['__maintainer__'],
maintainer_email=ldict['__email__'],
license=ldict['__license__'],
url=ldict['URL'],
download_url=ldict['DOWNLOAD_URL'],
classifiers=ldict['CLASSIFIERS'],
packages=find_packages(exclude=['*.tests']),
zip_safe=False,
# Dependencies handling
setup_requires=ldict['SETUP_REQUIRES'],
install_requires=list(set(ldict['REQUIRES'])),
dependency_links=ldict['LINKS_REQUIRES'],
tests_require=ldict['TESTS_REQUIRES'],
extras_require=ldict['EXTRA_REQUIRES'],
# Data
package_data={'niworkflows': ['data/t1-mni_registration*.json',
'nipype/pipeline/engine/report_template.html',
'nipype/external/d3.js']},
include_package_data=True,
)
if __name__ == '__main__':
main()
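# Assumed usage (not part of the original file): from a checkout of the repository,
#   pip install .
# runs this script; setup() takes its metadata and dependency lists from
# niworkflows/info.py, which is loaded above with runpy so the package itself
# does not have to be importable at install time.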
| 31.901961 | 84 | 0.601721 |
6ac750591a277fc78ec540ede6df7cb5ea7da784 | 570 | py | Python | src/backend/aspen/database/models/enum.py | chanzuckerberg/czgenepi | 87bd2b1739acdfe2c7c25663fafb01dc24c5e2fd | [
"MIT"
] | null | null | null | src/backend/aspen/database/models/enum.py | chanzuckerberg/czgenepi | 87bd2b1739acdfe2c7c25663fafb01dc24c5e2fd | [
"MIT"
] | 30 | 2022-02-01T23:19:14.000Z | 2022-03-29T19:34:20.000Z | src/backend/aspen/database/models/enum.py | chanzuckerberg/czgenepi | 87bd2b1739acdfe2c7c25663fafb01dc24c5e2fd | [
"MIT"
] | null | null | null | from typing import Type, TYPE_CHECKING, TypeVar
# https://github.com/dropbox/sqlalchemy-stubs/issues/114
# This is the (gross) workaround. Keep an eye on the issue and get rid of it once it's fixed.
if TYPE_CHECKING:
from sqlalchemy.sql.type_api import TypeEngine
T = TypeVar("T")
class Enum(TypeEngine[T]):
def __init__(self, enum: Type[T]) -> None:
...
else:
from enumtables import EnumType as Enum # noqa: F401
Enum.cache_ok = (
True # SqlAlchemy 1.4 requires us to set a cache_ok flag on type decorators.
)
| 28.5 | 93 | 0.673684 |
578f3e758cd5132b1a535cbc56a46c32104a2818 | 56 | py | Python | 04 Data Structure/list.py | diaamshalabi/the-ultimate-python-programming-bootcamp | f19170640217684a218d862fb4108053dabab8b3 | [
"MIT"
] | 2 | 2022-02-09T08:09:58.000Z | 2022-02-10T14:16:10.000Z | 04 Data Structure/list.py | diaa-shalabi/the-ultimate-python-programming-bootcamp | f19170640217684a218d862fb4108053dabab8b3 | [
"MIT"
] | null | null | null | 04 Data Structure/list.py | diaa-shalabi/the-ultimate-python-programming-bootcamp | f19170640217684a218d862fb4108053dabab8b3 | [
"MIT"
] | null | null | null | my_list= [3, 4, 6, 2]
my_list1 = list(("Hello World"))
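# Note (added for clarity): list() iterates over its argument, so the call above
# produces a list of single characters:
#   ['H', 'e', 'l', 'l', 'o', ' ', 'W', 'o', 'r', 'l', 'd']
# To keep the whole string as one element, write ["Hello World"] instead.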
| 14 | 32 | 0.589286 |
f47037b7d3e959e51b907bfb5f90f7a35dcdad2b | 5,287 | py | Python | lib/modules/powershell/situational_awareness/network/smbautobrute.py | kumardineshwar/Empire | 8b8741242e929897f2759698b780853b77b2a81e | [
"BSD-3-Clause"
] | 3 | 2019-08-26T02:39:03.000Z | 2021-03-30T00:04:44.000Z | lib/modules/powershell/situational_awareness/network/smbautobrute.py | kumardineshwar/Empire | 8b8741242e929897f2759698b780853b77b2a81e | [
"BSD-3-Clause"
] | null | null | null | lib/modules/powershell/situational_awareness/network/smbautobrute.py | kumardineshwar/Empire | 8b8741242e929897f2759698b780853b77b2a81e | [
"BSD-3-Clause"
] | 8 | 2017-06-09T12:54:46.000Z | 2021-11-09T06:44:09.000Z | from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
# metadata info about the module, not modified during runtime
self.info = {
# name for the module that will appear in module menus
'Name': 'Invoke-SMBAutoBrute',
# list of one or more authors for the module
'Author': ['@curi0usJack'],
# more verbose multi-line description of the module
'Description': ('Runs an SMB brute against a list of usernames/passwords. '
'Will check the DCs to interrogate the bad password count of the '
'users and will keep bruting until either a valid credential is '
                            'discovered or the bad password count reaches one below the threshold. '
'Run "shell net accounts" on a valid agent to determine the lockout '
'threshold. VERY noisy! Generates a ton of traffic on the DCs.' ),
# True if the module needs to run in the background
'Background' : True,
# File extension to save the file as
'OutputExtension' : None,
# True if the module needs admin rights to run
'NeedsAdmin' : False,
# True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe' : False,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
# list of any references/other comments
'Comments': [
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Agent to run smbautobrute from.',
'Required' : True,
'Value' : ''
},
'UserList' : {
'Description' : 'File of users to brute (on the target), one per line. If not specified, autobrute will query a list of users with badpwdcount < LockoutThreshold - 1 for each password brute. Wrap path in double quotes.',
'Required' : False,
'Value' : ''
},
'PasswordList' : {
'Description' : 'Comma separated list of passwords to test. Wrap in double quotes.',
'Required' : True,
'Value' : ''
},
'ShowVerbose' : {
'Description' : 'Show failed attempts & skipped accounts in addition to success.',
'Required' : False,
'Value' : ''
},
'LockoutThreshold' : {
'Description' : 'The max number of bad password attempts until the account locks. Autobrute will try till one less than this setting.',
'Required' : True,
'Value' : ''
},
'Delay' : {
'Description' : 'Amount of time to wait (in milliseconds) between attempts. Default 100.',
'Required' : False,
'Value' : ''
},
'StopOnSuccess' : {
'Description' : 'Quit running after the first successful authentication.',
'Required' : False,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
# During instantiation, any settable option parameters
# are passed as an object set to the module and the
# options dictionary is automatically set. This is mostly
# in case options are passed on the command line
if params:
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
# use the pattern below
# read in the common module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/situational_awareness/network/Invoke-SMBAutoBrute.ps1"
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
script = moduleCode
scriptcmd = "Invoke-SMBAutoBrute"
# add any arguments to the end execution of the script
for option,values in self.options.iteritems():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
scriptcmd += " -" + str(option)
else:
scriptcmd += " -" + str(option) + " " + str(values['Value'])
script += scriptcmd
#print helpers.color(scriptcmd)
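        # Illustrative example (option values assumed): with PasswordList set to
        # Winter2016,Summer2016 and LockoutThreshold set to 5, scriptcmd ends up as
        #   Invoke-SMBAutoBrute -PasswordList Winter2016,Summer2016 -LockoutThreshold 5
        # and is appended to the Invoke-SMBAutoBrute.ps1 source read above.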
return script
| 40.05303 | 240 | 0.529412 |
03c922d7d4b6867279152a4b718382aaacbde67a | 1,392 | py | Python | waldur_core/structure/tests/serializers.py | opennode/nodeconductor | d6c17a9592bb6c49c33567542eef8d099605a46a | [
"MIT"
] | 23 | 2015-01-15T13:29:53.000Z | 2017-05-04T05:12:24.000Z | waldur_core/structure/tests/serializers.py | opennode/nodeconductor | d6c17a9592bb6c49c33567542eef8d099605a46a | [
"MIT"
] | null | null | null | waldur_core/structure/tests/serializers.py | opennode/nodeconductor | d6c17a9592bb6c49c33567542eef8d099605a46a | [
"MIT"
] | 8 | 2015-01-11T18:51:47.000Z | 2017-06-29T18:53:12.000Z | from rest_framework import serializers
from waldur_core.structure import serializers as structure_serializers
from . import models
class ServiceSerializer(structure_serializers.BaseServiceSerializer):
SERVICE_ACCOUNT_EXTRA_FIELDS = {
'tenant_name': '',
'availability_zone': '',
}
class Meta(structure_serializers.BaseServiceSerializer.Meta):
model = models.TestService
required_fields = 'backend_url', 'username', 'password'
class ServiceProjectLinkSerializer(structure_serializers.BaseServiceProjectLinkSerializer):
class Meta(structure_serializers.BaseServiceProjectLinkSerializer.Meta):
model = models.TestServiceProjectLink
extra_kwargs = {
'service': {'lookup_field': 'uuid', 'view_name': 'test-detail'},
}
class NewInstanceSerializer(structure_serializers.VirtualMachineSerializer):
service = serializers.HyperlinkedRelatedField(
source='service_project_link.service',
view_name='test-detail',
read_only=True,
lookup_field='uuid')
service_project_link = serializers.HyperlinkedRelatedField(
view_name='test-spl-detail',
queryset=models.TestServiceProjectLink.objects.all(),
allow_null=True,
required=False,
)
class Meta(structure_serializers.BaseResourceSerializer.Meta):
model = models.TestNewInstance
| 32.372093 | 91 | 0.730603 |