Dataset schema (column name, type, value summary):

  repository_name             string    (7 to 54 chars)
  func_path_in_repository     string    (4 to 175 chars)
  func_name                   string    (1 to 129 chars)
  whole_func_string           string    (91 to 50.9k chars)
  language                    string    (1 distinct value)
  func_code_string            string    (91 to 50.9k chars)
  func_code_tokens            sequence
  func_documentation_string   string    (1 to 31.6k chars)
  func_documentation_tokens   sequence
  split_name                  string    (1 distinct value)
  func_code_url               string    (89 to 268 chars)
  score                       float64   (0 to 0.09)
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/util.py
_get_task_target
def _get_task_target():
  """Get the default target for a pipeline task.

  Current version id format is: user_defined_version.minor_version_number
  Current module id is just the module's name. It could be "default".

  Returns:
    A complete target name of the format version.module. If module is the
    default module, just version. None if the target cannot be determined.
  """
  # Break circular dependency.
  # pylint: disable=g-import-not-at-top
  import pipeline

  if pipeline._TEST_MODE:
    return None

  # Further protect against test cases that don't set env vars properly.
  if ("CURRENT_VERSION_ID" not in os.environ
      or "CURRENT_MODULE_ID" not in os.environ):
    logging.warning("Running Pipeline in non TEST_MODE but important "
                    "env vars are not set.")
    return None

  version = os.environ["CURRENT_VERSION_ID"].split(".")[0]
  module = os.environ["CURRENT_MODULE_ID"]
  return "%s.%s" % (version, module)
python
train
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/util.py#L40-L66
0.013613
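A minimal sketch of the version/module derivation above; the environment values are hypothetical stand-ins for what App Engine would set:

import os

os.environ["CURRENT_VERSION_ID"] = "v2.387204034288959608"  # made-up value
os.environ["CURRENT_MODULE_ID"] = "default"                 # made-up value

version = os.environ["CURRENT_VERSION_ID"].split(".")[0]
module = os.environ["CURRENT_MODULE_ID"]
print("%s.%s" % (version, module))  # v2.default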
aws/sagemaker-python-sdk
src/sagemaker/estimator.py
_TrainingJob.start_new
def start_new(cls, estimator, inputs):
    """Create a new Amazon SageMaker training job from the estimator.

    Args:
        estimator (sagemaker.estimator.EstimatorBase): Estimator object
            created by the user.
        inputs (str): Parameters used when called
            :meth:`~sagemaker.estimator.EstimatorBase.fit`.

    Returns:
        sagemaker.estimator._TrainingJob: Constructed object that captures
        all information about the started training job.
    """
    local_mode = estimator.sagemaker_session.local_mode
    model_uri = estimator.model_uri

    # Allow file:// input only in local mode
    if cls._is_local_channel(inputs) or cls._is_local_channel(model_uri):
        if not local_mode:
            raise ValueError('File URIs are supported in local mode only. '
                             'Please use an S3 URI instead.')

    config = _Job._load_config(inputs, estimator)

    # Default to None so the key below is always bound, even when the
    # estimator defines no hyperparameters (the original left it unbound).
    hyperparameters = None
    if estimator.hyperparameters() is not None:
        hyperparameters = {str(k): str(v) for (k, v) in estimator.hyperparameters().items()}

    train_args = config.copy()
    train_args['input_mode'] = estimator.input_mode
    train_args['job_name'] = estimator._current_job_name
    train_args['hyperparameters'] = hyperparameters
    train_args['tags'] = estimator.tags
    train_args['metric_definitions'] = estimator.metric_definitions

    if estimator.enable_network_isolation():
        train_args['enable_network_isolation'] = True

    if estimator.encrypt_inter_container_traffic:
        train_args['encrypt_inter_container_traffic'] = True

    if isinstance(estimator, sagemaker.algorithm.AlgorithmEstimator):
        train_args['algorithm_arn'] = estimator.algorithm_arn
    else:
        train_args['image'] = estimator.train_image()

    estimator.sagemaker_session.train(**train_args)

    return cls(estimator.sagemaker_session, estimator._current_job_name)
python
train
https://github.com/aws/sagemaker-python-sdk/blob/a9e724c7d3f5572b68c3903548c792a59d99799a/src/sagemaker/estimator.py#L540-L585
0.003551
python-xlib/python-xlib
Xlib/display.py
Display.no_operation
def no_operation(self, onerror = None):
    """Do nothing but send a request to the server."""
    request.NoOperation(display = self.display,
                        onerror = onerror)
python
train
https://github.com/python-xlib/python-xlib/blob/8901e831737e79fe5645f48089d70e1d1046d2f2/Xlib/display.py#L947-L950
0.040609
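A hedged usage sketch for the method above (assumes python-xlib is installed and an X server is reachable; not part of the record itself):

from Xlib import display

d = display.Display()  # connect to the default X display
d.no_operation()       # queue the NoOperation request documented above
d.sync()               # flush and wait for the server to process it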
PrefPy/prefpy
prefpy/stats.py
mse
def mse(mean, estimator):
    """
    Description:
        Calculates the Mean Squared Error (MSE) of an estimation on flat numpy ndarrays.
    Parameters:
        mean: actual value (numpy ndarray)
        estimator: estimated value of the mean (numpy ndarray)
    """
    return np.mean((np.asarray(estimator) - np.asarray(mean)) ** 2, axis=0)
python
train
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/stats.py#L22-L31
0.002725
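A minimal, self-contained check of the formula; the input vectors are made up:

import numpy as np

def mse(mean, estimator):
    return np.mean((np.asarray(estimator) - np.asarray(mean)) ** 2, axis=0)

# ((0.1)**2 + (-0.1)**2 + (0.2)**2) / 3 = 0.02
print(mse([1.0, 2.0, 3.0], [1.1, 1.9, 3.2]))  # ~0.02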
bukun/TorCMS
torcms/model/label_model.py
MLabel.get_id_by_name
def get_id_by_name(tag_name, kind='z'):
    '''
    Get the ID of a label by its tag_name.
    '''
    recs = TabTag.select().where(
        (TabTag.name == tag_name) & (TabTag.kind == kind)
    )
    logger.info('tag count of {0}: {1} '.format(tag_name, recs.count()))
    # the_id = ''
    if recs.count() == 1:
        the_id = recs.get().uid
    elif recs.count() > 1:
        rec0 = None
        for rec in recs:
            # Only keep one.
            if rec0:
                TabPost2Tag.delete().where(TabPost2Tag.tag_id == rec.uid).execute()
                TabTag.delete().where(TabTag.uid == rec.uid).execute()
            else:
                rec0 = rec
        the_id = rec0.uid
    else:
        the_id = MLabel.create_tag(tag_name)
    return the_id
python
train
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/model/label_model.py#L22-L46
0.003517
JarryShaw/PyPCAPKit
src/protocols/application/httpv2.py
HTTPv2.read_http
def read_http(self, length):
    """Read Hypertext Transfer Protocol (HTTP/2).

    Structure of HTTP/2 packet [RFC 7540]:

        +-----------------------------------------------+
        |                 Length (24)                   |
        +---------------+---------------+---------------+
        |   Type (8)    |   Flags (8)   |
        +-+-------------+---------------+-------------------------------+
        |R|                 Stream Identifier (31)                      |
        +=+=============================================================+
        |                   Frame Payload (0...)                      ...
        +---------------------------------------------------------------+

        Octets      Bits        Name            Description
          0           0     http.length     Length
          3          24     http.type       Type
          4          32     http.flags      Flags
          5          40     -               Reserved
          5          41     http.sid        Stream Identifier
          9          72     http.payload    Frame Payload
    """
    if length is None:
        length = len(self)

    if length < 9:
        raise ProtocolError('HTTP/2: invalid format', quiet=True)

    _tlen = self._read_unpack(3)
    _type = self._read_unpack(1)
    _flag = self._read_binary(1)
    _rsid = self._read_binary(4)

    if _tlen != length:
        raise ProtocolError(f'HTTP/2: [Type {_type}] invalid format', quiet=True)

    if int(_rsid[0], base=2):
        raise ProtocolError(f'HTTP/2: [Type {_type}] invalid format', quiet=True)

    http = dict(
        length=_tlen,
        type=_HTTP_TYPE.get(_type),
        sid=int(_rsid[1:], base=2),
        packet=self._read_packet(_tlen),
    )

    if http['type'] is None:
        raise ProtocolError(f'HTTP/2: [Type {_type}] invalid format', quiet=True)

    if http['type'] in ('SETTINGS', 'PING') and http['sid'] != 0:
        raise ProtocolError(f'HTTP/2: [Type {_type}] invalid format', quiet=True)

    _http = _HTTP_FUNC[_type](self, _tlen, _type, _flag)
    http.update(_http)

    return http
python
train
https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/protocols/application/httpv2.py#L99-L155
0.002563
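The 9-byte frame header can also be unpacked without PyPCAPKit's reader helpers; a standalone sketch (the PING-frame bytes are made up):

frame = bytes.fromhex("000008" "06" "00" "00000000") + b"\x00" * 8  # PING frame
length = int.from_bytes(frame[0:3], "big")                   # 24-bit length
ftype, flags = frame[3], frame[4]                            # type and flags octets
stream_id = int.from_bytes(frame[5:9], "big") & 0x7FFFFFFF   # mask off the R bit
print(length, ftype, flags, stream_id)                       # 8 6 0 0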
datosgobar/pydatajson
pydatajson/validation.py
_catalog_validation_to_list
def _catalog_validation_to_list(response):
    """Formats a catalog validation as two lists of errors: one for
    "catalog" and one for "dataset".
    """
    # build a list of dicts to be dumped into a table (catalog)
    rows_catalog = []
    validation_result = {
        "catalog_title": response["error"]["catalog"]["title"],
        "catalog_status": response["error"]["catalog"]["status"],
    }
    for error in response["error"]["catalog"]["errors"]:
        catalog_result = dict(validation_result)
        catalog_result.update({
            "catalog_error_message": error["message"],
            "catalog_error_location": ", ".join(error["path"]),
        })
        rows_catalog.append(catalog_result)
    if len(response["error"]["catalog"]["errors"]) == 0:
        catalog_result = dict(validation_result)
        catalog_result.update({
            "catalog_error_message": None,
            "catalog_error_location": None
        })
        rows_catalog.append(catalog_result)

    # build a list of dicts to be dumped into a table (dataset)
    rows_dataset = []
    for dataset in response["error"]["dataset"]:
        validation_result = {
            "dataset_title": dataset["title"],
            "dataset_identifier": dataset["identifier"],
            "dataset_list_index": dataset["list_index"],
            "dataset_status": dataset["status"]
        }
        for error in dataset["errors"]:
            dataset_result = dict(validation_result)
            dataset_result.update({
                "dataset_error_message": error["message"],
                "dataset_error_location": error["path"][-1]
            })
            rows_dataset.append(dataset_result)
        if len(dataset["errors"]) == 0:
            dataset_result = dict(validation_result)
            dataset_result.update({
                "dataset_error_message": None,
                "dataset_error_location": None
            })
            rows_dataset.append(dataset_result)

    return {"catalog": rows_catalog, "dataset": rows_dataset}
python
train
https://github.com/datosgobar/pydatajson/blob/3141082ffbaa295e2deaf6ffbbc5a59f5859960e/pydatajson/validation.py#L366-L419
0.000481
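A sketch of the `response` shape the function expects (all values made up; assumes the function above is in scope):

response = {
    "error": {
        "catalog": {
            "title": "Example catalog",
            "status": "ERROR",
            "errors": [{"message": "'dataset' is a required property", "path": []}],
        },
        "dataset": [],
    }
}
rows = _catalog_validation_to_list(response)
print(rows["catalog"][0]["catalog_error_message"])  # 'dataset' is a required property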
SKA-ScienceDataProcessor/integration-prototype
sip/tango_control/tango_processing_block/app/processing_block_device.py
ProcessingBlockDevice.pb_id
def pb_id(self, pb_id: str):
    """Set the PB Id for this device."""
    # FIXME(BMo) instead of creating the object to check if the PB exists
    #            use a method on PB List?
    # ProcessingBlock(pb_id)
    self.set_state(DevState.ON)
    self._pb_id = pb_id
python
train
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/tango_control/tango_processing_block/app/processing_block_device.py#L48-L54
0.006803
Yelp/kafka-utils
kafka_utils/util/__init__.py
format_to_json
def format_to_json(data):
    """Converts `data` into json

    If stdout is a tty it performs a pretty print.
    """
    if sys.stdout.isatty():
        return json.dumps(data, indent=4, separators=(',', ': '))
    else:
        return json.dumps(data)
python
train
https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/__init__.py#L141-L148
0.003953
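A self-contained sketch of the tty switch; the sample payload is made up, and the output format depends on whether stdout is attached to a terminal:

import json
import sys

def format_to_json(data):
    # Pretty-print on a terminal; compact single-line JSON when piped.
    if sys.stdout.isatty():
        return json.dumps(data, indent=4, separators=(',', ': '))
    return json.dumps(data)

print(format_to_json({"cluster": "test", "brokers": 3}))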
Metatab/metatab
metatab/terms.py
SectionTerm.add_term
def add_term(self, t):
    """Add a term to this section and set its ownership.

    Should only be used on root-level terms.
    """
    if t not in self.terms:
        if t.parent_term_lc == 'root':
            self.terms.append(t)
            self.doc.add_term(t, add_section=False)
            t.set_ownership()
        else:
            raise GenerateError("Can only add or move root-level terms. Term '{}' parent is '{}' "
                                .format(t, t.parent_term_lc))

    assert t.section or t.join_lc == 'root.root', t
python
train
https://github.com/Metatab/metatab/blob/8336ec3e4bd8da84a9a5cb86de1c1086e14b8b22/metatab/terms.py#L744-L758
0.006944
rigetti/quantumflow
tools/benchmark.py
benchmark_gops
def benchmark_gops(N, gates, reps):
    """Return benchmark performance in GOPS (Gate operations per second)"""
    t = timeit.timeit(lambda: benchmark(N, gates), number=reps)
    # GATES and REPS are module-level constants, not the arguments above.
    gops = (GATES*REPS)/t
    gops = int((gops * 100) + 0.5) / 100.0  # round to 2 decimal places
    return gops
python
train
https://github.com/rigetti/quantumflow/blob/13a66cabbe8aabf6e023cc675f4a4ebe6ccda8fb/tools/benchmark.py#L48-L53
0.003846
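A runnable sketch of the timing arithmetic, with a toy workload standing in for the quantum-gate benchmark (GATES, REPS and the workload are made up):

import timeit

GATES, REPS = 1000, 10  # stand-ins for the module-level constants

def benchmark(N, gates):
    acc = 0
    for _ in range(gates):  # 'gates' trivial operations
        acc += N
    return acc

t = timeit.timeit(lambda: benchmark(8, GATES), number=REPS)
gops = (GATES * REPS) / t
print(int(gops * 100 + 0.5) / 100.0)  # operations per second, 2 decimals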
ehansis/ozelot
ozelot/orm/base.py
render_diagram
def render_diagram(out_base):
    """Render a data model diagram

    Included in the diagram are all classes from the model registry.
    For your project, write a small script that imports all models that you
    would like to have included and then calls this function.

    .. note:: This function requires the 'dot' executable from the GraphViz
              package to be installed and its location configured in your
              `project_config.py` variable :attr:`DOT_EXECUTABLE`.

    Args:
        out_base (str): output base path (file endings will be appended)
    """
    import codecs
    import subprocess
    import sadisplay

    # generate class descriptions
    desc = sadisplay.describe(list(model_registry.values()),
                              show_methods=False,
                              show_properties=True,
                              show_indexes=True,
                              )

    # write description in DOT format
    with codecs.open(out_base + '.dot', 'w', encoding='utf-8') as f:
        f.write(sadisplay.dot(desc))

    # check existence of DOT_EXECUTABLE variable and file
    if not hasattr(config, 'DOT_EXECUTABLE'):
        raise RuntimeError("Please configure the 'DOT_EXECUTABLE' variable in your 'project_config.py'")

    if not os.path.exists(config.DOT_EXECUTABLE):
        raise IOError("Could not find file pointed to by 'DOT_EXECUTABLE': " + str(config.DOT_EXECUTABLE))

    # render to image using DOT
    # noinspection PyUnresolvedReferences
    subprocess.check_call([
        config.DOT_EXECUTABLE,
        '-T', 'png',
        '-o', out_base + '.png',
        out_base + '.dot'
    ])
python
train
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/ozelot/orm/base.py#L187-L228
0.003643
Genida/dependenpy
src/dependenpy/node.py
LeafNode.root
def root(self):
    """
    Property to return the root of this node.

    Returns:
        Package: this node's root package.
    """
    node = self
    while node.package is not None:
        node = node.package
    return node
python
train
https://github.com/Genida/dependenpy/blob/df099c17cbe735c990eca9197e39cfc5eb8a4c8e/src/dependenpy/node.py#L355-L365
0.007519
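The parent-walk is easy to verify with a stand-in node class (all names made up):

class Node:
    def __init__(self, package=None):
        self.package = package  # enclosing package, or None at the root

    @property
    def root(self):
        node = self
        while node.package is not None:
            node = node.package
        return node

top = Node()
leaf = Node(package=Node(package=top))
print(leaf.root is top)  # True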
esafak/mca
src/mca.py
MCA.cont_c
def cont_c(self, percent=0.9, N=None):  # bug? check axis number 0 vs 1 here
    """Return the contribution of each column."""
    if not hasattr(self, 'G'):
        self.fs_c(N=self.rank)  # generate G
    return apply_along_axis(lambda _: _/self.L[:N], 1,
                            apply_along_axis(lambda _: _*self.c, 0, self.G[:, :N]**2))
python
train
https://github.com/esafak/mca/blob/f2b79ecbf37629902ccdbad2e1a556977c53d370/src/mca.py#L177-L183
0.028846
PmagPy/PmagPy
programs/demag_gui.py
Demag_GUI.get_DIR
def get_DIR(self):
    """
    Dialog that allows user to choose a working directory
    """
    dlg = wx.DirDialog(self, "Choose a directory:", defaultPath=self.currentDirectory,
                       style=wx.DD_DEFAULT_STYLE | wx.DD_NEW_DIR_BUTTON | wx.DD_CHANGE_DIR)
    ok = self.show_dlg(dlg)
    if ok == wx.ID_OK:
        new_WD = dlg.GetPath()
        dlg.Destroy()
    else:
        new_WD = os.getcwd()
        dlg.Destroy()
    return new_WD
python
train
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/demag_gui.py#L5093-L5106
0.007905
jmvrbanac/lplight
lplight/client.py
LaunchpadClient.get_project
def get_project(self, name):
    """ Retrieves project information by name

    :param name: The formal project name in string form.
    """
    uri = '{base}/{project}'.format(base=self.BASE_URI, project=name)
    resp = self._client.get(uri, model=models.Project)

    return resp
python
train
https://github.com/jmvrbanac/lplight/blob/4d58b45e49ad9ba9e95f8c106d5c49e1658a69a7/lplight/client.py#L57-L65
0.006557
rvswift/EB
EB/builder/utilities/classification.py
make_score_structure
def make_score_structure(molecules, ensemble):
    """
    puts data in the score_structure format for subsequent processing
    :param molecules: list [mol_object_1, mol_object_2, .... ]
        mol_objects are instances of common_tools.molecules
    :return score_structure: list
        [(id, best_score, best_query, status, net decoy count, net active count), ..., ]
    """
    # sort molecules by their ensemble score
    #sort_order = get_sort_order(molecules)
    sort_order = 'asc'
    sorted_molecules = screener.screener(molecules, ensemble, sort_order)

    # initiate variables
    score_structure = []
    net_active_count = 0
    net_decoy_count = 0

    for mol in sorted_molecules:
        # determine net active count & net decoy count
        status = mol.GetProp('status')
        if status == '1':
            net_active_count += 1
        elif status == '0':
            net_decoy_count += 1
        else:
            continue
        score_structure.append((mol.GetProp('id'), mol.GetProp('best_score'),
                                mol.GetProp('best_query'), status,
                                net_decoy_count, net_active_count))

    return score_structure
python
train
https://github.com/rvswift/EB/blob/341880b79faf8147dc9fa6e90438531cd09fabcc/EB/builder/utilities/classification.py#L383-L412
0.004378
adafruit/Adafruit_Python_SSD1306
Adafruit_SSD1306/SSD1306.py
SSD1306Base.set_contrast
def set_contrast(self, contrast):
    """Sets the contrast of the display.  Contrast should be a value between
    0 and 255."""
    if contrast < 0 or contrast > 255:
        raise ValueError('Contrast must be a value from 0 to 255 (inclusive).')
    self.command(SSD1306_SETCONTRAST)
    self.command(contrast)
python
train
https://github.com/adafruit/Adafruit_Python_SSD1306/blob/8819e2d203df49f2843059d981b7347d9881c82b/Adafruit_SSD1306/SSD1306.py#L215-L221
0.011905
DLR-RM/RAFCON
source/rafcon/gui/controllers/state_machine_tree.py
StateMachineTreeController.show_content
def show_content(self, state_model):
    """Check the state machine tree specific show-content flag.

    Returns True if the uppermost library state above a state model has its
    show-content flag enabled, or if there is no library root state above
    this state.

    :param rafcon.gui.models.abstract_state.AbstractStateModel state_model: The state model to check
    """
    upper_most_lib_state_m = None
    if isinstance(state_model, LibraryStateModel):
        uppermost_library_root_state = state_model.state.get_uppermost_library_root_state()
        if uppermost_library_root_state is None:
            upper_most_lib_state_m = state_model
        else:
            upper_lib_state = uppermost_library_root_state.parent
            upper_most_lib_state_m = self._selected_sm_model.get_state_model_by_path(upper_lib_state.get_path())

    if upper_most_lib_state_m:
        return upper_most_lib_state_m.show_content()
    else:
        return True
python
train
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/state_machine_tree.py#L363-L382
0.005888
apache/incubator-mxnet
python/mxnet/symbol/symbol.py
Symbol._set_attr
def _set_attr(self, **kwargs):
    """Sets an attribute of the symbol.

    For example, ``A._set_attr(foo="bar")`` adds the mapping ``"{foo: bar}"``
    to the symbol's attribute dictionary.

    Parameters
    ----------
    **kwargs
        The attributes to set
    """
    for key, value in kwargs.items():
        if not isinstance(value, string_types):
            raise ValueError("Set Attr only accepts string values")
        check_call(_LIB.MXSymbolSetAttr(
            self.handle, c_str(key), c_str(str(value))))
python
train
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/symbol/symbol.py#L635-L650
0.00349
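A hedged usage sketch against MXNet's symbol API (requires mxnet; `_set_attr` is the internal helper above, shown here only for illustration):

import mxnet as mx

data = mx.sym.Variable('data')
data._set_attr(foo='bar')   # adds {foo: bar} to the attribute dictionary
print(data.attr('foo'))     # 'bar'
# data._set_attr(foo=42)    # would raise ValueError: only string values accepted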
juanifioren/django-oidc-provider
oidc_provider/lib/utils/common.py
get_site_url
def get_site_url(site_url=None, request=None):
    """
    Construct the site url.

    Order of precedence for deciding the site url:
    1. valid `site_url` parameter
    2. valid `SITE_URL` in settings
    3. construct from `request` object
    """
    site_url = site_url or settings.get('SITE_URL')
    if site_url:
        return site_url
    elif request:
        return '{}://{}'.format(request.scheme, request.get_host())
    else:
        raise Exception('Either pass `site_url`, '
                        'or set `SITE_URL` in settings, '
                        'or pass `request` object.')
python
train
https://github.com/juanifioren/django-oidc-provider/blob/f0daed07b2ac7608565b80d4c80ccf04d8c416a8/oidc_provider/lib/utils/common.py#L25-L42
0.001684
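A standalone sketch of the precedence order, with `settings` stubbed as a plain dict (the real module reads oidc_provider's settings object):

settings = {'SITE_URL': 'https://configured.example'}  # stub, made-up value

def get_site_url(site_url=None, request=None):
    site_url = site_url or settings.get('SITE_URL')
    if site_url:
        return site_url
    elif request:
        return '{}://{}'.format(request.scheme, request.get_host())
    raise Exception('Either pass `site_url`, or set `SITE_URL` in settings, '
                    'or pass `request` object.')

print(get_site_url(site_url='https://param.example'))  # 1. parameter wins
print(get_site_url())                                  # 2. falls back to SITE_URL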
pycontribs/pyrax
pyrax/base_identity.py
BaseIdentity.get_client
def get_client(self, service, region, public=True, cached=True,
               client_class=None):
    """
    Returns the client object for the specified service and region.

    By default the public endpoint is used. If you wish to work with a
    service's internal endpoints, specify `public=False`.

    By default, if a client has already been created for the given service,
    region, and public values, that will be returned. To force a new client
    to be created, pass 'cached=False'.
    """
    if not self.authenticated:
        raise exc.NotAuthenticated("You must authenticate before trying "
                                   "to create clients.")
    clt = ep = None
    mapped_service = self.service_mapping.get(service) or service
    svc = self.services.get(mapped_service)
    if svc:
        ep = svc.endpoints.get(region)
    if ep:
        clt = ep._get_client(public=public, cached=cached,
                             client_class=client_class)
    if not clt:
        raise exc.NoSuchClient("There is no client available for the "
                               "service '%s' in the region '%s'." % (service, region))
    return clt
python
def get_client(self, service, region, public=True, cached=True, client_class=None): """ Returns the client object for the specified service and region. By default the public endpoint is used. If you wish to work with a services internal endpoints, specify `public=False`. By default, if a client has already been created for the given service, region, and public values, that will be returned. To force a new client to be created, pass 'cached=False'. """ if not self.authenticated: raise exc.NotAuthenticated("You must authenticate before trying " "to create clients.") clt = ep = None mapped_service = self.service_mapping.get(service) or service svc = self.services.get(mapped_service) if svc: ep = svc.endpoints.get(region) if ep: clt = ep._get_client(public=public, cached=cached, client_class=client_class) if not clt: raise exc.NoSuchClient("There is no client available for the " "service '%s' in the region '%s'." % (service, region)) return clt
[ "def", "get_client", "(", "self", ",", "service", ",", "region", ",", "public", "=", "True", ",", "cached", "=", "True", ",", "client_class", "=", "None", ")", ":", "if", "not", "self", ".", "authenticated", ":", "raise", "exc", ".", "NotAuthenticated", "(", "\"You must authenticate before trying \"", "\"to create clients.\"", ")", "clt", "=", "ep", "=", "None", "mapped_service", "=", "self", ".", "service_mapping", ".", "get", "(", "service", ")", "or", "service", "svc", "=", "self", ".", "services", ".", "get", "(", "mapped_service", ")", "if", "svc", ":", "ep", "=", "svc", ".", "endpoints", ".", "get", "(", "region", ")", "if", "ep", ":", "clt", "=", "ep", ".", "_get_client", "(", "public", "=", "public", ",", "cached", "=", "cached", ",", "client_class", "=", "client_class", ")", "if", "not", "clt", ":", "raise", "exc", ".", "NoSuchClient", "(", "\"There is no client available for the \"", "\"service '%s' in the region '%s'.\"", "%", "(", "service", ",", "region", ")", ")", "return", "clt" ]
Returns the client object for the specified service and region. By default the public endpoint is used. If you wish to work with a service's internal endpoints, specify `public=False`. By default, if a client has already been created for the given service, region, and public values, that will be returned. To force a new client to be created, pass 'cached=False'.
[ "Returns", "the", "client", "object", "for", "the", "specified", "service", "and", "region", "." ]
train
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/base_identity.py#L382-L408
0.004983
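A hedged usage sketch for get_client; `ident` stands for an already-authenticated identity instance and the service/region names are illustrative, so this is not runnable without live credentials:

    cs = ident.get_client('compute', 'DFW')                   # cached, public endpoint
    snet = ident.get_client('compute', 'DFW', public=False)   # internal endpoint
    fresh = ident.get_client('compute', 'DFW', cached=False)  # force a fresh client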
billyoverton/tweetqueue
tweetqueue/__main__.py
queue
def queue(ctx, message): """Adds a message to your twitter queue""" if not valid_tweet(message): click.echo("Message is too long for twitter.") click.echo("Message: " + message) ctx.exit(2) if ctx.obj['DRYRUN']: click.echo("Message not queued due to dry-run mode.") ctx.exit(0) ctx.obj['TWEETLIST'].append(message)
python
def queue(ctx, message): """Adds a message to your twitter queue""" if not valid_tweet(message): click.echo("Message is too long for twitter.") click.echo("Message: " + message) ctx.exit(2) if ctx.obj['DRYRUN']: click.echo("Message not queued due to dry-run mode.") ctx.exit(0) ctx.obj['TWEETLIST'].append(message)
[ "def", "queue", "(", "ctx", ",", "message", ")", ":", "if", "not", "valid_tweet", "(", "message", ")", ":", "click", ".", "echo", "(", "\"Message is too long for twitter.\"", ")", "click", ".", "echo", "(", "\"Message: \"", "+", "message", ")", "ctx", ".", "exit", "(", "2", ")", "if", "ctx", ".", "obj", "[", "'DRYRUN'", "]", ":", "click", ".", "echo", "(", "\"Message not queue due to dry-run mode.\"", ")", "ctx", ".", "exit", "(", "0", ")", "ctx", ".", "obj", "[", "'TWEETLIST'", "]", ".", "append", "(", "message", ")" ]
Adds a message to your twitter queue
[ "Adds", "a", "message", "to", "your", "twitter", "queue" ]
train
https://github.com/billyoverton/tweetqueue/blob/e54972a0137ea2a21b2357b81408d9d4c92fdd61/tweetqueue/__main__.py#L93-L104
0.002695
pjmark/NIMPA
niftypet/nimpa/prc/imio.py
orientnii
def orientnii(imfile): '''Get the orientation from NIfTI sform. Not fully functional yet.''' strorient = ['L-R', 'S-I', 'A-P'] niiorient = [] niixyz = np.zeros(3,dtype=np.int8) if os.path.isfile(imfile): nim = nib.load(imfile) pct = nim.get_data() A = nim.get_sform() for i in range(3): niixyz[i] = np.argmax(abs(A[i,:-1])) niiorient.append( strorient[ niixyz[i] ] ) print(niiorient)
python
def orientnii(imfile): '''Get the orientation from NIfTI sform. Not fully functional yet.''' strorient = ['L-R', 'S-I', 'A-P'] niiorient = [] niixyz = np.zeros(3,dtype=np.int8) if os.path.isfile(imfile): nim = nib.load(imfile) pct = nim.get_data() A = nim.get_sform() for i in range(3): niixyz[i] = np.argmax(abs(A[i,:-1])) niiorient.append( strorient[ niixyz[i] ] ) print(niiorient)
[ "def", "orientnii", "(", "imfile", ")", ":", "strorient", "=", "[", "'L-R'", ",", "'S-I'", ",", "'A-P'", "]", "niiorient", "=", "[", "]", "niixyz", "=", "np", ".", "zeros", "(", "3", ",", "dtype", "=", "np", ".", "int8", ")", "if", "os", ".", "path", ".", "isfile", "(", "imfile", ")", ":", "nim", "=", "nib", ".", "load", "(", "imfile", ")", "pct", "=", "nim", ".", "get_data", "(", ")", "A", "=", "nim", ".", "get_sform", "(", ")", "for", "i", "in", "range", "(", "3", ")", ":", "niixyz", "[", "i", "]", "=", "np", ".", "argmax", "(", "abs", "(", "A", "[", "i", ",", ":", "-", "1", "]", ")", ")", "niiorient", ".", "append", "(", "strorient", "[", "niixyz", "[", "i", "]", "]", ")", "print", "niiorient" ]
Get the orientation from NIfTI sform. Not fully functional yet.
[ "Get", "the", "orientation", "from", "NIfTI", "sform", ".", "Not", "fully", "functional", "yet", "." ]
train
https://github.com/pjmark/NIMPA/blob/3f4231fed2934a1d92e4cd8e9e153b0118e29d86/niftypet/nimpa/prc/imio.py#L191-L203
0.014989
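Note that newer nibabel releases deprecate and eventually remove Image.get_data(); a hedged modernization sketch of the same orientation lookup (not the library's code, and the unused pct load is dropped):

    import numpy as np
    import nibabel as nib

    def orientation_labels(imfile):
        # same per-axis label lookup as orientnii, on current nibabel APIs
        strorient = ['L-R', 'S-I', 'A-P']
        A = nib.load(imfile).affine  # sform/qform-derived affine
        return [strorient[int(np.argmax(np.abs(A[i, :-1])))] for i in range(3)]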
HazyResearch/metal
metal/utils.py
convert_labels
def convert_labels(Y, source, dest): """Convert a matrix from one label type to another Args: Y: A np.ndarray or torch.Tensor of labels (ints) source: The convention the labels are currently expressed in dest: The convention to convert the labels to Conventions: 'categorical': [0: abstain, 1: positive, 2: negative] 'plusminus': [0: abstain, 1: positive, -1: negative] 'onezero': [0: negative, 1: positive] Note that converting to 'onezero' will combine abstain and negative labels. """ if Y is None: return Y if isinstance(Y, np.ndarray): Y = Y.copy() assert isinstance(Y, int) elif isinstance(Y, torch.Tensor): Y = Y.clone() assert np.sum(Y.numpy() - Y.numpy().astype(int)) == 0.0 else: raise ValueError("Unrecognized label data type.") negative_map = {"categorical": 2, "plusminus": -1, "onezero": 0} Y[Y == negative_map[source]] = negative_map[dest] return Y
python
def convert_labels(Y, source, dest): """Convert a matrix from one label type to another Args: Y: A np.ndarray or torch.Tensor of labels (ints) source: The convention the labels are currently expressed in dest: The convention to convert the labels to Conventions: 'categorical': [0: abstain, 1: positive, 2: negative] 'plusminus': [0: abstain, 1: positive, -1: negative] 'onezero': [0: negative, 1: positive] Note that converting to 'onezero' will combine abstain and negative labels. """ if Y is None: return Y if isinstance(Y, np.ndarray): Y = Y.copy() assert isinstance(Y, int) elif isinstance(Y, torch.Tensor): Y = Y.clone() assert np.sum(Y.numpy() - Y.numpy().astype(int)) == 0.0 else: raise ValueError("Unrecognized label data type.") negative_map = {"categorical": 2, "plusminus": -1, "onezero": 0} Y[Y == negative_map[source]] = negative_map[dest] return Y
[ "def", "convert_labels", "(", "Y", ",", "source", ",", "dest", ")", ":", "if", "Y", "is", "None", ":", "return", "Y", "if", "isinstance", "(", "Y", ",", "np", ".", "ndarray", ")", ":", "Y", "=", "Y", ".", "copy", "(", ")", "assert", "isinstance", "(", "Y", ",", "int", ")", "elif", "isinstance", "(", "Y", ",", "torch", ".", "Tensor", ")", ":", "Y", "=", "Y", ".", "clone", "(", ")", "assert", "np", ".", "sum", "(", "Y", ".", "numpy", "(", ")", "-", "Y", ".", "numpy", "(", ")", ".", "astype", "(", "int", ")", ")", "==", "0.0", "else", ":", "raise", "ValueError", "(", "\"Unrecognized label data type.\"", ")", "negative_map", "=", "{", "\"categorical\"", ":", "2", ",", "\"plusminus\"", ":", "-", "1", ",", "\"onezero\"", ":", "0", "}", "Y", "[", "Y", "==", "negative_map", "[", "source", "]", "]", "=", "negative_map", "[", "dest", "]", "return", "Y" ]
Convert a matrix from one label type to another Args: Y: A np.ndarray or torch.Tensor of labels (ints) source: The convention the labels are currently expressed in dest: The convention to convert the labels to Conventions: 'categorical': [0: abstain, 1: positive, 2: negative] 'plusminus': [0: abstain, 1: positive, -1: negative] 'onezero': [0: negative, 1: positive] Note that converting to 'onezero' will combine abstain and negative labels.
[ "Convert", "a", "matrix", "from", "one", "label", "type", "to", "another" ]
train
https://github.com/HazyResearch/metal/blob/c24e3772e25ac6d0917b8b7af4c1bcb92928f84a/metal/utils.py#L101-L128
0.00099
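In the ndarray branch above, `assert isinstance(Y, int)` can never hold, since Y is an array at that point, so every ndarray input trips the assertion; the torch branch's integer-valuedness check suggests the intent. A hedged sketch of that check, not the library's actual fix:

    import numpy as np

    def _assert_integer_valued(Y: np.ndarray) -> None:
        # mirrors the torch branch: label values must be whole numbers
        assert np.sum(Y - Y.astype(int)) == 0.0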
ethereum/web3.py
web3/gas_strategies/time_based.py
_compute_gas_price
def _compute_gas_price(probabilities, desired_probability): """ Given a sorted range of ``Probability`` named-tuples returns a gas price computed based on where the ``desired_probability`` would fall within the range. :param probabilities: An iterable of `Probability` named-tuples sorted in reverse order. :param desired_probability: A floating point representation of the desired probability. (e.g. ``85% -> 0.85``) """ first = probabilities[0] last = probabilities[-1] if desired_probability >= first.prob: return int(first.gas_price) elif desired_probability <= last.prob: return int(last.gas_price) for left, right in sliding_window(2, probabilities): if desired_probability < right.prob: continue elif desired_probability > left.prob: # This code block should never be reachable as it would indicate # that we already passed by the probability window in which our # `desired_probability` is located. raise Exception('Invariant') adj_prob = desired_probability - right.prob window_size = left.prob - right.prob position = adj_prob / window_size gas_window_size = left.gas_price - right.gas_price gas_price = int(math.ceil(right.gas_price + gas_window_size * position)) return gas_price else: # The initial `if/else` clause in this function handles the case where # the `desired_probability` is either above or below the min/max # probability found in the `probabilities`. # # With these two cases handled, the only way this code block should be # reachable would be if the `probabilities` were not sorted correctly. # Otherwise, the `desired_probability` **must** fall between two of the # values in the `probabilities``. raise Exception('Invariant')
python
def _compute_gas_price(probabilities, desired_probability): """ Given a sorted range of ``Probability`` named-tuples returns a gas price computed based on where the ``desired_probability`` would fall within the range. :param probabilities: An iterable of `Probability` named-tuples sorted in reverse order. :param desired_probability: A floating point representation of the desired probability. (e.g. ``85% -> 0.85``) """ first = probabilities[0] last = probabilities[-1] if desired_probability >= first.prob: return int(first.gas_price) elif desired_probability <= last.prob: return int(last.gas_price) for left, right in sliding_window(2, probabilities): if desired_probability < right.prob: continue elif desired_probability > left.prob: # This code block should never be reachable as it would indicate # that we already passed by the probability window in which our # `desired_probability` is located. raise Exception('Invariant') adj_prob = desired_probability - right.prob window_size = left.prob - right.prob position = adj_prob / window_size gas_window_size = left.gas_price - right.gas_price gas_price = int(math.ceil(right.gas_price + gas_window_size * position)) return gas_price else: # The initial `if/else` clause in this function handles the case where # the `desired_probability` is either above or below the min/max # probability found in the `probabilities`. # # With these two cases handled, the only way this code block should be # reachable would be if the `probabilities` were not sorted correctly. # Otherwise, the `desired_probability` **must** fall between two of the # values in the `probabilities``. raise Exception('Invariant')
[ "def", "_compute_gas_price", "(", "probabilities", ",", "desired_probability", ")", ":", "first", "=", "probabilities", "[", "0", "]", "last", "=", "probabilities", "[", "-", "1", "]", "if", "desired_probability", ">=", "first", ".", "prob", ":", "return", "int", "(", "first", ".", "gas_price", ")", "elif", "desired_probability", "<=", "last", ".", "prob", ":", "return", "int", "(", "last", ".", "gas_price", ")", "for", "left", ",", "right", "in", "sliding_window", "(", "2", ",", "probabilities", ")", ":", "if", "desired_probability", "<", "right", ".", "prob", ":", "continue", "elif", "desired_probability", ">", "left", ".", "prob", ":", "# This code block should never be reachable as it would indicate", "# that we already passed by the probability window in which our", "# `desired_probability` is located.", "raise", "Exception", "(", "'Invariant'", ")", "adj_prob", "=", "desired_probability", "-", "right", ".", "prob", "window_size", "=", "left", ".", "prob", "-", "right", ".", "prob", "position", "=", "adj_prob", "/", "window_size", "gas_window_size", "=", "left", ".", "gas_price", "-", "right", ".", "gas_price", "gas_price", "=", "int", "(", "math", ".", "ceil", "(", "right", ".", "gas_price", "+", "gas_window_size", "*", "position", ")", ")", "return", "gas_price", "else", ":", "# The initial `if/else` clause in this function handles the case where", "# the `desired_probability` is either above or below the min/max", "# probability found in the `probabilities`.", "#", "# With these two cases handled, the only way this code block should be", "# reachable would be if the `probabilities` were not sorted correctly.", "# Otherwise, the `desired_probability` **must** fall between two of the", "# values in the `probabilities``.", "raise", "Exception", "(", "'Invariant'", ")" ]
Given a sorted range of ``Probability`` named-tuples returns a gas price computed based on where the ``desired_probability`` would fall within the range. :param probabilities: An iterable of `Probability` named-tuples sorted in reverse order. :param desired_probability: A floating point representation of the desired probability. (e.g. ``85% -> 0.85``)
[ "Given", "a", "sorted", "range", "of", "Probability", "named", "-", "tuples", "returns", "a", "gas", "price", "computed", "based", "on", "where", "the", "desired_probability", "would", "fall", "within", "the", "range", "." ]
train
https://github.com/ethereum/web3.py/blob/71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab/web3/gas_strategies/time_based.py#L94-L136
0.001553
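A worked instance of the interpolation step, with illustrative numbers; probabilities are sorted by descending prob, so the left entry carries both the higher probability and the higher gas price:

    import math

    left_prob, left_price = 0.9, 20    # illustrative Probability pair
    right_prob, right_price = 0.5, 10
    desired = 0.7
    position = (desired - right_prob) / (left_prob - right_prob)  # 0.5
    print(math.ceil(right_price + (left_price - right_price) * position))  # 15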
gabstopper/smc-python
smc-monitoring/smc_monitoring/models/filters.py
TranslatedFilter.within_ipv4_network
def within_ipv4_network(self, field, values): """ This filter adds specified networks to a filter to check for inclusion. :param str field: name of field to filter on. Taken from 'Show Filter Expression' within SMC. :param list values: network definitions, in cidr format, i.e: 1.1.1.0/24. """ v = ['ipv4_net("%s")' % net for net in values] self.update_filter('{} IN union({})'.format(field, ','.join(v)))
python
def within_ipv4_network(self, field, values): """ This filter adds specified networks to a filter to check for inclusion. :param str field: name of field to filter on. Taken from 'Show Filter Expression' within SMC. :param list values: network definitions, in cidr format, i.e: 1.1.1.0/24. """ v = ['ipv4_net("%s")' % net for net in values] self.update_filter('{} IN union({})'.format(field, ','.join(v)))
[ "def", "within_ipv4_network", "(", "self", ",", "field", ",", "values", ")", ":", "v", "=", "[", "'ipv4_net(\"%s\")'", "%", "net", "for", "net", "in", "values", "]", "self", ".", "update_filter", "(", "'{} IN union({})'", ".", "format", "(", "field", ",", "','", ".", "join", "(", "v", ")", ")", ")" ]
This filter adds specified networks to a filter to check for inclusion. :param str field: name of field to filter on. Taken from 'Show Filter Expression' within SMC. :param list values: network definitions, in cidr format, i.e: 1.1.1.0/24.
[ "This", "filter", "adds", "specified", "networks", "to", "a", "filter", "to", "check", "for", "inclusion", ".", ":", "param", "str", "field", ":", "name", "of", "field", "to", "filter", "on", ".", "Taken", "from", "Show", "Filter", "Expression", "within", "SMC", ".", ":", "param", "list", "values", ":", "network", "definitions", "in", "cidr", "format", "i", ".", "e", ":", "1", ".", "1", ".", "1", ".", "0", "/", "24", "." ]
train
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc-monitoring/smc_monitoring/models/filters.py#L264-L274
0.008163
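The filter expression this builds, reproduced standalone; 'Src Addr' is a hypothetical field name:

    nets = ['10.0.0.0/8', '192.168.1.0/24']
    v = ['ipv4_net("%s")' % net for net in nets]
    print('{} IN union({})'.format('Src Addr', ','.join(v)))
    # Src Addr IN union(ipv4_net("10.0.0.0/8"),ipv4_net("192.168.1.0/24"))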
szastupov/aiotg
aiotg/chat.py
Chat.kick_chat_member
def kick_chat_member(self, user_id): """ Use this method to kick a user from a group or a supergroup. The bot must be an administrator in the group for this to work. :param int user_id: Unique identifier of the target user """ return self.bot.api_call("kickChatMember", chat_id=self.id, user_id=user_id)
python
def kick_chat_member(self, user_id): """ Use this method to kick a user from a group or a supergroup. The bot must be an administrator in the group for this to work. :param int user_id: Unique identifier of the target user """ return self.bot.api_call("kickChatMember", chat_id=self.id, user_id=user_id)
[ "def", "kick_chat_member", "(", "self", ",", "user_id", ")", ":", "return", "self", ".", "bot", ".", "api_call", "(", "\"kickChatMember\"", ",", "chat_id", "=", "self", ".", "id", ",", "user_id", "=", "user_id", ")" ]
Use this method to kick a user from a group or a supergroup. The bot must be an administrator in the group for this to work. :param int user_id: Unique identifier of the target user
[ "Use", "this", "method", "to", "kick", "a", "user", "from", "a", "group", "or", "a", "supergroup", ".", "The", "bot", "must", "be", "an", "administrator", "in", "the", "group", "for", "this", "to", "work", "." ]
train
https://github.com/szastupov/aiotg/blob/eed81a6a728c02120f1d730a6e8b8fe50263c010/aiotg/chat.py#L338-L345
0.008523
dw/mitogen
mitogen/parent.py
Context.call_no_reply
def call_no_reply(self, fn, *args, **kwargs): """ See :meth:`CallChain.call_no_reply`. """ self.default_call_chain.call_no_reply(fn, *args, **kwargs)
python
def call_no_reply(self, fn, *args, **kwargs): """ See :meth:`CallChain.call_no_reply`. """ self.default_call_chain.call_no_reply(fn, *args, **kwargs)
[ "def", "call_no_reply", "(", "self", ",", "fn", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "default_call_chain", ".", "call_no_reply", "(", "fn", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
See :meth:`CallChain.call_no_reply`.
[ "See", ":", "meth", ":", "CallChain", ".", "call_no_reply", "." ]
train
https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/mitogen/parent.py#L1761-L1765
0.01105
sebp/scikit-survival
sksurv/meta/stacking.py
Stacking.predict
def predict(self, X): """Perform prediction. Only available if the meta estimator has a predict method. Parameters ---------- X : array-like, shape = (n_samples, n_features) Data with samples to predict. Returns ------- prediction : array, shape = (n_samples, n_dim) Prediction of meta estimator that combines predictions of base estimators. `n_dim` depends on the return value of meta estimator's `predict` method. """ X = numpy.asarray(X) Xt = self._predict_estimators(X) return self.meta_estimator.predict(Xt)
python
def predict(self, X): """Perform prediction. Only available if the meta estimator has a predict method. Parameters ---------- X : array-like, shape = (n_samples, n_features) Data with samples to predict. Returns ------- prediction : array, shape = (n_samples, n_dim) Prediction of meta estimator that combines predictions of base estimators. `n_dim` depends on the return value of meta estimator's `predict` method. """ X = numpy.asarray(X) Xt = self._predict_estimators(X) return self.meta_estimator.predict(Xt)
[ "def", "predict", "(", "self", ",", "X", ")", ":", "X", "=", "numpy", ".", "asarray", "(", "X", ")", "Xt", "=", "self", ".", "_predict_estimators", "(", "X", ")", "return", "self", ".", "meta_estimator", ".", "predict", "(", "Xt", ")" ]
Perform prediction. Only available if the meta estimator has a predict method. Parameters ---------- X : array-like, shape = (n_samples, n_features) Data with samples to predict. Returns ------- prediction : array, shape = (n_samples, n_dim) Prediction of meta estimator that combines predictions of base estimators. `n_dim` depends on the return value of meta estimator's `predict` method.
[ "Perform", "prediction", "." ]
train
https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/meta/stacking.py#L138-L158
0.002981
numberoverzero/bloop
bloop/search.py
Search.prepare
def prepare(self): """Constructs a :class:`~bloop.search.PreparedSearch`.""" p = PreparedSearch() p.prepare( engine=self.engine, mode=self.mode, model=self.model, index=self.index, key=self.key, filter=self.filter, projection=self.projection, consistent=self.consistent, forward=self.forward, parallel=self.parallel ) return p
python
def prepare(self): """Constructs a :class:`~bloop.search.PreparedSearch`.""" p = PreparedSearch() p.prepare( engine=self.engine, mode=self.mode, model=self.model, index=self.index, key=self.key, filter=self.filter, projection=self.projection, consistent=self.consistent, forward=self.forward, parallel=self.parallel ) return p
[ "def", "prepare", "(", "self", ")", ":", "p", "=", "PreparedSearch", "(", ")", "p", ".", "prepare", "(", "engine", "=", "self", ".", "engine", ",", "mode", "=", "self", ".", "mode", ",", "model", "=", "self", ".", "model", ",", "index", "=", "self", ".", "index", ",", "key", "=", "self", ".", "key", ",", "filter", "=", "self", ".", "filter", ",", "projection", "=", "self", ".", "projection", ",", "consistent", "=", "self", ".", "consistent", ",", "forward", "=", "self", ".", "forward", ",", "parallel", "=", "self", ".", "parallel", ")", "return", "p" ]
Constructs a :class:`~bloop.search.PreparedSearch`.
[ "Constructs", "a", ":", "class", ":", "~bloop", ".", "search", ".", "PreparedSearch", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/search.py#L200-L215
0.004115
allenai/allennlp
allennlp/semparse/domain_languages/domain_language.py
DomainLanguage.logical_form_to_action_sequence
def logical_form_to_action_sequence(self, logical_form: str) -> List[str]: """ Converts a logical form into a linearization of the production rules from its abstract syntax tree. The linearization is top-down, depth-first. Each production rule is formatted as "LHS -> RHS", where "LHS" is a single non-terminal type, and RHS is either a terminal or a list of non-terminals (other possible values for RHS in a more general context-free grammar are not produced by our grammar induction logic). Non-terminals are `types` in the grammar, either basic types (like ``int``, ``str``, or some class that you define), or functional types, represented with angle brackets with a colon separating arguments from the return type. Multi-argument functions have commas separating their argument types. For example, ``<int:int>`` is a function that takes an integer and returns an integer, and ``<int,int:int>`` is a function that takes two integer arguments and returns an integer. As an example translation from logical form to complete action sequence, the logical form ``(add 2 3)`` would be translated to ``['@start@ -> int', 'int -> [<int,int:int>, int, int]', '<int,int:int> -> add', 'int -> 2', 'int -> 3']``. """ expression = util.lisp_to_nested_expression(logical_form) try: transitions, start_type = self._get_transitions(expression, expected_type=None) if self._start_types and start_type not in self._start_types: raise ParsingError(f"Expression had unallowed start type of {start_type}: {expression}") except ParsingError: logger.error(f'Error parsing logical form: {logical_form}') raise transitions.insert(0, f'@start@ -> {start_type}') return transitions
python
def logical_form_to_action_sequence(self, logical_form: str) -> List[str]: """ Converts a logical form into a linearization of the production rules from its abstract syntax tree. The linearization is top-down, depth-first. Each production rule is formatted as "LHS -> RHS", where "LHS" is a single non-terminal type, and RHS is either a terminal or a list of non-terminals (other possible values for RHS in a more general context-free grammar are not produced by our grammar induction logic). Non-terminals are `types` in the grammar, either basic types (like ``int``, ``str``, or some class that you define), or functional types, represented with angle brackets with a colon separating arguments from the return type. Multi-argument functions have commas separating their argument types. For example, ``<int:int>`` is a function that takes an integer and returns an integer, and ``<int,int:int>`` is a function that takes two integer arguments and returns an integer. As an example translation from logical form to complete action sequence, the logical form ``(add 2 3)`` would be translated to ``['@start@ -> int', 'int -> [<int,int:int>, int, int]', '<int,int:int> -> add', 'int -> 2', 'int -> 3']``. """ expression = util.lisp_to_nested_expression(logical_form) try: transitions, start_type = self._get_transitions(expression, expected_type=None) if self._start_types and start_type not in self._start_types: raise ParsingError(f"Expression had unallowed start type of {start_type}: {expression}") except ParsingError: logger.error(f'Error parsing logical form: {logical_form}') raise transitions.insert(0, f'@start@ -> {start_type}') return transitions
[ "def", "logical_form_to_action_sequence", "(", "self", ",", "logical_form", ":", "str", ")", "->", "List", "[", "str", "]", ":", "expression", "=", "util", ".", "lisp_to_nested_expression", "(", "logical_form", ")", "try", ":", "transitions", ",", "start_type", "=", "self", ".", "_get_transitions", "(", "expression", ",", "expected_type", "=", "None", ")", "if", "self", ".", "_start_types", "and", "start_type", "not", "in", "self", ".", "_start_types", ":", "raise", "ParsingError", "(", "f\"Expression had unallowed start type of {start_type}: {expression}\"", ")", "except", "ParsingError", ":", "logger", ".", "error", "(", "f'Error parsing logical form: {logical_form}'", ")", "raise", "transitions", ".", "insert", "(", "0", ",", "f'@start@ -> {start_type}'", ")", "return", "transitions" ]
Converts a logical form into a linearization of the production rules from its abstract syntax tree. The linearization is top-down, depth-first. Each production rule is formatted as "LHS -> RHS", where "LHS" is a single non-terminal type, and RHS is either a terminal or a list of non-terminals (other possible values for RHS in a more general context-free grammar are not produced by our grammar induction logic). Non-terminals are `types` in the grammar, either basic types (like ``int``, ``str``, or some class that you define), or functional types, represented with angle brackets with a colon separating arguments from the return type. Multi-argument functions have commas separating their argument types. For example, ``<int:int>`` is a function that takes an integer and returns an integer, and ``<int,int:int>`` is a function that takes two integer arguments and returns an integer. As an example translation from logical form to complete action sequence, the logical form ``(add 2 3)`` would be translated to ``['@start@ -> int', 'int -> [<int,int:int>, int, int]', '<int,int:int> -> add', 'int -> 2', 'int -> 3']``.
[ "Converts", "a", "logical", "form", "into", "a", "linearization", "of", "the", "production", "rules", "from", "its", "abstract", "syntax", "tree", ".", "The", "linearization", "is", "top", "-", "down", "depth", "-", "first", "." ]
train
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/domain_languages/domain_language.py#L379-L409
0.007882
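The intermediate parse the method relies on, spelled out for the docstring's own example:

    # util.lisp_to_nested_expression('(add 2 3)') -> ['add', '2', '3'],
    # which _get_transitions linearizes top-down, depth-first, into:
    # ['@start@ -> int',
    #  'int -> [<int,int:int>, int, int]',
    #  '<int,int:int> -> add',
    #  'int -> 2',
    #  'int -> 3']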
dh1tw/pyhamtools
pyhamtools/lookuplib.py
LookupLib._check_inv_operation_for_date
def _check_inv_operation_for_date(self, item, timestamp, data_dict, data_index_dict): """ Checks if the callsign is marked as an invalid operation for a given timestamp. In case the operation is invalid, True is returned. Otherwise a KeyError is raised. """ if item in data_index_dict: for item in data_index_dict[item]: # startdate < timestamp if const.START in data_dict[item] and not const.END in data_dict[item]: if data_dict[item][const.START] < timestamp: return True # enddate > timestamp elif not const.START in data_dict[item] and const.END in data_dict[item]: if data_dict[item][const.END] > timestamp: return True # startdate < timestamp < enddate elif const.START in data_dict[item] and const.END in data_dict[item]: if data_dict[item][const.START] < timestamp \ and data_dict[item][const.END] > timestamp: return True # no startdate or enddate available elif not const.START in data_dict[item] and not const.END in data_dict[item]: return True raise KeyError
python
def _check_inv_operation_for_date(self, item, timestamp, data_dict, data_index_dict): """ Checks if the callsign is marked as an invalid operation for a given timestamp. In case the operation is invalid, True is returned. Otherwise a KeyError is raised. """ if item in data_index_dict: for item in data_index_dict[item]: # startdate < timestamp if const.START in data_dict[item] and not const.END in data_dict[item]: if data_dict[item][const.START] < timestamp: return True # enddate > timestamp elif not const.START in data_dict[item] and const.END in data_dict[item]: if data_dict[item][const.END] > timestamp: return True # startdate < timestamp < enddate elif const.START in data_dict[item] and const.END in data_dict[item]: if data_dict[item][const.START] < timestamp \ and data_dict[item][const.END] > timestamp: return True # no startdate or enddate available elif not const.START in data_dict[item] and not const.END in data_dict[item]: return True raise KeyError
[ "def", "_check_inv_operation_for_date", "(", "self", ",", "item", ",", "timestamp", ",", "data_dict", ",", "data_index_dict", ")", ":", "if", "item", "in", "data_index_dict", ":", "for", "item", "in", "data_index_dict", "[", "item", "]", ":", "# startdate < timestamp", "if", "const", ".", "START", "in", "data_dict", "[", "item", "]", "and", "not", "const", ".", "END", "in", "data_dict", "[", "item", "]", ":", "if", "data_dict", "[", "item", "]", "[", "const", ".", "START", "]", "<", "timestamp", ":", "return", "True", "# enddate > timestamp", "elif", "not", "const", ".", "START", "in", "data_dict", "[", "item", "]", "and", "const", ".", "END", "in", "data_dict", "[", "item", "]", ":", "if", "data_dict", "[", "item", "]", "[", "const", ".", "END", "]", ">", "timestamp", ":", "return", "True", "# startdate > timestamp > enddate", "elif", "const", ".", "START", "in", "data_dict", "[", "item", "]", "and", "const", ".", "END", "in", "data_dict", "[", "item", "]", ":", "if", "data_dict", "[", "item", "]", "[", "const", ".", "START", "]", "<", "timestamp", "and", "data_dict", "[", "item", "]", "[", "const", ".", "END", "]", ">", "timestamp", ":", "return", "True", "# no startdate or enddate available", "elif", "not", "const", ".", "START", "in", "data_dict", "[", "item", "]", "and", "not", "const", ".", "END", "in", "data_dict", "[", "item", "]", ":", "return", "True", "raise", "KeyError" ]
Checks if the callsign is marked as an invalid operation for a given timestamp. In case the operation is invalid, True is returned. Otherwise a KeyError is raised.
[ "Checks", "if", "the", "callsign", "is", "marked", "as", "an", "invalid", "operation", "for", "a", "given", "timestamp", ".", "In", "case", "the", "operation", "is", "invalid", "True", "is", "returned", ".", "Otherwise", "a", "KeyError", "is", "raised", "." ]
train
https://github.com/dh1tw/pyhamtools/blob/ee7e4b8732e23c298da10e07163748156c16d0fa/pyhamtools/lookuplib.py#L453-L482
0.008922
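The four branches above reduce to a single predicate; a hedged condensed rewrite with the same behavior, using plain dict keys in place of const.START/const.END:

    def covers_timestamp(entry, ts, START='start', END='end'):
        # the record applies if it has started (or has no start date)
        # and has not yet ended (or has no end date)
        started = START not in entry or entry[START] < ts
        not_ended = END not in entry or entry[END] > ts
        return started and not_ended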
neon-jungle/wagtailvideos
wagtailvideos/models.py
get_local_file
def get_local_file(file): """ Get a local version of the file, downloading it from the remote storage if required. The returned value should be used as a context manager to ensure any temporary files are cleaned up afterwards. """ try: with open(file.path): yield file.path except NotImplementedError: _, ext = os.path.splitext(file.name) with NamedTemporaryFile(prefix='wagtailvideo-', suffix=ext) as tmp: try: file.open('rb') for chunk in file.chunks(): tmp.write(chunk) finally: file.close() tmp.flush() yield tmp.name
python
def get_local_file(file): """ Get a local version of the file, downloading it from the remote storage if required. The returned value should be used as a context manager to ensure any temporary files are cleaned up afterwards. """ try: with open(file.path): yield file.path except NotImplementedError: _, ext = os.path.splitext(file.name) with NamedTemporaryFile(prefix='wagtailvideo-', suffix=ext) as tmp: try: file.open('rb') for chunk in file.chunks(): tmp.write(chunk) finally: file.close() tmp.flush() yield tmp.name
[ "def", "get_local_file", "(", "file", ")", ":", "try", ":", "with", "open", "(", "file", ".", "path", ")", ":", "yield", "file", ".", "path", "except", "NotImplementedError", ":", "_", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "file", ".", "name", ")", "with", "NamedTemporaryFile", "(", "prefix", "=", "'wagtailvideo-'", ",", "suffix", "=", "ext", ")", "as", "tmp", ":", "try", ":", "file", ".", "open", "(", "'rb'", ")", "for", "chunk", "in", "file", ".", "chunks", "(", ")", ":", "tmp", ".", "write", "(", "chunk", ")", "finally", ":", "file", ".", "close", "(", ")", "tmp", ".", "flush", "(", ")", "yield", "tmp", ".", "name" ]
Get a local version of the file, downloading it from the remote storage if required. The returned value should be used as a context manager to ensure any temporary files are cleaned up afterwards.
[ "Get", "a", "local", "version", "of", "the", "file", "downloading", "it", "from", "the", "remote", "storage", "if", "required", ".", "The", "returned", "value", "should", "be", "used", "as", "a", "context", "manager", "to", "ensure", "any", "temporary", "files", "are", "cleaned", "up", "afterwards", "." ]
train
https://github.com/neon-jungle/wagtailvideos/blob/05a43571ac4b5e7cf07fbb89e804e53447b699c2/wagtailvideos/models.py#L292-L311
0.001427
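The yield-based body implies a contextlib.contextmanager wrapper in the source; a hedged usage sketch, where video.file and probe_video are hypothetical:

    with get_local_file(video.file) as local_path:
        # local_path is either the storage backend's real path or a
        # NamedTemporaryFile copy, cleaned up when the block exits
        probe_video(local_path)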
tensorflow/tensorboard
tensorboard/plugins/hparams/summary.py
session_start_pb
def session_start_pb(hparams, model_uri='', monitor_url='', group_name='', start_time_secs=None): """Constructs a SessionStartInfo protobuffer. Creates a summary that contains a training session's metadata information. One such summary per training session should be created. Each should have a different run. Args: hparams: A dictionary with string keys. Describes the hyperparameter values used in the session, mapping each hyperparameter name to its value. Supported value types are `bool`, `int`, `float`, `str`, `list`, `tuple`. The type of value must correspond to the type of hyperparameter (defined in the corresponding api_pb2.HParamInfo member of the Experiment protobuf) as follows: +-----------------+---------------------------------+ |Hyperparameter | Allowed (Python) value types | |type | | +-----------------+---------------------------------+ |DATA_TYPE_BOOL | bool | |DATA_TYPE_FLOAT64| int, float | |DATA_TYPE_STRING | six.string_types, tuple, list | +-----------------+---------------------------------+ Tuple and list instances will be converted to their string representation. model_uri: See the comment for the field with the same name of plugin_data_pb2.SessionStartInfo. monitor_url: See the comment for the field with the same name of plugin_data_pb2.SessionStartInfo. group_name: See the comment for the field with the same name of plugin_data_pb2.SessionStartInfo. start_time_secs: float. The time to use as the session start time. Represented as seconds since the UNIX epoch. If None uses the current time. Returns: The summary protobuffer mentioned above. """ if start_time_secs is None: start_time_secs = time.time() session_start_info = plugin_data_pb2.SessionStartInfo( model_uri=model_uri, monitor_url=monitor_url, group_name=group_name, start_time_secs=start_time_secs) for (hp_name, hp_val) in six.iteritems(hparams): if isinstance(hp_val, (float, int)): session_start_info.hparams[hp_name].number_value = hp_val elif isinstance(hp_val, six.string_types): session_start_info.hparams[hp_name].string_value = hp_val elif isinstance(hp_val, bool): session_start_info.hparams[hp_name].bool_value = hp_val elif isinstance(hp_val, (list, tuple)): session_start_info.hparams[hp_name].string_value = str(hp_val) else: raise TypeError('hparams[%s]=%s has type: %s which is not supported' % (hp_name, hp_val, type(hp_val))) return _summary(metadata.SESSION_START_INFO_TAG, plugin_data_pb2.HParamsPluginData( session_start_info=session_start_info))
python
def session_start_pb(hparams, model_uri='', monitor_url='', group_name='', start_time_secs=None): """Constructs a SessionStartInfo protobuffer. Creates a summary that contains a training session's metadata information. One such summary per training session should be created. Each should have a different run. Args: hparams: A dictionary with string keys. Describes the hyperparameter values used in the session, mapping each hyperparameter name to its value. Supported value types are `bool`, `int`, `float`, `str`, `list`, `tuple`. The type of value must correspond to the type of hyperparameter (defined in the corresponding api_pb2.HParamInfo member of the Experiment protobuf) as follows: +-----------------+---------------------------------+ |Hyperparameter | Allowed (Python) value types | |type | | +-----------------+---------------------------------+ |DATA_TYPE_BOOL | bool | |DATA_TYPE_FLOAT64| int, float | |DATA_TYPE_STRING | six.string_types, tuple, list | +-----------------+---------------------------------+ Tuple and list instances will be converted to their string representation. model_uri: See the comment for the field with the same name of plugin_data_pb2.SessionStartInfo. monitor_url: See the comment for the field with the same name of plugin_data_pb2.SessionStartInfo. group_name: See the comment for the field with the same name of plugin_data_pb2.SessionStartInfo. start_time_secs: float. The time to use as the session start time. Represented as seconds since the UNIX epoch. If None uses the current time. Returns: The summary protobuffer mentioned above. """ if start_time_secs is None: start_time_secs = time.time() session_start_info = plugin_data_pb2.SessionStartInfo( model_uri=model_uri, monitor_url=monitor_url, group_name=group_name, start_time_secs=start_time_secs) for (hp_name, hp_val) in six.iteritems(hparams): if isinstance(hp_val, (float, int)): session_start_info.hparams[hp_name].number_value = hp_val elif isinstance(hp_val, six.string_types): session_start_info.hparams[hp_name].string_value = hp_val elif isinstance(hp_val, bool): session_start_info.hparams[hp_name].bool_value = hp_val elif isinstance(hp_val, (list, tuple)): session_start_info.hparams[hp_name].string_value = str(hp_val) else: raise TypeError('hparams[%s]=%s has type: %s which is not supported' % (hp_name, hp_val, type(hp_val))) return _summary(metadata.SESSION_START_INFO_TAG, plugin_data_pb2.HParamsPluginData( session_start_info=session_start_info))
[ "def", "session_start_pb", "(", "hparams", ",", "model_uri", "=", "''", ",", "monitor_url", "=", "''", ",", "group_name", "=", "''", ",", "start_time_secs", "=", "None", ")", ":", "if", "start_time_secs", "is", "None", ":", "start_time_secs", "=", "time", ".", "time", "(", ")", "session_start_info", "=", "plugin_data_pb2", ".", "SessionStartInfo", "(", "model_uri", "=", "model_uri", ",", "monitor_url", "=", "monitor_url", ",", "group_name", "=", "group_name", ",", "start_time_secs", "=", "start_time_secs", ")", "for", "(", "hp_name", ",", "hp_val", ")", "in", "six", ".", "iteritems", "(", "hparams", ")", ":", "if", "isinstance", "(", "hp_val", ",", "(", "float", ",", "int", ")", ")", ":", "session_start_info", ".", "hparams", "[", "hp_name", "]", ".", "number_value", "=", "hp_val", "elif", "isinstance", "(", "hp_val", ",", "six", ".", "string_types", ")", ":", "session_start_info", ".", "hparams", "[", "hp_name", "]", ".", "string_value", "=", "hp_val", "elif", "isinstance", "(", "hp_val", ",", "bool", ")", ":", "session_start_info", ".", "hparams", "[", "hp_name", "]", ".", "bool_value", "=", "hp_val", "elif", "isinstance", "(", "hp_val", ",", "(", "list", ",", "tuple", ")", ")", ":", "session_start_info", ".", "hparams", "[", "hp_name", "]", ".", "string_value", "=", "str", "(", "hp_val", ")", "else", ":", "raise", "TypeError", "(", "'hparams[%s]=%s has type: %s which is not supported'", "%", "(", "hp_name", ",", "hp_val", ",", "type", "(", "hp_val", ")", ")", ")", "return", "_summary", "(", "metadata", ".", "SESSION_START_INFO_TAG", ",", "plugin_data_pb2", ".", "HParamsPluginData", "(", "session_start_info", "=", "session_start_info", ")", ")" ]
Constructs a SessionStartInfo protobuffer. Creates a summary that contains a training session's metadata information. One such summary per training session should be created. Each should have a different run. Args: hparams: A dictionary with string keys. Describes the hyperparameter values used in the session, mapping each hyperparameter name to its value. Supported value types are `bool`, `int`, `float`, `str`, `list`, `tuple`. The type of value must correspond to the type of hyperparameter (defined in the corresponding api_pb2.HParamInfo member of the Experiment protobuf) as follows: +-----------------+---------------------------------+ |Hyperparameter | Allowed (Python) value types | |type | | +-----------------+---------------------------------+ |DATA_TYPE_BOOL | bool | |DATA_TYPE_FLOAT64| int, float | |DATA_TYPE_STRING | six.string_types, tuple, list | +-----------------+---------------------------------+ Tuple and list instances will be converted to their string representation. model_uri: See the comment for the field with the same name of plugin_data_pb2.SessionStartInfo. monitor_url: See the comment for the field with the same name of plugin_data_pb2.SessionStartInfo. group_name: See the comment for the field with the same name of plugin_data_pb2.SessionStartInfo. start_time_secs: float. The time to use as the session start time. Represented as seconds since the UNIX epoch. If None uses the current time. Returns: The summary protobuffer mentioned above.
[ "Constructs", "a", "SessionStartInfo", "protobuffer", "." ]
train
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/hparams/summary.py#L83-L147
0.003802
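Because bool is a subclass of int in Python, the isinstance(hp_val, (float, int)) branch above captures True/False first, leaving the bool branch unreachable, so boolean hparams land in number_value. A hedged reordering sketch with abbreviated names:

    # Test bool before float/int: isinstance(True, int) is True.
    if isinstance(hp_val, bool):
        info.hparams[name].bool_value = hp_val
    elif isinstance(hp_val, (float, int)):
        info.hparams[name].number_value = hp_val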
wglass/lighthouse
lighthouse/configs/monitor.py
ConfigFileMonitor.start
def start(self, on_add, on_update, on_delete): """ Starts monitoring the file path, passing along on_(add|update|delete) callbacks to a watchdog observer. Iterates over the files in the target path and calls the on_created callback for each before starting the observer, so that existing files aren't missed. """ handler = ConfigFileChangeHandler( self.target_class, on_add, on_update, on_delete ) for file_name in os.listdir(self.file_path): if os.path.isdir(os.path.join(self.file_path, file_name)): continue if ( not self.target_class.config_subdirectory and not ( file_name.endswith(".yaml") or file_name.endswith(".yml") ) ): continue handler.on_created( events.FileCreatedEvent( os.path.join(self.file_path, file_name) ) ) observer = observers.Observer() observer.schedule(handler, self.file_path) observer.start() return observer
python
def start(self, on_add, on_update, on_delete): """ Starts monitoring the file path, passing along on_(add|update|delete) callbacks to a watchdog observer. Iterates over the files in the target path and calls the on_created callback for each before starting the observer, so that existing files aren't missed. """ handler = ConfigFileChangeHandler( self.target_class, on_add, on_update, on_delete ) for file_name in os.listdir(self.file_path): if os.path.isdir(os.path.join(self.file_path, file_name)): continue if ( not self.target_class.config_subdirectory and not ( file_name.endswith(".yaml") or file_name.endswith(".yml") ) ): continue handler.on_created( events.FileCreatedEvent( os.path.join(self.file_path, file_name) ) ) observer = observers.Observer() observer.schedule(handler, self.file_path) observer.start() return observer
[ "def", "start", "(", "self", ",", "on_add", ",", "on_update", ",", "on_delete", ")", ":", "handler", "=", "ConfigFileChangeHandler", "(", "self", ".", "target_class", ",", "on_add", ",", "on_update", ",", "on_delete", ")", "for", "file_name", "in", "os", ".", "listdir", "(", "self", ".", "file_path", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "join", "(", "self", ".", "file_path", ",", "file_name", ")", ")", ":", "continue", "if", "(", "not", "self", ".", "target_class", ".", "config_subdirectory", "and", "not", "(", "file_name", ".", "endswith", "(", "\".yaml\"", ")", "or", "file_name", ".", "endswith", "(", "\".yml\"", ")", ")", ")", ":", "continue", "handler", ".", "on_created", "(", "events", ".", "FileCreatedEvent", "(", "os", ".", "path", ".", "join", "(", "self", ".", "file_path", ",", "file_name", ")", ")", ")", "observer", "=", "observers", ".", "Observer", "(", ")", "observer", ".", "schedule", "(", "handler", ",", "self", ".", "file_path", ")", "observer", ".", "start", "(", ")", "return", "observer" ]
Starts monitoring the file path, passing along on_(add|update|delete) callbacks to a watchdog observer. Iterates over the files in the target path and calls the on_created callback for each before starting the observer, so that existing files aren't missed.
[ "Starts", "monitoring", "the", "file", "path", "passing", "along", "on_", "(", "add|update|delete", ")", "callbacks", "to", "a", "watchdog", "observer", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/configs/monitor.py#L29-L63
0.001671
GPflow/GPflow
gpflow/conditionals.py
_conditional
def _conditional(Xnew, X, kern, f, *, full_cov=False, q_sqrt=None, white=False, full_output_cov=None): """ Given f, representing the GP at the points X, produce the mean and (co-)variance of the GP at the points Xnew. Additionally, there may be Gaussian uncertainty about f as represented by q_sqrt. In this case `f` represents the mean of the distribution and q_sqrt the square-root of the covariance. Additionally, the GP may have been centered (whitened) so that p(v) = N(0, I) f = L v thus p(f) = N(0, LL^T) = N(0, K). In this case `f` represents the values taken by v. The method can either return the diagonals of the covariance matrix for each output (default) or the full covariance matrix (full_cov=True). We assume R independent GPs, represented by the columns of f (and the first dimension of q_sqrt). :param Xnew: data matrix, size N x D. Evaluate the GP at these new points :param X: data points, size M x D. :param kern: GPflow kernel. :param f: data matrix, M x R, representing the function values at X, for R functions. :param q_sqrt: matrix of standard-deviations or Cholesky matrices, size M x R or R x M x M. :param white: boolean of whether to use the whitened representation as described above. :return: - mean: N x R - variance: N x R (full_cov = False), R x N x N (full_cov = True) """ logger.debug("Conditional: Kernel") num_data = tf.shape(X)[-2] # M Kmm = kern.K(X) + tf.eye(num_data, dtype=settings.float_type) * settings.numerics.jitter_level # [..., M, M] Kmn = kern.K(X, Xnew) # [M, ..., N] if full_cov: Knn = kern.K(Xnew) # [...,N,N] else: Knn = kern.Kdiag(Xnew) # [...,N] mean, var = base_conditional(Kmn, Kmm, Knn, f, full_cov=full_cov, q_sqrt=q_sqrt, white=white) return mean, var
python
def _conditional(Xnew, X, kern, f, *, full_cov=False, q_sqrt=None, white=False, full_output_cov=None): """ Given f, representing the GP at the points X, produce the mean and (co-)variance of the GP at the points Xnew. Additionally, there may be Gaussian uncertainty about f as represented by q_sqrt. In this case `f` represents the mean of the distribution and q_sqrt the square-root of the covariance. Additionally, the GP may have been centered (whitened) so that p(v) = N(0, I) f = L v thus p(f) = N(0, LL^T) = N(0, K). In this case `f` represents the values taken by v. The method can either return the diagonals of the covariance matrix for each output (default) or the full covariance matrix (full_cov=True). We assume R independent GPs, represented by the columns of f (and the first dimension of q_sqrt). :param Xnew: data matrix, size N x D. Evaluate the GP at these new points :param X: data points, size M x D. :param kern: GPflow kernel. :param f: data matrix, M x R, representing the function values at X, for R functions. :param q_sqrt: matrix of standard-deviations or Cholesky matrices, size M x R or R x M x M. :param white: boolean of whether to use the whitened representation as described above. :return: - mean: N x R - variance: N x R (full_cov = False), R x N x N (full_cov = True) """ logger.debug("Conditional: Kernel") num_data = tf.shape(X)[-2] # M Kmm = kern.K(X) + tf.eye(num_data, dtype=settings.float_type) * settings.numerics.jitter_level # [..., M, M] Kmn = kern.K(X, Xnew) # [M, ..., N] if full_cov: Knn = kern.K(Xnew) # [...,N,N] else: Knn = kern.Kdiag(Xnew) # [...,N] mean, var = base_conditional(Kmn, Kmm, Knn, f, full_cov=full_cov, q_sqrt=q_sqrt, white=white) return mean, var
[ "def", "_conditional", "(", "Xnew", ",", "X", ",", "kern", ",", "f", ",", "*", ",", "full_cov", "=", "False", ",", "q_sqrt", "=", "None", ",", "white", "=", "False", ",", "full_output_cov", "=", "None", ")", ":", "logger", ".", "debug", "(", "\"Conditional: Kernel\"", ")", "num_data", "=", "tf", ".", "shape", "(", "X", ")", "[", "-", "2", "]", "# M", "Kmm", "=", "kern", ".", "K", "(", "X", ")", "+", "tf", ".", "eye", "(", "num_data", ",", "dtype", "=", "settings", ".", "float_type", ")", "*", "settings", ".", "numerics", ".", "jitter_level", "# [..., M, M]", "Kmn", "=", "kern", ".", "K", "(", "X", ",", "Xnew", ")", "# [M, ..., N]", "if", "full_cov", ":", "Knn", "=", "kern", ".", "K", "(", "Xnew", ")", "# [...,N,N]", "else", ":", "Knn", "=", "kern", ".", "Kdiag", "(", "Xnew", ")", "# [...,N]", "mean", ",", "var", "=", "base_conditional", "(", "Kmn", ",", "Kmm", ",", "Knn", ",", "f", ",", "full_cov", "=", "full_cov", ",", "q_sqrt", "=", "q_sqrt", ",", "white", "=", "white", ")", "return", "mean", ",", "var" ]
Given f, representing the GP at the points X, produce the mean and (co-)variance of the GP at the points Xnew. Additionally, there may be Gaussian uncertainty about f as represented by q_sqrt. In this case `f` represents the mean of the distribution and q_sqrt the square-root of the covariance. Additionally, the GP may have been centered (whitened) so that p(v) = N(0, I) f = L v thus p(f) = N(0, LL^T) = N(0, K). In this case `f` represents the values taken by v. The method can either return the diagonals of the covariance matrix for each output (default) or the full covariance matrix (full_cov=True). We assume R independent GPs, represented by the columns of f (and the first dimension of q_sqrt). :param Xnew: data matrix, size N x D. Evaluate the GP at these new points :param X: data points, size M x D. :param kern: GPflow kernel. :param f: data matrix, M x R, representing the function values at X, for R functions. :param q_sqrt: matrix of standard-deviations or Cholesky matrices, size M x R or R x M x M. :param white: boolean of whether to use the whitened representation as described above. :return: - mean: N x R - variance: N x R (full_cov = False), R x N x N (full_cov = True)
[ "Given", "f", "representing", "the", "GP", "at", "the", "points", "X", "produce", "the", "mean", "and", "(", "co", "-", ")", "variance", "of", "the", "GP", "at", "the", "points", "Xnew", "." ]
train
https://github.com/GPflow/GPflow/blob/549394f0b1b0696c7b521a065e49bdae6e7acf27/gpflow/conditionals.py#L80-L127
0.002593
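For reference, a hedged summary of what base_conditional computes here: the textbook Gaussian conditional, with the jitter already folded into K_{mm}, plus a propagated term when q_sqrt is given:

    \mu_* = K_{nm} K_{mm}^{-1} \mathbf{f}, \qquad
    \Sigma_* = K_{nn} - K_{nm} K_{mm}^{-1} K_{mn}
             + K_{nm} K_{mm}^{-1} S K_{mm}^{-1} K_{mn}, \qquad
    S = q_{\mathrm{sqrt}}\, q_{\mathrm{sqrt}}^{\top}

The S term drops out when q_sqrt is None; white=True re-expresses f as L v with K_{mm} = L L^{\top}.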
bwohlberg/sporco
sporco/admm/parcbpdn.py
par_y1step
def par_y1step(i): r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{y}_{1,G_i}`, one of the disjoint problems of optimizing :math:`\mathbf{y}_1`. Parameters ---------- i : int Index of grouping to update """ global mp_Y1 grpind = slice(mp_grp[i], mp_grp[i+1]) XU1 = mp_X[grpind] + 1/mp_alpha*mp_U1[grpind] if mp_wl1.shape[mp_axisM] == 1: gamma = mp_lmbda/(mp_alpha**2*mp_rho)*mp_wl1 else: gamma = mp_lmbda/(mp_alpha**2*mp_rho)*mp_wl1[grpind] Y1 = sp.prox_l1(XU1, gamma) if mp_NonNegCoef: Y1[Y1 < 0.0] = 0.0 if mp_NoBndryCross: for n in range(len(mp_Nv)): Y1[(slice(None),) + (slice(None),)*n + (slice(1-mp_Dshp[n], None),)] = 0.0 mp_Y1[mp_grp[i]:mp_grp[i+1]] = Y1
python
def par_y1step(i): r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{y}_{1,G_i}`, one of the disjoint problems of optimizing :math:`\mathbf{y}_1`. Parameters ---------- i : int Index of grouping to update """ global mp_Y1 grpind = slice(mp_grp[i], mp_grp[i+1]) XU1 = mp_X[grpind] + 1/mp_alpha*mp_U1[grpind] if mp_wl1.shape[mp_axisM] == 1: gamma = mp_lmbda/(mp_alpha**2*mp_rho)*mp_wl1 else: gamma = mp_lmbda/(mp_alpha**2*mp_rho)*mp_wl1[grpind] Y1 = sp.prox_l1(XU1, gamma) if mp_NonNegCoef: Y1[Y1 < 0.0] = 0.0 if mp_NoBndryCross: for n in range(len(mp_Nv)): Y1[(slice(None),) + (slice(None),)*n + (slice(1-mp_Dshp[n], None),)] = 0.0 mp_Y1[mp_grp[i]:mp_grp[i+1]] = Y1
[ "def", "par_y1step", "(", "i", ")", ":", "global", "mp_Y1", "grpind", "=", "slice", "(", "mp_grp", "[", "i", "]", ",", "mp_grp", "[", "i", "+", "1", "]", ")", "XU1", "=", "mp_X", "[", "grpind", "]", "+", "1", "/", "mp_alpha", "*", "mp_U1", "[", "grpind", "]", "if", "mp_wl1", ".", "shape", "[", "mp_axisM", "]", "is", "1", ":", "gamma", "=", "mp_lmbda", "/", "(", "mp_alpha", "**", "2", "*", "mp_rho", ")", "*", "mp_wl1", "else", ":", "gamma", "=", "mp_lmbda", "/", "(", "mp_alpha", "**", "2", "*", "mp_rho", ")", "*", "mp_wl1", "[", "grpind", "]", "Y1", "=", "sp", ".", "prox_l1", "(", "XU1", ",", "gamma", ")", "if", "mp_NonNegCoef", ":", "Y1", "[", "Y1", "<", "0.0", "]", "=", "0.0", "if", "mp_NoBndryCross", ":", "for", "n", "in", "range", "(", "len", "(", "mp_Nv", ")", ")", ":", "Y1", "[", "(", "slice", "(", "None", ")", ",", ")", "+", "(", "slice", "(", "None", ")", ",", ")", "*", "n", "+", "(", "slice", "(", "1", "-", "mp_Dshp", "[", "n", "]", ",", "None", ")", ",", ")", "]", "=", "0.0", "mp_Y1", "[", "mp_grp", "[", "i", "]", ":", "mp_grp", "[", "i", "+", "1", "]", "]", "=", "Y1" ]
r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{y}_{1,G_i}`, one of the disjoint problems of optimizing :math:`\mathbf{y}_1`. Parameters ---------- i : int Index of grouping to update
[ "r", "Minimise", "Augmented", "Lagrangian", "with", "respect", "to", ":", "math", ":", "\\", "mathbf", "{", "y", "}", "_", "{", "1", "G_i", "}", "one", "of", "the", "disjoint", "problems", "of", "optimizing", ":", "math", ":", "\\", "mathbf", "{", "y", "}", "_1", "." ]
train
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/admm/parcbpdn.py#L207-L232
0.001235
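sp.prox_l1 in the y1 update above is the elementwise soft-thresholding operator:

    \mathrm{prox}_{\gamma\|\cdot\|_1}(x) = \mathrm{sign}(x)\,\max(|x| - \gamma,\, 0)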
datajoint/datajoint-python
datajoint/heading.py
Heading.project
def project(self, attribute_list, named_attributes=None, force_primary_key=None): """ derive a new heading by selecting, renaming, or computing attributes. In relational algebra these operators are known as project, rename, and extend. :param attribute_list: the full list of existing attributes to include :param force_primary_key: attributes to force to be converted to primary :param named_attributes: dictionary of renamed attributes """ try: # check for missing attributes raise DataJointError('Attribute `%s` is not found' % next(a for a in attribute_list if a not in self.names)) except StopIteration: if named_attributes is None: named_attributes = {} if force_primary_key is None: force_primary_key = set() rename_map = {v: k for k, v in named_attributes.items() if v in self.attributes} # copied and renamed attributes copy_attrs = (dict(self.attributes[k].todict(), in_key=self.attributes[k].in_key or k in force_primary_key, **({'name': rename_map[k], 'sql_expression': '`%s`' % k} if k in rename_map else {})) for k in self.attributes if k in rename_map or k in attribute_list) compute_attrs = (dict(default_attribute_properties, name=new_name, sql_expression=expr) for new_name, expr in named_attributes.items() if expr not in rename_map) return Heading(chain(copy_attrs, compute_attrs))
python
def project(self, attribute_list, named_attributes=None, force_primary_key=None): """ derive a new heading by selecting, renaming, or computing attributes. In relational algebra these operators are known as project, rename, and extend. :param attribute_list: the full list of existing attributes to include :param force_primary_key: attributes to force to be converted to primary :param named_attributes: dictionary of renamed attributes """ try: # check for missing attributes raise DataJointError('Attribute `%s` is not found' % next(a for a in attribute_list if a not in self.names)) except StopIteration: if named_attributes is None: named_attributes = {} if force_primary_key is None: force_primary_key = set() rename_map = {v: k for k, v in named_attributes.items() if v in self.attributes} # copied and renamed attributes copy_attrs = (dict(self.attributes[k].todict(), in_key=self.attributes[k].in_key or k in force_primary_key, **({'name': rename_map[k], 'sql_expression': '`%s`' % k} if k in rename_map else {})) for k in self.attributes if k in rename_map or k in attribute_list) compute_attrs = (dict(default_attribute_properties, name=new_name, sql_expression=expr) for new_name, expr in named_attributes.items() if expr not in rename_map) return Heading(chain(copy_attrs, compute_attrs))
[ "def", "project", "(", "self", ",", "attribute_list", ",", "named_attributes", "=", "None", ",", "force_primary_key", "=", "None", ")", ":", "try", ":", "# check for missing attributes", "raise", "DataJointError", "(", "'Attribute `%s` is not found'", "%", "next", "(", "a", "for", "a", "in", "attribute_list", "if", "a", "not", "in", "self", ".", "names", ")", ")", "except", "StopIteration", ":", "if", "named_attributes", "is", "None", ":", "named_attributes", "=", "{", "}", "if", "force_primary_key", "is", "None", ":", "force_primary_key", "=", "set", "(", ")", "rename_map", "=", "{", "v", ":", "k", "for", "k", ",", "v", "in", "named_attributes", ".", "items", "(", ")", "if", "v", "in", "self", ".", "attributes", "}", "# copied and renamed attributes", "copy_attrs", "=", "(", "dict", "(", "self", ".", "attributes", "[", "k", "]", ".", "todict", "(", ")", ",", "in_key", "=", "self", ".", "attributes", "[", "k", "]", ".", "in_key", "or", "k", "in", "force_primary_key", ",", "*", "*", "(", "{", "'name'", ":", "rename_map", "[", "k", "]", ",", "'sql_expression'", ":", "'`%s`'", "%", "k", "}", "if", "k", "in", "rename_map", "else", "{", "}", ")", ")", "for", "k", "in", "self", ".", "attributes", "if", "k", "in", "rename_map", "or", "k", "in", "attribute_list", ")", "compute_attrs", "=", "(", "dict", "(", "default_attribute_properties", ",", "name", "=", "new_name", ",", "sql_expression", "=", "expr", ")", "for", "new_name", ",", "expr", "in", "named_attributes", ".", "items", "(", ")", "if", "expr", "not", "in", "rename_map", ")", "return", "Heading", "(", "chain", "(", "copy_attrs", ",", "compute_attrs", ")", ")" ]
derive a new heading by selecting, renaming, or computing attributes.
In relational algebra these operators are known as project, rename, and extend.
:param attribute_list: the full list of existing attributes to include
:param force_primary_key: attributes to force to be converted to primary
:param named_attributes: dictionary of renamed attributes
[ "derive", "a", "new", "heading", "by", "selecting", "renaming", "or", "computing", "attributes", ".", "In", "relational", "algebra", "these", "operators", "are", "known", "as", "project", "rename", "and", "extend", ".", ":", "param", "attribute_list", ":", "the", "full", "list", "of", "existing", "attributes", "to", "include", ":", "param", "force_primary_key", ":", "attributes", "to", "force", "to", "be", "converted", "to", "primary", ":", "param", "named_attributes", ":", "dictionary", "of", "renamed", "attributes" ]
train
https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/heading.py#L244-L269
0.007389
google/grr
grr/server/grr_response_server/data_stores/common.py
DatabaseDirectorySize
def DatabaseDirectorySize(root_path, extension):
  """Compute size (in bytes) and number of files of a file-based data store."""
  directories = collections.deque([root_path])
  total_size = 0
  total_files = 0
  while directories:
    directory = directories.popleft()
    try:
      items = os.listdir(directory)
    except OSError:
      continue
    for comp in items:
      path = os.path.join(directory, comp)
      try:
        statinfo = os.lstat(path)
        if stat.S_ISLNK(statinfo.st_mode):
          continue
        if stat.S_ISDIR(statinfo.st_mode):
          directories.append(path)
        elif stat.S_ISREG(statinfo.st_mode):
          if comp.endswith(extension):
            total_size += statinfo.st_size
            total_files += 1
      except OSError:
        continue
  return total_size, total_files
python
def DatabaseDirectorySize(root_path, extension):
  """Compute size (in bytes) and number of files of a file-based data store."""
  directories = collections.deque([root_path])
  total_size = 0
  total_files = 0
  while directories:
    directory = directories.popleft()
    try:
      items = os.listdir(directory)
    except OSError:
      continue
    for comp in items:
      path = os.path.join(directory, comp)
      try:
        statinfo = os.lstat(path)
        if stat.S_ISLNK(statinfo.st_mode):
          continue
        if stat.S_ISDIR(statinfo.st_mode):
          directories.append(path)
        elif stat.S_ISREG(statinfo.st_mode):
          if comp.endswith(extension):
            total_size += statinfo.st_size
            total_files += 1
      except OSError:
        continue
  return total_size, total_files
[ "def", "DatabaseDirectorySize", "(", "root_path", ",", "extension", ")", ":", "directories", "=", "collections", ".", "deque", "(", "[", "root_path", "]", ")", "total_size", "=", "0", "total_files", "=", "0", "while", "directories", ":", "directory", "=", "directories", ".", "popleft", "(", ")", "try", ":", "items", "=", "os", ".", "listdir", "(", "directory", ")", "except", "OSError", ":", "continue", "for", "comp", "in", "items", ":", "path", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "comp", ")", "try", ":", "statinfo", "=", "os", ".", "lstat", "(", "path", ")", "if", "stat", ".", "S_ISLNK", "(", "statinfo", ".", "st_mode", ")", ":", "continue", "if", "stat", ".", "S_ISDIR", "(", "statinfo", ".", "st_mode", ")", ":", "directories", ".", "append", "(", "path", ")", "elif", "stat", ".", "S_ISREG", "(", "statinfo", ".", "st_mode", ")", ":", "if", "comp", ".", "endswith", "(", "extension", ")", ":", "total_size", "+=", "statinfo", ".", "st_size", "total_files", "+=", "1", "except", "OSError", ":", "continue", "return", "total_size", ",", "total_files" ]
Compute size (in bytes) and number of files of a file-based data store.
[ "Compute", "size", "(", "in", "bytes", ")", "and", "number", "of", "files", "of", "a", "file", "-", "based", "data", "store", "." ]
train
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/data_stores/common.py#L73-L98
0.018116
openstack/networking-cisco
networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py
FabricBase.get_key_state
def get_key_state(self, status, state_dict):
    """Returns the key associated with the dict. """
    for key, val in state_dict.items():
        if val == status:
            return key
python
def get_key_state(self, status, state_dict):
    """Returns the key associated with the dict. """
    for key, val in state_dict.items():
        if val == status:
            return key
[ "def", "get_key_state", "(", "self", ",", "status", ",", "state_dict", ")", ":", "for", "key", ",", "val", "in", "state_dict", ".", "items", "(", ")", ":", "if", "val", "==", "status", ":", "return", "key" ]
Returns the key associated with the dict.
[ "Returns", "the", "key", "associated", "with", "the", "dict", "." ]
train
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py#L1559-L1563
0.009901
castelao/oceansdb
oceansdb/cars.py
cars_profile
def cars_profile(filename, doy, latitude, longitude, depth):
    """ For now only the nearest value
        For now only for one position, not an array of positions
        longitude 0-360
    """
    assert np.size(doy) == 1
    assert np.size(latitude) == 1
    assert np.size(longitude) == 1
    #assert np.size(depth) == 1

    assert (longitude >= 0) & (longitude <= 360)
    assert depth >= 0

    nc = netCDF4.Dataset(filename)

    t = 2 * np.pi * doy/366

    # Improve this. Optimize to get only necessary Z
    Z = slice(0, nc['depth'].size)
    I = np.absolute(nc['lat'][:] - latitude).argmin()
    J = np.absolute(nc['lon'][:] - longitude).argmin()

    # Not efficient, but it works
    assert (nc['depth'][:64] == nc['depth_ann'][:]).all()
    assert (nc['depth'][:55] == nc['depth_semiann'][:]).all()

    value = nc['mean'][:, I, J]
    value[:64] += nc['an_cos'][Z, I, J] * np.cos(t) + \
        nc['an_sin'][:, I, J] * np.sin(t)
    value[:55] += nc['sa_cos'][Z, I, J] * np.cos(2*t) + \
        nc['sa_sin'][:, I, J] * np.sin(2*t)
    value = value

    output = {'depth': np.asanyarray(depth)}
    from scipy.interpolate import griddata
    output['value'] = griddata(nc['depth'][Z], value[Z], depth)

    for v in ['std_dev']:
        output[v] = griddata(nc['depth'][Z], nc[v][Z, I, J], depth)

    return output
python
def cars_profile(filename, doy, latitude, longitude, depth):
    """ For now only the nearest value
        For now only for one position, not an array of positions
        longitude 0-360
    """
    assert np.size(doy) == 1
    assert np.size(latitude) == 1
    assert np.size(longitude) == 1
    #assert np.size(depth) == 1

    assert (longitude >= 0) & (longitude <= 360)
    assert depth >= 0

    nc = netCDF4.Dataset(filename)

    t = 2 * np.pi * doy/366

    # Improve this. Optimize to get only necessary Z
    Z = slice(0, nc['depth'].size)
    I = np.absolute(nc['lat'][:] - latitude).argmin()
    J = np.absolute(nc['lon'][:] - longitude).argmin()

    # Not efficient, but it works
    assert (nc['depth'][:64] == nc['depth_ann'][:]).all()
    assert (nc['depth'][:55] == nc['depth_semiann'][:]).all()

    value = nc['mean'][:, I, J]
    value[:64] += nc['an_cos'][Z, I, J] * np.cos(t) + \
        nc['an_sin'][:, I, J] * np.sin(t)
    value[:55] += nc['sa_cos'][Z, I, J] * np.cos(2*t) + \
        nc['sa_sin'][:, I, J] * np.sin(2*t)
    value = value

    output = {'depth': np.asanyarray(depth)}
    from scipy.interpolate import griddata
    output['value'] = griddata(nc['depth'][Z], value[Z], depth)

    for v in ['std_dev']:
        output[v] = griddata(nc['depth'][Z], nc[v][Z, I, J], depth)

    return output
[ "def", "cars_profile", "(", "filename", ",", "doy", ",", "latitude", ",", "longitude", ",", "depth", ")", ":", "assert", "np", ".", "size", "(", "doy", ")", "==", "1", "assert", "np", ".", "size", "(", "latitude", ")", "==", "1", "assert", "np", ".", "size", "(", "longitude", ")", "==", "1", "#assert np.size(depth) == 1", "assert", "(", "longitude", ">=", "0", ")", "&", "(", "longitude", "<=", "360", ")", "assert", "depth", ">=", "0", "nc", "=", "netCDF4", ".", "Dataset", "(", "filename", ")", "t", "=", "2", "*", "np", ".", "pi", "*", "doy", "/", "366", "# Improve this. Optimize to get only necessary Z", "Z", "=", "slice", "(", "0", ",", "nc", "[", "'depth'", "]", ".", "size", ")", "I", "=", "np", ".", "absolute", "(", "nc", "[", "'lat'", "]", "[", ":", "]", "-", "latitude", ")", ".", "argmin", "(", ")", "J", "=", "np", ".", "absolute", "(", "nc", "[", "'lon'", "]", "[", ":", "]", "-", "longitude", ")", ".", "argmin", "(", ")", "# Not efficient, but it works", "assert", "(", "nc", "[", "'depth'", "]", "[", ":", "64", "]", "==", "nc", "[", "'depth_ann'", "]", "[", ":", "]", ")", ".", "all", "(", ")", "assert", "(", "nc", "[", "'depth'", "]", "[", ":", "55", "]", "==", "nc", "[", "'depth_semiann'", "]", "[", ":", "]", ")", ".", "all", "(", ")", "value", "=", "nc", "[", "'mean'", "]", "[", ":", ",", "I", ",", "J", "]", "value", "[", ":", "64", "]", "+=", "nc", "[", "'an_cos'", "]", "[", "Z", ",", "I", ",", "J", "]", "*", "np", ".", "cos", "(", "t", ")", "+", "nc", "[", "'an_sin'", "]", "[", ":", ",", "I", ",", "J", "]", "*", "np", ".", "sin", "(", "t", ")", "value", "[", ":", "55", "]", "+=", "nc", "[", "'sa_cos'", "]", "[", "Z", ",", "I", ",", "J", "]", "*", "np", ".", "cos", "(", "2", "*", "t", ")", "+", "nc", "[", "'sa_sin'", "]", "[", ":", ",", "I", ",", "J", "]", "*", "np", ".", "sin", "(", "2", "*", "t", ")", "value", "=", "value", "output", "=", "{", "'depth'", ":", "np", ".", "asanyarray", "(", "depth", ")", "}", "from", "scipy", ".", "interpolate", "import", "griddata", "output", "[", "'value'", "]", "=", "griddata", "(", "nc", "[", "'depth'", "]", "[", "Z", "]", ",", "value", "[", "Z", "]", ",", "depth", ")", "for", "v", "in", "[", "'std_dev'", "]", ":", "output", "[", "v", "]", "=", "griddata", "(", "nc", "[", "'depth'", "]", "[", "Z", "]", ",", "nc", "[", "v", "]", "[", "Z", ",", "I", ",", "J", "]", ",", "depth", ")", "return", "output" ]
For now only the nearest value
For now only for one position, not an array of positions
longitude 0-360
[ "For", "now", "only", "the", "nearest", "value", "For", "now", "only", "for", "one", "position", "not", "an", "array", "of", "positions", "longitude", "0", "-", "360" ]
train
https://github.com/castelao/oceansdb/blob/a154c5b845845a602800f9bc53d1702d4cb0f9c5/oceansdb/cars.py#L66-L106
0.003706
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/classtracker.py
ClassTracker.track_change
def track_change(self, instance, resolution_level=0):
    """
    Change tracking options for the already tracked object 'instance'.
    If instance is not tracked, a KeyError will be raised.
    """
    tobj = self.objects[id(instance)]
    tobj.set_resolution_level(resolution_level)
python
def track_change(self, instance, resolution_level=0):
    """
    Change tracking options for the already tracked object 'instance'.
    If instance is not tracked, a KeyError will be raised.
    """
    tobj = self.objects[id(instance)]
    tobj.set_resolution_level(resolution_level)
[ "def", "track_change", "(", "self", ",", "instance", ",", "resolution_level", "=", "0", ")", ":", "tobj", "=", "self", ".", "objects", "[", "id", "(", "instance", ")", "]", "tobj", ".", "set_resolution_level", "(", "resolution_level", ")" ]
Change tracking options for the already tracked object 'instance'.
If instance is not tracked, a KeyError will be raised.
[ "Change", "tracking", "options", "for", "the", "already", "tracked", "object", "instance", ".", "If", "instance", "is", "not", "tracked", "a", "KeyError", "will", "be", "raised", "." ]
train
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker.py#L349-L355
0.006472
maljovec/topopy
topopy/MorseSmaleComplex.py
MorseSmaleComplex.save
def save(self, filename=None):
    """ Saves a constructed Morse-Smale Complex in json file
        @ In, filename, a filename for storing the hierarchical
        merging of features and the base level partitions of the
        data
    """
    if filename is None:
        filename = "morse_smale_complex.json"
    with open(filename, "w") as fp:
        fp.write(self.to_json())
python
def save(self, filename=None):
    """ Saves a constructed Morse-Smale Complex in json file
        @ In, filename, a filename for storing the hierarchical
        merging of features and the base level partitions of the
        data
    """
    if filename is None:
        filename = "morse_smale_complex.json"
    with open(filename, "w") as fp:
        fp.write(self.to_json())
[ "def", "save", "(", "self", ",", "filename", "=", "None", ")", ":", "if", "filename", "is", "None", ":", "filename", "=", "\"morse_smale_complex.json\"", "with", "open", "(", "filename", ",", "\"w\"", ")", "as", "fp", ":", "fp", ".", "write", "(", "self", ".", "to_json", "(", ")", ")" ]
Saves a constructed Morse-Smale Complex in json file

@ In, filename, a filename for storing the hierarchical merging of
features and the base level partitions of the data
[ "Saves", "a", "constructed", "Morse", "-", "Smale", "Complex", "in", "json", "file" ]
train
https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/topopy/MorseSmaleComplex.py#L159-L168
0.004796
bukun/TorCMS
helper_scripts/script_meta_xlsx_import.py
update_category
def update_category(uid, postdata, kwargs):
    '''
    Update the category of the post.
    '''
    catid = kwargs['catid'] if ('catid' in kwargs and MCategory.get_by_uid(kwargs['catid'])) else None
    post_data = postdata

    current_infos = MPost2Catalog.query_by_entity_uid(uid, kind='').objects()

    new_category_arr = []  # Used to update post2category, to keep order.
    def_cate_arr = ['gcat{0}'.format(x) for x in range(10)]

    # for old page.
    def_cate_arr.append('def_cat_uid')

    # Used to update post extinfo.
    cat_dic = {}
    for key in def_cate_arr:
        if key not in post_data:
            continue
        if post_data[key] == '' or post_data[key] == '0':
            continue
        # 有可能选重复了。保留前面的
        if post_data[key] in new_category_arr:
            continue

        new_category_arr.append(post_data[key] + ' ' * (4 - len(post_data[key])))
        cat_dic[key] = post_data[key] + ' ' * (4 - len(post_data[key]))

    if catid:
        def_cat_id = catid
    elif new_category_arr:
        def_cat_id = new_category_arr[0]
    else:
        def_cat_id = None

    if def_cat_id:
        cat_dic['def_cat_uid'] = def_cat_id
        cat_dic['def_cat_pid'] = MCategory.get_by_uid(def_cat_id).pid

    print('=' * 40)
    print(uid)
    print(cat_dic)

    MPost.update_jsonb(uid, cat_dic)

    for index, catid in enumerate(new_category_arr):
        MPost2Catalog.add_record(uid, catid, index)

    # Delete the old category if not in post requests.
    for cur_info in current_infos:
        if cur_info.tag_id not in new_category_arr:
            MPost2Catalog.remove_relation(uid, cur_info.tag_id)
python
def update_category(uid, postdata, kwargs):
    '''
    Update the category of the post.
    '''
    catid = kwargs['catid'] if ('catid' in kwargs and MCategory.get_by_uid(kwargs['catid'])) else None
    post_data = postdata

    current_infos = MPost2Catalog.query_by_entity_uid(uid, kind='').objects()

    new_category_arr = []  # Used to update post2category, to keep order.
    def_cate_arr = ['gcat{0}'.format(x) for x in range(10)]

    # for old page.
    def_cate_arr.append('def_cat_uid')

    # Used to update post extinfo.
    cat_dic = {}
    for key in def_cate_arr:
        if key not in post_data:
            continue
        if post_data[key] == '' or post_data[key] == '0':
            continue
        # 有可能选重复了。保留前面的
        if post_data[key] in new_category_arr:
            continue

        new_category_arr.append(post_data[key] + ' ' * (4 - len(post_data[key])))
        cat_dic[key] = post_data[key] + ' ' * (4 - len(post_data[key]))

    if catid:
        def_cat_id = catid
    elif new_category_arr:
        def_cat_id = new_category_arr[0]
    else:
        def_cat_id = None

    if def_cat_id:
        cat_dic['def_cat_uid'] = def_cat_id
        cat_dic['def_cat_pid'] = MCategory.get_by_uid(def_cat_id).pid

    print('=' * 40)
    print(uid)
    print(cat_dic)

    MPost.update_jsonb(uid, cat_dic)

    for index, catid in enumerate(new_category_arr):
        MPost2Catalog.add_record(uid, catid, index)

    # Delete the old category if not in post requests.
    for cur_info in current_infos:
        if cur_info.tag_id not in new_category_arr:
            MPost2Catalog.remove_relation(uid, cur_info.tag_id)
[ "def", "update_category", "(", "uid", ",", "postdata", ",", "kwargs", ")", ":", "catid", "=", "kwargs", "[", "'catid'", "]", "if", "(", "'catid'", "in", "kwargs", "and", "MCategory", ".", "get_by_uid", "(", "kwargs", "[", "'catid'", "]", ")", ")", "else", "None", "post_data", "=", "postdata", "current_infos", "=", "MPost2Catalog", ".", "query_by_entity_uid", "(", "uid", ",", "kind", "=", "''", ")", ".", "objects", "(", ")", "new_category_arr", "=", "[", "]", "# Used to update post2category, to keep order.", "def_cate_arr", "=", "[", "'gcat{0}'", ".", "format", "(", "x", ")", "for", "x", "in", "range", "(", "10", ")", "]", "# for old page.", "def_cate_arr", ".", "append", "(", "'def_cat_uid'", ")", "# Used to update post extinfo.", "cat_dic", "=", "{", "}", "for", "key", "in", "def_cate_arr", ":", "if", "key", "not", "in", "post_data", ":", "continue", "if", "post_data", "[", "key", "]", "==", "''", "or", "post_data", "[", "key", "]", "==", "'0'", ":", "continue", "# 有可能选重复了。保留前面的", "if", "post_data", "[", "key", "]", "in", "new_category_arr", ":", "continue", "new_category_arr", ".", "append", "(", "post_data", "[", "key", "]", "+", "' '", "*", "(", "4", "-", "len", "(", "post_data", "[", "key", "]", ")", ")", ")", "cat_dic", "[", "key", "]", "=", "post_data", "[", "key", "]", "+", "' '", "*", "(", "4", "-", "len", "(", "post_data", "[", "key", "]", ")", ")", "if", "catid", ":", "def_cat_id", "=", "catid", "elif", "new_category_arr", ":", "def_cat_id", "=", "new_category_arr", "[", "0", "]", "else", ":", "def_cat_id", "=", "None", "if", "def_cat_id", ":", "cat_dic", "[", "'def_cat_uid'", "]", "=", "def_cat_id", "cat_dic", "[", "'def_cat_pid'", "]", "=", "MCategory", ".", "get_by_uid", "(", "def_cat_id", ")", ".", "pid", "print", "(", "'='", "*", "40", ")", "print", "(", "uid", ")", "print", "(", "cat_dic", ")", "MPost", ".", "update_jsonb", "(", "uid", ",", "cat_dic", ")", "for", "index", ",", "catid", "in", "enumerate", "(", "new_category_arr", ")", ":", "MPost2Catalog", ".", "add_record", "(", "uid", ",", "catid", ",", "index", ")", "# Delete the old category if not in post requests.", "for", "cur_info", "in", "current_infos", ":", "if", "cur_info", ".", "tag_id", "not", "in", "new_category_arr", ":", "MPost2Catalog", ".", "remove_relation", "(", "uid", ",", "cur_info", ".", "tag_id", ")" ]
Update the category of the post.
[ "Update", "the", "category", "of", "the", "post", "." ]
train
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/helper_scripts/script_meta_xlsx_import.py#L19-L72
0.001819
log2timeline/dfdatetime
dfdatetime/posix_time.py
PosixTime._GetNormalizedTimestamp
def _GetNormalizedTimestamp(self):
  """Retrieves the normalized timestamp.

  Returns:
    decimal.Decimal: normalized timestamp, which contains the number of
        seconds since January 1, 1970 00:00:00 and a fraction of second
        used for increased precision, or None if the normalized timestamp
        cannot be determined.
  """
  if self._normalized_timestamp is None:
    if self._timestamp is not None:
      self._normalized_timestamp = decimal.Decimal(self._timestamp)

  return self._normalized_timestamp
python
def _GetNormalizedTimestamp(self):
  """Retrieves the normalized timestamp.

  Returns:
    decimal.Decimal: normalized timestamp, which contains the number of
        seconds since January 1, 1970 00:00:00 and a fraction of second
        used for increased precision, or None if the normalized timestamp
        cannot be determined.
  """
  if self._normalized_timestamp is None:
    if self._timestamp is not None:
      self._normalized_timestamp = decimal.Decimal(self._timestamp)

  return self._normalized_timestamp
[ "def", "_GetNormalizedTimestamp", "(", "self", ")", ":", "if", "self", ".", "_normalized_timestamp", "is", "None", ":", "if", "self", ".", "_timestamp", "is", "not", "None", ":", "self", ".", "_normalized_timestamp", "=", "decimal", ".", "Decimal", "(", "self", ".", "_timestamp", ")", "return", "self", ".", "_normalized_timestamp" ]
Retrieves the normalized timestamp.

Returns:
  decimal.Decimal: normalized timestamp, which contains the number of
      seconds since January 1, 1970 00:00:00 and a fraction of second
      used for increased precision, or None if the normalized timestamp
      cannot be determined.
[ "Retrieves", "the", "normalized", "timestamp", "." ]
train
https://github.com/log2timeline/dfdatetime/blob/141ca4ef1eff3d354b5deaac3d81cb08506f98d6/dfdatetime/posix_time.py#L51-L64
0.005505
JustinLovinger/optimal
optimal/optimize.py
_meta_fitness_func
def _meta_fitness_func(parameters,
                       _optimizer,
                       _problems,
                       _master_fitness_dict,
                       _runs=20):
    """Test a metaheuristic with parameters encoded in solution.

    Our goal is to minimize number of evaluation runs until a solution is found,
    while maximizing chance of finding solution to the underlying problem
    NOTE: while meta optimization requires a 'known' solution, this solution
    can be an estimate to provide the meta optimizer with a sense of progress.
    """
    # Create the optimizer with parameters encoded in solution
    optimizer = copy.deepcopy(_optimizer)
    optimizer._set_hyperparameters(parameters)
    optimizer.logging = False

    # Preload fitness dictionary from master, and disable clearing dict
    # NOTE: master_fitness_dict will be modified inline, and therefore,
    # we do not need to take additional steps to update it
    if _master_fitness_dict != None:  # None means low memory mode
        optimizer.clear_cache = False
        optimizer._Optimizer__encoded_cache = _master_fitness_dict

    # Because metaheuristics are stochastic, we run the optimizer multiple times,
    # to obtain an average of performance
    all_evaluation_runs = []
    solutions_found = []
    for _ in range(_runs):
        for problem in _problems:
            # Get performance for problem
            optimizer.optimize(problem)
            all_evaluation_runs.append(optimizer.fitness_runs)
            if optimizer.solution_found:
                solutions_found.append(1.0)
            else:
                solutions_found.append(0.0)

    # Our main goal is to minimize time the optimizer takes
    fitness = 1.0 / helpers.avg(all_evaluation_runs)

    # Optimizer is heavily penalized for missing solutions
    # To avoid 0 fitness
    fitness = fitness * helpers.avg(solutions_found)**2 + 1e-19

    return fitness
python
def _meta_fitness_func(parameters,
                       _optimizer,
                       _problems,
                       _master_fitness_dict,
                       _runs=20):
    """Test a metaheuristic with parameters encoded in solution.

    Our goal is to minimize number of evaluation runs until a solution is found,
    while maximizing chance of finding solution to the underlying problem
    NOTE: while meta optimization requires a 'known' solution, this solution
    can be an estimate to provide the meta optimizer with a sense of progress.
    """
    # Create the optimizer with parameters encoded in solution
    optimizer = copy.deepcopy(_optimizer)
    optimizer._set_hyperparameters(parameters)
    optimizer.logging = False

    # Preload fitness dictionary from master, and disable clearing dict
    # NOTE: master_fitness_dict will be modified inline, and therefore,
    # we do not need to take additional steps to update it
    if _master_fitness_dict != None:  # None means low memory mode
        optimizer.clear_cache = False
        optimizer._Optimizer__encoded_cache = _master_fitness_dict

    # Because metaheuristics are stochastic, we run the optimizer multiple times,
    # to obtain an average of performance
    all_evaluation_runs = []
    solutions_found = []
    for _ in range(_runs):
        for problem in _problems:
            # Get performance for problem
            optimizer.optimize(problem)
            all_evaluation_runs.append(optimizer.fitness_runs)
            if optimizer.solution_found:
                solutions_found.append(1.0)
            else:
                solutions_found.append(0.0)

    # Our main goal is to minimize time the optimizer takes
    fitness = 1.0 / helpers.avg(all_evaluation_runs)

    # Optimizer is heavily penalized for missing solutions
    # To avoid 0 fitness
    fitness = fitness * helpers.avg(solutions_found)**2 + 1e-19

    return fitness
[ "def", "_meta_fitness_func", "(", "parameters", ",", "_optimizer", ",", "_problems", ",", "_master_fitness_dict", ",", "_runs", "=", "20", ")", ":", "# Create the optimizer with parameters encoded in solution", "optimizer", "=", "copy", ".", "deepcopy", "(", "_optimizer", ")", "optimizer", ".", "_set_hyperparameters", "(", "parameters", ")", "optimizer", ".", "logging", "=", "False", "# Preload fitness dictionary from master, and disable clearing dict", "# NOTE: master_fitness_dict will be modified inline, and therefore,", "# we do not need to take additional steps to update it", "if", "_master_fitness_dict", "!=", "None", ":", "# None means low memory mode", "optimizer", ".", "clear_cache", "=", "False", "optimizer", ".", "_Optimizer__encoded_cache", "=", "_master_fitness_dict", "# Because metaheuristics are stochastic, we run the optimizer multiple times,", "# to obtain an average of performance", "all_evaluation_runs", "=", "[", "]", "solutions_found", "=", "[", "]", "for", "_", "in", "range", "(", "_runs", ")", ":", "for", "problem", "in", "_problems", ":", "# Get performance for problem", "optimizer", ".", "optimize", "(", "problem", ")", "all_evaluation_runs", ".", "append", "(", "optimizer", ".", "fitness_runs", ")", "if", "optimizer", ".", "solution_found", ":", "solutions_found", ".", "append", "(", "1.0", ")", "else", ":", "solutions_found", ".", "append", "(", "0.0", ")", "# Our main goal is to minimize time the optimizer takes", "fitness", "=", "1.0", "/", "helpers", ".", "avg", "(", "all_evaluation_runs", ")", "# Optimizer is heavily penalized for missing solutions", "# To avoid 0 fitness", "fitness", "=", "fitness", "*", "helpers", ".", "avg", "(", "solutions_found", ")", "**", "2", "+", "1e-19", "return", "fitness" ]
Test a metaheuristic with parameters encoded in solution.

Our goal is to minimize number of evaluation runs until a solution is found,
while maximizing chance of finding solution to the underlying problem
NOTE: while meta optimization requires a 'known' solution, this solution
can be an estimate to provide the meta optimizer with a sense of progress.
[ "Test", "a", "metaheuristic", "with", "parameters", "encoded", "in", "solution", "." ]
train
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/optimize.py#L841-L886
0.002062
lextoumbourou/txstripe
txstripe/resource.py
APIResource.retrieve
def retrieve(cls, id, api_key=None, **params):
    """Return a deferred."""
    instance = cls(id, api_key, **params)
    d = instance.refresh()
    return d.addCallback(lambda _: instance)
python
def retrieve(cls, id, api_key=None, **params):
    """Return a deferred."""
    instance = cls(id, api_key, **params)
    d = instance.refresh()
    return d.addCallback(lambda _: instance)
[ "def", "retrieve", "(", "cls", ",", "id", ",", "api_key", "=", "None", ",", "*", "*", "params", ")", ":", "instance", "=", "cls", "(", "id", ",", "api_key", ",", "*", "*", "params", ")", "d", "=", "instance", ".", "refresh", "(", ")", "return", "d", ".", "addCallback", "(", "lambda", "_", ":", "instance", ")" ]
Return a deferred.
[ "Return", "a", "deferred", "." ]
train
https://github.com/lextoumbourou/txstripe/blob/a69e67f524258026fd1840655a0578311bba3b89/txstripe/resource.py#L132-L136
0.009756
ska-sa/hypercube
hypercube/util/__init__.py
array_bytes
def array_bytes(array):
    """ Estimates the memory of the supplied array in bytes """
    return np.product(array.shape)*np.dtype(array.dtype).itemsize
python
def array_bytes(array):
    """ Estimates the memory of the supplied array in bytes """
    return np.product(array.shape)*np.dtype(array.dtype).itemsize
[ "def", "array_bytes", "(", "array", ")", ":", "return", "np", ".", "product", "(", "array", ".", "shape", ")", "*", "np", ".", "dtype", "(", "array", ".", "dtype", ")", ".", "itemsize" ]
Estimates the memory of the supplied array in bytes
[ "Estimates", "the", "memory", "of", "the", "supplied", "array", "in", "bytes" ]
train
https://github.com/ska-sa/hypercube/blob/6564a9e65ccd9ed7e7a71bd643f183e1ec645b29/hypercube/util/__init__.py#L26-L28
0.006536
ska-sa/katcp-python
katcp/core.py
MessageParser._unescape_match
def _unescape_match(self, match):
    """Given an re.Match, unescape the escape code it represents."""
    char = match.group(1)
    if char in self.ESCAPE_LOOKUP:
        return self.ESCAPE_LOOKUP[char]
    elif not char:
        raise KatcpSyntaxError("Escape slash at end of argument.")
    else:
        raise KatcpSyntaxError("Invalid escape character %r." % (char,))
python
def _unescape_match(self, match):
    """Given an re.Match, unescape the escape code it represents."""
    char = match.group(1)
    if char in self.ESCAPE_LOOKUP:
        return self.ESCAPE_LOOKUP[char]
    elif not char:
        raise KatcpSyntaxError("Escape slash at end of argument.")
    else:
        raise KatcpSyntaxError("Invalid escape character %r." % (char,))
[ "def", "_unescape_match", "(", "self", ",", "match", ")", ":", "char", "=", "match", ".", "group", "(", "1", ")", "if", "char", "in", "self", ".", "ESCAPE_LOOKUP", ":", "return", "self", ".", "ESCAPE_LOOKUP", "[", "char", "]", "elif", "not", "char", ":", "raise", "KatcpSyntaxError", "(", "\"Escape slash at end of argument.\"", ")", "else", ":", "raise", "KatcpSyntaxError", "(", "\"Invalid escape character %r.\"", "%", "(", "char", ",", ")", ")" ]
Given an re.Match, unescape the escape code it represents.
[ "Given", "an", "re", ".", "Match", "unescape", "the", "escape", "code", "it", "represents", "." ]
train
https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/core.py#L509-L517
0.00495
objectrocket/python-client
objectrocket/acls.py
Acls.create
def create(self, instance, cidr_mask, description, **kwargs):
    """Create an ACL entry for the specified instance.

    :param str instance: The name of the instance to associate the new ACL
        entry with.
    :param str cidr_mask: The IPv4 CIDR mask for the new ACL entry.
    :param str description: A short description for the new ACL entry.
    :param collector kwargs: (optional) Additional key=value pairs to be
        supplied to the creation payload. **Caution:** fields unrecognized
        by the API will cause this request to fail with a 400 from the API.
    """
    # Build up request data.
    url = self._url.format(instance=instance)
    request_data = {
        'cidr_mask': cidr_mask,
        'description': description
    }
    request_data.update(kwargs)

    # Call to create an instance.
    response = requests.post(
        url,
        data=json.dumps(request_data),
        **self._default_request_kwargs
    )

    # Log outcome of instance creation request.
    if response.status_code == 200:
        logger.info('Successfully created a new ACL for instance {} with: {}.'
                    .format(instance, request_data))
    else:
        logger.info('Failed to create a new ACL for instance {} with: {}.'
                    .format(instance, request_data))

    data = self._get_response_data(response)
    return self._concrete_acl(data)
python
def create(self, instance, cidr_mask, description, **kwargs):
    """Create an ACL entry for the specified instance.

    :param str instance: The name of the instance to associate the new ACL
        entry with.
    :param str cidr_mask: The IPv4 CIDR mask for the new ACL entry.
    :param str description: A short description for the new ACL entry.
    :param collector kwargs: (optional) Additional key=value pairs to be
        supplied to the creation payload. **Caution:** fields unrecognized
        by the API will cause this request to fail with a 400 from the API.
    """
    # Build up request data.
    url = self._url.format(instance=instance)
    request_data = {
        'cidr_mask': cidr_mask,
        'description': description
    }
    request_data.update(kwargs)

    # Call to create an instance.
    response = requests.post(
        url,
        data=json.dumps(request_data),
        **self._default_request_kwargs
    )

    # Log outcome of instance creation request.
    if response.status_code == 200:
        logger.info('Successfully created a new ACL for instance {} with: {}.'
                    .format(instance, request_data))
    else:
        logger.info('Failed to create a new ACL for instance {} with: {}.'
                    .format(instance, request_data))

    data = self._get_response_data(response)
    return self._concrete_acl(data)
[ "def", "create", "(", "self", ",", "instance", ",", "cidr_mask", ",", "description", ",", "*", "*", "kwargs", ")", ":", "# Build up request data.", "url", "=", "self", ".", "_url", ".", "format", "(", "instance", "=", "instance", ")", "request_data", "=", "{", "'cidr_mask'", ":", "cidr_mask", ",", "'description'", ":", "description", "}", "request_data", ".", "update", "(", "kwargs", ")", "# Call to create an instance.", "response", "=", "requests", ".", "post", "(", "url", ",", "data", "=", "json", ".", "dumps", "(", "request_data", ")", ",", "*", "*", "self", ".", "_default_request_kwargs", ")", "# Log outcome of instance creation request.", "if", "response", ".", "status_code", "==", "200", ":", "logger", ".", "info", "(", "'Successfully created a new ACL for instance {} with: {}.'", ".", "format", "(", "instance", ",", "request_data", ")", ")", "else", ":", "logger", ".", "info", "(", "'Failed to create a new ACL for instance {} with: {}.'", ".", "format", "(", "instance", ",", "request_data", ")", ")", "data", "=", "self", ".", "_get_response_data", "(", "response", ")", "return", "self", ".", "_concrete_acl", "(", "data", ")" ]
Create an ACL entry for the specified instance.

:param str instance: The name of the instance to associate the new ACL
    entry with.
:param str cidr_mask: The IPv4 CIDR mask for the new ACL entry.
:param str description: A short description for the new ACL entry.
:param collector kwargs: (optional) Additional key=value pairs to be
    supplied to the creation payload. **Caution:** fields unrecognized
    by the API will cause this request to fail with a 400 from the API.
[ "Create", "an", "ACL", "entry", "for", "the", "specified", "instance", "." ]
train
https://github.com/objectrocket/python-client/blob/a65868c7511ff49a5fbe304e53bf592b7fc6d5ef/objectrocket/acls.py#L41-L75
0.004013
softlayer/softlayer-python
SoftLayer/managers/network.py
NetworkManager.edit_rwhois
def edit_rwhois(self, abuse_email=None, address1=None, address2=None,
                city=None, company_name=None, country=None,
                first_name=None, last_name=None, postal_code=None,
                private_residence=None, state=None):
    """Edit rwhois record."""
    update = {}
    for key, value in [('abuseEmail', abuse_email),
                       ('address1', address1),
                       ('address2', address2),
                       ('city', city),
                       ('companyName', company_name),
                       ('country', country),
                       ('firstName', first_name),
                       ('lastName', last_name),
                       ('privateResidenceFlag', private_residence),
                       ('state', state),
                       ('postalCode', postal_code)]:
        if value is not None:
            update[key] = value

    # If there's anything to update, update it
    if update:
        rwhois = self.get_rwhois()
        return self.client['Network_Subnet_Rwhois_Data'].editObject(
            update, id=rwhois['id'])

    return True
python
def edit_rwhois(self, abuse_email=None, address1=None, address2=None,
                city=None, company_name=None, country=None,
                first_name=None, last_name=None, postal_code=None,
                private_residence=None, state=None):
    """Edit rwhois record."""
    update = {}
    for key, value in [('abuseEmail', abuse_email),
                       ('address1', address1),
                       ('address2', address2),
                       ('city', city),
                       ('companyName', company_name),
                       ('country', country),
                       ('firstName', first_name),
                       ('lastName', last_name),
                       ('privateResidenceFlag', private_residence),
                       ('state', state),
                       ('postalCode', postal_code)]:
        if value is not None:
            update[key] = value

    # If there's anything to update, update it
    if update:
        rwhois = self.get_rwhois()
        return self.client['Network_Subnet_Rwhois_Data'].editObject(
            update, id=rwhois['id'])

    return True
[ "def", "edit_rwhois", "(", "self", ",", "abuse_email", "=", "None", ",", "address1", "=", "None", ",", "address2", "=", "None", ",", "city", "=", "None", ",", "company_name", "=", "None", ",", "country", "=", "None", ",", "first_name", "=", "None", ",", "last_name", "=", "None", ",", "postal_code", "=", "None", ",", "private_residence", "=", "None", ",", "state", "=", "None", ")", ":", "update", "=", "{", "}", "for", "key", ",", "value", "in", "[", "(", "'abuseEmail'", ",", "abuse_email", ")", ",", "(", "'address1'", ",", "address1", ")", ",", "(", "'address2'", ",", "address2", ")", ",", "(", "'city'", ",", "city", ")", ",", "(", "'companyName'", ",", "company_name", ")", ",", "(", "'country'", ",", "country", ")", ",", "(", "'firstName'", ",", "first_name", ")", ",", "(", "'lastName'", ",", "last_name", ")", ",", "(", "'privateResidenceFlag'", ",", "private_residence", ")", ",", "(", "'state'", ",", "state", ")", ",", "(", "'postalCode'", ",", "postal_code", ")", "]", ":", "if", "value", "is", "not", "None", ":", "update", "[", "key", "]", "=", "value", "# If there's anything to update, update it", "if", "update", ":", "rwhois", "=", "self", ".", "get_rwhois", "(", ")", "return", "self", ".", "client", "[", "'Network_Subnet_Rwhois_Data'", "]", ".", "editObject", "(", "update", ",", "id", "=", "rwhois", "[", "'id'", "]", ")", "return", "True" ]
Edit rwhois record.
[ "Edit", "rwhois", "record", "." ]
train
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/network.py#L257-L283
0.004105
ray-project/ray
python/ray/tune/examples/tune_mnist_async_hyperband.py
bias_variable
def bias_variable(shape):
    """bias_variable generates a bias variable of a given shape."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
python
def bias_variable(shape):
    """bias_variable generates a bias variable of a given shape."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
[ "def", "bias_variable", "(", "shape", ")", ":", "initial", "=", "tf", ".", "constant", "(", "0.1", ",", "shape", "=", "shape", ")", "return", "tf", ".", "Variable", "(", "initial", ")" ]
bias_variable generates a bias variable of a given shape.
[ "bias_variable", "generates", "a", "bias", "variable", "of", "a", "given", "shape", "." ]
train
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/examples/tune_mnist_async_hyperband.py#L127-L130
0.005917
bitly/asyncmongo
asyncmongo/cursor.py
Cursor.insert
def insert(self, doc_or_docs,
           manipulate=True,
           safe=True,
           check_keys=True,
           callback=None,
           **kwargs):
    """Insert a document(s) into this collection.

    If `manipulate` is set, the document(s) are manipulated using
    any :class:`~pymongo.son_manipulator.SONManipulator` instances
    that have been added to this
    :class:`~pymongo.database.Database`. Returns the ``"_id"`` of
    the inserted document or a list of ``"_id"`` values of the
    inserted documents. If the document(s) does not already
    contain an ``"_id"`` one will be added.

    If `safe` is ``True`` then the insert will be checked for
    errors, raising :class:`~pymongo.errors.OperationFailure` if
    one occurred. Safe inserts wait for a response from the
    database, while normal inserts do not.

    Any additional keyword arguments imply ``safe=True``, and
    will be used as options for the resultant `getLastError`
    command. For example, to wait for replication to 3 nodes, pass
    ``w=3``.

    :Parameters:
      - `doc_or_docs`: a document or list of documents to be
        inserted
      - `manipulate` (optional): manipulate the documents before
        inserting?
      - `safe` (optional): check that the insert succeeded?
      - `check_keys` (optional): check if keys start with '$' or
        contain '.', raising :class:`~pymongo.errors.InvalidName`
        in either case
      - `**kwargs` (optional): any additional arguments imply
        ``safe=True``, and will be used as options for the
        `getLastError` command

    .. mongodoc:: insert
    """
    if not isinstance(safe, bool):
        raise TypeError("safe must be an instance of bool")
    docs = doc_or_docs
    # return_one = False
    if isinstance(docs, dict):
        # return_one = True
        docs = [docs]

    # if manipulate:
    #     docs = [self.__database._fix_incoming(doc, self) for doc in docs]

    self.__limit = None
    if kwargs:
        safe = True

    if safe and not callable(callback):
        raise TypeError("callback must be callable")
    if not safe and callback is not None:
        raise TypeError("callback can not be used with safe=False")

    if callback:
        callback = functools.partial(self._handle_response, orig_callback=callback)

    connection = self.__pool.connection()
    try:
        connection.send_message(
            message.insert(self.full_collection_name, docs,
                           check_keys, safe, kwargs),
            callback=callback)
    except:
        connection.close()
        raise
python
def insert(self, doc_or_docs,
           manipulate=True,
           safe=True,
           check_keys=True,
           callback=None,
           **kwargs):
    """Insert a document(s) into this collection.

    If `manipulate` is set, the document(s) are manipulated using
    any :class:`~pymongo.son_manipulator.SONManipulator` instances
    that have been added to this
    :class:`~pymongo.database.Database`. Returns the ``"_id"`` of
    the inserted document or a list of ``"_id"`` values of the
    inserted documents. If the document(s) does not already
    contain an ``"_id"`` one will be added.

    If `safe` is ``True`` then the insert will be checked for
    errors, raising :class:`~pymongo.errors.OperationFailure` if
    one occurred. Safe inserts wait for a response from the
    database, while normal inserts do not.

    Any additional keyword arguments imply ``safe=True``, and
    will be used as options for the resultant `getLastError`
    command. For example, to wait for replication to 3 nodes, pass
    ``w=3``.

    :Parameters:
      - `doc_or_docs`: a document or list of documents to be
        inserted
      - `manipulate` (optional): manipulate the documents before
        inserting?
      - `safe` (optional): check that the insert succeeded?
      - `check_keys` (optional): check if keys start with '$' or
        contain '.', raising :class:`~pymongo.errors.InvalidName`
        in either case
      - `**kwargs` (optional): any additional arguments imply
        ``safe=True``, and will be used as options for the
        `getLastError` command

    .. mongodoc:: insert
    """
    if not isinstance(safe, bool):
        raise TypeError("safe must be an instance of bool")
    docs = doc_or_docs
    # return_one = False
    if isinstance(docs, dict):
        # return_one = True
        docs = [docs]

    # if manipulate:
    #     docs = [self.__database._fix_incoming(doc, self) for doc in docs]

    self.__limit = None
    if kwargs:
        safe = True

    if safe and not callable(callback):
        raise TypeError("callback must be callable")
    if not safe and callback is not None:
        raise TypeError("callback can not be used with safe=False")

    if callback:
        callback = functools.partial(self._handle_response, orig_callback=callback)

    connection = self.__pool.connection()
    try:
        connection.send_message(
            message.insert(self.full_collection_name, docs,
                           check_keys, safe, kwargs),
            callback=callback)
    except:
        connection.close()
        raise
[ "def", "insert", "(", "self", ",", "doc_or_docs", ",", "manipulate", "=", "True", ",", "safe", "=", "True", ",", "check_keys", "=", "True", ",", "callback", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "isinstance", "(", "safe", ",", "bool", ")", ":", "raise", "TypeError", "(", "\"safe must be an instance of bool\"", ")", "docs", "=", "doc_or_docs", "# return_one = False", "if", "isinstance", "(", "docs", ",", "dict", ")", ":", "# return_one = True", "docs", "=", "[", "docs", "]", "# if manipulate:", "# docs = [self.__database._fix_incoming(doc, self) for doc in docs]", "self", ".", "__limit", "=", "None", "if", "kwargs", ":", "safe", "=", "True", "if", "safe", "and", "not", "callable", "(", "callback", ")", ":", "raise", "TypeError", "(", "\"callback must be callable\"", ")", "if", "not", "safe", "and", "callback", "is", "not", "None", ":", "raise", "TypeError", "(", "\"callback can not be used with safe=False\"", ")", "if", "callback", ":", "callback", "=", "functools", ".", "partial", "(", "self", ".", "_handle_response", ",", "orig_callback", "=", "callback", ")", "connection", "=", "self", ".", "__pool", ".", "connection", "(", ")", "try", ":", "connection", ".", "send_message", "(", "message", ".", "insert", "(", "self", ".", "full_collection_name", ",", "docs", ",", "check_keys", ",", "safe", ",", "kwargs", ")", ",", "callback", "=", "callback", ")", "except", ":", "connection", ".", "close", "(", ")", "raise" ]
Insert a document(s) into this collection.

If `manipulate` is set, the document(s) are manipulated using
any :class:`~pymongo.son_manipulator.SONManipulator` instances
that have been added to this
:class:`~pymongo.database.Database`. Returns the ``"_id"`` of
the inserted document or a list of ``"_id"`` values of the
inserted documents. If the document(s) does not already
contain an ``"_id"`` one will be added.

If `safe` is ``True`` then the insert will be checked for
errors, raising :class:`~pymongo.errors.OperationFailure` if
one occurred. Safe inserts wait for a response from the
database, while normal inserts do not.

Any additional keyword arguments imply ``safe=True``, and
will be used as options for the resultant `getLastError`
command. For example, to wait for replication to 3 nodes, pass
``w=3``.

:Parameters:
  - `doc_or_docs`: a document or list of documents to be
    inserted
  - `manipulate` (optional): manipulate the documents before
    inserting?
  - `safe` (optional): check that the insert succeeded?
  - `check_keys` (optional): check if keys start with '$' or
    contain '.', raising :class:`~pymongo.errors.InvalidName`
    in either case
  - `**kwargs` (optional): any additional arguments imply
    ``safe=True``, and will be used as options for the
    `getLastError` command

.. mongodoc:: insert
[ "Insert", "a", "document", "(", "s", ")", "into", "this", "collection", ".", "If", "manipulate", "is", "set", "the", "document", "(", "s", ")", "are", "manipulated", "using", "any", ":", "class", ":", "~pymongo", ".", "son_manipulator", ".", "SONManipulator", "instances", "that", "have", "been", "added", "to", "this", ":", "class", ":", "~pymongo", ".", "database", ".", "Database", ".", "Returns", "the", "_id", "of", "the", "inserted", "document", "or", "a", "list", "of", "_id", "values", "of", "the", "inserted", "documents", ".", "If", "the", "document", "(", "s", ")", "does", "not", "already", "contain", "an", "_id", "one", "will", "be", "added", ".", "If", "safe", "is", "True", "then", "the", "insert", "will", "be", "checked", "for", "errors", "raising", ":", "class", ":", "~pymongo", ".", "errors", ".", "OperationFailure", "if", "one", "occurred", ".", "Safe", "inserts", "wait", "for", "a", "response", "from", "the", "database", "while", "normal", "inserts", "do", "not", ".", "Any", "additional", "keyword", "arguments", "imply", "safe", "=", "True", "and", "will", "be", "used", "as", "options", "for", "the", "resultant", "getLastError", "command", ".", "For", "example", "to", "wait", "for", "replication", "to", "3", "nodes", "pass", "w", "=", "3", ".", ":", "Parameters", ":", "-", "doc_or_docs", ":", "a", "document", "or", "list", "of", "documents", "to", "be", "inserted", "-", "manipulate", "(", "optional", ")", ":", "manipulate", "the", "documents", "before", "inserting?", "-", "safe", "(", "optional", ")", ":", "check", "that", "the", "insert", "succeeded?", "-", "check_keys", "(", "optional", ")", ":", "check", "if", "keys", "start", "with", "$", "or", "contain", ".", "raising", ":", "class", ":", "~pymongo", ".", "errors", ".", "InvalidName", "in", "either", "case", "-", "**", "kwargs", "(", "optional", ")", ":", "any", "additional", "arguments", "imply", "safe", "=", "True", "and", "will", "be", "used", "as", "options", "for", "the", "getLastError", "command", "..", "mongodoc", "::", "insert" ]
train
https://github.com/bitly/asyncmongo/blob/3da47c96d4592ec9e8b3ef5cf1b7d5b439ab3a5b/asyncmongo/cursor.py#L56-L124
0.005999
Telefonica/toolium
toolium/utils.py
Utils.wait_until_element_not_contain_text
def wait_until_element_not_contain_text(self, element, text, timeout=None):
    """Search element and wait until it does not contain the expected text

    :param element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
    :param text: text expected to be contained into the element
    :param timeout: max time to wait
    :returns: the web element if it does not contain the given text
    :rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
    :raises TimeoutException: If the element contains the expected text after the timeout
    """
    return self._wait_until(self._expected_condition_find_element_not_containing_text, (element, text), timeout)
python
def wait_until_element_not_contain_text(self, element, text, timeout=None):
    """Search element and wait until it does not contain the expected text

    :param element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
    :param text: text expected to be contained into the element
    :param timeout: max time to wait
    :returns: the web element if it does not contain the given text
    :rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
    :raises TimeoutException: If the element contains the expected text after the timeout
    """
    return self._wait_until(self._expected_condition_find_element_not_containing_text, (element, text), timeout)
[ "def", "wait_until_element_not_contain_text", "(", "self", ",", "element", ",", "text", ",", "timeout", "=", "None", ")", ":", "return", "self", ".", "_wait_until", "(", "self", ".", "_expected_condition_find_element_not_containing_text", ",", "(", "element", ",", "text", ")", ",", "timeout", ")" ]
Search element and wait until it does not contain the expected text

:param element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
:param text: text expected to be contained into the element
:param timeout: max time to wait
:returns: the web element if it does not contain the given text
:rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
:raises TimeoutException: If the element contains the expected text after the timeout
[ "Search", "element", "and", "wait", "until", "it", "does", "not", "contain", "the", "expected", "text" ]
train
https://github.com/Telefonica/toolium/blob/56847c243b3a98876df74c184b75e43f8810e475/toolium/utils.py#L393-L403
0.007762
log2timeline/plaso
plaso/filters/file_entry.py
SignaturesFileEntryFilter.Print
def Print(self, output_writer):
  """Prints a human readable version of the filter.

  Args:
    output_writer (CLIOutputWriter): output writer.
  """
  if self._file_scanner:
    output_writer.Write('\tsignature identifiers: {0:s}\n'.format(
        ', '.join(self._signature_identifiers)))
python
def Print(self, output_writer):
  """Prints a human readable version of the filter.

  Args:
    output_writer (CLIOutputWriter): output writer.
  """
  if self._file_scanner:
    output_writer.Write('\tsignature identifiers: {0:s}\n'.format(
        ', '.join(self._signature_identifiers)))
[ "def", "Print", "(", "self", ",", "output_writer", ")", ":", "if", "self", ".", "_file_scanner", ":", "output_writer", ".", "Write", "(", "'\\tsignature identifiers: {0:s}\\n'", ".", "format", "(", "', '", ".", "join", "(", "self", ".", "_signature_identifiers", ")", ")", ")" ]
Prints a human readable version of the filter.

Args:
  output_writer (CLIOutputWriter): output writer.
[ "Prints", "a", "human", "readable", "version", "of", "the", "filter", "." ]
train
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/filters/file_entry.py#L345-L353
0.006557
apache/incubator-mxnet
example/svrg_module/api_usage_example/example_api_train.py
create_network
def create_network(batch_size, update_freq):
    """Create a linear regression network for performing SVRG optimization.

    Parameters
    ----------
    batch_size: int
        Size of data split
    update_freq: int
        Update Frequency for calculating full gradients

    Returns
    ----------
    di: mx.io.NDArrayIter
        Data iterator
    update_freq: SVRGModule
        An instance of SVRGModule for performing SVRG optimization
    """
    import logging
    head = '%(asctime)-15s %(message)s'
    logging.basicConfig(level=logging.INFO, format=head)

    train_data = np.random.randint(1, 5, [1000, 2])
    weights = np.array([1.0, 2.0])
    train_label = train_data.dot(weights)

    di = mx.io.NDArrayIter(train_data, train_label, batch_size=batch_size, shuffle=True, label_name='lin_reg_label')
    X = mx.sym.Variable('data')
    Y = mx.symbol.Variable('lin_reg_label')
    fully_connected_layer = mx.sym.FullyConnected(data=X, name='fc1', num_hidden=1)
    lro = mx.sym.LinearRegressionOutput(data=fully_connected_layer, label=Y, name="lro")

    mod = SVRGModule(
        symbol=lro,
        data_names=['data'],
        label_names=['lin_reg_label'],
        update_freq=update_freq,
        logger=logging
    )

    return di, mod
python
def create_network(batch_size, update_freq):
    """Create a linear regression network for performing SVRG optimization.

    Parameters
    ----------
    batch_size: int
        Size of data split
    update_freq: int
        Update Frequency for calculating full gradients

    Returns
    ----------
    di: mx.io.NDArrayIter
        Data iterator
    update_freq: SVRGModule
        An instance of SVRGModule for performing SVRG optimization
    """
    import logging
    head = '%(asctime)-15s %(message)s'
    logging.basicConfig(level=logging.INFO, format=head)

    train_data = np.random.randint(1, 5, [1000, 2])
    weights = np.array([1.0, 2.0])
    train_label = train_data.dot(weights)

    di = mx.io.NDArrayIter(train_data, train_label, batch_size=batch_size, shuffle=True, label_name='lin_reg_label')
    X = mx.sym.Variable('data')
    Y = mx.symbol.Variable('lin_reg_label')
    fully_connected_layer = mx.sym.FullyConnected(data=X, name='fc1', num_hidden=1)
    lro = mx.sym.LinearRegressionOutput(data=fully_connected_layer, label=Y, name="lro")

    mod = SVRGModule(
        symbol=lro,
        data_names=['data'],
        label_names=['lin_reg_label'],
        update_freq=update_freq,
        logger=logging
    )

    return di, mod
[ "def", "create_network", "(", "batch_size", ",", "update_freq", ")", ":", "import", "logging", "head", "=", "'%(asctime)-15s %(message)s'", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "INFO", ",", "format", "=", "head", ")", "train_data", "=", "np", ".", "random", ".", "randint", "(", "1", ",", "5", ",", "[", "1000", ",", "2", "]", ")", "weights", "=", "np", ".", "array", "(", "[", "1.0", ",", "2.0", "]", ")", "train_label", "=", "train_data", ".", "dot", "(", "weights", ")", "di", "=", "mx", ".", "io", ".", "NDArrayIter", "(", "train_data", ",", "train_label", ",", "batch_size", "=", "batch_size", ",", "shuffle", "=", "True", ",", "label_name", "=", "'lin_reg_label'", ")", "X", "=", "mx", ".", "sym", ".", "Variable", "(", "'data'", ")", "Y", "=", "mx", ".", "symbol", ".", "Variable", "(", "'lin_reg_label'", ")", "fully_connected_layer", "=", "mx", ".", "sym", ".", "FullyConnected", "(", "data", "=", "X", ",", "name", "=", "'fc1'", ",", "num_hidden", "=", "1", ")", "lro", "=", "mx", ".", "sym", ".", "LinearRegressionOutput", "(", "data", "=", "fully_connected_layer", ",", "label", "=", "Y", ",", "name", "=", "\"lro\"", ")", "mod", "=", "SVRGModule", "(", "symbol", "=", "lro", ",", "data_names", "=", "[", "'data'", "]", ",", "label_names", "=", "[", "'lin_reg_label'", "]", ",", "update_freq", "=", "update_freq", ",", "logger", "=", "logging", ")", "return", "di", ",", "mod" ]
Create a linear regression network for performing SVRG optimization.

Parameters
----------
batch_size: int
    Size of data split
update_freq: int
    Update Frequency for calculating full gradients

Returns
----------
di: mx.io.NDArrayIter
    Data iterator
update_freq: SVRGModule
    An instance of SVRGModule for performing SVRG optimization
[ "Create", "a", "linear", "regression", "network", "for", "performing", "SVRG", "optimization", ".", "Parameters", "----------", "batch_size", ":", "int", "Size", "of", "data", "split", "update_freq", ":", "int", "Update", "Frequency", "for", "calculating", "full", "gradients" ]
train
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/svrg_module/api_usage_example/example_api_train.py#L73-L109
0.003218
10gen/mongo-orchestration
mongo_orchestration/process.py
PortPool.change_range
def change_range(self, min_port=1025, max_port=2000, port_sequence=None):
    """change Pool port range"""
    self.__init_range(min_port, max_port, port_sequence)
python
def change_range(self, min_port=1025, max_port=2000, port_sequence=None):
    """change Pool port range"""
    self.__init_range(min_port, max_port, port_sequence)
[ "def", "change_range", "(", "self", ",", "min_port", "=", "1025", ",", "max_port", "=", "2000", ",", "port_sequence", "=", "None", ")", ":", "self", ".", "__init_range", "(", "min_port", ",", "max_port", ",", "port_sequence", ")" ]
change Pool port range
[ "change", "Pool", "port", "range" ]
train
https://github.com/10gen/mongo-orchestration/blob/81fd2224205922ea2178b08190b53a33aec47261/mongo_orchestration/process.py#L133-L135
0.011696
aouyar/healthgraph-api
healthgraph/authmgr.py
AuthManager.revoke_access_token
def revoke_access_token(self, access_token):
    """Revokes the Access Token by accessing the De-authorization
    Endpoint of Health Graph API.

    @param access_token: Access Token for querying Health Graph API.

    """
    payload = {'access_token': access_token,}
    req = requests.post(settings.API_DEAUTHORIZATION_URL, data=payload)
python
def revoke_access_token(self, access_token):
    """Revokes the Access Token by accessing the De-authorization
    Endpoint of Health Graph API.

    @param access_token: Access Token for querying Health Graph API.

    """
    payload = {'access_token': access_token,}
    req = requests.post(settings.API_DEAUTHORIZATION_URL, data=payload)
[ "def", "revoke_access_token", "(", "self", ",", "access_token", ")", ":", "payload", "=", "{", "'access_token'", ":", "access_token", ",", "}", "req", "=", "requests", ".", "post", "(", "settings", ".", "API_DEAUTHORIZATION_URL", ",", "data", "=", "payload", ")" ]
Revokes the Access Token by accessing the De-authorization
Endpoint of Health Graph API.

@param access_token: Access Token for querying Health Graph API.
[ "Revokes", "the", "Access", "Token", "by", "accessing", "the", "De", "-", "authorization", "Endpoint", "of", "Health", "Graph", "API", "." ]
train
https://github.com/aouyar/healthgraph-api/blob/fc5135ab353ca1f05e8a70ec784ff921e686c072/healthgraph/authmgr.py#L107-L115
0.013123
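Illustrative call for the record above; `auth_mgr` is an assumed AuthManager instance and the token a placeholder. Note the method posts to the de-authorization endpoint but discards the response:

# Invalidate a previously issued Health Graph access token
auth_mgr.revoke_access_token('<ACCESS_TOKEN>')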
Rapptz/discord.py
discord/guild.py
Guild.unban
async def unban(self, user, *, reason=None): """|coro| Unbans a user from the guild. The user must meet the :class:`abc.Snowflake` abc. You must have the :attr:`~Permissions.ban_members` permission to do this. Parameters ----------- user: :class:`abc.Snowflake` The user to unban. reason: Optional[:class:`str`] The reason for doing this action. Shows up on the audit log. Raises ------- Forbidden You do not have the proper permissions to unban. HTTPException Unbanning failed. """ await self._state.http.unban(user.id, self.id, reason=reason)
python
async def unban(self, user, *, reason=None): """|coro| Unbans a user from the guild. The user must meet the :class:`abc.Snowflake` abc. You must have the :attr:`~Permissions.ban_members` permission to do this. Parameters ----------- user: :class:`abc.Snowflake` The user to unban. reason: Optional[:class:`str`] The reason for doing this action. Shows up on the audit log. Raises ------- Forbidden You do not have the proper permissions to unban. HTTPException Unbanning failed. """ await self._state.http.unban(user.id, self.id, reason=reason)
[ "async", "def", "unban", "(", "self", ",", "user", ",", "*", ",", "reason", "=", "None", ")", ":", "await", "self", ".", "_state", ".", "http", ".", "unban", "(", "user", ".", "id", ",", "self", ".", "id", ",", "reason", "=", "reason", ")" ]
|coro| Unbans a user from the guild. The user must meet the :class:`abc.Snowflake` abc. You must have the :attr:`~Permissions.ban_members` permission to do this. Parameters ----------- user: :class:`abc.Snowflake` The user to unban. reason: Optional[:class:`str`] The reason for doing this action. Shows up on the audit log. Raises ------- Forbidden You do not have the proper permissions to unban. HTTPException Unbanning failed.
[ "|coro|" ]
train
https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/guild.py#L1375-L1399
0.002789
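A short coroutine built on the documented call above; discord.Object satisfies the abc.Snowflake contract, so a bare user ID suffices without fetching the full user:

import discord

async def lift_ban(guild: discord.Guild, user_id: int) -> None:
    # The bot needs the ban_members permission in this guild
    await guild.unban(discord.Object(id=user_id), reason='Appeal accepted')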
sbg/sevenbridges-python
sevenbridges/models/project.py
Project.add_files
def add_files(self, files): """ Adds files to this project. :param files: List of files or a Collection object. """ for file in files: file.copy(project=self.id)
python
def add_files(self, files): """ Adds files to this project. :param files: List of files or a Collection object. """ for file in files: file.copy(project=self.id)
[ "def", "add_files", "(", "self", ",", "files", ")", ":", "for", "file", "in", "files", ":", "file", ".", "copy", "(", "project", "=", "self", ".", "id", ")" ]
Adds files to this project. :param files: List of files or a Collection object.
[ "Adds", "files", "to", "this", "project", ".", ":", "param", "files", ":", "List", "of", "files", "or", "a", "Collection", "object", "." ]
train
https://github.com/sbg/sevenbridges-python/blob/f62640d1018d959f0b686f2dbe5e183085336607/sevenbridges/models/project.py#L321-L327
0.00939
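Assumed setup for sevenbridges-python (endpoint, token, and project/file IDs are placeholders); the method simply copies each file into the target project:

import sevenbridges as sbg

api = sbg.Api(url='https://api.sbgenomics.com/v2', token='<AUTH_TOKEN>')
project = api.projects.get(id='<username>/<target-project>')
files = api.files.query(project='<username>/<source-project>')
project.add_files(files)  # each file is copied into project via file.copy()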
PmagPy/PmagPy
programs/thellier_gui.py
Arai_GUI.on_menu_exit
def on_menu_exit(self, event): """ Runs whenever Thellier GUI exits """ if self.close_warning: TEXT = "Data is not saved to a file yet!\nTo properly save your data:\n1) Analysis --> Save current interpretations to a redo file.\nor\n1) File --> Save MagIC tables.\n\n Press OK to exit without saving." dlg1 = wx.MessageDialog( None, caption="Warning:", message=TEXT, style=wx.OK | wx.CANCEL | wx.ICON_EXCLAMATION) if self.show_dlg(dlg1) == wx.ID_OK: dlg1.Destroy() self.GUI_log.close() self.Destroy() # if a custom quit event is specified, fire it if self.evt_quit: event = self.evt_quit(self.GetId()) self.GetEventHandler().ProcessEvent(event) if self.standalone: sys.exit() else: self.GUI_log.close() self.Destroy() # if a custom quit event is specified, fire it if self.evt_quit: event = self.evt_quit(self.GetId()) self.GetEventHandler().ProcessEvent(event) if self.standalone: sys.exit()
python
def on_menu_exit(self, event): """ Runs whenever Thellier GUI exits """ if self.close_warning: TEXT = "Data is not saved to a file yet!\nTo properly save your data:\n1) Analysis --> Save current interpretations to a redo file.\nor\n1) File --> Save MagIC tables.\n\n Press OK to exit without saving." dlg1 = wx.MessageDialog( None, caption="Warning:", message=TEXT, style=wx.OK | wx.CANCEL | wx.ICON_EXCLAMATION) if self.show_dlg(dlg1) == wx.ID_OK: dlg1.Destroy() self.GUI_log.close() self.Destroy() # if a custom quit event is specified, fire it if self.evt_quit: event = self.evt_quit(self.GetId()) self.GetEventHandler().ProcessEvent(event) if self.standalone: sys.exit() else: self.GUI_log.close() self.Destroy() # if a custom quit event is specified, fire it if self.evt_quit: event = self.evt_quit(self.GetId()) self.GetEventHandler().ProcessEvent(event) if self.standalone: sys.exit()
[ "def", "on_menu_exit", "(", "self", ",", "event", ")", ":", "if", "self", ".", "close_warning", ":", "TEXT", "=", "\"Data is not saved to a file yet!\\nTo properly save your data:\\n1) Analysis --> Save current interpretations to a redo file.\\nor\\n1) File --> Save MagIC tables.\\n\\n Press OK to exit without saving.\"", "dlg1", "=", "wx", ".", "MessageDialog", "(", "None", ",", "caption", "=", "\"Warning:\"", ",", "message", "=", "TEXT", ",", "style", "=", "wx", ".", "OK", "|", "wx", ".", "CANCEL", "|", "wx", ".", "ICON_EXCLAMATION", ")", "if", "self", ".", "show_dlg", "(", "dlg1", ")", "==", "wx", ".", "ID_OK", ":", "dlg1", ".", "Destroy", "(", ")", "self", ".", "GUI_log", ".", "close", "(", ")", "self", ".", "Destroy", "(", ")", "# if a custom quit event is specified, fire it", "if", "self", ".", "evt_quit", ":", "event", "=", "self", ".", "evt_quit", "(", "self", ".", "GetId", "(", ")", ")", "self", ".", "GetEventHandler", "(", ")", ".", "ProcessEvent", "(", "event", ")", "if", "self", ".", "standalone", ":", "sys", ".", "exit", "(", ")", "else", ":", "self", ".", "GUI_log", ".", "close", "(", ")", "self", ".", "Destroy", "(", ")", "# if a custom quit event is specified, fire it", "if", "self", ".", "evt_quit", ":", "event", "=", "self", ".", "evt_quit", "(", "self", ".", "GetId", "(", ")", ")", "self", ".", "GetEventHandler", "(", ")", ".", "ProcessEvent", "(", "event", ")", "if", "self", ".", "standalone", ":", "sys", ".", "exit", "(", ")" ]
Runs whenever Thellier GUI exits
[ "Runs", "whenever", "Thellier", "GUI", "exits" ]
train
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/thellier_gui.py#L2283-L2309
0.003208
materialsproject/pymatgen
pymatgen/io/lammps/data.py
LammpsData.structure
def structure(self): """ Exports a periodic structure object representing the simulation box. Return: Structure """ masses = self.masses atoms = self.atoms.copy() if "nx" in atoms.columns: atoms.drop(["nx", "ny", "nz"], axis=1, inplace=True) atoms["molecule-ID"] = 1 ld_copy = self.__class__(self.box, masses, atoms) topologies = ld_copy.disassemble()[-1] molecule = topologies[0].sites coords = molecule.cart_coords - np.array(self.box.bounds)[:, 0] species = molecule.species latt = self.box.to_lattice() site_properties = {} if "q" in atoms: site_properties["charge"] = atoms["q"].values if self.velocities is not None: site_properties["velocities"] = self.velocities.values return Structure(latt, species, coords, coords_are_cartesian=True, site_properties=site_properties)
python
def structure(self): """ Exports a periodic structure object representing the simulation box. Return: Structure """ masses = self.masses atoms = self.atoms.copy() if "nx" in atoms.columns: atoms.drop(["nx", "ny", "nz"], axis=1, inplace=True) atoms["molecule-ID"] = 1 ld_copy = self.__class__(self.box, masses, atoms) topologies = ld_copy.disassemble()[-1] molecule = topologies[0].sites coords = molecule.cart_coords - np.array(self.box.bounds)[:, 0] species = molecule.species latt = self.box.to_lattice() site_properties = {} if "q" in atoms: site_properties["charge"] = atoms["q"].values if self.velocities is not None: site_properties["velocities"] = self.velocities.values return Structure(latt, species, coords, coords_are_cartesian=True, site_properties=site_properties)
[ "def", "structure", "(", "self", ")", ":", "masses", "=", "self", ".", "masses", "atoms", "=", "self", ".", "atoms", ".", "copy", "(", ")", "if", "\"nx\"", "in", "atoms", ".", "columns", ":", "atoms", ".", "drop", "(", "[", "\"nx\"", ",", "\"ny\"", ",", "\"nz\"", "]", ",", "axis", "=", "1", ",", "inplace", "=", "True", ")", "atoms", "[", "\"molecule-ID\"", "]", "=", "1", "ld_copy", "=", "self", ".", "__class__", "(", "self", ".", "box", ",", "masses", ",", "atoms", ")", "topologies", "=", "ld_copy", ".", "disassemble", "(", ")", "[", "-", "1", "]", "molecule", "=", "topologies", "[", "0", "]", ".", "sites", "coords", "=", "molecule", ".", "cart_coords", "-", "np", ".", "array", "(", "self", ".", "box", ".", "bounds", ")", "[", ":", ",", "0", "]", "species", "=", "molecule", ".", "species", "latt", "=", "self", ".", "box", ".", "to_lattice", "(", ")", "site_properties", "=", "{", "}", "if", "\"q\"", "in", "atoms", ":", "site_properties", "[", "\"charge\"", "]", "=", "atoms", "[", "\"q\"", "]", ".", "values", "if", "self", ".", "velocities", "is", "not", "None", ":", "site_properties", "[", "\"velocities\"", "]", "=", "self", ".", "velocities", ".", "values", "return", "Structure", "(", "latt", ",", "species", ",", "coords", ",", "coords_are_cartesian", "=", "True", ",", "site_properties", "=", "site_properties", ")" ]
Exports a periodic structure object representing the simulation box. Return: Structure
[ "Exports", "a", "periodic", "structure", "object", "representing", "the", "simulation", "box", "." ]
train
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/lammps/data.py#L273-L299
0.001992
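Sketch for the record above, assuming a LAMMPS data file on disk and that `structure` is exposed as a property (as in current pymatgen); the filename is a placeholder:

from pymatgen.io.lammps.data import LammpsData

ld = LammpsData.from_file('ethane.data', atom_style='full')
struct = ld.structure  # periodic Structure; charges/velocities become site properties
print(struct.composition, struct.lattice.abc)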
OpenTreeOfLife/peyotl
peyotl/git_storage/git_action.py
get_HEAD_SHA1
def get_HEAD_SHA1(git_dir): """Not locked! """ head_file = os.path.join(git_dir, 'HEAD') with open(head_file, 'r') as hf: head_contents = hf.read().strip() assert head_contents.startswith('ref: ') ref_filename = head_contents[5:] # strip off "ref: " real_ref = os.path.join(git_dir, ref_filename) with open(real_ref, 'r') as rf: return rf.read().strip()
python
def get_HEAD_SHA1(git_dir): """Not locked! """ head_file = os.path.join(git_dir, 'HEAD') with open(head_file, 'r') as hf: head_contents = hf.read().strip() assert head_contents.startswith('ref: ') ref_filename = head_contents[5:] # strip off "ref: " real_ref = os.path.join(git_dir, ref_filename) with open(real_ref, 'r') as rf: return rf.read().strip()
[ "def", "get_HEAD_SHA1", "(", "git_dir", ")", ":", "head_file", "=", "os", ".", "path", ".", "join", "(", "git_dir", ",", "'HEAD'", ")", "with", "open", "(", "head_file", ",", "'r'", ")", "as", "hf", ":", "head_contents", "=", "hf", ".", "read", "(", ")", ".", "strip", "(", ")", "assert", "head_contents", ".", "startswith", "(", "'ref: '", ")", "ref_filename", "=", "head_contents", "[", "5", ":", "]", "# strip off \"ref: \"", "real_ref", "=", "os", ".", "path", ".", "join", "(", "git_dir", ",", "ref_filename", ")", "with", "open", "(", "real_ref", ",", "'r'", ")", "as", "rf", ":", "return", "rf", ".", "read", "(", ")", ".", "strip", "(", ")" ]
Not locked!
[ "Not", "locked!" ]
train
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/git_storage/git_action.py#L17-L27
0.002488
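Direct usage of the helper above; the path is a placeholder. Because of the assert, it only handles a symbolic HEAD (a checked-out branch), not a detached one:

sha = get_HEAD_SHA1('/path/to/repo/.git')
print(sha)  # 40-character SHA-1 of the branch tip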
aio-libs/aioredis
aioredis/pool.py
ConnectionsPool.get_connection
def get_connection(self, command, args=()): """Get free connection from pool. Returns connection. """ # TODO: find a better way to determine if connection is free # and not havily used. command = command.upper().strip() is_pubsub = command in _PUBSUB_COMMANDS if is_pubsub and self._pubsub_conn: if not self._pubsub_conn.closed: return self._pubsub_conn, self._pubsub_conn.address self._pubsub_conn = None for i in range(self.freesize): conn = self._pool[0] self._pool.rotate(1) if conn.closed: # or conn._waiters: (eg: busy connection) continue if conn.in_pubsub: continue if is_pubsub: self._pubsub_conn = conn self._pool.remove(conn) self._used.add(conn) return conn, conn.address return None, self._address
python
def get_connection(self, command, args=()): """Get free connection from pool. Returns connection. """ # TODO: find a better way to determine if connection is free # and not havily used. command = command.upper().strip() is_pubsub = command in _PUBSUB_COMMANDS if is_pubsub and self._pubsub_conn: if not self._pubsub_conn.closed: return self._pubsub_conn, self._pubsub_conn.address self._pubsub_conn = None for i in range(self.freesize): conn = self._pool[0] self._pool.rotate(1) if conn.closed: # or conn._waiters: (eg: busy connection) continue if conn.in_pubsub: continue if is_pubsub: self._pubsub_conn = conn self._pool.remove(conn) self._used.add(conn) return conn, conn.address return None, self._address
[ "def", "get_connection", "(", "self", ",", "command", ",", "args", "=", "(", ")", ")", ":", "# TODO: find a better way to determine if connection is free", "# and not havily used.", "command", "=", "command", ".", "upper", "(", ")", ".", "strip", "(", ")", "is_pubsub", "=", "command", "in", "_PUBSUB_COMMANDS", "if", "is_pubsub", "and", "self", ".", "_pubsub_conn", ":", "if", "not", "self", ".", "_pubsub_conn", ".", "closed", ":", "return", "self", ".", "_pubsub_conn", ",", "self", ".", "_pubsub_conn", ".", "address", "self", ".", "_pubsub_conn", "=", "None", "for", "i", "in", "range", "(", "self", ".", "freesize", ")", ":", "conn", "=", "self", ".", "_pool", "[", "0", "]", "self", ".", "_pool", ".", "rotate", "(", "1", ")", "if", "conn", ".", "closed", ":", "# or conn._waiters: (eg: busy connection)", "continue", "if", "conn", ".", "in_pubsub", ":", "continue", "if", "is_pubsub", ":", "self", ".", "_pubsub_conn", "=", "conn", "self", ".", "_pool", ".", "remove", "(", "conn", ")", "self", ".", "_used", ".", "add", "(", "conn", ")", "return", "conn", ",", "conn", ".", "address", "return", "None", ",", "self", ".", "_address" ]
Get free connection from pool. Returns connection.
[ "Get", "free", "connection", "from", "pool", "." ]
train
https://github.com/aio-libs/aioredis/blob/e8c33e39558d4cc91cf70dde490d8b330c97dc2e/aioredis/pool.py#L220-L245
0.00202
albahnsen/CostSensitiveClassification
costcla/utils/cross_validation.py
_fit_and_score
def _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, return_train_score=False, return_parameters=False): """Fit estimator and compute scores for a given dataset split. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape at least 2D The data to fit. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. scorer : callable A scorer callable object / function with signature ``scorer(estimator, X, y)``. train : array-like, shape = (n_train_samples,) Indices of training samples. test : array-like, shape = (n_test_samples,) Indices of test samples. verbose : integer The verbosity level. parameters : dict or None Parameters to be set on the estimator. fit_params : dict or None Parameters that will be passed to ``estimator.fit``. return_train_score : boolean, optional, default: False Compute and return score on training set. return_parameters : boolean, optional, default: False Return parameters that have been used for the estimator. Returns ------- train_score : float, optional Score on training set, returned only if `return_train_score` is `True`. test_score : float Score on test set. n_test_samples : int Number of test samples. scoring_time : float Time spent for fitting and scoring in seconds. parameters : dict or None, optional The parameters that have been evaluated. """ if verbose > 1: if parameters is None: msg = "no parameters to be set" else: msg = '%s' % (', '.join('%s=%s' % (k, v) for k, v in list(parameters.items()))) print("[CV] %s %s" % (msg, (64 - len(msg)) * '.')) # Adjust length of sample weights n_samples = _num_samples(X) fit_params = fit_params if fit_params is not None else {} fit_params = dict([(k, np.asarray(v)[train] if hasattr(v, '__len__') and len(v) == n_samples else v) for k, v in list(fit_params.items())]) if parameters is not None: estimator.set_params(**parameters) start_time = time.time() X_train, y_train = _safe_split(estimator, X, y, train) X_test, y_test = _safe_split(estimator, X, y, test, train) if y_train is None: estimator.fit(X_train, **fit_params) else: estimator.fit(X_train, y_train, **fit_params) test_score = _score(estimator, X_test, y_test, scorer) if return_train_score: train_score = _score(estimator, X_train, y_train, scorer) scoring_time = time.time() - start_time if verbose > 2: msg += ", score=%f" % test_score if verbose > 1: end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time)) print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg)) ret = [train_score] if return_train_score else [] ret.extend([test_score, _num_samples(X_test), scoring_time]) if return_parameters: ret.append(parameters) return ret
python
def _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, return_train_score=False, return_parameters=False): """Fit estimator and compute scores for a given dataset split. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape at least 2D The data to fit. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. scorer : callable A scorer callable object / function with signature ``scorer(estimator, X, y)``. train : array-like, shape = (n_train_samples,) Indices of training samples. test : array-like, shape = (n_test_samples,) Indices of test samples. verbose : integer The verbosity level. parameters : dict or None Parameters to be set on the estimator. fit_params : dict or None Parameters that will be passed to ``estimator.fit``. return_train_score : boolean, optional, default: False Compute and return score on training set. return_parameters : boolean, optional, default: False Return parameters that have been used for the estimator. Returns ------- train_score : float, optional Score on training set, returned only if `return_train_score` is `True`. test_score : float Score on test set. n_test_samples : int Number of test samples. scoring_time : float Time spent for fitting and scoring in seconds. parameters : dict or None, optional The parameters that have been evaluated. """ if verbose > 1: if parameters is None: msg = "no parameters to be set" else: msg = '%s' % (', '.join('%s=%s' % (k, v) for k, v in list(parameters.items()))) print("[CV] %s %s" % (msg, (64 - len(msg)) * '.')) # Adjust length of sample weights n_samples = _num_samples(X) fit_params = fit_params if fit_params is not None else {} fit_params = dict([(k, np.asarray(v)[train] if hasattr(v, '__len__') and len(v) == n_samples else v) for k, v in list(fit_params.items())]) if parameters is not None: estimator.set_params(**parameters) start_time = time.time() X_train, y_train = _safe_split(estimator, X, y, train) X_test, y_test = _safe_split(estimator, X, y, test, train) if y_train is None: estimator.fit(X_train, **fit_params) else: estimator.fit(X_train, y_train, **fit_params) test_score = _score(estimator, X_test, y_test, scorer) if return_train_score: train_score = _score(estimator, X_train, y_train, scorer) scoring_time = time.time() - start_time if verbose > 2: msg += ", score=%f" % test_score if verbose > 1: end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time)) print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg)) ret = [train_score] if return_train_score else [] ret.extend([test_score, _num_samples(X_test), scoring_time]) if return_parameters: ret.append(parameters) return ret
[ "def", "_fit_and_score", "(", "estimator", ",", "X", ",", "y", ",", "scorer", ",", "train", ",", "test", ",", "verbose", ",", "parameters", ",", "fit_params", ",", "return_train_score", "=", "False", ",", "return_parameters", "=", "False", ")", ":", "if", "verbose", ">", "1", ":", "if", "parameters", "is", "None", ":", "msg", "=", "\"no parameters to be set\"", "else", ":", "msg", "=", "'%s'", "%", "(", "', '", ".", "join", "(", "'%s=%s'", "%", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "list", "(", "parameters", ".", "items", "(", ")", ")", ")", ")", "print", "(", "\"[CV] %s %s\"", "%", "(", "msg", ",", "(", "64", "-", "len", "(", "msg", ")", ")", "*", "'.'", ")", ")", "# Adjust lenght of sample weights", "n_samples", "=", "_num_samples", "(", "X", ")", "fit_params", "=", "fit_params", "if", "fit_params", "is", "not", "None", "else", "{", "}", "fit_params", "=", "dict", "(", "[", "(", "k", ",", "np", ".", "asarray", "(", "v", ")", "[", "train", "]", "if", "hasattr", "(", "v", ",", "'__len__'", ")", "and", "len", "(", "v", ")", "==", "n_samples", "else", "v", ")", "for", "k", ",", "v", "in", "list", "(", "fit_params", ".", "items", "(", ")", ")", "]", ")", "if", "parameters", "is", "not", "None", ":", "estimator", ".", "set_params", "(", "*", "*", "parameters", ")", "start_time", "=", "time", ".", "time", "(", ")", "X_train", ",", "y_train", "=", "_safe_split", "(", "estimator", ",", "X", ",", "y", ",", "train", ")", "X_test", ",", "y_test", "=", "_safe_split", "(", "estimator", ",", "X", ",", "y", ",", "test", ",", "train", ")", "if", "y_train", "is", "None", ":", "estimator", ".", "fit", "(", "X_train", ",", "*", "*", "fit_params", ")", "else", ":", "estimator", ".", "fit", "(", "X_train", ",", "y_train", ",", "*", "*", "fit_params", ")", "test_score", "=", "_score", "(", "estimator", ",", "X_test", ",", "y_test", ",", "scorer", ")", "if", "return_train_score", ":", "train_score", "=", "_score", "(", "estimator", ",", "X_train", ",", "y_train", ",", "scorer", ")", "scoring_time", "=", "time", ".", "time", "(", ")", "-", "start_time", "if", "verbose", ">", "2", ":", "msg", "+=", "\", score=%f\"", "%", "test_score", "if", "verbose", ">", "1", ":", "end_msg", "=", "\"%s -%s\"", "%", "(", "msg", ",", "logger", ".", "short_format_time", "(", "scoring_time", ")", ")", "print", "(", "\"[CV] %s %s\"", "%", "(", "(", "64", "-", "len", "(", "end_msg", ")", ")", "*", "'.'", ",", "end_msg", ")", ")", "ret", "=", "[", "train_score", "]", "if", "return_train_score", "else", "[", "]", "ret", ".", "extend", "(", "[", "test_score", ",", "_num_samples", "(", "X_test", ")", ",", "scoring_time", "]", ")", "if", "return_parameters", ":", "ret", ".", "append", "(", "parameters", ")", "return", "ret" ]
Fit estimator and compute scores for a given dataset split. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape at least 2D The data to fit. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. scorer : callable A scorer callable object / function with signature ``scorer(estimator, X, y)``. train : array-like, shape = (n_train_samples,) Indices of training samples. test : array-like, shape = (n_test_samples,) Indices of test samples. verbose : integer The verbosity level. parameters : dict or None Parameters to be set on the estimator. fit_params : dict or None Parameters that will be passed to ``estimator.fit``. return_train_score : boolean, optional, default: False Compute and return score on training set. return_parameters : boolean, optional, default: False Return parameters that have been used for the estimator. Returns ------- train_score : float, optional Score on training set, returned only if `return_train_score` is `True`. test_score : float Score on test set. n_test_samples : int Number of test samples. scoring_time : float Time spent for fitting and scoring in seconds. parameters : dict or None, optional The parameters that have been evaluated.
[ "Fit", "estimator", "and", "compute", "scores", "for", "a", "given", "dataset", "split", "." ]
train
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/utils/cross_validation.py#L1154-L1255
0.0003
CityOfZion/neo-boa
boa/code/module.py
Module.link_methods
def link_methods(self): """ Perform linkage of addresses between methods. """ from ..compiler import Compiler for method in self.methods: method.prepare() self.all_vm_tokens = OrderedDict() address = 0 for method in self.orderered_methods: if not method.is_interop: # print("ADDING METHOD %s " % method.full_name) method.address = address for key, vmtoken in method.vm_tokens.items(): self.all_vm_tokens[address] = vmtoken address += 1 if vmtoken.data is not None and vmtoken.vm_op != VMOp.NOP: address += len(vmtoken.data) vmtoken.addr = vmtoken.addr + method.address for key, vmtoken in self.all_vm_tokens.items(): if vmtoken.src_method is not None: target_method = self.method_by_name(vmtoken.target_method) if target_method: jump_len = target_method.address - vmtoken.addr param_ret_counts = bytearray() if Compiler.instance().nep8: param_ret_counts = vmtoken.data[0:2] jump_len -= 2 if jump_len > -32767 and jump_len < 32767: vmtoken.data = param_ret_counts + jump_len.to_bytes(2, 'little', signed=True) else: vmtoken.data = param_ret_counts + jump_len.to_bytes(4, 'little', signed=True) else: raise Exception("Target method %s not found" % vmtoken.target_method)
python
def link_methods(self): """ Perform linkage of addresses between methods. """ from ..compiler import Compiler for method in self.methods: method.prepare() self.all_vm_tokens = OrderedDict() address = 0 for method in self.orderered_methods: if not method.is_interop: # print("ADDING METHOD %s " % method.full_name) method.address = address for key, vmtoken in method.vm_tokens.items(): self.all_vm_tokens[address] = vmtoken address += 1 if vmtoken.data is not None and vmtoken.vm_op != VMOp.NOP: address += len(vmtoken.data) vmtoken.addr = vmtoken.addr + method.address for key, vmtoken in self.all_vm_tokens.items(): if vmtoken.src_method is not None: target_method = self.method_by_name(vmtoken.target_method) if target_method: jump_len = target_method.address - vmtoken.addr param_ret_counts = bytearray() if Compiler.instance().nep8: param_ret_counts = vmtoken.data[0:2] jump_len -= 2 if jump_len > -32767 and jump_len < 32767: vmtoken.data = param_ret_counts + jump_len.to_bytes(2, 'little', signed=True) else: vmtoken.data = param_ret_counts + jump_len.to_bytes(4, 'little', signed=True) else: raise Exception("Target method %s not found" % vmtoken.target_method)
[ "def", "link_methods", "(", "self", ")", ":", "from", ".", ".", "compiler", "import", "Compiler", "for", "method", "in", "self", ".", "methods", ":", "method", ".", "prepare", "(", ")", "self", ".", "all_vm_tokens", "=", "OrderedDict", "(", ")", "address", "=", "0", "for", "method", "in", "self", ".", "orderered_methods", ":", "if", "not", "method", ".", "is_interop", ":", "# print(\"ADDING METHOD %s \" % method.full_name)", "method", ".", "address", "=", "address", "for", "key", ",", "vmtoken", "in", "method", ".", "vm_tokens", ".", "items", "(", ")", ":", "self", ".", "all_vm_tokens", "[", "address", "]", "=", "vmtoken", "address", "+=", "1", "if", "vmtoken", ".", "data", "is", "not", "None", "and", "vmtoken", ".", "vm_op", "!=", "VMOp", ".", "NOP", ":", "address", "+=", "len", "(", "vmtoken", ".", "data", ")", "vmtoken", ".", "addr", "=", "vmtoken", ".", "addr", "+", "method", ".", "address", "for", "key", ",", "vmtoken", "in", "self", ".", "all_vm_tokens", ".", "items", "(", ")", ":", "if", "vmtoken", ".", "src_method", "is", "not", "None", ":", "target_method", "=", "self", ".", "method_by_name", "(", "vmtoken", ".", "target_method", ")", "if", "target_method", ":", "jump_len", "=", "target_method", ".", "address", "-", "vmtoken", ".", "addr", "param_ret_counts", "=", "bytearray", "(", ")", "if", "Compiler", ".", "instance", "(", ")", ".", "nep8", ":", "param_ret_counts", "=", "vmtoken", ".", "data", "[", "0", ":", "2", "]", "jump_len", "-=", "2", "if", "jump_len", ">", "-", "32767", "and", "jump_len", "<", "32767", ":", "vmtoken", ".", "data", "=", "param_ret_counts", "+", "jump_len", ".", "to_bytes", "(", "2", ",", "'little'", ",", "signed", "=", "True", ")", "else", ":", "vmtoken", ".", "data", "=", "param_ret_counts", "+", "jump_len", ".", "to_bytes", "(", "4", ",", "'little'", ",", "signed", "=", "True", ")", "else", ":", "raise", "Exception", "(", "\"Target method %s not found\"", "%", "vmtoken", ".", "target_method", ")" ]
Perform linkage of addresses between methods.
[ "Perform", "linkage", "of", "addresses", "between", "methods", "." ]
train
https://github.com/CityOfZion/neo-boa/blob/5ec0f0acb2e2e3e4bbd3530252e6eae61b23d59b/boa/code/module.py#L234-L280
0.002907
iotile/coretools
iotileemulate/iotile/emulate/reference/controller_features/sensor_graph.py
SensorGraphSubsystem.persist
def persist(self): """Trigger saving the current sensorgraph to persistent storage.""" self.persisted_nodes = self.graph.dump_nodes() self.persisted_streamers = self.graph.dump_streamers() self.persisted_exists = True self.persisted_constants = self._sensor_log.dump_constants()
python
def persist(self): """Trigger saving the current sensorgraph to persistent storage.""" self.persisted_nodes = self.graph.dump_nodes() self.persisted_streamers = self.graph.dump_streamers() self.persisted_exists = True self.persisted_constants = self._sensor_log.dump_constants()
[ "def", "persist", "(", "self", ")", ":", "self", ".", "persisted_nodes", "=", "self", ".", "graph", ".", "dump_nodes", "(", ")", "self", ".", "persisted_streamers", "=", "self", ".", "graph", ".", "dump_streamers", "(", ")", "self", ".", "persisted_exists", "=", "True", "self", ".", "persisted_constants", "=", "self", ".", "_sensor_log", ".", "dump_constants", "(", ")" ]
Trigger saving the current sensorgraph to persistent storage.
[ "Trigger", "saving", "the", "current", "sensorgraph", "to", "persistent", "storage", "." ]
train
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotileemulate/iotile/emulate/reference/controller_features/sensor_graph.py#L332-L338
0.00627
uber/tchannel-python
tchannel/_queue.py
Queue.get_nowait
def get_nowait(self): """Returns a value from the queue without waiting. Raises ``QueueEmpty`` if no values are available right now. """ new_get = Future() with self._lock: if not self._get.done(): raise QueueEmpty get, self._get = self._get, new_get hole = get.result() if not hole.done(): # Restore the unfinished hole. new_get.set_result(hole) raise QueueEmpty node = hole.result() value = node.value new_hole, node.next = node.next, None new_get.set_result(new_hole) return value
python
def get_nowait(self): """Returns a value from the queue without waiting. Raises ``QueueEmpty`` if no values are available right now. """ new_get = Future() with self._lock: if not self._get.done(): raise QueueEmpty get, self._get = self._get, new_get hole = get.result() if not hole.done(): # Restore the unfinished hole. new_get.set_result(hole) raise QueueEmpty node = hole.result() value = node.value new_hole, node.next = node.next, None new_get.set_result(new_hole) return value
[ "def", "get_nowait", "(", "self", ")", ":", "new_get", "=", "Future", "(", ")", "with", "self", ".", "_lock", ":", "if", "not", "self", ".", "_get", ".", "done", "(", ")", ":", "raise", "QueueEmpty", "get", ",", "self", ".", "_get", "=", "self", ".", "_get", ",", "new_get", "hole", "=", "get", ".", "result", "(", ")", "if", "not", "hole", ".", "done", "(", ")", ":", "# Restore the unfinished hole.", "new_get", ".", "set_result", "(", "hole", ")", "raise", "QueueEmpty", "node", "=", "hole", ".", "result", "(", ")", "value", "=", "node", ".", "value", "new_hole", ",", "node", ".", "next", "=", "node", ".", "next", ",", "None", "new_get", ".", "set_result", "(", "new_hole", ")", "return", "value" ]
Returns a value from the queue without waiting. Raises ``QueueEmpty`` if no values are available right now.
[ "Returns", "a", "value", "from", "the", "queue", "without", "waiting", "." ]
train
https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/_queue.py#L135-L157
0.003035
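Hedged sketch of the non-blocking read path, assuming the tchannel-internal Queue is default-constructible; QueueEmpty is the exception the record itself raises:

from tchannel._queue import Queue, QueueEmpty

q = Queue()
try:
    value = q.get_nowait()
except QueueEmpty:
    value = None  # nothing buffered yet; caller can retry or await get()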
phaethon/kamene
kamene/fields.py
Field.randval
def randval(self): """Return a volatile object whose value is both random and suitable for this field""" fmtt = self.fmt[-1] if fmtt in "BHIQ": return {"B":RandByte,"H":RandShort,"I":RandInt, "Q":RandLong}[fmtt]() elif fmtt == "s": if self.fmt[0] in "0123456789": l = int(self.fmt[:-1]) else: l = int(self.fmt[1:-1]) return RandBin(l) else: warning("no random class for [%s] (fmt=%s)." % (self.name, self.fmt))
python
def randval(self): """Return a volatile object whose value is both random and suitable for this field""" fmtt = self.fmt[-1] if fmtt in "BHIQ": return {"B":RandByte,"H":RandShort,"I":RandInt, "Q":RandLong}[fmtt]() elif fmtt == "s": if self.fmt[0] in "0123456789": l = int(self.fmt[:-1]) else: l = int(self.fmt[1:-1]) return RandBin(l) else: warning("no random class for [%s] (fmt=%s)." % (self.name, self.fmt))
[ "def", "randval", "(", "self", ")", ":", "fmtt", "=", "self", ".", "fmt", "[", "-", "1", "]", "if", "fmtt", "in", "\"BHIQ\"", ":", "return", "{", "\"B\"", ":", "RandByte", ",", "\"H\"", ":", "RandShort", ",", "\"I\"", ":", "RandInt", ",", "\"Q\"", ":", "RandLong", "}", "[", "fmtt", "]", "(", ")", "elif", "fmtt", "==", "\"s\"", ":", "if", "self", ".", "fmt", "[", "0", "]", "in", "\"0123456789\"", ":", "l", "=", "int", "(", "self", ".", "fmt", "[", ":", "-", "1", "]", ")", "else", ":", "l", "=", "int", "(", "self", ".", "fmt", "[", "1", ":", "-", "1", "]", ")", "return", "RandBin", "(", "l", ")", "else", ":", "warning", "(", "\"no random class for [%s] (fmt=%s).\"", "%", "(", "self", ".", "name", ",", "self", ".", "fmt", ")", ")" ]
Return a volatile object whose value is both random and suitable for this field
[ "Return", "a", "volatile", "object", "whose", "value", "is", "both", "random", "and", "suitable", "for", "this", "field" ]
train
https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/fields.py#L93-L105
0.023985
ultrabug/py3status
py3status/py3.py
Py3.trigger_event
def trigger_event(self, module_name, event): """ Trigger an event on a named module. """ if module_name: self._py3_wrapper.events_thread.process_event(module_name, event)
python
def trigger_event(self, module_name, event): """ Trigger an event on a named module. """ if module_name: self._py3_wrapper.events_thread.process_event(module_name, event)
[ "def", "trigger_event", "(", "self", ",", "module_name", ",", "event", ")", ":", "if", "module_name", ":", "self", ".", "_py3_wrapper", ".", "events_thread", ".", "process_event", "(", "module_name", ",", "event", ")" ]
Trigger an event on a named module.
[ "Trigger", "an", "event", "on", "a", "named", "module", "." ]
train
https://github.com/ultrabug/py3status/blob/4c105f1b44f7384ca4f7da5f821a47e468c7dee2/py3status/py3.py#L531-L536
0.009346
trustar/trustar-python
trustar/indicator_client.py
IndicatorClient.get_indicators_metadata
def get_indicators_metadata(self, indicators): """ Provide metadata associated with a list of indicators, including value, indicatorType, noteCount, sightings, lastSeen, enclaveIds, and tags. The metadata is determined based on the enclaves the user making the request has READ access to. :param indicators: a list of |Indicator| objects to query. Values are required, types are optional. Types might be required to distinguish in a case where one indicator value has been associated with multiple types based on different contexts. :return: A list of |Indicator| objects. The following attributes of the objects will be returned: correlation_count, last_seen, sightings, notes, tags, enclave_ids. All other attributes of the Indicator objects will have Null values. """ data = [{ 'value': i.value, 'indicatorType': i.type } for i in indicators] resp = self._client.post("indicators/metadata", data=json.dumps(data)) return [Indicator.from_dict(x) for x in resp.json()]
python
def get_indicators_metadata(self, indicators): """ Provide metadata associated with a list of indicators, including value, indicatorType, noteCount, sightings, lastSeen, enclaveIds, and tags. The metadata is determined based on the enclaves the user making the request has READ access to. :param indicators: a list of |Indicator| objects to query. Values are required, types are optional. Types might be required to distinguish in a case where one indicator value has been associated with multiple types based on different contexts. :return: A list of |Indicator| objects. The following attributes of the objects will be returned: correlation_count, last_seen, sightings, notes, tags, enclave_ids. All other attributes of the Indicator objects will have Null values. """ data = [{ 'value': i.value, 'indicatorType': i.type } for i in indicators] resp = self._client.post("indicators/metadata", data=json.dumps(data)) return [Indicator.from_dict(x) for x in resp.json()]
[ "def", "get_indicators_metadata", "(", "self", ",", "indicators", ")", ":", "data", "=", "[", "{", "'value'", ":", "i", ".", "value", ",", "'indicatorType'", ":", "i", ".", "type", "}", "for", "i", "in", "indicators", "]", "resp", "=", "self", ".", "_client", ".", "post", "(", "\"indicators/metadata\"", ",", "data", "=", "json", ".", "dumps", "(", "data", ")", ")", "return", "[", "Indicator", ".", "from_dict", "(", "x", ")", "for", "x", "in", "resp", ".", "json", "(", ")", "]" ]
Provide metadata associated with a list of indicators, including value, indicatorType, noteCount, sightings, lastSeen, enclaveIds, and tags. The metadata is determined based on the enclaves the user making the request has READ access to. :param indicators: a list of |Indicator| objects to query. Values are required, types are optional. Types might be required to distinguish in a case where one indicator value has been associated with multiple types based on different contexts. :return: A list of |Indicator| objects. The following attributes of the objects will be returned: correlation_count, last_seen, sightings, notes, tags, enclave_ids. All other attributes of the Indicator objects will have Null values.
[ "Provide", "metadata", "associated", "with", "an", "list", "of", "indicators", "including", "value", "indicatorType", "noteCount", "sightings", "lastSeen", "enclaveIds", "and", "tags", ".", "The", "metadata", "is", "determined", "based", "on", "the", "enclaves", "the", "user", "making", "the", "request", "has", "READ", "access", "to", "." ]
train
https://github.com/trustar/trustar-python/blob/707d51adc58d68aed7de12a4ca37949cb75cf122/trustar/indicator_client.py#L300-L321
0.008757
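Placeholder configuration and indicator values; TruStar and Indicator come from the same trustar package, and the Indicator constructor kwargs are inferred from the attributes the method reads (value, type):

from trustar import TruStar, Indicator

ts = TruStar(config_file='trustar.conf')
batch = [Indicator(value='evil.example.com', type='URL'),
         Indicator(value='44d88612fea8a8f36de82e1278abb02f', type='MD5')]
for ind in ts.get_indicators_metadata(batch):
    print(ind.value, ind.sightings, ind.last_seen, ind.tags)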
poppy-project/pypot
pypot/vrep/remoteApiBindings/vrep.py
simxGetObjectOrientation
def simxGetObjectOrientation(clientID, objectHandle, relativeToObjectHandle, operationMode): ''' Please have a look at the function description/documentation in the V-REP user manual ''' eulerAngles = (ct.c_float*3)() ret = c_GetObjectOrientation(clientID, objectHandle, relativeToObjectHandle, eulerAngles, operationMode) arr = [] for i in range(3): arr.append(eulerAngles[i]) return ret, arr
python
def simxGetObjectOrientation(clientID, objectHandle, relativeToObjectHandle, operationMode): ''' Please have a look at the function description/documentation in the V-REP user manual ''' eulerAngles = (ct.c_float*3)() ret = c_GetObjectOrientation(clientID, objectHandle, relativeToObjectHandle, eulerAngles, operationMode) arr = [] for i in range(3): arr.append(eulerAngles[i]) return ret, arr
[ "def", "simxGetObjectOrientation", "(", "clientID", ",", "objectHandle", ",", "relativeToObjectHandle", ",", "operationMode", ")", ":", "eulerAngles", "=", "(", "ct", ".", "c_float", "*", "3", ")", "(", ")", "ret", "=", "c_GetObjectOrientation", "(", "clientID", ",", "objectHandle", ",", "relativeToObjectHandle", ",", "eulerAngles", ",", "operationMode", ")", "arr", "=", "[", "]", "for", "i", "in", "range", "(", "3", ")", ":", "arr", ".", "append", "(", "eulerAngles", "[", "i", "]", ")", "return", "ret", ",", "arr" ]
Please have a look at the function description/documentation in the V-REP user manual
[ "Please", "have", "a", "look", "at", "the", "function", "description", "/", "documentation", "in", "the", "V", "-", "REP", "user", "manual" ]
train
https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/remoteApiBindings/vrep.py#L536-L545
0.009238
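Standard remote-API session boilerplate around the binding above; address, port, and object name are placeholders for a running V-REP simulator:

from pypot.vrep.remoteApiBindings import vrep

client_id = vrep.simxStart('127.0.0.1', 19997, True, True, 5000, 5)
_, handle = vrep.simxGetObjectHandle(client_id, 'Cuboid', vrep.simx_opmode_oneshot_wait)
# relativeToObjectHandle=-1 -> world frame; euler is [alpha, beta, gamma] in radians
ret, euler = vrep.simxGetObjectOrientation(client_id, handle, -1, vrep.simx_opmode_oneshot_wait)
vrep.simxFinish(client_id)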
Chilipp/model-organization
docs/square_full.py
SquareModelOrganizer.run
def run(self, **kwargs): """ Run the model Parameters ---------- ``**kwargs`` Any other parameter for the :meth:`model_organization.ModelOrganizer.app_main` method """ from calculate import compute self.app_main(**kwargs) # get the default output name output = osp.join(self.exp_config['expdir'], 'output.dat') # save the paths in the configuration self.exp_config['output'] = output # run the model data = np.loadtxt(self.exp_config['infile']) out = compute(data) # save the output self.logger.info('Saving output data to %s', osp.relpath(output)) np.savetxt(output, out) # store some additional information in the configuration of the # experiment self.exp_config['mean'] = mean = float(out.mean()) self.exp_config['std'] = std = float(out.std()) self.logger.debug('Mean: %s, Standard deviation: %s', mean, std)
python
def run(self, **kwargs): """ Run the model Parameters ---------- ``**kwargs`` Any other parameter for the :meth:`model_organization.ModelOrganizer.app_main` method """ from calculate import compute self.app_main(**kwargs) # get the default output name output = osp.join(self.exp_config['expdir'], 'output.dat') # save the paths in the configuration self.exp_config['output'] = output # run the model data = np.loadtxt(self.exp_config['infile']) out = compute(data) # save the output self.logger.info('Saving output data to %s', osp.relpath(output)) np.savetxt(output, out) # store some additional information in the configuration of the # experiment self.exp_config['mean'] = mean = float(out.mean()) self.exp_config['std'] = std = float(out.std()) self.logger.debug('Mean: %s, Standard deviation: %s', mean, std)
[ "def", "run", "(", "self", ",", "*", "*", "kwargs", ")", ":", "from", "calculate", "import", "compute", "self", ".", "app_main", "(", "*", "*", "kwargs", ")", "# get the default output name", "output", "=", "osp", ".", "join", "(", "self", ".", "exp_config", "[", "'expdir'", "]", ",", "'output.dat'", ")", "# save the paths in the configuration", "self", ".", "exp_config", "[", "'output'", "]", "=", "output", "# run the model", "data", "=", "np", ".", "loadtxt", "(", "self", ".", "exp_config", "[", "'infile'", "]", ")", "out", "=", "compute", "(", "data", ")", "# save the output", "self", ".", "logger", ".", "info", "(", "'Saving output data to %s'", ",", "osp", ".", "relpath", "(", "output", ")", ")", "np", ".", "savetxt", "(", "output", ",", "out", ")", "# store some additional information in the configuration of the", "# experiment", "self", ".", "exp_config", "[", "'mean'", "]", "=", "mean", "=", "float", "(", "out", ".", "mean", "(", ")", ")", "self", ".", "exp_config", "[", "'std'", "]", "=", "std", "=", "float", "(", "out", ".", "std", "(", ")", ")", "self", ".", "logger", ".", "debug", "(", "'Mean: %s, Standard deviation: %s'", ",", "mean", ",", "std", ")" ]
Run the model Parameters ---------- ``**kwargs`` Any other parameter for the :meth:`model_organization.ModelOrganizer.app_main` method
[ "Run", "the", "model" ]
train
https://github.com/Chilipp/model-organization/blob/694d1219c7ed7e1b2b17153afa11bdc21169bca2/docs/square_full.py#L45-L75
0.001949
nicolargo/glances
glances/plugins/glances_irq.py
Plugin.update
def update(self): """Update the IRQ stats.""" # Init new stats stats = self.get_init_value() # IRQ plugin only available on GNU/Linux if not LINUX: return self.stats if self.input_method == 'local': # Grab the stats stats = self.irq.get() elif self.input_method == 'snmp': # not available pass # Get the TOP 5 (by rate/s) stats = sorted(stats, key=operator.itemgetter('irq_rate'), reverse=True)[:5] # Update the stats self.stats = stats return self.stats
python
def update(self): """Update the IRQ stats.""" # Init new stats stats = self.get_init_value() # IRQ plugin only available on GNU/Linux if not LINUX: return self.stats if self.input_method == 'local': # Grab the stats stats = self.irq.get() elif self.input_method == 'snmp': # not available pass # Get the TOP 5 (by rate/s) stats = sorted(stats, key=operator.itemgetter('irq_rate'), reverse=True)[:5] # Update the stats self.stats = stats return self.stats
[ "def", "update", "(", "self", ")", ":", "# Init new stats", "stats", "=", "self", ".", "get_init_value", "(", ")", "# IRQ plugin only available on GNU/Linux", "if", "not", "LINUX", ":", "return", "self", ".", "stats", "if", "self", ".", "input_method", "==", "'local'", ":", "# Grab the stats", "stats", "=", "self", ".", "irq", ".", "get", "(", ")", "elif", "self", ".", "input_method", "==", "'snmp'", ":", "# not available", "pass", "# Get the TOP 5 (by rate/s)", "stats", "=", "sorted", "(", "stats", ",", "key", "=", "operator", ".", "itemgetter", "(", "'irq_rate'", ")", ",", "reverse", "=", "True", ")", "[", ":", "5", "]", "# Update the stats", "self", ".", "stats", "=", "stats", "return", "self", ".", "stats" ]
Update the IRQ stats.
[ "Update", "the", "IRQ", "stats", "." ]
train
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/plugins/glances_irq.py#L53-L78
0.003021
okpy/ok-client
client/sources/common/pyconsole.py
PythonConsole.load
def load(self, code, setup='', teardown=''): """Prepares a set of setup, test, and teardown code to be run in the console. PARAMETERS: code -- list; processed lines of code. Elements in the list are either strings (input) or CodeAnswer objects (output) setup -- str; raw setup code teardown -- str; raw teardown code """ super().load(code, setup, teardown) self._frame = self._original_frame.copy()
python
def load(self, code, setup='', teardown=''): """Prepares a set of setup, test, and teardown code to be run in the console. PARAMETERS: code -- list; processed lines of code. Elements in the list are either strings (input) or CodeAnswer objects (output) setup -- str; raw setup code teardown -- str; raw teardown code """ super().load(code, setup, teardown) self._frame = self._original_frame.copy()
[ "def", "load", "(", "self", ",", "code", ",", "setup", "=", "''", ",", "teardown", "=", "''", ")", ":", "super", "(", ")", ".", "load", "(", "code", ",", "setup", ",", "teardown", ")", "self", ".", "_frame", "=", "self", ".", "_original_frame", ".", "copy", "(", ")" ]
Prepares a set of setup, test, and teardown code to be run in the console. PARAMETERS: code -- list; processed lines of code. Elements in the list are either strings (input) or CodeAnswer objects (output) setup -- str; raw setup code teardown -- str; raw teardown code
[ "Prepares", "a", "set", "of", "setup", "test", "and", "teardown", "code", "to", "be", "run", "in", "the", "console", "." ]
train
https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/sources/common/pyconsole.py#L20-L31
0.004016
log2timeline/plaso
plaso/parsers/opera.py
OperaGlobalHistoryParser._ParseAndValidateRecord
def _ParseAndValidateRecord(self, parser_mediator, text_file_object): """Parses and validates an Opera global history record. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. text_file_object (dfvfs.TextFile): text file. Returns: bool: True if the record was successfully parsed. """ try: title = text_file_object.readline(size=self._MAXIMUM_LINE_SIZE) url = text_file_object.readline(size=self._MAXIMUM_LINE_SIZE) timestamp = text_file_object.readline(size=self._MAXIMUM_LINE_SIZE) popularity_index = text_file_object.readline(size=self._MAXIMUM_LINE_SIZE) except UnicodeDecodeError: return False if len(title) == self._MAXIMUM_LINE_SIZE and title[-1] != '\n': return False if len(url) == self._MAXIMUM_LINE_SIZE and url[-1] != '\n': return False if len(timestamp) == self._MAXIMUM_LINE_SIZE and timestamp[-1] != '\n': return False if (len(popularity_index) == self._MAXIMUM_LINE_SIZE and popularity_index[-1] != '\n'): return False title = title.strip() url = url.strip() timestamp = timestamp.strip() popularity_index = popularity_index.strip() if not title or not url or not timestamp or not popularity_index: return False event_data = OperaGlobalHistoryEventData() if not self._IsValidUrl(url): return False event_data.url = url if title != url: event_data.title = title try: event_data.popularity_index = int(popularity_index, 10) timestamp = int(timestamp, 10) except ValueError: return False if event_data.popularity_index < 0: event_data.description = 'First and Only Visit' else: event_data.description = 'Last Visit' date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_LAST_VISITED) parser_mediator.ProduceEventWithEventData(event, event_data) return True
python
def _ParseAndValidateRecord(self, parser_mediator, text_file_object): """Parses and validates an Opera global history record. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. text_file_object (dfvfs.TextFile): text file. Returns: bool: True if the record was successfully parsed. """ try: title = text_file_object.readline(size=self._MAXIMUM_LINE_SIZE) url = text_file_object.readline(size=self._MAXIMUM_LINE_SIZE) timestamp = text_file_object.readline(size=self._MAXIMUM_LINE_SIZE) popularity_index = text_file_object.readline(size=self._MAXIMUM_LINE_SIZE) except UnicodeDecodeError: return False if len(title) == self._MAXIMUM_LINE_SIZE and title[-1] != '\n': return False if len(url) == self._MAXIMUM_LINE_SIZE and url[-1] != '\n': return False if len(timestamp) == self._MAXIMUM_LINE_SIZE and timestamp[-1] != '\n': return False if (len(popularity_index) == self._MAXIMUM_LINE_SIZE and popularity_index[-1] != '\n'): return False title = title.strip() url = url.strip() timestamp = timestamp.strip() popularity_index = popularity_index.strip() if not title or not url or not timestamp or not popularity_index: return False event_data = OperaGlobalHistoryEventData() if not self._IsValidUrl(url): return False event_data.url = url if title != url: event_data.title = title try: event_data.popularity_index = int(popularity_index, 10) timestamp = int(timestamp, 10) except ValueError: return False if event_data.popularity_index < 0: event_data.description = 'First and Only Visit' else: event_data.description = 'Last Visit' date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_LAST_VISITED) parser_mediator.ProduceEventWithEventData(event, event_data) return True
[ "def", "_ParseAndValidateRecord", "(", "self", ",", "parser_mediator", ",", "text_file_object", ")", ":", "try", ":", "title", "=", "text_file_object", ".", "readline", "(", "size", "=", "self", ".", "_MAXIMUM_LINE_SIZE", ")", "url", "=", "text_file_object", ".", "readline", "(", "size", "=", "self", ".", "_MAXIMUM_LINE_SIZE", ")", "timestamp", "=", "text_file_object", ".", "readline", "(", "size", "=", "self", ".", "_MAXIMUM_LINE_SIZE", ")", "popularity_index", "=", "text_file_object", ".", "readline", "(", "size", "=", "self", ".", "_MAXIMUM_LINE_SIZE", ")", "except", "UnicodeDecodeError", ":", "return", "False", "if", "len", "(", "title", ")", "==", "self", ".", "_MAXIMUM_LINE_SIZE", "and", "title", "[", "-", "1", "]", "!=", "'\\n'", ":", "return", "False", "if", "len", "(", "url", ")", "==", "self", ".", "_MAXIMUM_LINE_SIZE", "and", "url", "[", "-", "1", "]", "!=", "'\\n'", ":", "return", "False", "if", "len", "(", "timestamp", ")", "==", "self", ".", "_MAXIMUM_LINE_SIZE", "and", "timestamp", "[", "-", "1", "]", "!=", "'\\n'", ":", "return", "False", "if", "(", "len", "(", "popularity_index", ")", "==", "self", ".", "_MAXIMUM_LINE_SIZE", "and", "popularity_index", "[", "-", "1", "]", "!=", "'\\n'", ")", ":", "return", "False", "title", "=", "title", ".", "strip", "(", ")", "url", "=", "url", ".", "strip", "(", ")", "timestamp", "=", "timestamp", ".", "strip", "(", ")", "popularity_index", "=", "popularity_index", ".", "strip", "(", ")", "if", "not", "title", "or", "not", "url", "or", "not", "timestamp", "or", "not", "popularity_index", ":", "return", "False", "event_data", "=", "OperaGlobalHistoryEventData", "(", ")", "if", "not", "self", ".", "_IsValidUrl", "(", "url", ")", ":", "return", "False", "event_data", ".", "url", "=", "url", "if", "title", "!=", "url", ":", "event_data", ".", "title", "=", "title", "try", ":", "event_data", ".", "popularity_index", "=", "int", "(", "popularity_index", ",", "10", ")", "timestamp", "=", "int", "(", "timestamp", ",", "10", ")", "except", "ValueError", ":", "return", "False", "if", "event_data", ".", "popularity_index", "<", "0", ":", "event_data", ".", "description", "=", "'First and Only Visit'", "else", ":", "event_data", ".", "description", "=", "'Last Visit'", "date_time", "=", "dfdatetime_posix_time", ".", "PosixTime", "(", "timestamp", "=", "timestamp", ")", "event", "=", "time_events", ".", "DateTimeValuesEvent", "(", "date_time", ",", "definitions", ".", "TIME_DESCRIPTION_LAST_VISITED", ")", "parser_mediator", ".", "ProduceEventWithEventData", "(", "event", ",", "event_data", ")", "return", "True" ]
Parses and validates an Opera global history record. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. text_file_object (dfvfs.TextFile): text file. Returns: bool: True if the record was successfully parsed.
[ "Parses", "and", "validates", "an", "Opera", "global", "history", "record", "." ]
train
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/opera.py#L238-L303
0.009492
python-openxml/python-docx
docx/document.py
Document.add_heading
def add_heading(self, text="", level=1): """Return a heading paragraph newly added to the end of the document. The heading paragraph will contain *text* and have its paragraph style determined by *level*. If *level* is 0, the style is set to `Title`. If *level* is 1 (or omitted), `Heading 1` is used. Otherwise the style is set to `Heading {level}`. Raises |ValueError| if *level* is outside the range 0-9. """ if not 0 <= level <= 9: raise ValueError("level must be in range 0-9, got %d" % level) style = "Title" if level == 0 else "Heading %d" % level return self.add_paragraph(text, style)
python
def add_heading(self, text="", level=1): """Return a heading paragraph newly added to the end of the document. The heading paragraph will contain *text* and have its paragraph style determined by *level*. If *level* is 0, the style is set to `Title`. If *level* is 1 (or omitted), `Heading 1` is used. Otherwise the style is set to `Heading {level}`. Raises |ValueError| if *level* is outside the range 0-9. """ if not 0 <= level <= 9: raise ValueError("level must be in range 0-9, got %d" % level) style = "Title" if level == 0 else "Heading %d" % level return self.add_paragraph(text, style)
[ "def", "add_heading", "(", "self", ",", "text", "=", "\"\"", ",", "level", "=", "1", ")", ":", "if", "not", "0", "<=", "level", "<=", "9", ":", "raise", "ValueError", "(", "\"level must be in range 0-9, got %d\"", "%", "level", ")", "style", "=", "\"Title\"", "if", "level", "==", "0", "else", "\"Heading %d\"", "%", "level", "return", "self", ".", "add_paragraph", "(", "text", ",", "style", ")" ]
Return a heading paragraph newly added to the end of the document. The heading paragraph will contain *text* and have its paragraph style determined by *level*. If *level* is 0, the style is set to `Title`. If *level* is 1 (or omitted), `Heading 1` is used. Otherwise the style is set to `Heading {level}`. Raises |ValueError| if *level* is outside the range 0-9.
[ "Return", "a", "heading", "paragraph", "newly", "added", "to", "the", "end", "of", "the", "document", "." ]
train
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/document.py#L28-L39
0.0059
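Every call below is the public python-docx API the docstring describes:

from docx import Document

doc = Document()
doc.add_heading('Annual Report', level=0)  # 'Title' style
doc.add_heading('Results')                 # default level=1 -> 'Heading 1'
doc.add_heading('Details', level=2)        # 'Heading 2'
doc.save('report.docx')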
faxir/faxir-python
faxir/api/outbox_api.py
OutboxApi.get_outbox_fax
def get_outbox_fax(self, outbox_fax_id, **kwargs): # noqa: E501 """Get an outbox record # noqa: E501 Get an outbox fax record information # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_outbox_fax(outbox_fax_id, async=True) >>> result = thread.get() :param async bool :param str outbox_fax_id: (required) :return: Outbox If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_outbox_fax_with_http_info(outbox_fax_id, **kwargs) # noqa: E501 else: (data) = self.get_outbox_fax_with_http_info(outbox_fax_id, **kwargs) # noqa: E501 return data
python
def get_outbox_fax(self, outbox_fax_id, **kwargs): # noqa: E501 """Get an outbox record # noqa: E501 Get an outbox fax record information # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_outbox_fax(outbox_fax_id, async=True) >>> result = thread.get() :param async bool :param str outbox_fax_id: (required) :return: Outbox If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_outbox_fax_with_http_info(outbox_fax_id, **kwargs) # noqa: E501 else: (data) = self.get_outbox_fax_with_http_info(outbox_fax_id, **kwargs) # noqa: E501 return data
[ "def", "get_outbox_fax", "(", "self", ",", "outbox_fax_id", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "self", ".", "get_outbox_fax_with_http_info", "(", "outbox_fax_id", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "get_outbox_fax_with_http_info", "(", "outbox_fax_id", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
Get an outbox record # noqa: E501 Get an outbox fax record information # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_outbox_fax(outbox_fax_id, async=True) >>> result = thread.get() :param async bool :param str outbox_fax_id: (required) :return: Outbox If the method is called asynchronously, returns the request thread.
[ "Get", "an", "outbox", "record", "#", "noqa", ":", "E501" ]
train
https://github.com/faxir/faxir-python/blob/75ed2ea487a6be537342baea1077a02b0c8e70c1/faxir/api/outbox_api.py#L135-L155
0.002172
thelabnyc/wagtail_blog
blog/management/commands/wordpress_to_wagtail.py
Command.lookup_comment_by_wordpress_id
def lookup_comment_by_wordpress_id(self, comment_id, comments): """ Returns Django comment object with this wordpress id """ for comment in comments: if comment.wordpress_id == comment_id: return comment
python
def lookup_comment_by_wordpress_id(self, comment_id, comments): """ Returns Django comment object with this wordpress id """ for comment in comments: if comment.wordpress_id == comment_id: return comment
[ "def", "lookup_comment_by_wordpress_id", "(", "self", ",", "comment_id", ",", "comments", ")", ":", "for", "comment", "in", "comments", ":", "if", "comment", ".", "wordpress_id", "==", "comment_id", ":", "return", "comment" ]
Returns Django comment object with this wordpress id
[ "Returns", "Django", "comment", "object", "with", "this", "wordpress", "id" ]
train
https://github.com/thelabnyc/wagtail_blog/blob/7e092c02d10ec427c9a2c4b5dcbe910d88c628cf/blog/management/commands/wordpress_to_wagtail.py#L223-L227
0.008097
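The lookup_comment_by_wordpress_id record above scans linearly on every call. A minimal alternative sketch, assuming the comments fit in memory and wordpress_id values are unique (variable names are illustrative, not from wagtail_blog):

comments_by_wp_id = {c.wordpress_id: c for c in comments}  # build once, O(n)
comment = comments_by_wp_id.get(comment_id)  # O(1) per lookup; None if absent, like the original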
sunlightlabs/django-locksmith
locksmith/hub/views.py
confirm_registration
def confirm_registration(request, key, template="locksmith/confirmed.html"): ''' API key confirmation visiting this URL marks a Key as ready for use ''' context = {'LOCKSMITH_BASE_TEMPLATE': settings.LOCKSMITH_BASE_TEMPLATE} try: context['key'] = key_obj = Key.objects.get(key=key) if key_obj.status != 'U': context['error'] = 'Key Already Activated' else: key_obj.status = 'A' key_obj.save() key_obj.mark_for_update() except Key.DoesNotExist: context['error'] = 'Invalid Key' return render_to_response(template, context, context_instance=RequestContext(request))
python
def confirm_registration(request, key, template="locksmith/confirmed.html"): ''' API key confirmation visiting this URL marks a Key as ready for use ''' context = {'LOCKSMITH_BASE_TEMPLATE': settings.LOCKSMITH_BASE_TEMPLATE} try: context['key'] = key_obj = Key.objects.get(key=key) if key_obj.status != 'U': context['error'] = 'Key Already Activated' else: key_obj.status = 'A' key_obj.save() key_obj.mark_for_update() except Key.DoesNotExist: context['error'] = 'Invalid Key' return render_to_response(template, context, context_instance=RequestContext(request))
[ "def", "confirm_registration", "(", "request", ",", "key", ",", "template", "=", "\"locksmith/confirmed.html\"", ")", ":", "context", "=", "{", "'LOCKSMITH_BASE_TEMPLATE'", ":", "settings", ".", "LOCKSMITH_BASE_TEMPLATE", "}", "try", ":", "context", "[", "'key'", "]", "=", "key_obj", "=", "Key", ".", "objects", ".", "get", "(", "key", "=", "key", ")", "if", "key_obj", ".", "status", "!=", "'U'", ":", "context", "[", "'error'", "]", "=", "'Key Already Activated'", "else", ":", "key_obj", ".", "status", "=", "'A'", "key_obj", ".", "save", "(", ")", "key_obj", ".", "mark_for_update", "(", ")", "except", "Key", ".", "DoesNotExist", ":", "context", "[", "'error'", "]", "=", "'Invalid Key'", "return", "render_to_response", "(", "template", ",", "context", ",", "context_instance", "=", "RequestContext", "(", "request", ")", ")" ]
API key confirmation visiting this URL marks a Key as ready for use
[ "API", "key", "confirmation" ]
train
https://github.com/sunlightlabs/django-locksmith/blob/eef5b7c25404560aaad50b6e622594f89239b74b/locksmith/hub/views.py#L158-L176
0.001401
pandas-dev/pandas
pandas/core/algorithms.py
_ensure_data
def _ensure_data(values, dtype=None): """ routine to ensure that our data is of the correct input dtype for lower-level routines This will coerce: - ints -> int64 - uint -> uint64 - bool -> uint64 (TODO this should be uint8) - datetimelike -> i8 - datetime64tz -> i8 (in local tz) - categorical -> codes Parameters ---------- values : array-like dtype : pandas_dtype, optional coerce to this dtype Returns ------- (ndarray, pandas_dtype, algo dtype as a string) """ # we check some simple dtypes first try: if is_object_dtype(dtype): return ensure_object(np.asarray(values)), 'object', 'object' if is_bool_dtype(values) or is_bool_dtype(dtype): # we are actually coercing to uint64 # until our algos support uint8 directly (see TODO) return np.asarray(values).astype('uint64'), 'bool', 'uint64' elif is_signed_integer_dtype(values) or is_signed_integer_dtype(dtype): return ensure_int64(values), 'int64', 'int64' elif (is_unsigned_integer_dtype(values) or is_unsigned_integer_dtype(dtype)): return ensure_uint64(values), 'uint64', 'uint64' elif is_float_dtype(values) or is_float_dtype(dtype): return ensure_float64(values), 'float64', 'float64' elif is_object_dtype(values) and dtype is None: return ensure_object(np.asarray(values)), 'object', 'object' elif is_complex_dtype(values) or is_complex_dtype(dtype): # ignore the fact that we are casting to float # which discards complex parts with catch_warnings(): simplefilter("ignore", np.ComplexWarning) values = ensure_float64(values) return values, 'float64', 'float64' except (TypeError, ValueError, OverflowError): # if we are trying to coerce to a dtype # and it is incompat this will fall thru to here return ensure_object(values), 'object', 'object' # datetimelike if (needs_i8_conversion(values) or is_period_dtype(dtype) or is_datetime64_any_dtype(dtype) or is_timedelta64_dtype(dtype)): if is_period_dtype(values) or is_period_dtype(dtype): from pandas import PeriodIndex values = PeriodIndex(values) dtype = values.dtype elif is_timedelta64_dtype(values) or is_timedelta64_dtype(dtype): from pandas import TimedeltaIndex values = TimedeltaIndex(values) dtype = values.dtype else: # Datetime from pandas import DatetimeIndex values = DatetimeIndex(values) dtype = values.dtype return values.asi8, dtype, 'int64' elif (is_categorical_dtype(values) and (is_categorical_dtype(dtype) or dtype is None)): values = getattr(values, 'values', values) values = values.codes dtype = 'category' # we are actually coercing to int64 # until our algos support int* directly (not all do) values = ensure_int64(values) return values, dtype, 'int64' # we have failed, return object values = np.asarray(values, dtype=np.object) return ensure_object(values), 'object', 'object'
python
def _ensure_data(values, dtype=None): """ routine to ensure that our data is of the correct input dtype for lower-level routines This will coerce: - ints -> int64 - uint -> uint64 - bool -> uint64 (TODO this should be uint8) - datetimelike -> i8 - datetime64tz -> i8 (in local tz) - categorical -> codes Parameters ---------- values : array-like dtype : pandas_dtype, optional coerce to this dtype Returns ------- (ndarray, pandas_dtype, algo dtype as a string) """ # we check some simple dtypes first try: if is_object_dtype(dtype): return ensure_object(np.asarray(values)), 'object', 'object' if is_bool_dtype(values) or is_bool_dtype(dtype): # we are actually coercing to uint64 # until our algos support uint8 directly (see TODO) return np.asarray(values).astype('uint64'), 'bool', 'uint64' elif is_signed_integer_dtype(values) or is_signed_integer_dtype(dtype): return ensure_int64(values), 'int64', 'int64' elif (is_unsigned_integer_dtype(values) or is_unsigned_integer_dtype(dtype)): return ensure_uint64(values), 'uint64', 'uint64' elif is_float_dtype(values) or is_float_dtype(dtype): return ensure_float64(values), 'float64', 'float64' elif is_object_dtype(values) and dtype is None: return ensure_object(np.asarray(values)), 'object', 'object' elif is_complex_dtype(values) or is_complex_dtype(dtype): # ignore the fact that we are casting to float # which discards complex parts with catch_warnings(): simplefilter("ignore", np.ComplexWarning) values = ensure_float64(values) return values, 'float64', 'float64' except (TypeError, ValueError, OverflowError): # if we are trying to coerce to a dtype # and it is incompat this will fall thru to here return ensure_object(values), 'object', 'object' # datetimelike if (needs_i8_conversion(values) or is_period_dtype(dtype) or is_datetime64_any_dtype(dtype) or is_timedelta64_dtype(dtype)): if is_period_dtype(values) or is_period_dtype(dtype): from pandas import PeriodIndex values = PeriodIndex(values) dtype = values.dtype elif is_timedelta64_dtype(values) or is_timedelta64_dtype(dtype): from pandas import TimedeltaIndex values = TimedeltaIndex(values) dtype = values.dtype else: # Datetime from pandas import DatetimeIndex values = DatetimeIndex(values) dtype = values.dtype return values.asi8, dtype, 'int64' elif (is_categorical_dtype(values) and (is_categorical_dtype(dtype) or dtype is None)): values = getattr(values, 'values', values) values = values.codes dtype = 'category' # we are actually coercing to int64 # until our algos support int* directly (not all do) values = ensure_int64(values) return values, dtype, 'int64' # we have failed, return object values = np.asarray(values, dtype=np.object) return ensure_object(values), 'object', 'object'
[ "def", "_ensure_data", "(", "values", ",", "dtype", "=", "None", ")", ":", "# we check some simple dtypes first", "try", ":", "if", "is_object_dtype", "(", "dtype", ")", ":", "return", "ensure_object", "(", "np", ".", "asarray", "(", "values", ")", ")", ",", "'object'", ",", "'object'", "if", "is_bool_dtype", "(", "values", ")", "or", "is_bool_dtype", "(", "dtype", ")", ":", "# we are actually coercing to uint64", "# until our algos support uint8 directly (see TODO)", "return", "np", ".", "asarray", "(", "values", ")", ".", "astype", "(", "'uint64'", ")", ",", "'bool'", ",", "'uint64'", "elif", "is_signed_integer_dtype", "(", "values", ")", "or", "is_signed_integer_dtype", "(", "dtype", ")", ":", "return", "ensure_int64", "(", "values", ")", ",", "'int64'", ",", "'int64'", "elif", "(", "is_unsigned_integer_dtype", "(", "values", ")", "or", "is_unsigned_integer_dtype", "(", "dtype", ")", ")", ":", "return", "ensure_uint64", "(", "values", ")", ",", "'uint64'", ",", "'uint64'", "elif", "is_float_dtype", "(", "values", ")", "or", "is_float_dtype", "(", "dtype", ")", ":", "return", "ensure_float64", "(", "values", ")", ",", "'float64'", ",", "'float64'", "elif", "is_object_dtype", "(", "values", ")", "and", "dtype", "is", "None", ":", "return", "ensure_object", "(", "np", ".", "asarray", "(", "values", ")", ")", ",", "'object'", ",", "'object'", "elif", "is_complex_dtype", "(", "values", ")", "or", "is_complex_dtype", "(", "dtype", ")", ":", "# ignore the fact that we are casting to float", "# which discards complex parts", "with", "catch_warnings", "(", ")", ":", "simplefilter", "(", "\"ignore\"", ",", "np", ".", "ComplexWarning", ")", "values", "=", "ensure_float64", "(", "values", ")", "return", "values", ",", "'float64'", ",", "'float64'", "except", "(", "TypeError", ",", "ValueError", ",", "OverflowError", ")", ":", "# if we are trying to coerce to a dtype", "# and it is incompat this will fall thru to here", "return", "ensure_object", "(", "values", ")", ",", "'object'", ",", "'object'", "# datetimelike", "if", "(", "needs_i8_conversion", "(", "values", ")", "or", "is_period_dtype", "(", "dtype", ")", "or", "is_datetime64_any_dtype", "(", "dtype", ")", "or", "is_timedelta64_dtype", "(", "dtype", ")", ")", ":", "if", "is_period_dtype", "(", "values", ")", "or", "is_period_dtype", "(", "dtype", ")", ":", "from", "pandas", "import", "PeriodIndex", "values", "=", "PeriodIndex", "(", "values", ")", "dtype", "=", "values", ".", "dtype", "elif", "is_timedelta64_dtype", "(", "values", ")", "or", "is_timedelta64_dtype", "(", "dtype", ")", ":", "from", "pandas", "import", "TimedeltaIndex", "values", "=", "TimedeltaIndex", "(", "values", ")", "dtype", "=", "values", ".", "dtype", "else", ":", "# Datetime", "from", "pandas", "import", "DatetimeIndex", "values", "=", "DatetimeIndex", "(", "values", ")", "dtype", "=", "values", ".", "dtype", "return", "values", ".", "asi8", ",", "dtype", ",", "'int64'", "elif", "(", "is_categorical_dtype", "(", "values", ")", "and", "(", "is_categorical_dtype", "(", "dtype", ")", "or", "dtype", "is", "None", ")", ")", ":", "values", "=", "getattr", "(", "values", ",", "'values'", ",", "values", ")", "values", "=", "values", ".", "codes", "dtype", "=", "'category'", "# we are actually coercing to int64", "# until our algos support int* directly (not all do)", "values", "=", "ensure_int64", "(", "values", ")", "return", "values", ",", "dtype", ",", "'int64'", "# we have failed, return object", "values", "=", "np", ".", "asarray", "(", "values", ",", "dtype", "=", 
"np", ".", "object", ")", "return", "ensure_object", "(", "values", ")", ",", "'object'", ",", "'object'" ]
routine to ensure that our data is of the correct input dtype for lower-level routines This will coerce: - ints -> int64 - uint -> uint64 - bool -> uint64 (TODO this should be uint8) - datetimelike -> i8 - datetime64tz -> i8 (in local tz) - categorical -> codes Parameters ---------- values : array-like dtype : pandas_dtype, optional coerce to this dtype Returns ------- (ndarray, pandas_dtype, algo dtype as a string)
[ "routine", "to", "ensure", "that", "our", "data", "is", "of", "the", "correct", "input", "dtype", "for", "lower", "-", "level", "routines" ]
train
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L36-L127
0.000296
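A standalone miniature of the dtype-dispatch table in the _ensure_data record above, using plain numpy; this illustrates the coercion rules (bool -> uint64, ints -> int64/uint64, floats -> float64, datetimelike -> i8, fallback -> object) and is not pandas' actual implementation:

import numpy as np

def ensure_data_sketch(values):
    arr = np.asarray(values)
    if arr.dtype == np.bool_:
        return arr.astype('uint64'), 'bool', 'uint64'      # bools ride on uint64
    if np.issubdtype(arr.dtype, np.signedinteger):
        return arr.astype('int64'), 'int64', 'int64'
    if np.issubdtype(arr.dtype, np.unsignedinteger):
        return arr.astype('uint64'), 'uint64', 'uint64'
    if np.issubdtype(arr.dtype, np.floating):
        return arr.astype('float64'), 'float64', 'float64'
    if np.issubdtype(arr.dtype, np.datetime64):
        return arr.view('int64'), arr.dtype, 'int64'       # datetimelike -> i8
    return arr.astype(object), 'object', 'object'          # failed, return object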
childsish/lhc-python
lhc/misc/performance_measures.py
confusion_matrix
def confusion_matrix(exp, obs): """Create a confusion matrix In each axis of the resulting confusion matrix the negative case is 0-index and the positive case 1-index. The labels get sorted, in a True/False scenario true positives will occur at (1,1). The first dimension (rows) of the resulting matrix is the expected class and the second dimension (columns) is the observed class. :param exp: expected values :type exp: list of float :param obs: observed values :type obs: list of float :rtype: tuple of square matrix and sorted labels """ assert len(exp) == len(obs) # Expected in the first dimension (0;rows), observed in the second (1;cols) lbls = sorted(set(exp)) res = numpy.zeros(shape=(len(lbls), len(lbls))) for i in range(len(exp)): res[lbls.index(exp[i]), lbls.index(obs[i])] += 1 return res, lbls
python
def confusion_matrix(exp, obs): """Create a confusion matrix In each axis of the resulting confusion matrix the negative case is 0-index and the positive case 1-index. The labels get sorted, in a True/False scenario true positives will occur at (1,1). The first dimension (rows) of the resulting matrix is the expected class and the second dimension (columns) is the observed class. :param exp: expected values :type exp: list of float :param obs: observed values :type obs: list of float :rtype: tuple of square matrix and sorted labels """ assert len(exp) == len(obs) # Expected in the first dimension (0;rows), observed in the second (1;cols) lbls = sorted(set(exp)) res = numpy.zeros(shape=(len(lbls), len(lbls))) for i in range(len(exp)): res[lbls.index(exp[i]), lbls.index(obs[i])] += 1 return res, lbls
[ "def", "confusion_matrix", "(", "exp", ",", "obs", ")", ":", "assert", "len", "(", "exp", ")", "==", "len", "(", "obs", ")", "# Expected in the first dimension (0;rows), observed in the second (1;cols)", "lbls", "=", "sorted", "(", "set", "(", "exp", ")", ")", "res", "=", "numpy", ".", "zeros", "(", "shape", "=", "(", "len", "(", "lbls", ")", ",", "len", "(", "lbls", ")", ")", ")", "for", "i", "in", "range", "(", "len", "(", "exp", ")", ")", ":", "res", "[", "lbls", ".", "index", "(", "exp", "[", "i", "]", ")", ",", "lbls", ".", "index", "(", "obs", "[", "i", "]", ")", "]", "+=", "1", "return", "res", ",", "lbls" ]
Create a confusion matrix In each axis of the resulting confusion matrix the negative case is 0-index and the positive case 1-index. The labels get sorted, in a True/False scenario true positives will occur at (1,1). The first dimension (rows) of the resulting matrix is the expected class and the second dimension (columns) is the observed class. :param exp: expected values :type exp: list of float :param obs: observed values :type obs: list of float :rtype: tuple of square matrix and sorted labels
[ "Create", "a", "confusion", "matrix", "In", "each", "axis", "of", "the", "resulting", "confusion", "matrix", "the", "negative", "case", "is", "0", "-", "index", "and", "the", "positive", "case", "1", "-", "index", ".", "The", "labels", "get", "sorted", "in", "a", "True", "/", "False", "scenario", "true", "positives", "will", "occur", "at", "(", "1", "1", ")", ".", "The", "first", "dimension", "(", "rows", ")", "of", "the", "resulting", "matrix", "is", "the", "expected", "class", "and", "the", "second", "dimension", "(", "columns", ")", "is", "the", "observed", "class", ".", ":", "param", "exp", ":", "expected", "values", ":", "type", "exp", ":", "list", "of", "float", ":", "param", "obs", ":", "observed", "values", ":", "type", "obs", ":", "list", "of", "float", ":", "rtype", ":", "tuple", "of", "square", "matrix", "and", "sorted", "labels" ]
train
https://github.com/childsish/lhc-python/blob/0a669f46a40a39f24d28665e8b5b606dc7e86beb/lhc/misc/performance_measures.py#L152-L173
0.003337
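A worked example for the confusion_matrix record above; the values follow directly from the definition (expected on rows, observed on columns, labels sorted so True is index 1):

from lhc.misc.performance_measures import confusion_matrix

exp = [True, True, False, False]
obs = [True, False, False, False]
res, lbls = confusion_matrix(exp, obs)
# lbls == [False, True]
# res == [[2., 0.],   # two true negatives
#         [1., 1.]]   # one false negative; the true positive sits at (1, 1)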
rigetti/pyquil
pyquil/wavefunction.py
Wavefunction.get_outcome_probs
def get_outcome_probs(self): """ Parses a wavefunction (array of complex amplitudes) and returns a dictionary of outcomes and associated probabilities. :return: A dict with outcomes as keys and probabilities as values. :rtype: dict """ outcome_dict = {} qubit_num = len(self) for index, amplitude in enumerate(self.amplitudes): outcome = get_bitstring_from_index(index, qubit_num) outcome_dict[outcome] = abs(amplitude) ** 2 return outcome_dict
python
def get_outcome_probs(self): """ Parses a wavefunction (array of complex amplitudes) and returns a dictionary of outcomes and associated probabilities. :return: A dict with outcomes as keys and probabilities as values. :rtype: dict """ outcome_dict = {} qubit_num = len(self) for index, amplitude in enumerate(self.amplitudes): outcome = get_bitstring_from_index(index, qubit_num) outcome_dict[outcome] = abs(amplitude) ** 2 return outcome_dict
[ "def", "get_outcome_probs", "(", "self", ")", ":", "outcome_dict", "=", "{", "}", "qubit_num", "=", "len", "(", "self", ")", "for", "index", ",", "amplitude", "in", "enumerate", "(", "self", ".", "amplitudes", ")", ":", "outcome", "=", "get_bitstring_from_index", "(", "index", ",", "qubit_num", ")", "outcome_dict", "[", "outcome", "]", "=", "abs", "(", "amplitude", ")", "**", "2", "return", "outcome_dict" ]
Parses a wavefunction (array of complex amplitudes) and returns a dictionary of outcomes and associated probabilities. :return: A dict with outcomes as keys and probabilities as values. :rtype: dict
[ "Parses", "a", "wavefunction", "(", "array", "of", "complex", "amplitudes", ")", "and", "returns", "a", "dictionary", "of", "outcomes", "and", "associated", "probabilities", "." ]
train
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/wavefunction.py#L115-L128
0.005464
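The same |amplitude|**2 computation as a self-contained sketch; the amplitudes describe a two-qubit Bell state, and the plain-binary bitstring ordering here is an assumption that may differ from pyquil's get_bitstring_from_index convention:

import math

amplitudes = [1 / math.sqrt(2), 0, 0, 1 / math.sqrt(2)]  # (|00> + |11>) / sqrt(2)
qubit_num = 2
probs = {format(i, '0{}b'.format(qubit_num)): abs(a) ** 2
         for i, a in enumerate(amplitudes)}
# probs == {'00': 0.5, '01': 0.0, '10': 0.0, '11': 0.5} (up to float rounding)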
Kortemme-Lab/klab
klab/bio/fragments/generate_fragments.py
reformat
def reformat(found_sequences): '''Truncate the FASTA headers so that the first field is a 4-character ID.''' for (pdb_id, chain, file_name), sequence in sorted(found_sequences.iteritems()): header = sequence[0] assert(header[0] == '>') tokens = header.split('|') tokens[0] = tokens[0][:5] assert(len(tokens[0]) == 5) sequence[0] = "|".join(tokens)
python
def reformat(found_sequences): '''Truncate the FASTA headers so that the first field is a 4-character ID.''' for (pdb_id, chain, file_name), sequence in sorted(found_sequences.iteritems()): header = sequence[0] assert(header[0] == '>') tokens = header.split('|') tokens[0] = tokens[0][:5] assert(len(tokens[0]) == 5) sequence[0] = "|".join(tokens)
[ "def", "reformat", "(", "found_sequences", ")", ":", "for", "(", "pdb_id", ",", "chain", ",", "file_name", ")", ",", "sequence", "in", "sorted", "(", "found_sequences", ".", "iteritems", "(", ")", ")", ":", "header", "=", "sequence", "[", "0", "]", "assert", "(", "header", "[", "0", "]", "==", "'>'", ")", "tokens", "=", "header", ".", "split", "(", "'|'", ")", "tokens", "[", "0", "]", "=", "tokens", "[", "0", "]", "[", ":", "5", "]", "assert", "(", "len", "(", "tokens", "[", "0", "]", ")", "==", "5", ")", "sequence", "[", "0", "]", "=", "\"|\"", ".", "join", "(", "tokens", ")" ]
Truncate the FASTA headers so that the first field is a 4-character ID.
[ "Truncate", "the", "FASTA", "headers", "so", "that", "the", "first", "field", "is", "a", "4", "-", "character", "ID", "." ]
train
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/fragments/generate_fragments.py#L723-L731
0.012407
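A worked example of the header truncation in the reformat record above; the header text is illustrative:

sequence = ['>1abcA|PDBID|CHAIN|SEQUENCE', 'MKVLAT']
tokens = sequence[0].split('|')
tokens[0] = tokens[0][:5]        # keep '>' plus the 4-character ID: '>1abc'
sequence[0] = '|'.join(tokens)   # '>1abc|PDBID|CHAIN|SEQUENCE'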
improbable-research/keanu
keanu-python/keanu/vertex/generated.py
BooleanTake
def BooleanTake(input_vertex: vertex_constructor_param_types, index: Collection[int], label: Optional[str]=None) -> Vertex: """ A vertex that extracts a scalar at a given index :param input_vertex: the input vertex to extract from :param index: the index to extract at """ return Boolean(context.jvm_view().BooleanTakeVertex, label, cast_to_vertex(input_vertex), cast_to_long_array(index))
python
def BooleanTake(input_vertex: vertex_constructor_param_types, index: Collection[int], label: Optional[str]=None) -> Vertex: """ A vertex that extracts a scalar at a given index :param input_vertex: the input vertex to extract from :param index: the index to extract at """ return Boolean(context.jvm_view().BooleanTakeVertex, label, cast_to_vertex(input_vertex), cast_to_long_array(index))
[ "def", "BooleanTake", "(", "input_vertex", ":", "vertex_constructor_param_types", ",", "index", ":", "Collection", "[", "int", "]", ",", "label", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "Vertex", ":", "return", "Boolean", "(", "context", ".", "jvm_view", "(", ")", ".", "BooleanTakeVertex", ",", "label", ",", "cast_to_vertex", "(", "input_vertex", ")", ",", "cast_to_long_array", "(", "index", ")", ")" ]
A vertex that extracts a scalar at a given index :param input_vertex: the input vertex to extract from :param index: the index to extract at
[ "A", "vertex", "that", "extracts", "a", "scalar", "at", "a", "given", "index", ":", "param", "input_vertex", ":", "the", "input", "vertex", "to", "extract", "from", ":", "param", "index", ":", "the", "index", "to", "extract", "at" ]
train
https://github.com/improbable-research/keanu/blob/73189a8f569078e156168e795f82c7366c59574b/keanu-python/keanu/vertex/generated.py#L221-L228
0.014354
python-diamond/Diamond
src/collectors/netapp/netapp.py
NetAppCollector._gen_delta_depend
def _gen_delta_depend(self, path, derivative, multiplier, prettyname, device): """ For some metrics we need to divide the delta of one metric by the delta of another. Publishes a metric if the conversion goes well. """ primary_delta = derivative[path] shortpath = ".".join(path.split(".")[:-1]) basename = path.split(".")[-1] secondary_delta = None if basename in self.DIVIDERS.keys(): mateKey = ".".join([shortpath, self.DIVIDERS[basename]]) else: return if mateKey in derivative.keys(): secondary_delta = derivative[mateKey] else: return # If we find a corresponding secondary_delta, publish a metric if primary_delta > 0 and secondary_delta > 0: value = (float(primary_delta) / secondary_delta) * multiplier self._replace_and_publish(path, prettyname, value, device)
python
def _gen_delta_depend(self, path, derivative, multiplier, prettyname, device): """ For some metrics we need to divide the delta of one metric by the delta of another. Publishes a metric if the conversion goes well. """ primary_delta = derivative[path] shortpath = ".".join(path.split(".")[:-1]) basename = path.split(".")[-1] secondary_delta = None if basename in self.DIVIDERS.keys(): mateKey = ".".join([shortpath, self.DIVIDERS[basename]]) else: return if mateKey in derivative.keys(): secondary_delta = derivative[mateKey] else: return # If we find a corresponding secondary_delta, publish a metric if primary_delta > 0 and secondary_delta > 0: value = (float(primary_delta) / secondary_delta) * multiplier self._replace_and_publish(path, prettyname, value, device)
[ "def", "_gen_delta_depend", "(", "self", ",", "path", ",", "derivative", ",", "multiplier", ",", "prettyname", ",", "device", ")", ":", "primary_delta", "=", "derivative", "[", "path", "]", "shortpath", "=", "\".\"", ".", "join", "(", "path", ".", "split", "(", "\".\"", ")", "[", ":", "-", "1", "]", ")", "basename", "=", "path", ".", "split", "(", "\".\"", ")", "[", "-", "1", "]", "secondary_delta", "=", "None", "if", "basename", "in", "self", ".", "DIVIDERS", ".", "keys", "(", ")", ":", "mateKey", "=", "\".\"", ".", "join", "(", "[", "shortpath", ",", "self", ".", "DIVIDERS", "[", "basename", "]", "]", ")", "else", ":", "return", "if", "mateKey", "in", "derivative", ".", "keys", "(", ")", ":", "secondary_delta", "=", "derivative", "[", "mateKey", "]", "else", ":", "return", "# If we find a corresponding secondary_delta, publish a metric", "if", "primary_delta", ">", "0", "and", "secondary_delta", ">", "0", ":", "value", "=", "(", "float", "(", "primary_delta", ")", "/", "secondary_delta", ")", "*", "multiplier", "self", ".", "_replace_and_publish", "(", "path", ",", "prettyname", ",", "value", ",", "device", ")" ]
For some metrics we need to divide the delta of one metric by the delta of another. Publishes a metric if the conversion goes well.
[ "For", "some", "metrics", "we", "need", "to", "divide", "the", "delta", "of", "one", "metric", "by", "the", "delta", "of", "another", ".", "Publishes", "a", "metric", "if", "the", "conversion", "goes", "well", "." ]
train
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/netapp/netapp.py#L217-L240
0.003027
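The core arithmetic of _gen_delta_depend as a worked example; the numbers are illustrative. Dividing the delta of a primary counter by the delta of its divider counter over the same interval yields a per-unit average:

primary_delta = 5000    # e.g. total latency accumulated over the interval
secondary_delta = 250   # e.g. operations counted over the same interval
multiplier = 1
value = (float(primary_delta) / secondary_delta) * multiplier  # 20.0 per operation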
django-danceschool/django-danceschool
danceschool/core/models.py
Invoice.calculateTaxes
def calculateTaxes(self): ''' Updates the tax field to reflect the amount of taxes depending on the local rate as well as whether the buyer or seller pays sales tax. ''' tax_rate = (getConstant('registration__salesTaxRate') or 0) / 100 if tax_rate > 0: if self.buyerPaysSalesTax: # If the buyer pays taxes, then taxes are just added as a fraction of the price self.taxes = self.total * tax_rate else: # If the seller pays sales taxes, then adjusted_total will be their net revenue, # and under this calculation adjusted_total + taxes = the price charged adjusted_total = self.total / (1 + tax_rate) self.taxes = adjusted_total * tax_rate
python
def calculateTaxes(self): ''' Updates the tax field to reflect the amount of taxes depending on the local rate as well as whether the buyer or seller pays sales tax. ''' tax_rate = (getConstant('registration__salesTaxRate') or 0) / 100 if tax_rate > 0: if self.buyerPaysSalesTax: # If the buyer pays taxes, then taxes are just added as a fraction of the price self.taxes = self.total * tax_rate else: # If the seller pays sales taxes, then adjusted_total will be their net revenue, # and under this calculation adjusted_total + taxes = the price charged adjusted_total = self.total / (1 + tax_rate) self.taxes = adjusted_total * tax_rate
[ "def", "calculateTaxes", "(", "self", ")", ":", "tax_rate", "=", "(", "getConstant", "(", "'registration__salesTaxRate'", ")", "or", "0", ")", "/", "100", "if", "tax_rate", ">", "0", ":", "if", "self", ".", "buyerPaysSalesTax", ":", "# If the buyer pays taxes, then taxes are just added as a fraction of the price", "self", ".", "taxes", "=", "self", ".", "total", "*", "tax_rate", "else", ":", "# If the seller pays sales taxes, then adjusted_total will be their net revenue,", "# and under this calculation adjusted_total + taxes = the price charged", "adjusted_total", "=", "self", ".", "total", "/", "(", "1", "+", "tax_rate", ")", "self", ".", "taxes", "=", "adjusted_total", "*", "tax_rate" ]
Updates the tax field to reflect the amount of taxes depending on the local rate as well as whether the buyer or seller pays sales tax.
[ "Updates", "the", "tax", "field", "to", "reflect", "the", "amount", "of", "taxes", "depending", "on", "the", "local", "rate", "as", "well", "as", "whether", "the", "buyer", "or", "seller", "pays", "sales", "tax", "." ]
train
https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/core/models.py#L2777-L2793
0.006196
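A worked example of the two branches in calculateTaxes; the 8% rate and 100.0 total are illustrative:

tax_rate = 8 / 100
total = 100.0
# Buyer pays: tax is added on top of the price.
buyer_taxes = total * tax_rate              # 8.0, so the buyer is charged 108.0
# Seller pays: tax is carved out of the price charged.
adjusted_total = total / (1 + tax_rate)     # ~92.59 net revenue
seller_taxes = adjusted_total * tax_rate    # ~7.41; 92.59 + 7.41 == 100.0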
zabuldon/teslajsonpy
teslajsonpy/gps.py
GPS.update
def update(self): """Update the current GPS location.""" self._controller.update(self._id, wake_if_asleep=False) data = self._controller.get_drive_params(self._id) if data: self.__longitude = data['longitude'] self.__latitude = data['latitude'] self.__heading = data['heading'] if self.__longitude and self.__latitude and self.__heading: self.__location = {'longitude': self.__longitude, 'latitude': self.__latitude, 'heading': self.__heading}
python
def update(self): """Update the current GPS location.""" self._controller.update(self._id, wake_if_asleep=False) data = self._controller.get_drive_params(self._id) if data: self.__longitude = data['longitude'] self.__latitude = data['latitude'] self.__heading = data['heading'] if self.__longitude and self.__latitude and self.__heading: self.__location = {'longitude': self.__longitude, 'latitude': self.__latitude, 'heading': self.__heading}
[ "def", "update", "(", "self", ")", ":", "self", ".", "_controller", ".", "update", "(", "self", ".", "_id", ",", "wake_if_asleep", "=", "False", ")", "data", "=", "self", ".", "_controller", ".", "get_drive_params", "(", "self", ".", "_id", ")", "if", "data", ":", "self", ".", "__longitude", "=", "data", "[", "'longitude'", "]", "self", ".", "__latitude", "=", "data", "[", "'latitude'", "]", "self", ".", "__heading", "=", "data", "[", "'heading'", "]", "if", "self", ".", "__longitude", "and", "self", ".", "__latitude", "and", "self", ".", "__heading", ":", "self", ".", "__location", "=", "{", "'longitude'", ":", "self", ".", "__longitude", ",", "'latitude'", ":", "self", ".", "__latitude", ",", "'heading'", ":", "self", ".", "__heading", "}" ]
Update the current GPS location.
[ "Update", "the", "current", "GPS", "location", "." ]
train
https://github.com/zabuldon/teslajsonpy/blob/673ecdb5c9483160fb1b97e30e62f2c863761c39/teslajsonpy/gps.py#L53-L64
0.003373
titusjan/argos
argos/repo/filesytemrtis.py
createRtiFromFileName
def createRtiFromFileName(fileName): """ Determines the type of RepoTreeItem to use given a file name and creates it. Uses a DirectoryRti for directories and an UnknownFileRti if the file extension doesn't match one of the registered RTI extensions. """ cls, rtiRegItem = detectRtiFromFileName(fileName) if cls is None: logger.warn("Unable to import plugin {}: {}" .format(rtiRegItem.fullName, rtiRegItem.exception)) rti = UnknownFileRti.createFromFileName(fileName) rti.setException(rtiRegItem.exception) else: rti = cls.createFromFileName(fileName) assert rti, "Sanity check failed (createRtiFromFileName). Please report this bug." return rti
python
def createRtiFromFileName(fileName): """ Determines the type of RepoTreeItem to use given a file name and creates it. Uses a DirectoryRti for directories and an UnknownFileRti if the file extension doesn't match one of the registered RTI extensions. """ cls, rtiRegItem = detectRtiFromFileName(fileName) if cls is None: logger.warn("Unable to import plugin {}: {}" .format(rtiRegItem.fullName, rtiRegItem.exception)) rti = UnknownFileRti.createFromFileName(fileName) rti.setException(rtiRegItem.exception) else: rti = cls.createFromFileName(fileName) assert rti, "Sanity check failed (createRtiFromFileName). Please report this bug." return rti
[ "def", "createRtiFromFileName", "(", "fileName", ")", ":", "cls", ",", "rtiRegItem", "=", "detectRtiFromFileName", "(", "fileName", ")", "if", "cls", "is", "None", ":", "logger", ".", "warn", "(", "\"Unable to import plugin {}: {}\"", ".", "format", "(", "rtiRegItem", ".", "fullName", ",", "rtiRegItem", ".", "exception", ")", ")", "rti", "=", "UnknownFileRti", ".", "createFromFileName", "(", "fileName", ")", "rti", ".", "setException", "(", "rtiRegItem", ".", "exception", ")", "else", ":", "rti", "=", "cls", ".", "createFromFileName", "(", "fileName", ")", "assert", "rti", ",", "\"Sanity check failed (createRtiFromFileName). Please report this bug.\"", "return", "rti" ]
Determines the type of RepoTreeItem to use given a file name and creates it. Uses a DirectoryRti for directories and an UnknownFileRti if the file extension doesn't match one of the registered RTI extensions.
[ "Determines", "the", "type", "of", "RepoTreeItem", "to", "use", "given", "a", "file", "name", "and", "creates", "it", ".", "Uses", "a", "DirectoryRti", "for", "directories", "and", "an", "UnknownFileRti", "if", "the", "file", "extension", "doesn", "t", "match", "one", "of", "the", "registered", "RTI", "extensions", "." ]
train
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/repo/filesytemrtis.py#L113-L128
0.004049
line/line-bot-sdk-python
linebot/utils.py
safe_compare_digest
def safe_compare_digest(val1, val2): """safe_compare_digest method. :param val1: string or bytes for compare :type val1: str | bytes :param val2: string or bytes for compare :type val2: str | bytes """ if len(val1) != len(val2): return False result = 0 if PY3 and isinstance(val1, bytes) and isinstance(val2, bytes): for i, j in zip(val1, val2): result |= i ^ j else: for i, j in zip(val1, val2): result |= (ord(i) ^ ord(j)) return result == 0
python
def safe_compare_digest(val1, val2): """safe_compare_digest method. :param val1: string or bytes for compare :type val1: str | bytes :param val2: string or bytes for compare :type val2: str | bytes """ if len(val1) != len(val2): return False result = 0 if PY3 and isinstance(val1, bytes) and isinstance(val2, bytes): for i, j in zip(val1, val2): result |= i ^ j else: for i, j in zip(val1, val2): result |= (ord(i) ^ ord(j)) return result == 0
[ "def", "safe_compare_digest", "(", "val1", ",", "val2", ")", ":", "if", "len", "(", "val1", ")", "!=", "len", "(", "val2", ")", ":", "return", "False", "result", "=", "0", "if", "PY3", "and", "isinstance", "(", "val1", ",", "bytes", ")", "and", "isinstance", "(", "val2", ",", "bytes", ")", ":", "for", "i", ",", "j", "in", "zip", "(", "val1", ",", "val2", ")", ":", "result", "|=", "i", "^", "j", "else", ":", "for", "i", ",", "j", "in", "zip", "(", "val1", ",", "val2", ")", ":", "result", "|=", "(", "ord", "(", "i", ")", "^", "ord", "(", "j", ")", ")", "return", "result", "==", "0" ]
safe_compare_digest method. :param val1: string or bytes for compare :type val1: str | bytes :param val2: string or bytes for compare :type val2: str | bytes
[ "safe_compare_digest", "method", "." ]
train
https://github.com/line/line-bot-sdk-python/blob/1b38bfc2497ff3e3c75be4b50e0f1b7425a07ce0/linebot/utils.py#L50-L69
0.001859
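A usage sketch for safe_compare_digest; the signature strings are illustrative. XOR-accumulating over every character means the comparison takes the same time whether the first or the last byte differs, which avoids leaking the mismatch position through timing:

from linebot.utils import safe_compare_digest

expected = 'a9f0e61a137d86aa9db53465e0801612'
received = 'a9f0e61a137d86aa9db53465e0801613'
if not safe_compare_digest(expected, received):
    raise ValueError('signature mismatch')
# On Python 2.7.7+ / 3.3+, hmac.compare_digest offers the same guarantee.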
apache/incubator-mxnet
python/mxnet/gluon/model_zoo/vision/__init__.py
get_model
def get_model(name, **kwargs): """Returns a pre-defined model by name Parameters ---------- name : str Name of the model. pretrained : bool Whether to load the pretrained weights for model. classes : int Number of classes for the output layer. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '$MXNET_HOME/models' Location for keeping the model parameters. Returns ------- HybridBlock The model. """ models = {'resnet18_v1': resnet18_v1, 'resnet34_v1': resnet34_v1, 'resnet50_v1': resnet50_v1, 'resnet101_v1': resnet101_v1, 'resnet152_v1': resnet152_v1, 'resnet18_v2': resnet18_v2, 'resnet34_v2': resnet34_v2, 'resnet50_v2': resnet50_v2, 'resnet101_v2': resnet101_v2, 'resnet152_v2': resnet152_v2, 'vgg11': vgg11, 'vgg13': vgg13, 'vgg16': vgg16, 'vgg19': vgg19, 'vgg11_bn': vgg11_bn, 'vgg13_bn': vgg13_bn, 'vgg16_bn': vgg16_bn, 'vgg19_bn': vgg19_bn, 'alexnet': alexnet, 'densenet121': densenet121, 'densenet161': densenet161, 'densenet169': densenet169, 'densenet201': densenet201, 'squeezenet1.0': squeezenet1_0, 'squeezenet1.1': squeezenet1_1, 'inceptionv3': inception_v3, 'mobilenet1.0': mobilenet1_0, 'mobilenet0.75': mobilenet0_75, 'mobilenet0.5': mobilenet0_5, 'mobilenet0.25': mobilenet0_25, 'mobilenetv2_1.0': mobilenet_v2_1_0, 'mobilenetv2_0.75': mobilenet_v2_0_75, 'mobilenetv2_0.5': mobilenet_v2_0_5, 'mobilenetv2_0.25': mobilenet_v2_0_25 } name = name.lower() if name not in models: raise ValueError( 'Model %s is not supported. Available options are\n\t%s' % ( name, '\n\t'.join(sorted(models.keys())))) return models[name](**kwargs)
python
def get_model(name, **kwargs): """Returns a pre-defined model by name Parameters ---------- name : str Name of the model. pretrained : bool Whether to load the pretrained weights for model. classes : int Number of classes for the output layer. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '$MXNET_HOME/models' Location for keeping the model parameters. Returns ------- HybridBlock The model. """ models = {'resnet18_v1': resnet18_v1, 'resnet34_v1': resnet34_v1, 'resnet50_v1': resnet50_v1, 'resnet101_v1': resnet101_v1, 'resnet152_v1': resnet152_v1, 'resnet18_v2': resnet18_v2, 'resnet34_v2': resnet34_v2, 'resnet50_v2': resnet50_v2, 'resnet101_v2': resnet101_v2, 'resnet152_v2': resnet152_v2, 'vgg11': vgg11, 'vgg13': vgg13, 'vgg16': vgg16, 'vgg19': vgg19, 'vgg11_bn': vgg11_bn, 'vgg13_bn': vgg13_bn, 'vgg16_bn': vgg16_bn, 'vgg19_bn': vgg19_bn, 'alexnet': alexnet, 'densenet121': densenet121, 'densenet161': densenet161, 'densenet169': densenet169, 'densenet201': densenet201, 'squeezenet1.0': squeezenet1_0, 'squeezenet1.1': squeezenet1_1, 'inceptionv3': inception_v3, 'mobilenet1.0': mobilenet1_0, 'mobilenet0.75': mobilenet0_75, 'mobilenet0.5': mobilenet0_5, 'mobilenet0.25': mobilenet0_25, 'mobilenetv2_1.0': mobilenet_v2_1_0, 'mobilenetv2_0.75': mobilenet_v2_0_75, 'mobilenetv2_0.5': mobilenet_v2_0_5, 'mobilenetv2_0.25': mobilenet_v2_0_25 } name = name.lower() if name not in models: raise ValueError( 'Model %s is not supported. Available options are\n\t%s' % ( name, '\n\t'.join(sorted(models.keys())))) return models[name](**kwargs)
[ "def", "get_model", "(", "name", ",", "*", "*", "kwargs", ")", ":", "models", "=", "{", "'resnet18_v1'", ":", "resnet18_v1", ",", "'resnet34_v1'", ":", "resnet34_v1", ",", "'resnet50_v1'", ":", "resnet50_v1", ",", "'resnet101_v1'", ":", "resnet101_v1", ",", "'resnet152_v1'", ":", "resnet152_v1", ",", "'resnet18_v2'", ":", "resnet18_v2", ",", "'resnet34_v2'", ":", "resnet34_v2", ",", "'resnet50_v2'", ":", "resnet50_v2", ",", "'resnet101_v2'", ":", "resnet101_v2", ",", "'resnet152_v2'", ":", "resnet152_v2", ",", "'vgg11'", ":", "vgg11", ",", "'vgg13'", ":", "vgg13", ",", "'vgg16'", ":", "vgg16", ",", "'vgg19'", ":", "vgg19", ",", "'vgg11_bn'", ":", "vgg11_bn", ",", "'vgg13_bn'", ":", "vgg13_bn", ",", "'vgg16_bn'", ":", "vgg16_bn", ",", "'vgg19_bn'", ":", "vgg19_bn", ",", "'alexnet'", ":", "alexnet", ",", "'densenet121'", ":", "densenet121", ",", "'densenet161'", ":", "densenet161", ",", "'densenet169'", ":", "densenet169", ",", "'densenet201'", ":", "densenet201", ",", "'squeezenet1.0'", ":", "squeezenet1_0", ",", "'squeezenet1.1'", ":", "squeezenet1_1", ",", "'inceptionv3'", ":", "inception_v3", ",", "'mobilenet1.0'", ":", "mobilenet1_0", ",", "'mobilenet0.75'", ":", "mobilenet0_75", ",", "'mobilenet0.5'", ":", "mobilenet0_5", ",", "'mobilenet0.25'", ":", "mobilenet0_25", ",", "'mobilenetv2_1.0'", ":", "mobilenet_v2_1_0", ",", "'mobilenetv2_0.75'", ":", "mobilenet_v2_0_75", ",", "'mobilenetv2_0.5'", ":", "mobilenet_v2_0_5", ",", "'mobilenetv2_0.25'", ":", "mobilenet_v2_0_25", "}", "name", "=", "name", ".", "lower", "(", ")", "if", "name", "not", "in", "models", ":", "raise", "ValueError", "(", "'Model %s is not supported. Available options are\\n\\t%s'", "%", "(", "name", ",", "'\\n\\t'", ".", "join", "(", "sorted", "(", "models", ".", "keys", "(", ")", ")", ")", ")", ")", "return", "models", "[", "name", "]", "(", "*", "*", "kwargs", ")" ]
Returns a pre-defined model by name Parameters ---------- name : str Name of the model. pretrained : bool Whether to load the pretrained weights for model. classes : int Number of classes for the output layer. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '$MXNET_HOME/models' Location for keeping the model parameters. Returns ------- HybridBlock The model.
[ "Returns", "a", "pre", "-", "defined", "model", "by", "name" ]
train
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/model_zoo/vision/__init__.py#L91-L152
0.0009
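A usage sketch for the get_model registry lookup above, assuming mxnet is installed; the class count is illustrative:

from mxnet.gluon.model_zoo import vision

net = vision.get_model('resnet18_v1', pretrained=False, classes=10)
# vision.get_model('resnet18') raises ValueError listing the supported names.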
cenkalti/kuyruk
kuyruk/worker.py
Worker.run
def run(self) -> None: """Runs the worker and consumes messages from RabbitMQ. Returns only after `shutdown()` is called. """ if self._logging_level: logging.basicConfig( level=getattr(logging, self._logging_level.upper()), format="%(levelname).1s %(name)s.%(funcName)s:%(lineno)d - %(message)s") signal.signal(signal.SIGINT, self._handle_sigint) signal.signal(signal.SIGTERM, self._handle_sigterm) if platform.system() != 'Windows': # These features will not be available on Windows, but that is OK. # Read this issue for more details: # https://github.com/cenkalti/kuyruk/issues/54 signal.signal(signal.SIGHUP, self._handle_sighup) signal.signal(signal.SIGUSR1, self._handle_sigusr1) signal.signal(signal.SIGUSR2, self._handle_sigusr2) self._started_at = os.times().elapsed for t in self._threads: t.start() try: signals.worker_start.send(self.kuyruk, worker=self) self._consume_messages() signals.worker_shutdown.send(self.kuyruk, worker=self) finally: self.shutdown_pending.set() for t in self._threads: t.join() logger.debug("End run worker")
python
def run(self) -> None: """Runs the worker and consumes messages from RabbitMQ. Returns only after `shutdown()` is called. """ if self._logging_level: logging.basicConfig( level=getattr(logging, self._logging_level.upper()), format="%(levelname).1s %(name)s.%(funcName)s:%(lineno)d - %(message)s") signal.signal(signal.SIGINT, self._handle_sigint) signal.signal(signal.SIGTERM, self._handle_sigterm) if platform.system() != 'Windows': # These features will not be available on Windows, but that is OK. # Read this issue for more details: # https://github.com/cenkalti/kuyruk/issues/54 signal.signal(signal.SIGHUP, self._handle_sighup) signal.signal(signal.SIGUSR1, self._handle_sigusr1) signal.signal(signal.SIGUSR2, self._handle_sigusr2) self._started_at = os.times().elapsed for t in self._threads: t.start() try: signals.worker_start.send(self.kuyruk, worker=self) self._consume_messages() signals.worker_shutdown.send(self.kuyruk, worker=self) finally: self.shutdown_pending.set() for t in self._threads: t.join() logger.debug("End run worker")
[ "def", "run", "(", "self", ")", "->", "None", ":", "if", "self", ".", "_logging_level", ":", "logging", ".", "basicConfig", "(", "level", "=", "getattr", "(", "logging", ",", "self", ".", "_logging_level", ".", "upper", "(", ")", ")", ",", "format", "=", "\"%(levelname).1s %(name)s.%(funcName)s:%(lineno)d - %(message)s\"", ")", "signal", ".", "signal", "(", "signal", ".", "SIGINT", ",", "self", ".", "_handle_sigint", ")", "signal", ".", "signal", "(", "signal", ".", "SIGTERM", ",", "self", ".", "_handle_sigterm", ")", "if", "platform", ".", "system", "(", ")", "!=", "'Windows'", ":", "# These features will not be available on Windows, but that is OK.", "# Read this issue for more details:", "# https://github.com/cenkalti/kuyruk/issues/54", "signal", ".", "signal", "(", "signal", ".", "SIGHUP", ",", "self", ".", "_handle_sighup", ")", "signal", ".", "signal", "(", "signal", ".", "SIGUSR1", ",", "self", ".", "_handle_sigusr1", ")", "signal", ".", "signal", "(", "signal", ".", "SIGUSR2", ",", "self", ".", "_handle_sigusr2", ")", "self", ".", "_started_at", "=", "os", ".", "times", "(", ")", ".", "elapsed", "for", "t", "in", "self", ".", "_threads", ":", "t", ".", "start", "(", ")", "try", ":", "signals", ".", "worker_start", ".", "send", "(", "self", ".", "kuyruk", ",", "worker", "=", "self", ")", "self", ".", "_consume_messages", "(", ")", "signals", ".", "worker_shutdown", ".", "send", "(", "self", ".", "kuyruk", ",", "worker", "=", "self", ")", "finally", ":", "self", ".", "shutdown_pending", ".", "set", "(", ")", "for", "t", "in", "self", ".", "_threads", ":", "t", ".", "join", "(", ")", "logger", ".", "debug", "(", "\"End run worker\"", ")" ]
Runs the worker and consumes messages from RabbitMQ. Returns only after `shutdown()` is called.
[ "Runs", "the", "worker", "and", "consumes", "messages", "from", "RabbitMQ", ".", "Returns", "only", "after", "shutdown", "()", "is", "called", "." ]
train
https://github.com/cenkalti/kuyruk/blob/c99d66be9d8fb077610f2fa883d5a1d268b42f04/kuyruk/worker.py#L81-L115
0.002217
ourway/auth
auth/CAS/authorization.py
Authorization.user_has_permission
def user_has_permission(self, user, name): """ verify user has permission """ targetRecord = AuthMembership.objects(creator=self.client, user=user).first() if not targetRecord: return False for group in targetRecord.groups: if self.has_permission(group.role, name): return True return False
python
def user_has_permission(self, user, name): """ verify user has permission """ targetRecord = AuthMembership.objects(creator=self.client, user=user).first() if not targetRecord: return False for group in targetRecord.groups: if self.has_permission(group.role, name): return True return False
[ "def", "user_has_permission", "(", "self", ",", "user", ",", "name", ")", ":", "targetRecord", "=", "AuthMembership", ".", "objects", "(", "creator", "=", "self", ".", "client", ",", "user", "=", "user", ")", ".", "first", "(", ")", "if", "not", "targetRecord", ":", "return", "False", "for", "group", "in", "targetRecord", ".", "groups", ":", "if", "self", ".", "has_permission", "(", "group", ".", "role", ",", "name", ")", ":", "return", "True", "return", "False" ]
verify user has permission
[ "verify", "user", "has", "permission" ]
train
https://github.com/ourway/auth/blob/f0d9676854dcec494add4fa086a9b2a3e4d8cea5/auth/CAS/authorization.py#L174-L182
0.008108
dwavesystems/dimod
dimod/decorators.py
bqm_index_labels
def bqm_index_labels(f): """Decorator to convert a bqm to index-labels and relabel the sample set output. Designed to be applied to :meth:`.Sampler.sample`. Expects the wrapped function or method to accept a :obj:`.BinaryQuadraticModel` as the second input and to return a :obj:`.SampleSet`. """ @wraps(f) def _index_label(sampler, bqm, **kwargs): if not hasattr(bqm, 'linear'): raise TypeError('expected input to be a BinaryQuadraticModel') linear = bqm.linear # if already index-labelled, just continue if all(v in linear for v in range(len(bqm))): return f(sampler, bqm, **kwargs) try: inverse_mapping = dict(enumerate(sorted(linear))) except TypeError: # in python3 unlike types cannot be sorted inverse_mapping = dict(enumerate(linear)) mapping = {v: i for i, v in iteritems(inverse_mapping)} response = f(sampler, bqm.relabel_variables(mapping, inplace=False), **kwargs) # unapply the relabeling return response.relabel_variables(inverse_mapping, inplace=True) return _index_label
python
def bqm_index_labels(f): """Decorator to convert a bqm to index-labels and relabel the sample set output. Designed to be applied to :meth:`.Sampler.sample`. Expects the wrapped function or method to accept a :obj:`.BinaryQuadraticModel` as the second input and to return a :obj:`.SampleSet`. """ @wraps(f) def _index_label(sampler, bqm, **kwargs): if not hasattr(bqm, 'linear'): raise TypeError('expected input to be a BinaryQuadraticModel') linear = bqm.linear # if already index-labelled, just continue if all(v in linear for v in range(len(bqm))): return f(sampler, bqm, **kwargs) try: inverse_mapping = dict(enumerate(sorted(linear))) except TypeError: # in python3 unlike types cannot be sorted inverse_mapping = dict(enumerate(linear)) mapping = {v: i for i, v in iteritems(inverse_mapping)} response = f(sampler, bqm.relabel_variables(mapping, inplace=False), **kwargs) # unapply the relabeling return response.relabel_variables(inverse_mapping, inplace=True) return _index_label
[ "def", "bqm_index_labels", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "_index_label", "(", "sampler", ",", "bqm", ",", "*", "*", "kwargs", ")", ":", "if", "not", "hasattr", "(", "bqm", ",", "'linear'", ")", ":", "raise", "TypeError", "(", "'expected input to be a BinaryQuadraticModel'", ")", "linear", "=", "bqm", ".", "linear", "# if already index-labelled, just continue", "if", "all", "(", "v", "in", "linear", "for", "v", "in", "range", "(", "len", "(", "bqm", ")", ")", ")", ":", "return", "f", "(", "sampler", ",", "bqm", ",", "*", "*", "kwargs", ")", "try", ":", "inverse_mapping", "=", "dict", "(", "enumerate", "(", "sorted", "(", "linear", ")", ")", ")", "except", "TypeError", ":", "# in python3 unlike types cannot be sorted", "inverse_mapping", "=", "dict", "(", "enumerate", "(", "linear", ")", ")", "mapping", "=", "{", "v", ":", "i", "for", "i", ",", "v", "in", "iteritems", "(", "inverse_mapping", ")", "}", "response", "=", "f", "(", "sampler", ",", "bqm", ".", "relabel_variables", "(", "mapping", ",", "inplace", "=", "False", ")", ",", "*", "*", "kwargs", ")", "# unapply the relabeling", "return", "response", ".", "relabel_variables", "(", "inverse_mapping", ",", "inplace", "=", "True", ")", "return", "_index_label" ]
Decorator to convert a bqm to index-labels and relabel the sample set output. Designed to be applied to :meth:`.Sampler.sample`. Expects the wrapped function or method to accept a :obj:`.BinaryQuadraticModel` as the second input and to return a :obj:`.SampleSet`.
[ "Decorator", "to", "convert", "a", "bqm", "to", "index", "-", "labels", "and", "relabel", "the", "sample", "set", "output", "." ]
train
https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/decorators.py#L42-L74
0.001709
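The relabel-and-restore idea behind bqm_index_labels, as a generic sketch independent of dimod; it assumes distinct hashable labels and a wrapped function that takes and returns index lists, which is a simplification of the BQM/SampleSet round trip:

from functools import wraps

def index_labels(f):
    @wraps(f)
    def wrapper(labels, **kwargs):
        mapping = {v: i for i, v in enumerate(labels)}    # label -> index
        inverse = {i: v for v, i in mapping.items()}      # index -> label
        result = f([mapping[v] for v in labels], **kwargs)
        return [inverse[i] for i in result]               # unapply the relabeling
    return wrapper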
woolfson-group/isambard
isambard/ampal/protein.py
Polypeptide.tag_torsion_angles
def tag_torsion_angles(self, force=False): """Tags each Monomer of the Polymer with its omega, phi and psi torsion angle. Parameters ---------- force : bool, optional If `True` the tag will be run even if `Residues` are already tagged. """ tagged = ['omega' in x.tags.keys() for x in self._monomers] if (not all(tagged)) or force: tas = measure_torsion_angles(self._monomers) for monomer, (omega, phi, psi) in zip(self._monomers, tas): monomer.tags['omega'] = omega monomer.tags['phi'] = phi monomer.tags['psi'] = psi monomer.tags['tas'] = (omega, phi, psi) return
python
def tag_torsion_angles(self, force=False): """Tags each Monomer of the Polymer with its omega, phi and psi torsion angle. Parameters ---------- force : bool, optional If `True` the tag will be run even if `Residues` are already tagged. """ tagged = ['omega' in x.tags.keys() for x in self._monomers] if (not all(tagged)) or force: tas = measure_torsion_angles(self._monomers) for monomer, (omega, phi, psi) in zip(self._monomers, tas): monomer.tags['omega'] = omega monomer.tags['phi'] = phi monomer.tags['psi'] = psi monomer.tags['tas'] = (omega, phi, psi) return
[ "def", "tag_torsion_angles", "(", "self", ",", "force", "=", "False", ")", ":", "tagged", "=", "[", "'omega'", "in", "x", ".", "tags", ".", "keys", "(", ")", "for", "x", "in", "self", ".", "_monomers", "]", "if", "(", "not", "all", "(", "tagged", ")", ")", "or", "force", ":", "tas", "=", "measure_torsion_angles", "(", "self", ".", "_monomers", ")", "for", "monomer", ",", "(", "omega", ",", "phi", ",", "psi", ")", "in", "zip", "(", "self", ".", "_monomers", ",", "tas", ")", ":", "monomer", ".", "tags", "[", "'omega'", "]", "=", "omega", "monomer", ".", "tags", "[", "'phi'", "]", "=", "phi", "monomer", ".", "tags", "[", "'psi'", "]", "=", "psi", "monomer", ".", "tags", "[", "'tas'", "]", "=", "(", "omega", ",", "phi", ",", "psi", ")", "return" ]
Tags each Monomer of the Polymer with its omega, phi and psi torsion angle. Parameters ---------- force : bool, optional If `True` the tag will be run even if `Residues` are already tagged.
[ "Tags", "each", "Monomer", "of", "the", "Polymer", "with", "its", "omega", "phi", "and", "psi", "torsion", "angle", "." ]
train
https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/ampal/protein.py#L818-L835
0.004049
mitsei/dlkit
dlkit/handcar/repository/managers.py
RepositoryManager.get_repository_query_session
def get_repository_query_session(self): """Gets the repository query session. return: (osid.repository.RepositoryQuerySession) - a RepositoryQuerySession raise: OperationFailed - unable to complete request raise: Unimplemented - supports_repository_query() is false compliance: optional - This method must be implemented if supports_repository_query() is true. """ if not self.supports_repository_query(): raise Unimplemented() try: from . import sessions except ImportError: raise # OperationFailed() try: session = sessions.RepositoryQuerySession(proxy=self._proxy, runtime=self._runtime) except AttributeError: raise # OperationFailed() return session
python
def get_repository_query_session(self): """Gets the repository query session. return: (osid.repository.RepositoryQuerySession) - a RepositoryQuerySession raise: OperationFailed - unable to complete request raise: Unimplemented - supports_repository_query() is false compliance: optional - This method must be implemented if supports_repository_query() is true. """ if not self.supports_repository_query(): raise Unimplemented() try: from . import sessions except ImportError: raise # OperationFailed() try: session = sessions.RepositoryQuerySession(proxy=self._proxy, runtime=self._runtime) except AttributeError: raise # OperationFailed() return session
[ "def", "get_repository_query_session", "(", "self", ")", ":", "if", "not", "self", ".", "supports_repository_query", "(", ")", ":", "raise", "Unimplemented", "(", ")", "try", ":", "from", ".", "import", "sessions", "except", "ImportError", ":", "raise", "# OperationFailed()", "try", ":", "session", "=", "sessions", ".", "RepositoryQuerySession", "(", "proxy", "=", "self", ".", "_proxy", ",", "runtime", "=", "self", ".", "_runtime", ")", "except", "AttributeError", ":", "raise", "# OperationFailed()", "return", "session" ]
Gets the repository query session. return: (osid.repository.RepositoryQuerySession) - a RepositoryQuerySession raise: OperationFailed - unable to complete request raise: Unimplemented - supports_repository_query() is false compliance: optional - This method must be implemented if supports_repository_query() is true.
[ "Gets", "the", "repository", "query", "session", "." ]
train
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/repository/managers.py#L1680-L1702
0.002208
django-danceschool/django-danceschool
danceschool/core/utils/requests.py
getDateTimeFromGet
def getDateTimeFromGet(request,key): ''' This function just parses the request GET data for the requested key, and returns it in datetime format, returning none if the key is not available or is in incorrect format. ''' if request.GET.get(key,''): try: return ensure_timezone(datetime.strptime(unquote(request.GET.get(key,'')),'%Y-%m-%d')) except (ValueError, TypeError): pass return None
python
def getDateTimeFromGet(request, key):
    '''
    This function parses the request GET data for the requested key,
    and returns it in datetime format, returning None if the key is not
    available or is in an incorrect format.
    '''
    if request.GET.get(key, ''):
        try:
            return ensure_timezone(datetime.strptime(unquote(request.GET.get(key, '')), '%Y-%m-%d'))
        except (ValueError, TypeError):
            pass
    return None
[ "def", "getDateTimeFromGet", "(", "request", ",", "key", ")", ":", "if", "request", ".", "GET", ".", "get", "(", "key", ",", "''", ")", ":", "try", ":", "return", "ensure_timezone", "(", "datetime", ".", "strptime", "(", "unquote", "(", "request", ".", "GET", ".", "get", "(", "key", ",", "''", ")", ")", ",", "'%Y-%m-%d'", ")", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "pass", "return", "None" ]
This function parses the request GET data for the requested key,
and returns it in datetime format, returning None if the key is not
available or is in an incorrect format.
[ "This", "function", "just", "parses", "the", "request", "GET", "data", "for", "the", "requested", "key", "and", "returns", "it", "in", "datetime", "format", "returning", "none", "if", "the", "key", "is", "not", "available", "or", "is", "in", "incorrect", "format", "." ]
train
https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/core/utils/requests.py#L19-L30
0.014989
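A short sketch of how this helper might be exercised with Django's test RequestFactory; the path and query keys below are invented for illustration and are not from the source.

# Hypothetical sketch covering all three documented outcomes.
from django.test import RequestFactory

request = RequestFactory().get('/events/', {'start': '2023-01-15', 'end': 'not-a-date'})
start = getDateTimeFromGet(request, 'start')    # timezone-aware datetime
end = getDateTimeFromGet(request, 'end')        # None: wrong format
missing = getDateTimeFromGet(request, 'other')  # None: key absent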
google/mobly
mobly/controllers/monsoon.py
MonsoonData.update_offset
def update_offset(self, new_offset):
    """Updates how many data points to skip in calculations.

    Always use this function to update offset instead of directly
    setting self.offset.

    Args:
        new_offset: The new offset.
    """
    self.offset = new_offset
    self.data_points = self._data_points[self.offset:]
    self.timestamps = self._timestamps[self.offset:]
python
def update_offset(self, new_offset):
    """Updates how many data points to skip in calculations.

    Always use this function to update offset instead of directly
    setting self.offset.

    Args:
        new_offset: The new offset.
    """
    self.offset = new_offset
    self.data_points = self._data_points[self.offset:]
    self.timestamps = self._timestamps[self.offset:]
[ "def", "update_offset", "(", "self", ",", "new_offset", ")", ":", "self", ".", "offset", "=", "new_offset", "self", ".", "data_points", "=", "self", ".", "_data_points", "[", "self", ".", "offset", ":", "]", "self", ".", "timestamps", "=", "self", ".", "_timestamps", "[", "self", ".", "offset", ":", "]" ]
Updates how many data points to skip in calculations.

Always use this function to update offset instead of directly
setting self.offset.

Args:
    new_offset: The new offset.
[ "Updates", "how", "many", "data", "points", "to", "skip", "in", "caculations", "." ]
train
https://github.com/google/mobly/blob/38ba2cf7d29a20e6a2fca1718eecb337df38db26/mobly/controllers/monsoon.py#L588-L599
0.004808
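A brief sketch of the intended call pattern; `data` is assumed to be a MonsoonData instance that already holds samples, which is not shown in the record.

# Hypothetical sketch: `data` is assumed to be a populated MonsoonData.
data.update_offset(10)   # skip the first 10 samples in later calculations
# Both public views are trimmed consistently by the method above.
print(len(data.data_points), len(data.timestamps))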
basho/riak-python-client
riak/transports/http/__init__.py
NoNagleHTTPConnection.connect
def connect(self): """ Set TCP_NODELAY on socket """ HTTPConnection.connect(self) self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
python
def connect(self): """ Set TCP_NODELAY on socket """ HTTPConnection.connect(self) self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
[ "def", "connect", "(", "self", ")", ":", "HTTPConnection", ".", "connect", "(", "self", ")", "self", ".", "sock", ".", "setsockopt", "(", "socket", ".", "IPPROTO_TCP", ",", "socket", ".", "TCP_NODELAY", ",", "1", ")" ]
Set TCP_NODELAY on socket
[ "Set", "TCP_NODELAY", "on", "socket" ]
train
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/transports/http/__init__.py#L52-L57
0.010811
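A hedged sketch showing the subclass used as a drop-in HTTPConnection replacement; the host, port, and path are placeholders (8098 is merely Riak's conventional HTTP port) and not taken from the record.

# Hypothetical sketch: host, port, and path are placeholders.
conn = NoNagleHTTPConnection('127.0.0.1', 8098)
conn.connect()                        # sets TCP_NODELAY on conn.sock
conn.request('GET', '/ping')
print(conn.getresponse().status)
conn.close()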