Dataset schema (one record per mined Python function):

column                       dtype       stats
---------------------------  ----------  -----------------
repository_name              string      lengths 7–54
func_path_in_repository      string      lengths 4–175
func_name                    string      lengths 1–129
whole_func_string            string      lengths 91–50.9k
language                     string      1 class
func_code_string             string      lengths 91–50.9k
func_code_tokens             sequence
func_documentation_string    string      lengths 1–31.6k
func_documentation_tokens    sequence
split_name                   string      1 class
func_code_url                string      lengths 89–268
score                        float64     0–0.09
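The rows below are CodeSearchNet-style records: one mined Python function per row, with its repository location, source code, token sequences, docstring, and a relevance score. A minimal loading sketch under stated assumptions: it presumes the rows are saved as a Parquet file, and the name train.parquet is purely illustrative, not part of the dataset.

import pandas as pd

# Hypothetical filename; any Parquet dump with the columns above works.
df = pd.read_parquet('train.parquet')

# Peek at a few identifying columns plus the relevance score.
print(df[['repository_name', 'func_name', 'score']].head())

# The full source text lives in whole_func_string / func_code_string.
print(df.loc[0, 'func_documentation_string'])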
JukeboxPipeline/jukebox-core
src/jukeboxcore/filesys.py
delete_file
def delete_file(f): """Delete the given file :param f: the file to delete :type f: :class:`JB_File` :returns: None :rtype: None :raises: :class:`OSError` """ fp = f.get_fullpath() log.info("Deleting file %s", fp) os.remove(fp)
python
def delete_file(f): """Delete the given file :param f: the file to delete :type f: :class:`JB_File` :returns: None :rtype: None :raises: :class:`OSError` """ fp = f.get_fullpath() log.info("Deleting file %s", fp) os.remove(fp)
[ "def", "delete_file", "(", "f", ")", ":", "fp", "=", "f", ".", "get_fullpath", "(", ")", "log", ".", "info", "(", "\"Deleting file %s\"", ",", "fp", ")", "os", ".", "remove", "(", "fp", ")" ]
Delete the given file :param f: the file to delete :type f: :class:`JB_File` :returns: None :rtype: None :raises: :class:`OSError`
[ "Delete", "the", "given", "file" ]
train
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/filesys.py#L31-L42
0.003745
Dispersive-Hydrodynamics-Lab/PACE
PACE/PACE.py
DataStore.check_key
def check_key(self, key: str) -> bool: """ Checks if key exists in datastore. True if yes, False if no. :param: SHA512 hash key :return: whether or key not exists in datastore """ keys = self.get_keys() return key in keys
python
def check_key(self, key: str) -> bool: """ Checks if key exists in datastore. True if yes, False if no. :param: SHA512 hash key :return: whether or key not exists in datastore """ keys = self.get_keys() return key in keys
[ "def", "check_key", "(", "self", ",", "key", ":", "str", ")", "->", "bool", ":", "keys", "=", "self", ".", "get_keys", "(", ")", "return", "key", "in", "keys" ]
Checks if key exists in datastore. True if yes, False if no. :param: SHA512 hash key :return: whether or key not exists in datastore
[ "Checks", "if", "key", "exists", "in", "datastore", ".", "True", "if", "yes", "False", "if", "no", "." ]
train
https://github.com/Dispersive-Hydrodynamics-Lab/PACE/blob/4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f/PACE/PACE.py#L164-L173
0.007168
fracpete/python-weka-wrapper3
python/weka/core/classes.py
Environment.variable_names
def variable_names(self): """ Returns the names of all environment variables. :return: the names of the variables :rtype: list """ result = [] names = javabridge.call(self.jobject, "getVariableNames", "()Ljava/util/Set;") for name in javabridge.iterate_collection(names): result.append(javabridge.to_string(name)) return result
python
def variable_names(self): """ Returns the names of all environment variables. :return: the names of the variables :rtype: list """ result = [] names = javabridge.call(self.jobject, "getVariableNames", "()Ljava/util/Set;") for name in javabridge.iterate_collection(names): result.append(javabridge.to_string(name)) return result
[ "def", "variable_names", "(", "self", ")", ":", "result", "=", "[", "]", "names", "=", "javabridge", ".", "call", "(", "self", ".", "jobject", ",", "\"getVariableNames\"", ",", "\"()Ljava/util/Set;\"", ")", "for", "name", "in", "javabridge", ".", "iterate_collection", "(", "names", ")", ":", "result", ".", "append", "(", "javabridge", ".", "to_string", "(", "name", ")", ")", "return", "result" ]
Returns the names of all environment variables. :return: the names of the variables :rtype: list
[ "Returns", "the", "names", "of", "all", "environment", "variables", "." ]
train
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/core/classes.py#L707-L718
0.007282
Duke-GCB/DukeDSClient
ddsc/core/fileuploader.py
ParallelChunkProcessor.determine_num_chunks
def determine_num_chunks(chunk_size, file_size): """ Figure out how many pieces we are sending the file in. NOTE: duke-data-service requires an empty chunk to be uploaded for empty files. """ if file_size == 0: return 1 return int(math.ceil(float(file_size) / float(chunk_size)))
python
def determine_num_chunks(chunk_size, file_size): """ Figure out how many pieces we are sending the file in. NOTE: duke-data-service requires an empty chunk to be uploaded for empty files. """ if file_size == 0: return 1 return int(math.ceil(float(file_size) / float(chunk_size)))
[ "def", "determine_num_chunks", "(", "chunk_size", ",", "file_size", ")", ":", "if", "file_size", "==", "0", ":", "return", "1", "return", "int", "(", "math", ".", "ceil", "(", "float", "(", "file_size", ")", "/", "float", "(", "chunk_size", ")", ")", ")" ]
Figure out how many pieces we are sending the file in. NOTE: duke-data-service requires an empty chunk to be uploaded for empty files.
[ "Figure", "out", "how", "many", "pieces", "we", "are", "sending", "the", "file", "in", ".", "NOTE", ":", "duke", "-", "data", "-", "service", "requires", "an", "empty", "chunk", "to", "be", "uploaded", "for", "empty", "files", "." ]
train
https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/core/fileuploader.py#L270-L277
0.00885
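The chunk count above is plain ceiling division with one special case: duke-data-service requires an empty chunk to be uploaded for empty files. A standalone sketch of the same arithmetic (the helper is renamed num_chunks here and is not the DukeDSClient API):

import math

def num_chunks(chunk_size, file_size):
    # Empty files still require exactly one (empty) chunk.
    if file_size == 0:
        return 1
    # Ceiling division: a partial final chunk still counts.
    return int(math.ceil(float(file_size) / float(chunk_size)))

# A 10 MiB file sent in 4 MiB chunks needs 3 chunks: 4 + 4 + 2 MiB.
assert num_chunks(4 * 1024 * 1024, 10 * 1024 * 1024) == 3
assert num_chunks(4 * 1024 * 1024, 0) == 1  # empty-file special case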
watson-developer-cloud/python-sdk
ibm_watson/assistant_v2.py
MessageResponse._to_dict
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'output') and self.output is not None: _dict['output'] = self.output._to_dict() if hasattr(self, 'context') and self.context is not None: _dict['context'] = self.context._to_dict() return _dict
python
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'output') and self.output is not None: _dict['output'] = self.output._to_dict() if hasattr(self, 'context') and self.context is not None: _dict['context'] = self.context._to_dict() return _dict
[ "def", "_to_dict", "(", "self", ")", ":", "_dict", "=", "{", "}", "if", "hasattr", "(", "self", ",", "'output'", ")", "and", "self", ".", "output", "is", "not", "None", ":", "_dict", "[", "'output'", "]", "=", "self", ".", "output", ".", "_to_dict", "(", ")", "if", "hasattr", "(", "self", ",", "'context'", ")", "and", "self", ".", "context", "is", "not", "None", ":", "_dict", "[", "'context'", "]", "=", "self", ".", "context", ".", "_to_dict", "(", ")", "return", "_dict" ]
Return a json dictionary representing this model.
[ "Return", "a", "json", "dictionary", "representing", "this", "model", "." ]
train
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/assistant_v2.py#L1595-L1602
0.00554
a10networks/a10-neutron-lbaas
a10_neutron_lbaas/db/api.py
magic_session
def magic_session(db_session=None, url=None): """Either does nothing with the session you already have or makes one that commits and closes no matter what happens """ if db_session is not None: yield db_session else: session = get_session(url, expire_on_commit=False) try: try: yield session finally: session.commit() finally: session.close()
python
def magic_session(db_session=None, url=None): """Either does nothing with the session you already have or makes one that commits and closes no matter what happens """ if db_session is not None: yield db_session else: session = get_session(url, expire_on_commit=False) try: try: yield session finally: session.commit() finally: session.close()
[ "def", "magic_session", "(", "db_session", "=", "None", ",", "url", "=", "None", ")", ":", "if", "db_session", "is", "not", "None", ":", "yield", "db_session", "else", ":", "session", "=", "get_session", "(", "url", ",", "expire_on_commit", "=", "False", ")", "try", ":", "try", ":", "yield", "session", "finally", ":", "session", ".", "commit", "(", ")", "finally", ":", "session", ".", "close", "(", ")" ]
Either does nothing with the session you already have or makes one that commits and closes no matter what happens
[ "Either", "does", "nothing", "with", "the", "session", "you", "already", "have", "or", "makes", "one", "that", "commits", "and", "closes", "no", "matter", "what", "happens" ]
train
https://github.com/a10networks/a10-neutron-lbaas/blob/ff834c295c8019874ca4b209d864367e40cc9881/a10_neutron_lbaas/db/api.py#L50-L65
0.00216
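magic_session yields exactly once, so it is presumably decorated with contextlib.contextmanager at its definition site in a10-neutron-lbaas. A self-contained sketch of the pattern, with a stub object standing in for get_session (the stub is hypothetical, not the project's API):

from contextlib import contextmanager

class StubSession(object):
    # Records commit/close calls so the cleanup order can be verified.
    def __init__(self):
        self.committed = False
        self.closed = False
    def commit(self):
        self.committed = True
    def close(self):
        self.closed = True

@contextmanager
def magic_session(db_session=None):
    if db_session is not None:
        # Caller owns this session; yield it untouched.
        yield db_session
    else:
        session = StubSession()
        try:
            try:
                yield session
            finally:
                session.commit()  # commit no matter what happened in the body...
        finally:
            session.close()       # ...and always close afterwards

with magic_session() as s:
    pass
assert s.committed and s.closed

The nested try/finally is the point of the pattern: even if the with-body raises, commit() runs first and close() runs last.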
dswah/pyGAM
pygam/pygam.py
GAM.sample
def sample(self, X, y, quantity='y', sample_at_X=None, weights=None, n_draws=100, n_bootstraps=5, objective='auto'): """Simulate from the posterior of the coefficients and smoothing params. Samples are drawn from the posterior of the coefficients and smoothing parameters given the response in an approximate way. The GAM must already be fitted before calling this method; if the model has not been fitted, then an exception is raised. Moreover, it is recommended that the model and its hyperparameters be chosen with `gridsearch` (with the parameter `keep_best=True`) before calling `sample`, so that the result of that gridsearch can be used to generate useful response data and so that the model's coefficients (and their covariance matrix) can be used as the first bootstrap sample. These samples are drawn as follows. Details are in the reference below. 1. ``n_bootstraps`` many "bootstrap samples" of the response (``y``) are simulated by drawing random samples from the model's distribution evaluated at the expected values (``mu``) for each sample in ``X``. 2. A copy of the model is fitted to each of those bootstrap samples of the response. The result is an approximation of the distribution over the smoothing parameter ``lam`` given the response data ``y``. 3. Samples of the coefficients are simulated from a multivariate normal using the bootstrap samples of the coefficients and their covariance matrices. Notes ----- A ``gridsearch`` is done ``n_bootstraps`` many times, so keep ``n_bootstraps`` small. Make ``n_bootstraps < n_draws`` to take advantage of the expensive bootstrap samples of the smoothing parameters. Parameters ----------- X : array of shape (n_samples, m_features) empirical input data y : array of shape (n_samples,) empirical response vector quantity : {'y', 'coef', 'mu'}, default: 'y' What quantity to return pseudorandom samples of. If `sample_at_X` is not None and `quantity` is either `'y'` or `'mu'`, then samples are drawn at the values of `X` specified in `sample_at_X`. sample_at_X : array of shape (n_samples_to_simulate, m_features) or None, optional Input data at which to draw new samples. Only applies for `quantity` equal to `'y'` or to `'mu`'. If `None`, then `sample_at_X` is replaced by `X`. weights : np.array of shape (n_samples,) sample weights n_draws : positive int, optional (default=100) The number of samples to draw from the posterior distribution of the coefficients and smoothing parameters n_bootstraps : positive int, optional (default=5) The number of bootstrap samples to draw from simulations of the response (from the already fitted model) to estimate the distribution of the smoothing parameters given the response data. If `n_bootstraps` is 1, then only the already fitted model's smoothing parameter is used, and the distribution over the smoothing parameters is not estimated using bootstrap sampling. objective : string, optional (default='auto' metric to optimize in grid search. must be in ['AIC', 'AICc', 'GCV', 'UBRE', 'auto'] if 'auto', then grid search will optimize GCV for models with unknown scale and UBRE for models with known scale. Returns ------- draws : 2D array of length n_draws Simulations of the given `quantity` using samples from the posterior distribution of the coefficients and smoothing parameter given the response data. Each row is a pseudorandom sample. If `quantity == 'coef'`, then the number of columns of `draws` is the number of coefficients (`len(self.coef_)`). Otherwise, the number of columns of `draws` is the number of rows of `sample_at_X` if `sample_at_X` is not `None` or else the number of rows of `X`. References ---------- Simon N. Wood, 2006. Generalized Additive Models: an introduction with R. Section 4.9.3 (pages 198–199) and Section 5.4.2 (page 256–257). """ if quantity not in {'mu', 'coef', 'y'}: raise ValueError("`quantity` must be one of 'mu', 'coef', 'y';" " got {}".format(quantity)) coef_draws = self._sample_coef( X, y, weights=weights, n_draws=n_draws, n_bootstraps=n_bootstraps, objective=objective) if quantity == 'coef': return coef_draws if sample_at_X is None: sample_at_X = X linear_predictor = self._modelmat(sample_at_X).dot(coef_draws.T) mu_shape_n_draws_by_n_samples = self.link.mu( linear_predictor, self.distribution).T if quantity == 'mu': return mu_shape_n_draws_by_n_samples else: return self.distribution.sample(mu_shape_n_draws_by_n_samples)
python
def sample(self, X, y, quantity='y', sample_at_X=None, weights=None, n_draws=100, n_bootstraps=5, objective='auto'): """Simulate from the posterior of the coefficients and smoothing params. Samples are drawn from the posterior of the coefficients and smoothing parameters given the response in an approximate way. The GAM must already be fitted before calling this method; if the model has not been fitted, then an exception is raised. Moreover, it is recommended that the model and its hyperparameters be chosen with `gridsearch` (with the parameter `keep_best=True`) before calling `sample`, so that the result of that gridsearch can be used to generate useful response data and so that the model's coefficients (and their covariance matrix) can be used as the first bootstrap sample. These samples are drawn as follows. Details are in the reference below. 1. ``n_bootstraps`` many "bootstrap samples" of the response (``y``) are simulated by drawing random samples from the model's distribution evaluated at the expected values (``mu``) for each sample in ``X``. 2. A copy of the model is fitted to each of those bootstrap samples of the response. The result is an approximation of the distribution over the smoothing parameter ``lam`` given the response data ``y``. 3. Samples of the coefficients are simulated from a multivariate normal using the bootstrap samples of the coefficients and their covariance matrices. Notes ----- A ``gridsearch`` is done ``n_bootstraps`` many times, so keep ``n_bootstraps`` small. Make ``n_bootstraps < n_draws`` to take advantage of the expensive bootstrap samples of the smoothing parameters. Parameters ----------- X : array of shape (n_samples, m_features) empirical input data y : array of shape (n_samples,) empirical response vector quantity : {'y', 'coef', 'mu'}, default: 'y' What quantity to return pseudorandom samples of. If `sample_at_X` is not None and `quantity` is either `'y'` or `'mu'`, then samples are drawn at the values of `X` specified in `sample_at_X`. sample_at_X : array of shape (n_samples_to_simulate, m_features) or None, optional Input data at which to draw new samples. Only applies for `quantity` equal to `'y'` or to `'mu`'. If `None`, then `sample_at_X` is replaced by `X`. weights : np.array of shape (n_samples,) sample weights n_draws : positive int, optional (default=100) The number of samples to draw from the posterior distribution of the coefficients and smoothing parameters n_bootstraps : positive int, optional (default=5) The number of bootstrap samples to draw from simulations of the response (from the already fitted model) to estimate the distribution of the smoothing parameters given the response data. If `n_bootstraps` is 1, then only the already fitted model's smoothing parameter is used, and the distribution over the smoothing parameters is not estimated using bootstrap sampling. objective : string, optional (default='auto' metric to optimize in grid search. must be in ['AIC', 'AICc', 'GCV', 'UBRE', 'auto'] if 'auto', then grid search will optimize GCV for models with unknown scale and UBRE for models with known scale. Returns ------- draws : 2D array of length n_draws Simulations of the given `quantity` using samples from the posterior distribution of the coefficients and smoothing parameter given the response data. Each row is a pseudorandom sample. If `quantity == 'coef'`, then the number of columns of `draws` is the number of coefficients (`len(self.coef_)`). Otherwise, the number of columns of `draws` is the number of rows of `sample_at_X` if `sample_at_X` is not `None` or else the number of rows of `X`. References ---------- Simon N. Wood, 2006. Generalized Additive Models: an introduction with R. Section 4.9.3 (pages 198–199) and Section 5.4.2 (page 256–257). """ if quantity not in {'mu', 'coef', 'y'}: raise ValueError("`quantity` must be one of 'mu', 'coef', 'y';" " got {}".format(quantity)) coef_draws = self._sample_coef( X, y, weights=weights, n_draws=n_draws, n_bootstraps=n_bootstraps, objective=objective) if quantity == 'coef': return coef_draws if sample_at_X is None: sample_at_X = X linear_predictor = self._modelmat(sample_at_X).dot(coef_draws.T) mu_shape_n_draws_by_n_samples = self.link.mu( linear_predictor, self.distribution).T if quantity == 'mu': return mu_shape_n_draws_by_n_samples else: return self.distribution.sample(mu_shape_n_draws_by_n_samples)
[ "def", "sample", "(", "self", ",", "X", ",", "y", ",", "quantity", "=", "'y'", ",", "sample_at_X", "=", "None", ",", "weights", "=", "None", ",", "n_draws", "=", "100", ",", "n_bootstraps", "=", "5", ",", "objective", "=", "'auto'", ")", ":", "if", "quantity", "not", "in", "{", "'mu'", ",", "'coef'", ",", "'y'", "}", ":", "raise", "ValueError", "(", "\"`quantity` must be one of 'mu', 'coef', 'y';\"", "\" got {}\"", ".", "format", "(", "quantity", ")", ")", "coef_draws", "=", "self", ".", "_sample_coef", "(", "X", ",", "y", ",", "weights", "=", "weights", ",", "n_draws", "=", "n_draws", ",", "n_bootstraps", "=", "n_bootstraps", ",", "objective", "=", "objective", ")", "if", "quantity", "==", "'coef'", ":", "return", "coef_draws", "if", "sample_at_X", "is", "None", ":", "sample_at_X", "=", "X", "linear_predictor", "=", "self", ".", "_modelmat", "(", "sample_at_X", ")", ".", "dot", "(", "coef_draws", ".", "T", ")", "mu_shape_n_draws_by_n_samples", "=", "self", ".", "link", ".", "mu", "(", "linear_predictor", ",", "self", ".", "distribution", ")", ".", "T", "if", "quantity", "==", "'mu'", ":", "return", "mu_shape_n_draws_by_n_samples", "else", ":", "return", "self", ".", "distribution", ".", "sample", "(", "mu_shape_n_draws_by_n_samples", ")" ]
Simulate from the posterior of the coefficients and smoothing params. Samples are drawn from the posterior of the coefficients and smoothing parameters given the response in an approximate way. The GAM must already be fitted before calling this method; if the model has not been fitted, then an exception is raised. Moreover, it is recommended that the model and its hyperparameters be chosen with `gridsearch` (with the parameter `keep_best=True`) before calling `sample`, so that the result of that gridsearch can be used to generate useful response data and so that the model's coefficients (and their covariance matrix) can be used as the first bootstrap sample. These samples are drawn as follows. Details are in the reference below. 1. ``n_bootstraps`` many "bootstrap samples" of the response (``y``) are simulated by drawing random samples from the model's distribution evaluated at the expected values (``mu``) for each sample in ``X``. 2. A copy of the model is fitted to each of those bootstrap samples of the response. The result is an approximation of the distribution over the smoothing parameter ``lam`` given the response data ``y``. 3. Samples of the coefficients are simulated from a multivariate normal using the bootstrap samples of the coefficients and their covariance matrices. Notes ----- A ``gridsearch`` is done ``n_bootstraps`` many times, so keep ``n_bootstraps`` small. Make ``n_bootstraps < n_draws`` to take advantage of the expensive bootstrap samples of the smoothing parameters. Parameters ----------- X : array of shape (n_samples, m_features) empirical input data y : array of shape (n_samples,) empirical response vector quantity : {'y', 'coef', 'mu'}, default: 'y' What quantity to return pseudorandom samples of. If `sample_at_X` is not None and `quantity` is either `'y'` or `'mu'`, then samples are drawn at the values of `X` specified in `sample_at_X`. sample_at_X : array of shape (n_samples_to_simulate, m_features) or None, optional Input data at which to draw new samples. Only applies for `quantity` equal to `'y'` or to `'mu`'. If `None`, then `sample_at_X` is replaced by `X`. weights : np.array of shape (n_samples,) sample weights n_draws : positive int, optional (default=100) The number of samples to draw from the posterior distribution of the coefficients and smoothing parameters n_bootstraps : positive int, optional (default=5) The number of bootstrap samples to draw from simulations of the response (from the already fitted model) to estimate the distribution of the smoothing parameters given the response data. If `n_bootstraps` is 1, then only the already fitted model's smoothing parameter is used, and the distribution over the smoothing parameters is not estimated using bootstrap sampling. objective : string, optional (default='auto' metric to optimize in grid search. must be in ['AIC', 'AICc', 'GCV', 'UBRE', 'auto'] if 'auto', then grid search will optimize GCV for models with unknown scale and UBRE for models with known scale. Returns ------- draws : 2D array of length n_draws Simulations of the given `quantity` using samples from the posterior distribution of the coefficients and smoothing parameter given the response data. Each row is a pseudorandom sample. If `quantity == 'coef'`, then the number of columns of `draws` is the number of coefficients (`len(self.coef_)`). Otherwise, the number of columns of `draws` is the number of rows of `sample_at_X` if `sample_at_X` is not `None` or else the number of rows of `X`. References ---------- Simon N. Wood, 2006. Generalized Additive Models: an introduction with R. Section 4.9.3 (pages 198–199) and Section 5.4.2 (page 256–257).
[ "Simulate", "from", "the", "posterior", "of", "the", "coefficients", "and", "smoothing", "params", "." ]
train
https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L1929-L2044
0.001124
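A hedged usage sketch of GAM.sample, following the docstring's advice to fit via gridsearch with keep_best=True first. It assumes pyGAM's LinearGAM and the parameter names documented above; the synthetic data are illustrative.

import numpy as np
from pygam import LinearGAM

rng = np.random.RandomState(0)
X = rng.rand(100, 1)
y = np.sin(2 * np.pi * X[:, 0]) + 0.1 * rng.randn(100)

# Fit first; sample() raises if the model has not been fitted.
gam = LinearGAM().gridsearch(X, y, keep_best=True)

# 50 posterior draws of mu at the training inputs.
draws = gam.sample(X, y, quantity='mu', n_draws=50, n_bootstraps=2)
print(draws.shape)  # (50, 100): one row per pseudorandom sample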
theno/fabsetup
fabsetup/fabutils.py
extract_minors_from_setup_py
def extract_minors_from_setup_py(filename_setup_py): '''Extract supported python minor versions from setup.py and return them as a list of str. Return example: ['2.6', '2.7', '3.3', '3.4', '3.5', '3.6'] ''' # eg: minors_str = '2.6\n2.7\n3.3\n3.4\n3.5\n3.6' minors_str = fabric.api.local( flo('grep --perl-regexp --only-matching ' '"(?<=Programming Language :: Python :: )\\d+\\.\\d+" ' '{filename_setup_py}'), capture=True) # eg: minors = ['2.6', '2.7', '3.3', '3.4', '3.5', '3.6'] minors = minors_str.split() return minors
python
def extract_minors_from_setup_py(filename_setup_py): '''Extract supported python minor versions from setup.py and return them as a list of str. Return example: ['2.6', '2.7', '3.3', '3.4', '3.5', '3.6'] ''' # eg: minors_str = '2.6\n2.7\n3.3\n3.4\n3.5\n3.6' minors_str = fabric.api.local( flo('grep --perl-regexp --only-matching ' '"(?<=Programming Language :: Python :: )\\d+\\.\\d+" ' '{filename_setup_py}'), capture=True) # eg: minors = ['2.6', '2.7', '3.3', '3.4', '3.5', '3.6'] minors = minors_str.split() return minors
[ "def", "extract_minors_from_setup_py", "(", "filename_setup_py", ")", ":", "# eg: minors_str = '2.6\\n2.7\\n3.3\\n3.4\\n3.5\\n3.6'", "minors_str", "=", "fabric", ".", "api", ".", "local", "(", "flo", "(", "'grep --perl-regexp --only-matching '", "'\"(?<=Programming Language :: Python :: )\\\\d+\\\\.\\\\d+\" '", "'{filename_setup_py}'", ")", ",", "capture", "=", "True", ")", "# eg: minors = ['2.6', '2.7', '3.3', '3.4', '3.5', '3.6']", "minors", "=", "minors_str", ".", "split", "(", ")", "return", "minors" ]
Extract supported python minor versions from setup.py and return them as a list of str. Return example: ['2.6', '2.7', '3.3', '3.4', '3.5', '3.6']
[ "Extract", "supported", "python", "minor", "versions", "from", "setup", ".", "py", "and", "return", "them", "as", "a", "list", "of", "str", "." ]
train
https://github.com/theno/fabsetup/blob/ced728abff93551ba5677e63bc1bdc0ef5ca5777/fabsetup/fabutils.py#L834-L850
0.001642
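The shelled-out grep above is a Perl-compatible lookbehind over the trove classifiers; the same extraction works in pure Python with the re module. A sketch under that assumption, not fabsetup's implementation:

import re

CLASSIFIER_RE = re.compile(
    r'(?<=Programming Language :: Python :: )\d+\.\d+')

def extract_minors(setup_py_text):
    # eg: ['2.6', '2.7', '3.3', '3.4', '3.5', '3.6']
    return CLASSIFIER_RE.findall(setup_py_text)

text = '''
classifiers=[
    "Programming Language :: Python :: 2.7",
    "Programming Language :: Python :: 3.6",
],
'''
assert extract_minors(text) == ['2.7', '3.6']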
kdeldycke/chessboard
chessboard/solver.py
SolverContext.solve
def solve(self): """ Solve all possible positions of pieces within the context. Depth-first, tree-traversal of the product space. """ # Create a new, empty board. board = Board(self.length, self.height) # Iterate through all combinations of positions. permutations = Permutations(self.pieces, self.vector_size) for positions in permutations: # Reuse board but flush all pieces. board.reset() for level, (piece_uid, linear_position) in enumerate(positions): # Try to place the piece on the board. try: board.add(piece_uid, linear_position) # If one of the piece can't be added, throw the whole set, skip # the rotten branch and proceed to the next. except (OccupiedPosition, VulnerablePosition, AttackablePiece): permutations.skip_branch(level) break else: # All pieces fits, save solution and proceeed to the next # permutation. self.result_counter += 1 yield board
python
def solve(self): """ Solve all possible positions of pieces within the context. Depth-first, tree-traversal of the product space. """ # Create a new, empty board. board = Board(self.length, self.height) # Iterate through all combinations of positions. permutations = Permutations(self.pieces, self.vector_size) for positions in permutations: # Reuse board but flush all pieces. board.reset() for level, (piece_uid, linear_position) in enumerate(positions): # Try to place the piece on the board. try: board.add(piece_uid, linear_position) # If one of the piece can't be added, throw the whole set, skip # the rotten branch and proceed to the next. except (OccupiedPosition, VulnerablePosition, AttackablePiece): permutations.skip_branch(level) break else: # All pieces fits, save solution and proceeed to the next # permutation. self.result_counter += 1 yield board
[ "def", "solve", "(", "self", ")", ":", "# Create a new, empty board.", "board", "=", "Board", "(", "self", ".", "length", ",", "self", ".", "height", ")", "# Iterate through all combinations of positions.", "permutations", "=", "Permutations", "(", "self", ".", "pieces", ",", "self", ".", "vector_size", ")", "for", "positions", "in", "permutations", ":", "# Reuse board but flush all pieces.", "board", ".", "reset", "(", ")", "for", "level", ",", "(", "piece_uid", ",", "linear_position", ")", "in", "enumerate", "(", "positions", ")", ":", "# Try to place the piece on the board.", "try", ":", "board", ".", "add", "(", "piece_uid", ",", "linear_position", ")", "# If one of the piece can't be added, throw the whole set, skip", "# the rotten branch and proceed to the next.", "except", "(", "OccupiedPosition", ",", "VulnerablePosition", ",", "AttackablePiece", ")", ":", "permutations", ".", "skip_branch", "(", "level", ")", "break", "else", ":", "# All pieces fits, save solution and proceeed to the next", "# permutation.", "self", ".", "result_counter", "+=", "1", "yield", "board" ]
Solve all possible positions of pieces within the context. Depth-first, tree-traversal of the product space.
[ "Solve", "all", "possible", "positions", "of", "pieces", "within", "the", "context", "." ]
train
https://github.com/kdeldycke/chessboard/blob/ac7a14dc7b6905701e3f6d4e01e8fe1869241bed/chessboard/solver.py#L160-L189
0.001686
openpermissions/perch
perch/organisation.py
Service.clean
def clean(self, user=None): """Remove internal fields""" doc = self._resource internal_fields = deepcopy(self.internal_fields) if user is None or not user.is_user(self.organisation_id): internal_fields.append('permissions') result = {k: v for k, v in doc.iteritems() if k not in internal_fields} return result
python
def clean(self, user=None): """Remove internal fields""" doc = self._resource internal_fields = deepcopy(self.internal_fields) if user is None or not user.is_user(self.organisation_id): internal_fields.append('permissions') result = {k: v for k, v in doc.iteritems() if k not in internal_fields} return result
[ "def", "clean", "(", "self", ",", "user", "=", "None", ")", ":", "doc", "=", "self", ".", "_resource", "internal_fields", "=", "deepcopy", "(", "self", ".", "internal_fields", ")", "if", "user", "is", "None", "or", "not", "user", ".", "is_user", "(", "self", ".", "organisation_id", ")", ":", "internal_fields", ".", "append", "(", "'permissions'", ")", "result", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "doc", ".", "iteritems", "(", ")", "if", "k", "not", "in", "internal_fields", "}", "return", "result" ]
Remove internal fields
[ "Remove", "internal", "fields" ]
train
https://github.com/openpermissions/perch/blob/36d78994133918f3c52c187f19e50132960a0156/perch/organisation.py#L385-L394
0.005391
disqus/gutter
gutter/client/models.py
Manager.switches
def switches(self): """ List of all switches currently registered. """ results = [ switch for name, switch in self.storage.iteritems() if name.startswith(self.__joined_namespace) ] return results
python
def switches(self): """ List of all switches currently registered. """ results = [ switch for name, switch in self.storage.iteritems() if name.startswith(self.__joined_namespace) ] return results
[ "def", "switches", "(", "self", ")", ":", "results", "=", "[", "switch", "for", "name", ",", "switch", "in", "self", ".", "storage", ".", "iteritems", "(", ")", "if", "name", ".", "startswith", "(", "self", ".", "__joined_namespace", ")", "]", "return", "results" ]
List of all switches currently registered.
[ "List", "of", "all", "switches", "currently", "registered", "." ]
train
https://github.com/disqus/gutter/blob/d686fa3cd0551cacfc5630c8e7b5fa75e6dcfdf5/gutter/client/models.py#L438-L447
0.007463
nicolargo/glances
glances/plugins/glances_alert.py
Plugin.msg_curse
def msg_curse(self, args=None, max_width=None): """Return the dict to display in the curse interface.""" # Init the return message ret = [] # Only process if display plugin enable... if not self.stats or self.is_disable(): return ret # Build the string message # Header ret.append(self.curse_add_line(global_message(), "TITLE")) # Loop over alerts for alert in self.stats: # New line ret.append(self.curse_new_line()) # Start msg = str(datetime.fromtimestamp(alert[0])) ret.append(self.curse_add_line(msg)) # Duration if alert[1] > 0: # If finished display duration msg = ' ({})'.format(datetime.fromtimestamp(alert[1]) - datetime.fromtimestamp(alert[0])) else: msg = ' (ongoing)' ret.append(self.curse_add_line(msg)) ret.append(self.curse_add_line(" - ")) # Infos if alert[1] > 0: # If finished do not display status msg = '{} on {}'.format(alert[2], alert[3]) ret.append(self.curse_add_line(msg)) else: msg = str(alert[3]) ret.append(self.curse_add_line(msg, decoration=alert[2])) # Min / Mean / Max if self.approx_equal(alert[6], alert[4], tolerance=0.1): msg = ' ({:.1f})'.format(alert[5]) else: msg = ' (Min:{:.1f} Mean:{:.1f} Max:{:.1f})'.format( alert[6], alert[5], alert[4]) ret.append(self.curse_add_line(msg)) # Top processes top_process = ', '.join([p['name'] for p in alert[9]]) if top_process != '': msg = ': {}'.format(top_process) ret.append(self.curse_add_line(msg)) return ret
python
def msg_curse(self, args=None, max_width=None): """Return the dict to display in the curse interface.""" # Init the return message ret = [] # Only process if display plugin enable... if not self.stats or self.is_disable(): return ret # Build the string message # Header ret.append(self.curse_add_line(global_message(), "TITLE")) # Loop over alerts for alert in self.stats: # New line ret.append(self.curse_new_line()) # Start msg = str(datetime.fromtimestamp(alert[0])) ret.append(self.curse_add_line(msg)) # Duration if alert[1] > 0: # If finished display duration msg = ' ({})'.format(datetime.fromtimestamp(alert[1]) - datetime.fromtimestamp(alert[0])) else: msg = ' (ongoing)' ret.append(self.curse_add_line(msg)) ret.append(self.curse_add_line(" - ")) # Infos if alert[1] > 0: # If finished do not display status msg = '{} on {}'.format(alert[2], alert[3]) ret.append(self.curse_add_line(msg)) else: msg = str(alert[3]) ret.append(self.curse_add_line(msg, decoration=alert[2])) # Min / Mean / Max if self.approx_equal(alert[6], alert[4], tolerance=0.1): msg = ' ({:.1f})'.format(alert[5]) else: msg = ' (Min:{:.1f} Mean:{:.1f} Max:{:.1f})'.format( alert[6], alert[5], alert[4]) ret.append(self.curse_add_line(msg)) # Top processes top_process = ', '.join([p['name'] for p in alert[9]]) if top_process != '': msg = ': {}'.format(top_process) ret.append(self.curse_add_line(msg)) return ret
[ "def", "msg_curse", "(", "self", ",", "args", "=", "None", ",", "max_width", "=", "None", ")", ":", "# Init the return message", "ret", "=", "[", "]", "# Only process if display plugin enable...", "if", "not", "self", ".", "stats", "or", "self", ".", "is_disable", "(", ")", ":", "return", "ret", "# Build the string message", "# Header", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "global_message", "(", ")", ",", "\"TITLE\"", ")", ")", "# Loop over alerts", "for", "alert", "in", "self", ".", "stats", ":", "# New line", "ret", ".", "append", "(", "self", ".", "curse_new_line", "(", ")", ")", "# Start", "msg", "=", "str", "(", "datetime", ".", "fromtimestamp", "(", "alert", "[", "0", "]", ")", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ")", ")", "# Duration", "if", "alert", "[", "1", "]", ">", "0", ":", "# If finished display duration", "msg", "=", "' ({})'", ".", "format", "(", "datetime", ".", "fromtimestamp", "(", "alert", "[", "1", "]", ")", "-", "datetime", ".", "fromtimestamp", "(", "alert", "[", "0", "]", ")", ")", "else", ":", "msg", "=", "' (ongoing)'", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ")", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "\" - \"", ")", ")", "# Infos", "if", "alert", "[", "1", "]", ">", "0", ":", "# If finished do not display status", "msg", "=", "'{} on {}'", ".", "format", "(", "alert", "[", "2", "]", ",", "alert", "[", "3", "]", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ")", ")", "else", ":", "msg", "=", "str", "(", "alert", "[", "3", "]", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ",", "decoration", "=", "alert", "[", "2", "]", ")", ")", "# Min / Mean / Max", "if", "self", ".", "approx_equal", "(", "alert", "[", "6", "]", ",", "alert", "[", "4", "]", ",", "tolerance", "=", "0.1", ")", ":", "msg", "=", "' ({:.1f})'", ".", "format", "(", "alert", "[", "5", "]", ")", "else", ":", "msg", "=", "' (Min:{:.1f} Mean:{:.1f} Max:{:.1f})'", ".", "format", "(", "alert", "[", "6", "]", ",", "alert", "[", "5", "]", ",", "alert", "[", "4", "]", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ")", ")", "# Top processes", "top_process", "=", "', '", ".", "join", "(", "[", "p", "[", "'name'", "]", "for", "p", "in", "alert", "[", "9", "]", "]", ")", "if", "top_process", "!=", "''", ":", "msg", "=", "': {}'", ".", "format", "(", "top_process", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ")", ")", "return", "ret" ]
Return the dict to display in the curse interface.
[ "Return", "the", "dict", "to", "display", "in", "the", "curse", "interface", "." ]
train
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/plugins/glances_alert.py#L111-L160
0.001008
pyca/pynacl
src/nacl/signing.py
VerifyKey.to_curve25519_public_key
def to_curve25519_public_key(self): """ Converts a :class:`~nacl.signing.VerifyKey` to a :class:`~nacl.public.PublicKey` :rtype: :class:`~nacl.public.PublicKey` """ raw_pk = nacl.bindings.crypto_sign_ed25519_pk_to_curve25519(self._key) return _Curve25519_PublicKey(raw_pk)
python
def to_curve25519_public_key(self): """ Converts a :class:`~nacl.signing.VerifyKey` to a :class:`~nacl.public.PublicKey` :rtype: :class:`~nacl.public.PublicKey` """ raw_pk = nacl.bindings.crypto_sign_ed25519_pk_to_curve25519(self._key) return _Curve25519_PublicKey(raw_pk)
[ "def", "to_curve25519_public_key", "(", "self", ")", ":", "raw_pk", "=", "nacl", ".", "bindings", ".", "crypto_sign_ed25519_pk_to_curve25519", "(", "self", ".", "_key", ")", "return", "_Curve25519_PublicKey", "(", "raw_pk", ")" ]
Converts a :class:`~nacl.signing.VerifyKey` to a :class:`~nacl.public.PublicKey` :rtype: :class:`~nacl.public.PublicKey`
[ "Converts", "a", ":", "class", ":", "~nacl", ".", "signing", ".", "VerifyKey", "to", "a", ":", "class", ":", "~nacl", ".", "public", ".", "PublicKey" ]
train
https://github.com/pyca/pynacl/blob/0df0c2c7693fa5d316846111ce510702756f5feb/src/nacl/signing.py#L114-L122
0.006079
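A usage sketch of the conversion above through PyNaCl's public API; it generates a throwaway key pair and assumes nothing beyond the library itself:

from nacl.signing import SigningKey

signing_key = SigningKey.generate()
verify_key = signing_key.verify_key

# Reuse the Ed25519 verify key as a Curve25519 public key, e.g. for nacl.public.Box.
curve_pk = verify_key.to_curve25519_public_key()
print(bytes(curve_pk).hex())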
timothydmorton/VESPA
vespa/stars/populations.py
StarPopulation.bands
def bands(self): """ Bandpasses for which StarPopulation has magnitude data """ bands = [] for c in self.stars.columns: if re.search('_mag',c): bands.append(c) return bands
python
def bands(self): """ Bandpasses for which StarPopulation has magnitude data """ bands = [] for c in self.stars.columns: if re.search('_mag',c): bands.append(c) return bands
[ "def", "bands", "(", "self", ")", ":", "bands", "=", "[", "]", "for", "c", "in", "self", ".", "stars", ".", "columns", ":", "if", "re", ".", "search", "(", "'_mag'", ",", "c", ")", ":", "bands", ".", "append", "(", "c", ")", "return", "bands" ]
Bandpasses for which StarPopulation has magnitude data
[ "Bandpasses", "for", "which", "StarPopulation", "has", "magnitude", "data" ]
train
https://github.com/timothydmorton/VESPA/blob/0446b54d48009f3655cfd1a3957ceea21d3adcaa/vespa/stars/populations.py#L297-L305
0.012097
dswah/pyGAM
pygam/pygam.py
GAM._simulate_coef_from_bootstraps
def _simulate_coef_from_bootstraps( self, n_draws, coef_bootstraps, cov_bootstraps): """Simulate coefficients using bootstrap samples.""" # Sample indices uniformly from {0, ..., n_bootstraps - 1} # (Wood pg. 199 step 6) random_bootstrap_indices = np.random.choice( np.arange(len(coef_bootstraps)), size=n_draws, replace=True) # Simulate `n_draws` many random coefficient vectors from a # multivariate normal distribution with mean and covariance given by # the bootstrap samples (indexed by `random_bootstrap_indices`) of # `coef_bootstraps` and `cov_bootstraps`. Because it's faster to draw # many samples from a certain distribution all at once, we make a dict # mapping bootstrap indices to draw indices and use the `size` # parameter of `np.random.multivariate_normal` to sample the draws # needed from that bootstrap sample all at once. bootstrap_index_to_draw_indices = defaultdict(list) for draw_index, bootstrap_index in enumerate(random_bootstrap_indices): bootstrap_index_to_draw_indices[bootstrap_index].append(draw_index) coef_draws = np.empty((n_draws, len(self.coef_))) for bootstrap, draw_indices in bootstrap_index_to_draw_indices.items(): coef_draws[draw_indices] = np.random.multivariate_normal( coef_bootstraps[bootstrap], cov_bootstraps[bootstrap], size=len(draw_indices)) return coef_draws
python
def _simulate_coef_from_bootstraps( self, n_draws, coef_bootstraps, cov_bootstraps): """Simulate coefficients using bootstrap samples.""" # Sample indices uniformly from {0, ..., n_bootstraps - 1} # (Wood pg. 199 step 6) random_bootstrap_indices = np.random.choice( np.arange(len(coef_bootstraps)), size=n_draws, replace=True) # Simulate `n_draws` many random coefficient vectors from a # multivariate normal distribution with mean and covariance given by # the bootstrap samples (indexed by `random_bootstrap_indices`) of # `coef_bootstraps` and `cov_bootstraps`. Because it's faster to draw # many samples from a certain distribution all at once, we make a dict # mapping bootstrap indices to draw indices and use the `size` # parameter of `np.random.multivariate_normal` to sample the draws # needed from that bootstrap sample all at once. bootstrap_index_to_draw_indices = defaultdict(list) for draw_index, bootstrap_index in enumerate(random_bootstrap_indices): bootstrap_index_to_draw_indices[bootstrap_index].append(draw_index) coef_draws = np.empty((n_draws, len(self.coef_))) for bootstrap, draw_indices in bootstrap_index_to_draw_indices.items(): coef_draws[draw_indices] = np.random.multivariate_normal( coef_bootstraps[bootstrap], cov_bootstraps[bootstrap], size=len(draw_indices)) return coef_draws
[ "def", "_simulate_coef_from_bootstraps", "(", "self", ",", "n_draws", ",", "coef_bootstraps", ",", "cov_bootstraps", ")", ":", "# Sample indices uniformly from {0, ..., n_bootstraps - 1}", "# (Wood pg. 199 step 6)", "random_bootstrap_indices", "=", "np", ".", "random", ".", "choice", "(", "np", ".", "arange", "(", "len", "(", "coef_bootstraps", ")", ")", ",", "size", "=", "n_draws", ",", "replace", "=", "True", ")", "# Simulate `n_draws` many random coefficient vectors from a", "# multivariate normal distribution with mean and covariance given by", "# the bootstrap samples (indexed by `random_bootstrap_indices`) of", "# `coef_bootstraps` and `cov_bootstraps`. Because it's faster to draw", "# many samples from a certain distribution all at once, we make a dict", "# mapping bootstrap indices to draw indices and use the `size`", "# parameter of `np.random.multivariate_normal` to sample the draws", "# needed from that bootstrap sample all at once.", "bootstrap_index_to_draw_indices", "=", "defaultdict", "(", "list", ")", "for", "draw_index", ",", "bootstrap_index", "in", "enumerate", "(", "random_bootstrap_indices", ")", ":", "bootstrap_index_to_draw_indices", "[", "bootstrap_index", "]", ".", "append", "(", "draw_index", ")", "coef_draws", "=", "np", ".", "empty", "(", "(", "n_draws", ",", "len", "(", "self", ".", "coef_", ")", ")", ")", "for", "bootstrap", ",", "draw_indices", "in", "bootstrap_index_to_draw_indices", ".", "items", "(", ")", ":", "coef_draws", "[", "draw_indices", "]", "=", "np", ".", "random", ".", "multivariate_normal", "(", "coef_bootstraps", "[", "bootstrap", "]", ",", "cov_bootstraps", "[", "bootstrap", "]", ",", "size", "=", "len", "(", "draw_indices", ")", ")", "return", "coef_draws" ]
Simulate coefficients using bootstrap samples.
[ "Simulate", "coefficients", "using", "bootstrap", "samples", "." ]
train
https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L2164-L2191
0.001306
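The batching trick in _simulate_coef_from_bootstraps (group draw indices by which bootstrap sample they use, then call np.random.multivariate_normal once per group) can be seen in isolation. A sketch with toy means and covariances, not pyGAM internals:

from collections import defaultdict

import numpy as np

n_draws, dim = 6, 2
means = [np.zeros(dim), np.ones(dim)]    # one mean per bootstrap sample
covs = [np.eye(dim), 0.5 * np.eye(dim)]  # one covariance per bootstrap sample

# Wood pg. 199 step 6: pick a bootstrap sample uniformly for each draw.
picks = np.random.choice(len(means), size=n_draws, replace=True)

# Group draw indices by bootstrap index so each distribution is sampled once.
groups = defaultdict(list)
for draw_idx, boot_idx in enumerate(picks):
    groups[boot_idx].append(draw_idx)

draws = np.empty((n_draws, dim))
for boot_idx, draw_indices in groups.items():
    draws[draw_indices] = np.random.multivariate_normal(
        means[boot_idx], covs[boot_idx], size=len(draw_indices))

assert draws.shape == (n_draws, dim)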
specialunderwear/django-easymode
easymode/views.py
preview
def preview(request, content_type_id, object_id): """ This is an override for django.views.default.shortcut. It assumes that get_absolute_url returns an absolute url, so it does not do any of the very elaborate link checking that shortcut does. This version adds the language code to the url. (/en/blaat/). """ try: content_type = ContentType.objects.get(pk=content_type_id) obj = content_type.get_object_for_this_type(pk=object_id) except ObjectDoesNotExist: raise http.Http404("Content type %s object %s doesn't exist" % (content_type_id, object_id)) try: absolute_url = obj.get_absolute_url() except AttributeError: raise http.Http404("%s objects don't have get_absolute_url() methods" % content_type.name) if absolute_url.startswith('http://') or absolute_url.startswith('https://'): http.HttpResponseRedirect(absolute_url) else: absolute_url = fix_language_code(absolute_url, request.LANGUAGE_CODE) return http.HttpResponseRedirect(absolute_url)
python
def preview(request, content_type_id, object_id): """ This is an override for django.views.default.shortcut. It assumes that get_absolute_url returns an absolute url, so it does not do any of the very elaborate link checking that shortcut does. This version adds the language code to the url. (/en/blaat/). """ try: content_type = ContentType.objects.get(pk=content_type_id) obj = content_type.get_object_for_this_type(pk=object_id) except ObjectDoesNotExist: raise http.Http404("Content type %s object %s doesn't exist" % (content_type_id, object_id)) try: absolute_url = obj.get_absolute_url() except AttributeError: raise http.Http404("%s objects don't have get_absolute_url() methods" % content_type.name) if absolute_url.startswith('http://') or absolute_url.startswith('https://'): http.HttpResponseRedirect(absolute_url) else: absolute_url = fix_language_code(absolute_url, request.LANGUAGE_CODE) return http.HttpResponseRedirect(absolute_url)
[ "def", "preview", "(", "request", ",", "content_type_id", ",", "object_id", ")", ":", "try", ":", "content_type", "=", "ContentType", ".", "objects", ".", "get", "(", "pk", "=", "content_type_id", ")", "obj", "=", "content_type", ".", "get_object_for_this_type", "(", "pk", "=", "object_id", ")", "except", "ObjectDoesNotExist", ":", "raise", "http", ".", "Http404", "(", "\"Content type %s object %s doesn't exist\"", "%", "(", "content_type_id", ",", "object_id", ")", ")", "try", ":", "absolute_url", "=", "obj", ".", "get_absolute_url", "(", ")", "except", "AttributeError", ":", "raise", "http", ".", "Http404", "(", "\"%s objects don't have get_absolute_url() methods\"", "%", "content_type", ".", "name", ")", "if", "absolute_url", ".", "startswith", "(", "'http://'", ")", "or", "absolute_url", ".", "startswith", "(", "'https://'", ")", ":", "http", ".", "HttpResponseRedirect", "(", "absolute_url", ")", "else", ":", "absolute_url", "=", "fix_language_code", "(", "absolute_url", ",", "request", ".", "LANGUAGE_CODE", ")", "return", "http", ".", "HttpResponseRedirect", "(", "absolute_url", ")" ]
This is an override for django.views.default.shortcut. It assumes that get_absolute_url returns an absolute url, so it does not do any of the very elaborate link checking that shortcut does. This version adds the language code to the url. (/en/blaat/).
[ "This", "is", "an", "override", "for", "django", ".", "views", ".", "default", ".", "shortcut", ".", "It", "assumes", "that", "get_absolute_url", "returns", "an", "absolute", "url", "so", "it", "does", "not", "do", "any", "of", "the", "very", "elaborate", "link", "checking", "that", "shortcut", "does", ".", "This", "version", "adds", "the", "language", "code", "to", "the", "url", ".", "(", "/", "en", "/", "blaat", "/", ")", "." ]
train
https://github.com/specialunderwear/django-easymode/blob/92f674b91fb8c54d6e379e2664e2000872d9c95e/easymode/views.py#L9-L32
0.008985
neo4j/neo4j-python-driver
neo4j/types/spatial.py
dehydrate_point
def dehydrate_point(value): """ Dehydrator for Point data. :param value: :type value: Point :return: """ dim = len(value) if dim == 2: return Structure(b"X", value.srid, *value) elif dim == 3: return Structure(b"Y", value.srid, *value) else: raise ValueError("Cannot dehydrate Point with %d dimensions" % dim)
python
def dehydrate_point(value): """ Dehydrator for Point data. :param value: :type value: Point :return: """ dim = len(value) if dim == 2: return Structure(b"X", value.srid, *value) elif dim == 3: return Structure(b"Y", value.srid, *value) else: raise ValueError("Cannot dehydrate Point with %d dimensions" % dim)
[ "def", "dehydrate_point", "(", "value", ")", ":", "dim", "=", "len", "(", "value", ")", "if", "dim", "==", "2", ":", "return", "Structure", "(", "b\"X\"", ",", "value", ".", "srid", ",", "*", "value", ")", "elif", "dim", "==", "3", ":", "return", "Structure", "(", "b\"Y\"", ",", "value", ".", "srid", ",", "*", "value", ")", "else", ":", "raise", "ValueError", "(", "\"Cannot dehydrate Point with %d dimensions\"", "%", "dim", ")" ]
Dehydrator for Point data. :param value: :type value: Point :return:
[ "Dehydrator", "for", "Point", "data", "." ]
train
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/types/spatial.py#L122-L135
0.002703
ksbg/sparklanes
sparklanes/_framework/spark.py
SparkContextAndSessionContainer.set_spark
def set_spark(cls, master=None, appName=None, conf=None, hive_support=False): """Creates and initializes a new `SparkSession`. Argument signature is copied from `pyspark.sql.SparkSession <https://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.SparkSession>`_. """ sess = SparkSession.builder if master: sess.master(master) if appName: sess.appName(appName) if conf: sess.config(conf=conf) if hive_support: sess.enableHiveSupport() cls.spark = sess.getOrCreate()
python
def set_spark(cls, master=None, appName=None, conf=None, hive_support=False): """Creates and initializes a new `SparkSession`. Argument signature is copied from `pyspark.sql.SparkSession <https://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.SparkSession>`_. """ sess = SparkSession.builder if master: sess.master(master) if appName: sess.appName(appName) if conf: sess.config(conf=conf) if hive_support: sess.enableHiveSupport() cls.spark = sess.getOrCreate()
[ "def", "set_spark", "(", "cls", ",", "master", "=", "None", ",", "appName", "=", "None", ",", "conf", "=", "None", ",", "hive_support", "=", "False", ")", ":", "sess", "=", "SparkSession", ".", "builder", "if", "master", ":", "sess", ".", "master", "(", "master", ")", "if", "appName", ":", "sess", ".", "appName", "(", "appName", ")", "if", "conf", ":", "sess", ".", "config", "(", "conf", "=", "conf", ")", "if", "hive_support", ":", "sess", ".", "enableHiveSupport", "(", ")", "cls", ".", "spark", "=", "sess", ".", "getOrCreate", "(", ")" ]
Creates and initializes a new `SparkSession`. Argument signature is copied from `pyspark.sql.SparkSession <https://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.SparkSession>`_.
[ "Creates", "and", "initializes", "a", "new", "SparkSession", ".", "Argument", "signature", "is", "copied", "from", "pyspark", ".", "sql", ".", "SparkSession", "<https", ":", "//", "spark", ".", "apache", ".", "org", "/", "docs", "/", "latest", "/", "api", "/", "python", "/", "pyspark", ".", "sql", ".", "html#pyspark", ".", "sql", ".", "SparkSession", ">", "_", "." ]
train
https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_framework/spark.py#L39-L54
0.004902
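A usage sketch of the same builder chain against PySpark directly; the master, app name, and config key are illustrative. Note the original works without reassigning sess because SparkSession.Builder methods mutate and return the same builder.

from pyspark import SparkConf
from pyspark.sql import SparkSession

conf = SparkConf().set('spark.ui.showConsoleProgress', 'false')
spark = (SparkSession.builder
         .master('local[2]')
         .appName('sparklanes-demo')
         .config(conf=conf)
         .getOrCreate())
print(spark.version)
spark.stop()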
dossier/dossier.label
dossier/label/label.py
LabelStore.split_by_connected_component
def split_by_connected_component(self, idents): '''Split idents into equivalence classes based on connected components. ''' idents_remaining = set(idents) connected_components = [] for ident in idents: if ident not in idents_remaining: continue idents_remaining.remove(ident) connected_component = [ident] for label in self.connected_component(ident): cids = label.content_id1, label.content_id2 for cid in cids: if cid in idents_remaining: connected_component.append(cid) idents_remaining.remove(cid) connected_components.append(sorted(connected_component)) return connected_components
python
def split_by_connected_component(self, idents): '''Split idents into equivalence classes based on connected components. ''' idents_remaining = set(idents) connected_components = [] for ident in idents: if ident not in idents_remaining: continue idents_remaining.remove(ident) connected_component = [ident] for label in self.connected_component(ident): cids = label.content_id1, label.content_id2 for cid in cids: if cid in idents_remaining: connected_component.append(cid) idents_remaining.remove(cid) connected_components.append(sorted(connected_component)) return connected_components
[ "def", "split_by_connected_component", "(", "self", ",", "idents", ")", ":", "idents_remaining", "=", "set", "(", "idents", ")", "connected_components", "=", "[", "]", "for", "ident", "in", "idents", ":", "if", "ident", "not", "in", "idents_remaining", ":", "continue", "idents_remaining", ".", "remove", "(", "ident", ")", "connected_component", "=", "[", "ident", "]", "for", "label", "in", "self", ".", "connected_component", "(", "ident", ")", ":", "cids", "=", "label", ".", "content_id1", ",", "label", ".", "content_id2", "for", "cid", "in", "cids", ":", "if", "cid", "in", "idents_remaining", ":", "connected_component", ".", "append", "(", "cid", ")", "idents_remaining", ".", "remove", "(", "cid", ")", "connected_components", ".", "append", "(", "sorted", "(", "connected_component", ")", ")", "return", "connected_components" ]
Split idents into equivalence classes based on connected components.
[ "Split", "idents", "into", "equivalence", "classes", "based", "on", "connected", "components", "." ]
train
https://github.com/dossier/dossier.label/blob/d445e56b02ffd91ad46b0872cfbff62b9afef7ec/dossier/label/label.py#L655-L676
0.002436
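split_by_connected_component relies on a storage-backed connected_component(ident) iterator. The same partitioning idea over a plain edge list, as a self-contained BFS sketch (not dossier.label's implementation):

from collections import defaultdict, deque

def split_by_connected_component(idents, edges):
    # Partition idents into sorted equivalence classes implied by edges.
    adj = defaultdict(set)
    for a, b in edges:
        adj[a].add(b)
        adj[b].add(a)
    remaining = set(idents)
    components = []
    for ident in idents:
        if ident not in remaining:
            continue
        remaining.discard(ident)
        component, queue = [ident], deque([ident])
        while queue:
            for nbr in adj[queue.popleft()]:
                if nbr in remaining:
                    remaining.discard(nbr)
                    component.append(nbr)
                    queue.append(nbr)
        components.append(sorted(component))
    return components

assert split_by_connected_component(
    ['a', 'b', 'c', 'd'], [('a', 'b'), ('c', 'd')]) == [['a', 'b'], ['c', 'd']]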
wummel/dosage
dosagelib/scraper.py
Scraper.vote
def vote(cls): """Cast a public vote for this comic.""" url = configuration.VoteUrl + 'count/' uid = get_system_uid() data = {"name": cls.getName().replace('/', '_'), "uid": uid} page = urlopen(url, cls.session, data=data) return page.text
python
def vote(cls): """Cast a public vote for this comic.""" url = configuration.VoteUrl + 'count/' uid = get_system_uid() data = {"name": cls.getName().replace('/', '_'), "uid": uid} page = urlopen(url, cls.session, data=data) return page.text
[ "def", "vote", "(", "cls", ")", ":", "url", "=", "configuration", ".", "VoteUrl", "+", "'count/'", "uid", "=", "get_system_uid", "(", ")", "data", "=", "{", "\"name\"", ":", "cls", ".", "getName", "(", ")", ".", "replace", "(", "'/'", ",", "'_'", ")", ",", "\"uid\"", ":", "uid", "}", "page", "=", "urlopen", "(", "url", ",", "cls", ".", "session", ",", "data", "=", "data", ")", "return", "page", ".", "text" ]
Cast a public vote for this comic.
[ "Cast", "a", "public", "vote", "for", "this", "comic", "." ]
train
https://github.com/wummel/dosage/blob/a0109c3a46219f280e6e5e77183674e40da0f304/dosagelib/scraper.py#L255-L261
0.006969
henzk/ape
ape/container_mode/tasks.py
info
def info(): """ List information about this productive environment :return: """ print() print('root directory :', tasks.conf.APE_ROOT) print() print('active container :', os.environ.get('CONTAINER_NAME', '')) print() print('active product :', os.environ.get('PRODUCT_NAME', '')) print() print('ape feature selection :', tasks.FEATURE_SELECTION) print() print('containers and products:') print('-' * 30) print() for container_name in tasks.get_containers(): print(container_name) for product_name in tasks.get_products(container_name): print(' ' + product_name) print()
python
def info(): """ List information about this productive environment :return: """ print() print('root directory :', tasks.conf.APE_ROOT) print() print('active container :', os.environ.get('CONTAINER_NAME', '')) print() print('active product :', os.environ.get('PRODUCT_NAME', '')) print() print('ape feature selection :', tasks.FEATURE_SELECTION) print() print('containers and products:') print('-' * 30) print() for container_name in tasks.get_containers(): print(container_name) for product_name in tasks.get_products(container_name): print(' ' + product_name) print()
[ "def", "info", "(", ")", ":", "print", "(", ")", "print", "(", "'root directory :'", ",", "tasks", ".", "conf", ".", "APE_ROOT", ")", "print", "(", ")", "print", "(", "'active container :'", ",", "os", ".", "environ", ".", "get", "(", "'CONTAINER_NAME'", ",", "''", ")", ")", "print", "(", ")", "print", "(", "'active product :'", ",", "os", ".", "environ", ".", "get", "(", "'PRODUCT_NAME'", ",", "''", ")", ")", "print", "(", ")", "print", "(", "'ape feature selection :'", ",", "tasks", ".", "FEATURE_SELECTION", ")", "print", "(", ")", "print", "(", "'containers and products:'", ")", "print", "(", "'-'", "*", "30", ")", "print", "(", ")", "for", "container_name", "in", "tasks", ".", "get_containers", "(", ")", ":", "print", "(", "container_name", ")", "for", "product_name", "in", "tasks", ".", "get_products", "(", "container_name", ")", ":", "print", "(", "' '", "+", "product_name", ")", "print", "(", ")" ]
List information about this productive environment :return:
[ "List", "information", "about", "this", "productive", "environment", ":", "return", ":" ]
train
https://github.com/henzk/ape/blob/a1b7ea5e5b25c42beffeaaa5c32d94ad82634819/ape/container_mode/tasks.py#L54-L75
0.001441
fabioz/PyDev.Debugger
third_party/pep8/lib2to3/lib2to3/pgen2/tokenize.py
generate_tokens
def generate_tokens(readline): """ The generate_tokens() generator requires one argment, readline, which must be a callable object which provides the same interface as the readline() method of built-in file objects. Each call to the function should return one line of input as a string. Alternately, readline can be a callable function terminating with StopIteration: readline = open(myfile).next # Example of alternate readline The generator produces 5-tuples with these members: the token type; the token string; a 2-tuple (srow, scol) of ints specifying the row and column where the token begins in the source; a 2-tuple (erow, ecol) of ints specifying the row and column where the token ends in the source; and the line on which the token was found. The line passed is the logical line; continuation lines are included. """ lnum = parenlev = continued = 0 namechars, numchars = string.ascii_letters + '_', '0123456789' contstr, needcont = '', 0 contline = None indents = [0] while 1: # loop over lines in stream try: line = readline() except StopIteration: line = '' lnum = lnum + 1 pos, max = 0, len(line) if contstr: # continued string if not line: raise TokenError, ("EOF in multi-line string", strstart) endmatch = endprog.match(line) if endmatch: pos = end = endmatch.end(0) yield (STRING, contstr + line[:end], strstart, (lnum, end), contline + line) contstr, needcont = '', 0 contline = None elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n': yield (ERRORTOKEN, contstr + line, strstart, (lnum, len(line)), contline) contstr = '' contline = None continue else: contstr = contstr + line contline = contline + line continue elif parenlev == 0 and not continued: # new statement if not line: break column = 0 while pos < max: # measure leading whitespace if line[pos] == ' ': column = column + 1 elif line[pos] == '\t': column = (column//tabsize + 1)*tabsize elif line[pos] == '\f': column = 0 else: break pos = pos + 1 if pos == max: break if line[pos] in '#\r\n': # skip comments or blank lines if line[pos] == '#': comment_token = line[pos:].rstrip('\r\n') nl_pos = pos + len(comment_token) yield (COMMENT, comment_token, (lnum, pos), (lnum, pos + len(comment_token)), line) yield (NL, line[nl_pos:], (lnum, nl_pos), (lnum, len(line)), line) else: yield ((NL, COMMENT)[line[pos] == '#'], line[pos:], (lnum, pos), (lnum, len(line)), line) continue if column > indents[-1]: # count indents or dedents indents.append(column) yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line) while column < indents[-1]: if column not in indents: raise IndentationError( "unindent does not match any outer indentation level", ("<tokenize>", lnum, pos, line)) indents = indents[:-1] yield (DEDENT, '', (lnum, pos), (lnum, pos), line) else: # continued statement if not line: raise TokenError, ("EOF in multi-line statement", (lnum, 0)) continued = 0 while pos < max: pseudomatch = pseudoprog.match(line, pos) if pseudomatch: # scan for tokens start, end = pseudomatch.span(1) spos, epos, pos = (lnum, start), (lnum, end), end token, initial = line[start:end], line[start] if initial in numchars or \ (initial == '.' and token != '.'): # ordinary number yield (NUMBER, token, spos, epos, line) elif initial in '\r\n': newline = NEWLINE if parenlev > 0: newline = NL yield (newline, token, spos, epos, line) elif initial == '#': assert not token.endswith("\n") yield (COMMENT, token, spos, epos, line) elif token in triple_quoted: endprog = endprogs[token] endmatch = endprog.match(line, pos) if endmatch: # all on one line pos = endmatch.end(0) token = line[start:pos] yield (STRING, token, spos, (lnum, pos), line) else: strstart = (lnum, start) # multiple lines contstr = line[start:] contline = line break elif initial in single_quoted or \ token[:2] in single_quoted or \ token[:3] in single_quoted: if token[-1] == '\n': # continued string strstart = (lnum, start) endprog = (endprogs[initial] or endprogs[token[1]] or endprogs[token[2]]) contstr, needcont = line[start:], 1 contline = line break else: # ordinary string yield (STRING, token, spos, epos, line) elif initial in namechars: # ordinary name yield (NAME, token, spos, epos, line) elif initial == '\\': # continued stmt # This yield is new; needed for better idempotency: yield (NL, token, spos, (lnum, pos), line) continued = 1 else: if initial in '([{': parenlev = parenlev + 1 elif initial in ')]}': parenlev = parenlev - 1 yield (OP, token, spos, epos, line) else: yield (ERRORTOKEN, line[pos], (lnum, pos), (lnum, pos+1), line) pos = pos + 1 for indent in indents[1:]: # pop remaining indent levels yield (DEDENT, '', (lnum, 0), (lnum, 0), '') yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
python
def generate_tokens(readline): """ The generate_tokens() generator requires one argment, readline, which must be a callable object which provides the same interface as the readline() method of built-in file objects. Each call to the function should return one line of input as a string. Alternately, readline can be a callable function terminating with StopIteration: readline = open(myfile).next # Example of alternate readline The generator produces 5-tuples with these members: the token type; the token string; a 2-tuple (srow, scol) of ints specifying the row and column where the token begins in the source; a 2-tuple (erow, ecol) of ints specifying the row and column where the token ends in the source; and the line on which the token was found. The line passed is the logical line; continuation lines are included. """ lnum = parenlev = continued = 0 namechars, numchars = string.ascii_letters + '_', '0123456789' contstr, needcont = '', 0 contline = None indents = [0] while 1: # loop over lines in stream try: line = readline() except StopIteration: line = '' lnum = lnum + 1 pos, max = 0, len(line) if contstr: # continued string if not line: raise TokenError, ("EOF in multi-line string", strstart) endmatch = endprog.match(line) if endmatch: pos = end = endmatch.end(0) yield (STRING, contstr + line[:end], strstart, (lnum, end), contline + line) contstr, needcont = '', 0 contline = None elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n': yield (ERRORTOKEN, contstr + line, strstart, (lnum, len(line)), contline) contstr = '' contline = None continue else: contstr = contstr + line contline = contline + line continue elif parenlev == 0 and not continued: # new statement if not line: break column = 0 while pos < max: # measure leading whitespace if line[pos] == ' ': column = column + 1 elif line[pos] == '\t': column = (column//tabsize + 1)*tabsize elif line[pos] == '\f': column = 0 else: break pos = pos + 1 if pos == max: break if line[pos] in '#\r\n': # skip comments or blank lines if line[pos] == '#': comment_token = line[pos:].rstrip('\r\n') nl_pos = pos + len(comment_token) yield (COMMENT, comment_token, (lnum, pos), (lnum, pos + len(comment_token)), line) yield (NL, line[nl_pos:], (lnum, nl_pos), (lnum, len(line)), line) else: yield ((NL, COMMENT)[line[pos] == '#'], line[pos:], (lnum, pos), (lnum, len(line)), line) continue if column > indents[-1]: # count indents or dedents indents.append(column) yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line) while column < indents[-1]: if column not in indents: raise IndentationError( "unindent does not match any outer indentation level", ("<tokenize>", lnum, pos, line)) indents = indents[:-1] yield (DEDENT, '', (lnum, pos), (lnum, pos), line) else: # continued statement if not line: raise TokenError, ("EOF in multi-line statement", (lnum, 0)) continued = 0 while pos < max: pseudomatch = pseudoprog.match(line, pos) if pseudomatch: # scan for tokens start, end = pseudomatch.span(1) spos, epos, pos = (lnum, start), (lnum, end), end token, initial = line[start:end], line[start] if initial in numchars or \ (initial == '.' 
and token != '.'): # ordinary number yield (NUMBER, token, spos, epos, line) elif initial in '\r\n': newline = NEWLINE if parenlev > 0: newline = NL yield (newline, token, spos, epos, line) elif initial == '#': assert not token.endswith("\n") yield (COMMENT, token, spos, epos, line) elif token in triple_quoted: endprog = endprogs[token] endmatch = endprog.match(line, pos) if endmatch: # all on one line pos = endmatch.end(0) token = line[start:pos] yield (STRING, token, spos, (lnum, pos), line) else: strstart = (lnum, start) # multiple lines contstr = line[start:] contline = line break elif initial in single_quoted or \ token[:2] in single_quoted or \ token[:3] in single_quoted: if token[-1] == '\n': # continued string strstart = (lnum, start) endprog = (endprogs[initial] or endprogs[token[1]] or endprogs[token[2]]) contstr, needcont = line[start:], 1 contline = line break else: # ordinary string yield (STRING, token, spos, epos, line) elif initial in namechars: # ordinary name yield (NAME, token, spos, epos, line) elif initial == '\\': # continued stmt # This yield is new; needed for better idempotency: yield (NL, token, spos, (lnum, pos), line) continued = 1 else: if initial in '([{': parenlev = parenlev + 1 elif initial in ')]}': parenlev = parenlev - 1 yield (OP, token, spos, epos, line) else: yield (ERRORTOKEN, line[pos], (lnum, pos), (lnum, pos+1), line) pos = pos + 1 for indent in indents[1:]: # pop remaining indent levels yield (DEDENT, '', (lnum, 0), (lnum, 0), '') yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
[ "def", "generate_tokens", "(", "readline", ")", ":", "lnum", "=", "parenlev", "=", "continued", "=", "0", "namechars", ",", "numchars", "=", "string", ".", "ascii_letters", "+", "'_'", ",", "'0123456789'", "contstr", ",", "needcont", "=", "''", ",", "0", "contline", "=", "None", "indents", "=", "[", "0", "]", "while", "1", ":", "# loop over lines in stream", "try", ":", "line", "=", "readline", "(", ")", "except", "StopIteration", ":", "line", "=", "''", "lnum", "=", "lnum", "+", "1", "pos", ",", "max", "=", "0", ",", "len", "(", "line", ")", "if", "contstr", ":", "# continued string", "if", "not", "line", ":", "raise", "TokenError", ",", "(", "\"EOF in multi-line string\"", ",", "strstart", ")", "endmatch", "=", "endprog", ".", "match", "(", "line", ")", "if", "endmatch", ":", "pos", "=", "end", "=", "endmatch", ".", "end", "(", "0", ")", "yield", "(", "STRING", ",", "contstr", "+", "line", "[", ":", "end", "]", ",", "strstart", ",", "(", "lnum", ",", "end", ")", ",", "contline", "+", "line", ")", "contstr", ",", "needcont", "=", "''", ",", "0", "contline", "=", "None", "elif", "needcont", "and", "line", "[", "-", "2", ":", "]", "!=", "'\\\\\\n'", "and", "line", "[", "-", "3", ":", "]", "!=", "'\\\\\\r\\n'", ":", "yield", "(", "ERRORTOKEN", ",", "contstr", "+", "line", ",", "strstart", ",", "(", "lnum", ",", "len", "(", "line", ")", ")", ",", "contline", ")", "contstr", "=", "''", "contline", "=", "None", "continue", "else", ":", "contstr", "=", "contstr", "+", "line", "contline", "=", "contline", "+", "line", "continue", "elif", "parenlev", "==", "0", "and", "not", "continued", ":", "# new statement", "if", "not", "line", ":", "break", "column", "=", "0", "while", "pos", "<", "max", ":", "# measure leading whitespace", "if", "line", "[", "pos", "]", "==", "' '", ":", "column", "=", "column", "+", "1", "elif", "line", "[", "pos", "]", "==", "'\\t'", ":", "column", "=", "(", "column", "//", "tabsize", "+", "1", ")", "*", "tabsize", "elif", "line", "[", "pos", "]", "==", "'\\f'", ":", "column", "=", "0", "else", ":", "break", "pos", "=", "pos", "+", "1", "if", "pos", "==", "max", ":", "break", "if", "line", "[", "pos", "]", "in", "'#\\r\\n'", ":", "# skip comments or blank lines", "if", "line", "[", "pos", "]", "==", "'#'", ":", "comment_token", "=", "line", "[", "pos", ":", "]", ".", "rstrip", "(", "'\\r\\n'", ")", "nl_pos", "=", "pos", "+", "len", "(", "comment_token", ")", "yield", "(", "COMMENT", ",", "comment_token", ",", "(", "lnum", ",", "pos", ")", ",", "(", "lnum", ",", "pos", "+", "len", "(", "comment_token", ")", ")", ",", "line", ")", "yield", "(", "NL", ",", "line", "[", "nl_pos", ":", "]", ",", "(", "lnum", ",", "nl_pos", ")", ",", "(", "lnum", ",", "len", "(", "line", ")", ")", ",", "line", ")", "else", ":", "yield", "(", "(", "NL", ",", "COMMENT", ")", "[", "line", "[", "pos", "]", "==", "'#'", "]", ",", "line", "[", "pos", ":", "]", ",", "(", "lnum", ",", "pos", ")", ",", "(", "lnum", ",", "len", "(", "line", ")", ")", ",", "line", ")", "continue", "if", "column", ">", "indents", "[", "-", "1", "]", ":", "# count indents or dedents", "indents", ".", "append", "(", "column", ")", "yield", "(", "INDENT", ",", "line", "[", ":", "pos", "]", ",", "(", "lnum", ",", "0", ")", ",", "(", "lnum", ",", "pos", ")", ",", "line", ")", "while", "column", "<", "indents", "[", "-", "1", "]", ":", "if", "column", "not", "in", "indents", ":", "raise", "IndentationError", "(", "\"unindent does not match any outer indentation level\"", ",", "(", "\"<tokenize>\"", ",", "lnum", ",", "pos", ",", 
"line", ")", ")", "indents", "=", "indents", "[", ":", "-", "1", "]", "yield", "(", "DEDENT", ",", "''", ",", "(", "lnum", ",", "pos", ")", ",", "(", "lnum", ",", "pos", ")", ",", "line", ")", "else", ":", "# continued statement", "if", "not", "line", ":", "raise", "TokenError", ",", "(", "\"EOF in multi-line statement\"", ",", "(", "lnum", ",", "0", ")", ")", "continued", "=", "0", "while", "pos", "<", "max", ":", "pseudomatch", "=", "pseudoprog", ".", "match", "(", "line", ",", "pos", ")", "if", "pseudomatch", ":", "# scan for tokens", "start", ",", "end", "=", "pseudomatch", ".", "span", "(", "1", ")", "spos", ",", "epos", ",", "pos", "=", "(", "lnum", ",", "start", ")", ",", "(", "lnum", ",", "end", ")", ",", "end", "token", ",", "initial", "=", "line", "[", "start", ":", "end", "]", ",", "line", "[", "start", "]", "if", "initial", "in", "numchars", "or", "(", "initial", "==", "'.'", "and", "token", "!=", "'.'", ")", ":", "# ordinary number", "yield", "(", "NUMBER", ",", "token", ",", "spos", ",", "epos", ",", "line", ")", "elif", "initial", "in", "'\\r\\n'", ":", "newline", "=", "NEWLINE", "if", "parenlev", ">", "0", ":", "newline", "=", "NL", "yield", "(", "newline", ",", "token", ",", "spos", ",", "epos", ",", "line", ")", "elif", "initial", "==", "'#'", ":", "assert", "not", "token", ".", "endswith", "(", "\"\\n\"", ")", "yield", "(", "COMMENT", ",", "token", ",", "spos", ",", "epos", ",", "line", ")", "elif", "token", "in", "triple_quoted", ":", "endprog", "=", "endprogs", "[", "token", "]", "endmatch", "=", "endprog", ".", "match", "(", "line", ",", "pos", ")", "if", "endmatch", ":", "# all on one line", "pos", "=", "endmatch", ".", "end", "(", "0", ")", "token", "=", "line", "[", "start", ":", "pos", "]", "yield", "(", "STRING", ",", "token", ",", "spos", ",", "(", "lnum", ",", "pos", ")", ",", "line", ")", "else", ":", "strstart", "=", "(", "lnum", ",", "start", ")", "# multiple lines", "contstr", "=", "line", "[", "start", ":", "]", "contline", "=", "line", "break", "elif", "initial", "in", "single_quoted", "or", "token", "[", ":", "2", "]", "in", "single_quoted", "or", "token", "[", ":", "3", "]", "in", "single_quoted", ":", "if", "token", "[", "-", "1", "]", "==", "'\\n'", ":", "# continued string", "strstart", "=", "(", "lnum", ",", "start", ")", "endprog", "=", "(", "endprogs", "[", "initial", "]", "or", "endprogs", "[", "token", "[", "1", "]", "]", "or", "endprogs", "[", "token", "[", "2", "]", "]", ")", "contstr", ",", "needcont", "=", "line", "[", "start", ":", "]", ",", "1", "contline", "=", "line", "break", "else", ":", "# ordinary string", "yield", "(", "STRING", ",", "token", ",", "spos", ",", "epos", ",", "line", ")", "elif", "initial", "in", "namechars", ":", "# ordinary name", "yield", "(", "NAME", ",", "token", ",", "spos", ",", "epos", ",", "line", ")", "elif", "initial", "==", "'\\\\'", ":", "# continued stmt", "# This yield is new; needed for better idempotency:", "yield", "(", "NL", ",", "token", ",", "spos", ",", "(", "lnum", ",", "pos", ")", ",", "line", ")", "continued", "=", "1", "else", ":", "if", "initial", "in", "'([{'", ":", "parenlev", "=", "parenlev", "+", "1", "elif", "initial", "in", "')]}'", ":", "parenlev", "=", "parenlev", "-", "1", "yield", "(", "OP", ",", "token", ",", "spos", ",", "epos", ",", "line", ")", "else", ":", "yield", "(", "ERRORTOKEN", ",", "line", "[", "pos", "]", ",", "(", "lnum", ",", "pos", ")", ",", "(", "lnum", ",", "pos", "+", "1", ")", ",", "line", ")", "pos", "=", "pos", "+", "1", "for", "indent", "in", "indents", "[", 
"1", ":", "]", ":", "# pop remaining indent levels", "yield", "(", "DEDENT", ",", "''", ",", "(", "lnum", ",", "0", ")", ",", "(", "lnum", ",", "0", ")", ",", "''", ")", "yield", "(", "ENDMARKER", ",", "''", ",", "(", "lnum", ",", "0", ")", ",", "(", "lnum", ",", "0", ")", ",", "''", ")" ]
The generate_tokens() generator requires one argument, readline, which must be a callable object which provides the same interface as the readline() method of built-in file objects. Each call to the function should return one line of input as a string. Alternately, readline can be a callable function terminating with StopIteration: readline = open(myfile).next # Example of alternate readline The generator produces 5-tuples with these members: the token type; the token string; a 2-tuple (srow, scol) of ints specifying the row and column where the token begins in the source; a 2-tuple (erow, ecol) of ints specifying the row and column where the token ends in the source; and the line on which the token was found. The line passed is the logical line; continuation lines are included.
[ "The", "generate_tokens", "()", "generator", "requires", "one", "argment", "readline", "which", "must", "be", "a", "callable", "object", "which", "provides", "the", "same", "interface", "as", "the", "readline", "()", "method", "of", "built", "-", "in", "file", "objects", ".", "Each", "call", "to", "the", "function", "should", "return", "one", "line", "of", "input", "as", "a", "string", ".", "Alternately", "readline", "can", "be", "a", "callable", "function", "terminating", "with", "StopIteration", ":", "readline", "=", "open", "(", "myfile", ")", ".", "next", "#", "Example", "of", "alternate", "readline" ]
train
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/third_party/pep8/lib2to3/lib2to3/pgen2/tokenize.py#L344-L494
0.001959
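The record above mirrors the standard library's tokenize module, so a short usage sketch can lean on the Python 3 stdlib; the 5-tuple layout matches the docstring:

import io
import tokenize

# Drive a generate_tokens()-style generator with a readline callable.
source = "x = 1  # comment\n"
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    # Each token is a 5-tuple: type, string, (srow, scol), (erow, ecol), line.
    print(tokenize.tok_name[tok.type], repr(tok.string))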
awslabs/serverless-application-model
samtranslator/plugins/api/implicit_api_plugin.py
ImplicitApiPlugin._get_api_id
def _get_api_id(self, event_properties): """ Get API logical id from API event properties. Handles case where API id is not specified or is a reference to a logical id. """ api_id = event_properties.get("RestApiId") if isinstance(api_id, dict) and "Ref" in api_id: api_id = api_id["Ref"] return api_id
python
def _get_api_id(self, event_properties): """ Get API logical id from API event properties. Handles case where API id is not specified or is a reference to a logical id. """ api_id = event_properties.get("RestApiId") if isinstance(api_id, dict) and "Ref" in api_id: api_id = api_id["Ref"] return api_id
[ "def", "_get_api_id", "(", "self", ",", "event_properties", ")", ":", "api_id", "=", "event_properties", ".", "get", "(", "\"RestApiId\"", ")", "if", "isinstance", "(", "api_id", ",", "dict", ")", "and", "\"Ref\"", "in", "api_id", ":", "api_id", "=", "api_id", "[", "\"Ref\"", "]", "return", "api_id" ]
Get API logical id from API event properties. Handles case where API id is not specified or is a reference to a logical id.
[ "Get", "API", "logical", "id", "from", "API", "event", "properties", "." ]
train
https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/samtranslator/plugins/api/implicit_api_plugin.py#L223-L232
0.008108
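A self-contained restatement of the RestApiId dereferencing shown in the record above: plain strings pass through, while CloudFormation-style {"Ref": ...} dicts are unwrapped to the logical id.

def get_api_id(event_properties):
    api_id = event_properties.get("RestApiId")
    if isinstance(api_id, dict) and "Ref" in api_id:
        api_id = api_id["Ref"]
    return api_id

assert get_api_id({"RestApiId": {"Ref": "MyApi"}}) == "MyApi"
assert get_api_id({"RestApiId": "abc123"}) == "abc123"
assert get_api_id({}) is None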
gem/oq-engine
openquake/hmtk/faults/fault_models.py
mtkActiveFault.get_tectonic_regionalisation
def get_tectonic_regionalisation(self, regionalisation, region_type=None): ''' Defines the tectonic region and updates the shear modulus, magnitude scaling relation and displacement to length ratio using the regional values, if not previously defined for the fault :param regionalistion: Instance of the :class: openquake.hmtk.faults.tectonic_regionalisaion.TectonicRegionalisation :param str region_type: Name of the region type - if not in regionalisation an error will be raised ''' if region_type: self.trt = region_type if not self.trt in regionalisation.key_list: raise ValueError('Tectonic region classification missing or ' 'not defined in regionalisation') for iloc, key_val in enumerate(regionalisation.key_list): if self.trt in key_val: self.regionalisation = regionalisation.regionalisation[iloc] # Update undefined shear modulus from tectonic regionalisation if not self.shear_modulus: self.shear_modulus = self.regionalisation.shear_modulus # Update undefined scaling relation from tectonic # regionalisation if not self.msr: self.msr = self.regionalisation.scaling_rel # Update undefined displacement to length ratio from tectonic # regionalisation if not self.disp_length_ratio: self.disp_length_ratio = \ self.regionalisation.disp_length_ratio break return
python
def get_tectonic_regionalisation(self, regionalisation, region_type=None): ''' Defines the tectonic region and updates the shear modulus, magnitude scaling relation and displacement to length ratio using the regional values, if not previously defined for the fault :param regionalistion: Instance of the :class: openquake.hmtk.faults.tectonic_regionalisaion.TectonicRegionalisation :param str region_type: Name of the region type - if not in regionalisation an error will be raised ''' if region_type: self.trt = region_type if not self.trt in regionalisation.key_list: raise ValueError('Tectonic region classification missing or ' 'not defined in regionalisation') for iloc, key_val in enumerate(regionalisation.key_list): if self.trt in key_val: self.regionalisation = regionalisation.regionalisation[iloc] # Update undefined shear modulus from tectonic regionalisation if not self.shear_modulus: self.shear_modulus = self.regionalisation.shear_modulus # Update undefined scaling relation from tectonic # regionalisation if not self.msr: self.msr = self.regionalisation.scaling_rel # Update undefined displacement to length ratio from tectonic # regionalisation if not self.disp_length_ratio: self.disp_length_ratio = \ self.regionalisation.disp_length_ratio break return
[ "def", "get_tectonic_regionalisation", "(", "self", ",", "regionalisation", ",", "region_type", "=", "None", ")", ":", "if", "region_type", ":", "self", ".", "trt", "=", "region_type", "if", "not", "self", ".", "trt", "in", "regionalisation", ".", "key_list", ":", "raise", "ValueError", "(", "'Tectonic region classification missing or '", "'not defined in regionalisation'", ")", "for", "iloc", ",", "key_val", "in", "enumerate", "(", "regionalisation", ".", "key_list", ")", ":", "if", "self", ".", "trt", "in", "key_val", ":", "self", ".", "regionalisation", "=", "regionalisation", ".", "regionalisation", "[", "iloc", "]", "# Update undefined shear modulus from tectonic regionalisation", "if", "not", "self", ".", "shear_modulus", ":", "self", ".", "shear_modulus", "=", "self", ".", "regionalisation", ".", "shear_modulus", "# Update undefined scaling relation from tectonic", "# regionalisation", "if", "not", "self", ".", "msr", ":", "self", ".", "msr", "=", "self", ".", "regionalisation", ".", "scaling_rel", "# Update undefined displacement to length ratio from tectonic", "# regionalisation", "if", "not", "self", ".", "disp_length_ratio", ":", "self", ".", "disp_length_ratio", "=", "self", ".", "regionalisation", ".", "disp_length_ratio", "break", "return" ]
Defines the tectonic region and updates the shear modulus, magnitude scaling relation and displacement to length ratio using the regional values, if not previously defined for the fault :param regionalisation: Instance of the :class: openquake.hmtk.faults.tectonic_regionalisaion.TectonicRegionalisation :param str region_type: Name of the region type - if not in regionalisation an error will be raised
[ "Defines", "the", "tectonic", "region", "and", "updates", "the", "shear", "modulus", "magnitude", "scaling", "relation", "and", "displacement", "to", "length", "ratio", "using", "the", "regional", "values", "if", "not", "previously", "defined", "for", "the", "fault" ]
train
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/faults/fault_models.py#L265-L301
0.001743
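The core of the record above is a fill-unset-attributes-from-regional-defaults pattern; a minimal sketch, with fault and region as duck-typed stand-ins for the fault object and a regionalisation entry:

def apply_region_defaults(fault, region):
    # Only attributes the fault leaves unset are taken from the region.
    if not fault.shear_modulus:
        fault.shear_modulus = region.shear_modulus
    if not fault.msr:
        fault.msr = region.scaling_rel
    if not fault.disp_length_ratio:
        fault.disp_length_ratio = region.disp_length_ratio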
seleniumbase/SeleniumBase
seleniumbase/core/tour_helper.py
play_bootstrap_tour
def play_bootstrap_tour( driver, tour_steps, browser, msg_dur, name=None, interval=0): """ Plays a Bootstrap tour on the current website. """ instructions = "" for tour_step in tour_steps[name]: instructions += tour_step instructions += ( """]); // Initialize the tour tour.init(); // Start the tour tour.start(); // Fix timing issue by restarting tour immediately tour.restart(); // Save for later $tour = tour;""") if interval and interval > 0: if interval < 1: interval = 1 interval = str(float(interval) * 1000.0) instructions = instructions.replace( 'duration: 0,', 'duration: %s,' % interval) if not is_bootstrap_activated(driver): activate_bootstrap(driver) if len(tour_steps[name]) > 1: try: if "element: " in tour_steps[name][1]: selector = re.search( r"[\S\s]+element: '([\S\s]+)',[\S\s]+title: '", tour_steps[name][1]).group(1) selector = selector.replace('\\', '').replace(':first', '') page_actions.wait_for_element_present( driver, selector, by=By.CSS_SELECTOR, timeout=settings.SMALL_TIMEOUT) else: selector = "html" except Exception: js_utils.post_messenger_error_message( driver, "Tour Error: {'%s'} was not found!" % selector, msg_dur) raise Exception( "Tour Error: {'%s'} was not found! " "Exiting due to failure on first tour step!" "" % selector) driver.execute_script(instructions) tour_on = True while tour_on: try: time.sleep(0.01) if browser != "firefox": result = driver.execute_script( "return $tour.ended()") else: page_actions.wait_for_element_present( driver, ".tour-tour", by=By.CSS_SELECTOR, timeout=0.4) result = False except Exception: tour_on = False result = None if result is False: tour_on = True else: try: time.sleep(0.01) if browser != "firefox": result = driver.execute_script( "return $tour.ended()") else: page_actions.wait_for_element_present( driver, ".tour-tour", by=By.CSS_SELECTOR, timeout=0.4) result = False if result is False: time.sleep(0.1) continue else: return except Exception: tour_on = False time.sleep(0.1)
python
def play_bootstrap_tour( driver, tour_steps, browser, msg_dur, name=None, interval=0): """ Plays a Bootstrap tour on the current website. """ instructions = "" for tour_step in tour_steps[name]: instructions += tour_step instructions += ( """]); // Initialize the tour tour.init(); // Start the tour tour.start(); // Fix timing issue by restarting tour immediately tour.restart(); // Save for later $tour = tour;""") if interval and interval > 0: if interval < 1: interval = 1 interval = str(float(interval) * 1000.0) instructions = instructions.replace( 'duration: 0,', 'duration: %s,' % interval) if not is_bootstrap_activated(driver): activate_bootstrap(driver) if len(tour_steps[name]) > 1: try: if "element: " in tour_steps[name][1]: selector = re.search( r"[\S\s]+element: '([\S\s]+)',[\S\s]+title: '", tour_steps[name][1]).group(1) selector = selector.replace('\\', '').replace(':first', '') page_actions.wait_for_element_present( driver, selector, by=By.CSS_SELECTOR, timeout=settings.SMALL_TIMEOUT) else: selector = "html" except Exception: js_utils.post_messenger_error_message( driver, "Tour Error: {'%s'} was not found!" % selector, msg_dur) raise Exception( "Tour Error: {'%s'} was not found! " "Exiting due to failure on first tour step!" "" % selector) driver.execute_script(instructions) tour_on = True while tour_on: try: time.sleep(0.01) if browser != "firefox": result = driver.execute_script( "return $tour.ended()") else: page_actions.wait_for_element_present( driver, ".tour-tour", by=By.CSS_SELECTOR, timeout=0.4) result = False except Exception: tour_on = False result = None if result is False: tour_on = True else: try: time.sleep(0.01) if browser != "firefox": result = driver.execute_script( "return $tour.ended()") else: page_actions.wait_for_element_present( driver, ".tour-tour", by=By.CSS_SELECTOR, timeout=0.4) result = False if result is False: time.sleep(0.1) continue else: return except Exception: tour_on = False time.sleep(0.1)
[ "def", "play_bootstrap_tour", "(", "driver", ",", "tour_steps", ",", "browser", ",", "msg_dur", ",", "name", "=", "None", ",", "interval", "=", "0", ")", ":", "instructions", "=", "\"\"", "for", "tour_step", "in", "tour_steps", "[", "name", "]", ":", "instructions", "+=", "tour_step", "instructions", "+=", "(", "\"\"\"]);\n // Initialize the tour\n tour.init();\n // Start the tour\n tour.start();\n // Fix timing issue by restarting tour immediately\n tour.restart();\n // Save for later\n $tour = tour;\"\"\"", ")", "if", "interval", "and", "interval", ">", "0", ":", "if", "interval", "<", "1", ":", "interval", "=", "1", "interval", "=", "str", "(", "float", "(", "interval", ")", "*", "1000.0", ")", "instructions", "=", "instructions", ".", "replace", "(", "'duration: 0,'", ",", "'duration: %s,'", "%", "interval", ")", "if", "not", "is_bootstrap_activated", "(", "driver", ")", ":", "activate_bootstrap", "(", "driver", ")", "if", "len", "(", "tour_steps", "[", "name", "]", ")", ">", "1", ":", "try", ":", "if", "\"element: \"", "in", "tour_steps", "[", "name", "]", "[", "1", "]", ":", "selector", "=", "re", ".", "search", "(", "r\"[\\S\\s]+element: '([\\S\\s]+)',[\\S\\s]+title: '\"", ",", "tour_steps", "[", "name", "]", "[", "1", "]", ")", ".", "group", "(", "1", ")", "selector", "=", "selector", ".", "replace", "(", "'\\\\'", ",", "''", ")", ".", "replace", "(", "':first'", ",", "''", ")", "page_actions", ".", "wait_for_element_present", "(", "driver", ",", "selector", ",", "by", "=", "By", ".", "CSS_SELECTOR", ",", "timeout", "=", "settings", ".", "SMALL_TIMEOUT", ")", "else", ":", "selector", "=", "\"html\"", "except", "Exception", ":", "js_utils", ".", "post_messenger_error_message", "(", "driver", ",", "\"Tour Error: {'%s'} was not found!\"", "%", "selector", ",", "msg_dur", ")", "raise", "Exception", "(", "\"Tour Error: {'%s'} was not found! \"", "\"Exiting due to failure on first tour step!\"", "\"\"", "%", "selector", ")", "driver", ".", "execute_script", "(", "instructions", ")", "tour_on", "=", "True", "while", "tour_on", ":", "try", ":", "time", ".", "sleep", "(", "0.01", ")", "if", "browser", "!=", "\"firefox\"", ":", "result", "=", "driver", ".", "execute_script", "(", "\"return $tour.ended()\"", ")", "else", ":", "page_actions", ".", "wait_for_element_present", "(", "driver", ",", "\".tour-tour\"", ",", "by", "=", "By", ".", "CSS_SELECTOR", ",", "timeout", "=", "0.4", ")", "result", "=", "False", "except", "Exception", ":", "tour_on", "=", "False", "result", "=", "None", "if", "result", "is", "False", ":", "tour_on", "=", "True", "else", ":", "try", ":", "time", ".", "sleep", "(", "0.01", ")", "if", "browser", "!=", "\"firefox\"", ":", "result", "=", "driver", ".", "execute_script", "(", "\"return $tour.ended()\"", ")", "else", ":", "page_actions", ".", "wait_for_element_present", "(", "driver", ",", "\".tour-tour\"", ",", "by", "=", "By", ".", "CSS_SELECTOR", ",", "timeout", "=", "0.4", ")", "result", "=", "False", "if", "result", "is", "False", ":", "time", ".", "sleep", "(", "0.1", ")", "continue", "else", ":", "return", "except", "Exception", ":", "tour_on", "=", "False", "time", ".", "sleep", "(", "0.1", ")" ]
Plays a Bootstrap tour on the current website.
[ "Plays", "a", "Bootstrap", "tour", "on", "the", "current", "website", "." ]
train
https://github.com/seleniumbase/SeleniumBase/blob/62e5b43ee1f90a9ed923841bdd53b1b38358f43a/seleniumbase/core/tour_helper.py#L326-L408
0.00034
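Stripped of the browser specifics, the loop in the record above polls a probe until the tour reports it has ended or the probe raises because the tour element is gone; a sketch, where probe stands in for driver.execute_script("return $tour.ended()"):

import time

def wait_for_tour_end(probe, poll=0.1):
    while True:
        try:
            if probe():  # True once the tour reports it has ended
                return
        except Exception:
            return  # tour element gone; treat the tour as finished
        time.sleep(poll)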
brechtm/rinohtype
src/rinoh/backend/pdf/xobject/purepng.py
Writer.set_white_point
def set_white_point(self, white_point, point2=None): """Set white point part of cHRM chunk""" if isinstance(white_point, float) and isinstance(point2, float): white_point = (white_point, point2) self.white_point = white_point
python
def set_white_point(self, white_point, point2=None): """Set white point part of cHRM chunk""" if isinstance(white_point, float) and isinstance(point2, float): white_point = (white_point, point2) self.white_point = white_point
[ "def", "set_white_point", "(", "self", ",", "white_point", ",", "point2", "=", "None", ")", ":", "if", "isinstance", "(", "white_point", ",", "float", ")", "and", "isinstance", "(", "point2", ",", "float", ")", ":", "white_point", "=", "(", "white_point", ",", "point2", ")", "self", ".", "white_point", "=", "white_point" ]
Set white point part of cHRM chunk
[ "Set", "white", "point", "part", "of", "cHRM", "chunk" ]
train
https://github.com/brechtm/rinohtype/blob/40a63c4e5ad7550f62b6860f1812cb67cafb9dc7/src/rinoh/backend/pdf/xobject/purepng.py#L1102-L1106
0.007663
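The setter above accepts either a ready-made pair or two separate floats; the normalisation step shown in isolation:

def normalise_point(white_point, point2=None):
    # Two floats are packed into a tuple; anything else passes through.
    if isinstance(white_point, float) and isinstance(point2, float):
        white_point = (white_point, point2)
    return white_point

assert normalise_point(0.3127, 0.3290) == (0.3127, 0.3290)
assert normalise_point((0.3127, 0.3290)) == (0.3127, 0.3290)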
asweigart/pysimplevalidate
src/pysimplevalidate/__init__.py
validateDate
def validateDate(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, formats=('%Y/%m/%d', '%y/%m/%d', '%m/%d/%Y', '%m/%d/%y', '%x'), excMsg=None): """Raises ValidationException if value is not a time formatted in one of the formats formats. Returns a datetime.date object of value. * value (str): The value being validated as a time. * blank (bool): If True, a blank string for value will be accepted. * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped. * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers. * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation. * formats: A tuple of strings that can be passed to time.strftime, dictating the possible formats for a valid date. * excMsg (str): A custom message to use in the raised ValidationException. >>> import pysimplevalidate as pysv >>> pysv.validateDate('2/29/2004') datetime.date(2004, 2, 29) >>> pysv.validateDate('2/29/2005') Traceback (most recent call last): ... pysimplevalidate.ValidationException: '2/29/2005' is not a valid date. >>> pysv.validateDate('September 2019', formats=['%B %Y']) datetime.date(2019, 9, 1) """ # Reuse the logic in _validateToDateTimeFormat() for this function. try: dt = _validateToDateTimeFormat(value, formats, blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes) return datetime.date(dt.year, dt.month, dt.day) except ValidationException: _raiseValidationException(_('%r is not a valid date.') % (_errstr(value)), excMsg)
python
def validateDate(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, formats=('%Y/%m/%d', '%y/%m/%d', '%m/%d/%Y', '%m/%d/%y', '%x'), excMsg=None): """Raises ValidationException if value is not a time formatted in one of the formats formats. Returns a datetime.date object of value. * value (str): The value being validated as a time. * blank (bool): If True, a blank string for value will be accepted. * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped. * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers. * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation. * formats: A tuple of strings that can be passed to time.strftime, dictating the possible formats for a valid date. * excMsg (str): A custom message to use in the raised ValidationException. >>> import pysimplevalidate as pysv >>> pysv.validateDate('2/29/2004') datetime.date(2004, 2, 29) >>> pysv.validateDate('2/29/2005') Traceback (most recent call last): ... pysimplevalidate.ValidationException: '2/29/2005' is not a valid date. >>> pysv.validateDate('September 2019', formats=['%B %Y']) datetime.date(2019, 9, 1) """ # Reuse the logic in _validateToDateTimeFormat() for this function. try: dt = _validateToDateTimeFormat(value, formats, blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes) return datetime.date(dt.year, dt.month, dt.day) except ValidationException: _raiseValidationException(_('%r is not a valid date.') % (_errstr(value)), excMsg)
[ "def", "validateDate", "(", "value", ",", "blank", "=", "False", ",", "strip", "=", "None", ",", "allowlistRegexes", "=", "None", ",", "blocklistRegexes", "=", "None", ",", "formats", "=", "(", "'%Y/%m/%d'", ",", "'%y/%m/%d'", ",", "'%m/%d/%Y'", ",", "'%m/%d/%y'", ",", "'%x'", ")", ",", "excMsg", "=", "None", ")", ":", "# Reuse the logic in _validateToDateTimeFormat() for this function.", "try", ":", "dt", "=", "_validateToDateTimeFormat", "(", "value", ",", "formats", ",", "blank", "=", "blank", ",", "strip", "=", "strip", ",", "allowlistRegexes", "=", "allowlistRegexes", ",", "blocklistRegexes", "=", "blocklistRegexes", ")", "return", "datetime", ".", "date", "(", "dt", ".", "year", ",", "dt", ".", "month", ",", "dt", ".", "day", ")", "except", "ValidationException", ":", "_raiseValidationException", "(", "_", "(", "'%r is not a valid date.'", ")", "%", "(", "_errstr", "(", "value", ")", ")", ",", "excMsg", ")" ]
Raises ValidationException if value is not a date formatted in one of the formats formats. Returns a datetime.date object of value. * value (str): The value being validated as a date. * blank (bool): If True, a blank string for value will be accepted. * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped. * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers. * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation. * formats: A tuple of strings that can be passed to time.strftime, dictating the possible formats for a valid date. * excMsg (str): A custom message to use in the raised ValidationException. >>> import pysimplevalidate as pysv >>> pysv.validateDate('2/29/2004') datetime.date(2004, 2, 29) >>> pysv.validateDate('2/29/2005') Traceback (most recent call last): ... pysimplevalidate.ValidationException: '2/29/2005' is not a valid date. >>> pysv.validateDate('September 2019', formats=['%B %Y']) datetime.date(2019, 9, 1)
[ "Raises", "ValidationException", "if", "value", "is", "not", "a", "time", "formatted", "in", "one", "of", "the", "formats", "formats", ".", "Returns", "a", "datetime", ".", "date", "object", "of", "value", "." ]
train
https://github.com/asweigart/pysimplevalidate/blob/3ca27228abb7355d14bbf8abc225c63366379e44/src/pysimplevalidate/__init__.py#L694-L722
0.004752
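The multi-format validation above boils down to trying datetime.strptime with each format until one parses; a standalone sketch without the allowlist/blocklist machinery:

import datetime

def parse_date(value, formats=('%Y/%m/%d', '%m/%d/%Y', '%m/%d/%y')):
    for fmt in formats:
        try:
            dt = datetime.datetime.strptime(value, fmt)
            return datetime.date(dt.year, dt.month, dt.day)
        except ValueError:
            continue
    raise ValueError('%r is not a valid date.' % (value,))

assert parse_date('2/29/2004') == datetime.date(2004, 2, 29)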
inveniosoftware/invenio-oaiserver
invenio_oaiserver/query.py
query_string_parser
def query_string_parser(search_pattern): """Elasticsearch query string parser.""" if not hasattr(current_oaiserver, 'query_parser'): query_parser = current_app.config['OAISERVER_QUERY_PARSER'] if isinstance(query_parser, six.string_types): query_parser = import_string(query_parser) current_oaiserver.query_parser = query_parser return current_oaiserver.query_parser('query_string', query=search_pattern)
python
def query_string_parser(search_pattern): """Elasticsearch query string parser.""" if not hasattr(current_oaiserver, 'query_parser'): query_parser = current_app.config['OAISERVER_QUERY_PARSER'] if isinstance(query_parser, six.string_types): query_parser = import_string(query_parser) current_oaiserver.query_parser = query_parser return current_oaiserver.query_parser('query_string', query=search_pattern)
[ "def", "query_string_parser", "(", "search_pattern", ")", ":", "if", "not", "hasattr", "(", "current_oaiserver", ",", "'query_parser'", ")", ":", "query_parser", "=", "current_app", ".", "config", "[", "'OAISERVER_QUERY_PARSER'", "]", "if", "isinstance", "(", "query_parser", ",", "six", ".", "string_types", ")", ":", "query_parser", "=", "import_string", "(", "query_parser", ")", "current_oaiserver", ".", "query_parser", "=", "query_parser", "return", "current_oaiserver", ".", "query_parser", "(", "'query_string'", ",", "query", "=", "search_pattern", ")" ]
Elasticsearch query string parser.
[ "Elasticsearch", "query", "string", "parser", "." ]
train
https://github.com/inveniosoftware/invenio-oaiserver/blob/eae765e32bd816ddc5612d4b281caf205518b512/invenio_oaiserver/query.py#L20-L27
0.002212
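The parser lookup above is an import-once-then-cache pattern; an equivalent sketch using importlib directly, with dotted_path playing the role of the OAISERVER_QUERY_PARSER setting:

import importlib

_parser_cache = {}

def get_parser(dotted_path):
    if dotted_path not in _parser_cache:
        module_name, _, attr = dotted_path.rpartition('.')
        _parser_cache[dotted_path] = getattr(
            importlib.import_module(module_name), attr)
    return _parser_cache[dotted_path]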
metacloud/gilt
gilt/util.py
run_command
def run_command(cmd, debug=False): """ Execute the given command and return None. :param cmd: A `sh.Command` object to execute. :param debug: An optional bool to toggle debug output. :return: None """ if debug: msg = ' PWD: {}'.format(os.getcwd()) print_warn(msg) msg = ' COMMAND: {}'.format(cmd) print_warn(msg) cmd()
python
def run_command(cmd, debug=False): """ Execute the given command and return None. :param cmd: A `sh.Command` object to execute. :param debug: An optional bool to toggle debug output. :return: None """ if debug: msg = ' PWD: {}'.format(os.getcwd()) print_warn(msg) msg = ' COMMAND: {}'.format(cmd) print_warn(msg) cmd()
[ "def", "run_command", "(", "cmd", ",", "debug", "=", "False", ")", ":", "if", "debug", ":", "msg", "=", "' PWD: {}'", ".", "format", "(", "os", ".", "getcwd", "(", ")", ")", "print_warn", "(", "msg", ")", "msg", "=", "' COMMAND: {}'", ".", "format", "(", "cmd", ")", "print_warn", "(", "msg", ")", "cmd", "(", ")" ]
Execute the given command and return None. :param cmd: A `sh.Command` object to execute. :param debug: An optional bool to toggle debug output. :return: None
[ "Execute", "the", "given", "command", "and", "return", "None", "." ]
train
https://github.com/metacloud/gilt/blob/234eec23fe2f8144369d0ec3b35ad2fef508b8d1/gilt/util.py#L46-L59
0.002597
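A subprocess-based sketch of the same debug-then-execute wrapper; the original takes an sh.Command object, this variant takes an argument list instead:

import os
import subprocess

def run_command(cmd, debug=False):
    if debug:
        print('  PWD: {}'.format(os.getcwd()))
        print('  COMMAND: {}'.format(' '.join(cmd)))
    subprocess.check_call(cmd)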
Unity-Technologies/ml-agents
ml-agents/mlagents/trainers/meta_curriculum.py
MetaCurriculum.lesson_nums
def lesson_nums(self): """A dict from brain name to the brain's curriculum's lesson number.""" lesson_nums = {} for brain_name, curriculum in self.brains_to_curriculums.items(): lesson_nums[brain_name] = curriculum.lesson_num return lesson_nums
python
def lesson_nums(self): """A dict from brain name to the brain's curriculum's lesson number.""" lesson_nums = {} for brain_name, curriculum in self.brains_to_curriculums.items(): lesson_nums[brain_name] = curriculum.lesson_num return lesson_nums
[ "def", "lesson_nums", "(", "self", ")", ":", "lesson_nums", "=", "{", "}", "for", "brain_name", ",", "curriculum", "in", "self", ".", "brains_to_curriculums", ".", "items", "(", ")", ":", "lesson_nums", "[", "brain_name", "]", "=", "curriculum", ".", "lesson_num", "return", "lesson_nums" ]
A dict from brain name to the brain's curriculum's lesson number.
[ "A", "dict", "from", "brain", "name", "to", "the", "brain", "s", "curriculum", "s", "lesson", "number", "." ]
train
https://github.com/Unity-Technologies/ml-agents/blob/37d139af636e4a2351751fbf0f2fca5a9ed7457f/ml-agents/mlagents/trainers/meta_curriculum.py#L61-L67
0.00692
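The property above is equivalent to a single dict comprehension over the brain-name to curriculum mapping:

def lesson_nums(brains_to_curriculums):
    return {name: curriculum.lesson_num
            for name, curriculum in brains_to_curriculums.items()}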
shmir/PyIxExplorer
ixexplorer/ixe_app.py
IxeApp.connect
def connect(self, user=None): """ Connect to host. :param user: if provided, log in a session as this user. """ self.api._tcl_handler.connect() if user: self.session.login(user)
python
def connect(self, user=None): """ Connect to host. :param user: if provided, log in a session as this user. """ self.api._tcl_handler.connect() if user: self.session.login(user)
[ "def", "connect", "(", "self", ",", "user", "=", "None", ")", ":", "self", ".", "api", ".", "_tcl_handler", ".", "connect", "(", ")", "if", "user", ":", "self", ".", "session", ".", "login", "(", "user", ")" ]
Connect to host. :param user: if provided, log in a session as this user.
[ "Connect", "to", "host", "." ]
train
https://github.com/shmir/PyIxExplorer/blob/d6946b9ce0e8961507cc912062e10c365d4beee2/ixexplorer/ixe_app.py#L42-L50
0.009434
okfn/ofs
ofs/remote/swiftstore.py
SwiftOFS.put_stream
def put_stream(self, bucket, label, stream_object, params={}): ''' Create a new file in Swift object storage. ''' self.claim_bucket(bucket) self.connection.put_object(bucket, label, stream_object, headers=self._convert_to_meta(params))
python
def put_stream(self, bucket, label, stream_object, params={}): ''' Create a new file in Swift object storage. ''' self.claim_bucket(bucket) self.connection.put_object(bucket, label, stream_object, headers=self._convert_to_meta(params))
[ "def", "put_stream", "(", "self", ",", "bucket", ",", "label", ",", "stream_object", ",", "params", "=", "{", "}", ")", ":", "self", ".", "claim_bucket", "(", "bucket", ")", "self", ".", "connection", ".", "put_object", "(", "bucket", ",", "label", ",", "stream_object", ",", "headers", "=", "self", ".", "_convert_to_meta", "(", "params", ")", ")" ]
Create a new file in Swift object storage.
[ "Create", "a", "new", "file", "to", "swift", "object", "storage", "." ]
train
https://github.com/okfn/ofs/blob/c110cbecd7d0ae7e877963914a1a5af030cd6d45/ofs/remote/swiftstore.py#L116-L120
0.010169
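The record above relies on a _convert_to_meta() helper that is not shown; assuming it follows the usual Swift convention of prefixing user metadata headers, it could look like this (the exact prefix is an assumption, not taken from the library):

def convert_to_meta(params):
    # Swift exposes custom object metadata via X-Object-Meta-* headers.
    return {'X-Object-Meta-' + key: str(value)
            for key, value in params.items()}

assert convert_to_meta({'owner': 'okfn'}) == {'X-Object-Meta-owner': 'okfn'}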
openstack/networking-cisco
networking_cisco/apps/saf/server/dfa_openstack_helper.py
DfaNeutronHelper.get_rtr_name
def get_rtr_name(self, router_id): """Retrieve the router name. Incomplete. """ try: body = {} router = self.neutronclient.show_router(router_id, body=body) return router.get('router').get('name') except Exception as exc: LOG.error("Failed to show router interface %(id)s " "Exc %(exc)s", {'id': router_id, 'exc': str(exc)})
python
def get_rtr_name(self, router_id): """Retrieve the router name. Incomplete. """ try: body = {} router = self.neutronclient.show_router(router_id, body=body) return router.get('router').get('name') except Exception as exc: LOG.error("Failed to show router interface %(id)s " "Exc %(exc)s", {'id': router_id, 'exc': str(exc)})
[ "def", "get_rtr_name", "(", "self", ",", "router_id", ")", ":", "try", ":", "body", "=", "{", "}", "router", "=", "self", ".", "neutronclient", ".", "show_router", "(", "router_id", ",", "body", "=", "body", ")", "return", "router", ".", "get", "(", "'router'", ")", ".", "get", "(", "'name'", ")", "except", "Exception", "as", "exc", ":", "LOG", ".", "error", "(", "\"Failed to show router interface %(id)s \"", "\"Exc %(exc)s\"", ",", "{", "'id'", ":", "router_id", ",", "'exc'", ":", "str", "(", "exc", ")", "}", ")" ]
Retrieve the router name. Incomplete.
[ "Retrieve", "the", "router", "name", ".", "Incomplete", "." ]
train
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_openstack_helper.py#L344-L352
0.004785
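The chained .get() lookup above, restated with a guard so a missing or null 'router' key cannot turn into an AttributeError on None:

def router_name(response):
    return ((response or {}).get('router') or {}).get('name')

assert router_name({'router': {'name': 'r1'}}) == 'r1'
assert router_name({}) is None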
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/quaternion.py
QuaternionBase.euler
def euler(self, euler): """ Set the euler angles :param euler: list or array of the euler angles [roll, pitch, yaw] """ assert(len(euler) == 3) self._euler = np.array(euler) # mark other representations as outdated, will get generated on next # read self._q = None self._dcm = None
python
def euler(self, euler): """ Set the euler angles :param euler: list or array of the euler angles [roll, pitch, yaw] """ assert(len(euler) == 3) self._euler = np.array(euler) # mark other representations as outdated, will get generated on next # read self._q = None self._dcm = None
[ "def", "euler", "(", "self", ",", "euler", ")", ":", "assert", "(", "len", "(", "euler", ")", "==", "3", ")", "self", ".", "_euler", "=", "np", ".", "array", "(", "euler", ")", "# mark other representations as outdated, will get generated on next", "# read", "self", ".", "_q", "=", "None", "self", ".", "_dcm", "=", "None" ]
Set the euler angles :param euler: list or array of the euler angles [roll, pitch, yaw]
[ "Set", "the", "euler", "angles", ":", "param", "euler", ":", "list", "or", "array", "of", "the", "euler", "angles", "[", "roll", "pitch", "yaw", "]" ]
train
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/quaternion.py#L113-L125
0.008264
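The setter above is an instance of the invalidate-derived-representations pattern: writing one representation clears the cached alternatives so they are regenerated lazily on the next read. A minimal sketch:

import numpy as np

class Rotation:
    def __init__(self):
        self._euler = None
        self._q = None
        self._dcm = None

    @property
    def euler(self):
        return self._euler

    @euler.setter
    def euler(self, euler):
        assert len(euler) == 3
        self._euler = np.array(euler)
        self._q = None    # quaternion is now stale
        self._dcm = None  # direction cosine matrix is now stale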
saltstack/salt
salt/modules/opkg.py
hold
def hold(name=None, pkgs=None, sources=None, **kwargs): # pylint: disable=W0613 ''' Set package in 'hold' state, meaning it will not be upgraded. name The name of the package, e.g., 'tmux' CLI Example: .. code-block:: bash salt '*' pkg.hold <package name> pkgs A list of packages to hold. Must be passed as a python list. CLI Example: .. code-block:: bash salt '*' pkg.hold pkgs='["foo", "bar"]' ''' if not name and not pkgs and not sources: raise SaltInvocationError( 'One of name, pkgs, or sources must be specified.' ) if pkgs and sources: raise SaltInvocationError( 'Only one of pkgs or sources can be specified.' ) targets = [] if pkgs: targets.extend(pkgs) elif sources: for source in sources: targets.append(next(iter(source))) else: targets.append(name) ret = {} for target in targets: if isinstance(target, dict): target = next(iter(target)) ret[target] = {'name': target, 'changes': {}, 'result': False, 'comment': ''} state = _get_state(target) if not state: ret[target]['comment'] = ('Package {0} not currently held.' .format(target)) elif state != 'hold': if 'test' in __opts__ and __opts__['test']: ret[target].update(result=None) ret[target]['comment'] = ('Package {0} is set to be held.' .format(target)) else: result = _set_state(target, 'hold') ret[target].update(changes=result[target], result=True) ret[target]['comment'] = ('Package {0} is now being held.' .format(target)) else: ret[target].update(result=True) ret[target]['comment'] = ('Package {0} is already set to be held.' .format(target)) return ret
python
def hold(name=None, pkgs=None, sources=None, **kwargs): # pylint: disable=W0613 ''' Set package in 'hold' state, meaning it will not be upgraded. name The name of the package, e.g., 'tmux' CLI Example: .. code-block:: bash salt '*' pkg.hold <package name> pkgs A list of packages to hold. Must be passed as a python list. CLI Example: .. code-block:: bash salt '*' pkg.hold pkgs='["foo", "bar"]' ''' if not name and not pkgs and not sources: raise SaltInvocationError( 'One of name, pkgs, or sources must be specified.' ) if pkgs and sources: raise SaltInvocationError( 'Only one of pkgs or sources can be specified.' ) targets = [] if pkgs: targets.extend(pkgs) elif sources: for source in sources: targets.append(next(iter(source))) else: targets.append(name) ret = {} for target in targets: if isinstance(target, dict): target = next(iter(target)) ret[target] = {'name': target, 'changes': {}, 'result': False, 'comment': ''} state = _get_state(target) if not state: ret[target]['comment'] = ('Package {0} not currently held.' .format(target)) elif state != 'hold': if 'test' in __opts__ and __opts__['test']: ret[target].update(result=None) ret[target]['comment'] = ('Package {0} is set to be held.' .format(target)) else: result = _set_state(target, 'hold') ret[target].update(changes=result[target], result=True) ret[target]['comment'] = ('Package {0} is now being held.' .format(target)) else: ret[target].update(result=True) ret[target]['comment'] = ('Package {0} is already set to be held.' .format(target)) return ret
[ "def", "hold", "(", "name", "=", "None", ",", "pkgs", "=", "None", ",", "sources", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=W0613", "if", "not", "name", "and", "not", "pkgs", "and", "not", "sources", ":", "raise", "SaltInvocationError", "(", "'One of name, pkgs, or sources must be specified.'", ")", "if", "pkgs", "and", "sources", ":", "raise", "SaltInvocationError", "(", "'Only one of pkgs or sources can be specified.'", ")", "targets", "=", "[", "]", "if", "pkgs", ":", "targets", ".", "extend", "(", "pkgs", ")", "elif", "sources", ":", "for", "source", "in", "sources", ":", "targets", ".", "append", "(", "next", "(", "iter", "(", "source", ")", ")", ")", "else", ":", "targets", ".", "append", "(", "name", ")", "ret", "=", "{", "}", "for", "target", "in", "targets", ":", "if", "isinstance", "(", "target", ",", "dict", ")", ":", "target", "=", "next", "(", "iter", "(", "target", ")", ")", "ret", "[", "target", "]", "=", "{", "'name'", ":", "target", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "False", ",", "'comment'", ":", "''", "}", "state", "=", "_get_state", "(", "target", ")", "if", "not", "state", ":", "ret", "[", "target", "]", "[", "'comment'", "]", "=", "(", "'Package {0} not currently held.'", ".", "format", "(", "target", ")", ")", "elif", "state", "!=", "'hold'", ":", "if", "'test'", "in", "__opts__", "and", "__opts__", "[", "'test'", "]", ":", "ret", "[", "target", "]", ".", "update", "(", "result", "=", "None", ")", "ret", "[", "target", "]", "[", "'comment'", "]", "=", "(", "'Package {0} is set to be held.'", ".", "format", "(", "target", ")", ")", "else", ":", "result", "=", "_set_state", "(", "target", ",", "'hold'", ")", "ret", "[", "target", "]", ".", "update", "(", "changes", "=", "result", "[", "target", "]", ",", "result", "=", "True", ")", "ret", "[", "target", "]", "[", "'comment'", "]", "=", "(", "'Package {0} is now being held.'", ".", "format", "(", "target", ")", ")", "else", ":", "ret", "[", "target", "]", ".", "update", "(", "result", "=", "True", ")", "ret", "[", "target", "]", "[", "'comment'", "]", "=", "(", "'Package {0} is already set to be held.'", ".", "format", "(", "target", ")", ")", "return", "ret" ]
Set package in 'hold' state, meaning it will not be upgraded. name The name of the package, e.g., 'tmux' CLI Example: .. code-block:: bash salt '*' pkg.hold <package name> pkgs A list of packages to hold. Must be passed as a python list. CLI Example: .. code-block:: bash salt '*' pkg.hold pkgs='["foo", "bar"]'
[ "Set", "package", "in", "hold", "state", "meaning", "it", "will", "not", "be", "upgraded", "." ]
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/opkg.py#L776-L844
0.000914
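The name/pkgs/sources normalisation at the top of hold() above, as a standalone function:

def collect_targets(name=None, pkgs=None, sources=None):
    if not name and not pkgs and not sources:
        raise ValueError('One of name, pkgs, or sources must be specified.')
    if pkgs and sources:
        raise ValueError('Only one of pkgs or sources can be specified.')
    if pkgs:
        return list(pkgs)
    if sources:
        # each source is a single-key dict; keep only the key
        return [next(iter(source)) for source in sources]
    return [name]

assert collect_targets(pkgs=['foo', 'bar']) == ['foo', 'bar']
assert collect_targets(sources=[{'foo': '/some/path'}]) == ['foo']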
ic-labs/django-icekit
icekit_events/managers.py
OccurrenceQueryset.overlapping
def overlapping(self, start=None, end=None): """ :return: occurrences overlapping the given start and end datetimes, inclusive. Special logic is applied for all-day occurrences, for which the start and end times are zeroed to find all occurrences that occur on a DATE as opposed to within DATETIMEs. """ qs = self if start: dt_start=coerce_dt_awareness(start) qs = qs.filter( # Exclusive for datetime, inclusive for date. Q(is_all_day=False, end__gt=dt_start) | Q(is_all_day=True, end__gte=zero_datetime(dt_start)) ) if end: dt_end=coerce_dt_awareness(end, t=time.max) qs = qs.filter( Q(is_all_day=False, start__lt=dt_end) | Q(is_all_day=True, start__lt=zero_datetime(dt_end)) ) return qs
python
def overlapping(self, start=None, end=None): """ :return: occurrences overlapping the given start and end datetimes, inclusive. Special logic is applied for all-day occurrences, for which the start and end times are zeroed to find all occurrences that occur on a DATE as opposed to within DATETIMEs. """ qs = self if start: dt_start=coerce_dt_awareness(start) qs = qs.filter( # Exclusive for datetime, inclusive for date. Q(is_all_day=False, end__gt=dt_start) | Q(is_all_day=True, end__gte=zero_datetime(dt_start)) ) if end: dt_end=coerce_dt_awareness(end, t=time.max) qs = qs.filter( Q(is_all_day=False, start__lt=dt_end) | Q(is_all_day=True, start__lt=zero_datetime(dt_end)) ) return qs
[ "def", "overlapping", "(", "self", ",", "start", "=", "None", ",", "end", "=", "None", ")", ":", "qs", "=", "self", "if", "start", ":", "dt_start", "=", "coerce_dt_awareness", "(", "start", ")", "qs", "=", "qs", ".", "filter", "(", "# Exclusive for datetime, inclusive for date.", "Q", "(", "is_all_day", "=", "False", ",", "end__gt", "=", "dt_start", ")", "|", "Q", "(", "is_all_day", "=", "True", ",", "end__gte", "=", "zero_datetime", "(", "dt_start", ")", ")", ")", "if", "end", ":", "dt_end", "=", "coerce_dt_awareness", "(", "end", ",", "t", "=", "time", ".", "max", ")", "qs", "=", "qs", ".", "filter", "(", "Q", "(", "is_all_day", "=", "False", ",", "start__lt", "=", "dt_end", ")", "|", "Q", "(", "is_all_day", "=", "True", ",", "start__lt", "=", "zero_datetime", "(", "dt_end", ")", ")", ")", "return", "qs" ]
:return: occurrences overlapping the given start and end datetimes, inclusive. Special logic is applied for all-day occurrences, for which the start and end times are zeroed to find all occurrences that occur on a DATE as opposed to within DATETIMEs.
[ ":", "return", ":", "occurrences", "overlapping", "the", "given", "start", "and", "end", "datetimes", "inclusive", ".", "Special", "logic", "is", "applied", "for", "all", "-", "day", "occurrences", "for", "which", "the", "start", "and", "end", "times", "are", "zeroed", "to", "find", "all", "occurrences", "that", "occur", "on", "a", "DATE", "as", "opposed", "to", "within", "DATETIMEs", "." ]
train
https://github.com/ic-labs/django-icekit/blob/c507ea5b1864303732c53ad7c5800571fca5fa94/icekit_events/managers.py#L145-L169
0.004301
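Outside the Django ORM, the same overlap test reads as a pure predicate: timed events use an exclusive end bound, all-day events compare against midnight-zeroed datetimes so they match whole dates.

def zero(dt):
    return dt.replace(hour=0, minute=0, second=0, microsecond=0)

def overlaps(ev_start, ev_end, is_all_day, start, end):
    if is_all_day:
        return ev_end >= zero(start) and ev_start < zero(end)
    return ev_end > start and ev_start < end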
arne-cl/discoursegraphs
src/discoursegraphs/readwrite/exmaralda.py
ExmaraldaFile.__add_document_structure
def __add_document_structure(self, docgraph, remove_redundant_layers=True): """return an Exmaralda XML etree representation of a docgraph""" E = self.E root = self.__create_document_header() body = E('basic-body') timeline = E('common-timeline') # for n tokens we need to create n+1 timeline indices for i in xrange(len(docgraph.tokens)+1): idx = str(i) # example: <tli id="T0" time="0"/> timeline.append(E('tli', {'id': 'T'+idx, 'time': idx})) body.append(timeline) body = self.__add_token_tiers(docgraph, body) annotation_layers = get_annotation_layers(docgraph) for layer in annotation_layers: if not remove_redundant_layers: # add all layers self.__add_annotation_tier(docgraph, body, layer) elif is_informative(layer): # only add informative layers self.__add_annotation_tier(docgraph, body, layer) self.__add_coreference_chain_tiers(docgraph, body) root.append(body) return root
python
def __add_document_structure(self, docgraph, remove_redundant_layers=True): """return an Exmaralda XML etree representation of a docgraph""" E = self.E root = self.__create_document_header() body = E('basic-body') timeline = E('common-timeline') # for n tokens we need to create n+1 timeline indices for i in xrange(len(docgraph.tokens)+1): idx = str(i) # example: <tli id="T0" time="0"/> timeline.append(E('tli', {'id': 'T'+idx, 'time': idx})) body.append(timeline) body = self.__add_token_tiers(docgraph, body) annotation_layers = get_annotation_layers(docgraph) for layer in annotation_layers: if not remove_redundant_layers: # add all layers self.__add_annotation_tier(docgraph, body, layer) elif is_informative(layer): # only add informative layers self.__add_annotation_tier(docgraph, body, layer) self.__add_coreference_chain_tiers(docgraph, body) root.append(body) return root
[ "def", "__add_document_structure", "(", "self", ",", "docgraph", ",", "remove_redundant_layers", "=", "True", ")", ":", "E", "=", "self", ".", "E", "root", "=", "self", ".", "__create_document_header", "(", ")", "body", "=", "E", "(", "'basic-body'", ")", "timeline", "=", "E", "(", "'common-timeline'", ")", "# for n tokens we need to create n+1 timeline indices", "for", "i", "in", "xrange", "(", "len", "(", "docgraph", ".", "tokens", ")", "+", "1", ")", ":", "idx", "=", "str", "(", "i", ")", "# example: <tli id=\"T0\" time=\"0\"/>", "timeline", ".", "append", "(", "E", "(", "'tli'", ",", "{", "'id'", ":", "'T'", "+", "idx", ",", "'time'", ":", "idx", "}", ")", ")", "body", ".", "append", "(", "timeline", ")", "body", "=", "self", ".", "__add_token_tiers", "(", "docgraph", ",", "body", ")", "annotation_layers", "=", "get_annotation_layers", "(", "docgraph", ")", "for", "layer", "in", "annotation_layers", ":", "if", "not", "remove_redundant_layers", ":", "# add all layers", "self", ".", "__add_annotation_tier", "(", "docgraph", ",", "body", ",", "layer", ")", "elif", "is_informative", "(", "layer", ")", ":", "# only add informative layers", "self", ".", "__add_annotation_tier", "(", "docgraph", ",", "body", ",", "layer", ")", "self", ".", "__add_coreference_chain_tiers", "(", "docgraph", ",", "body", ")", "root", ".", "append", "(", "body", ")", "return", "root" ]
return an Exmaralda XML etree representation of a docgraph
[ "return", "an", "Exmaralda", "XML", "etree", "representation", "a", "docgraph" ]
train
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/exmaralda.py#L98-L125
0.002667
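Illustrative aside: self.E above is presumably an lxml ElementMaker, so the n-tokens-need-n+1-indices idea can be sketched standalone; the token list is a made-up stand-in for docgraph.tokens.

from lxml.builder import ElementMaker

E = ElementMaker()
tokens = ['Ich', 'bin', 'hier']  # stand-in for docgraph.tokens
timeline = E('common-timeline')
# n tokens need n+1 timeline indices, one per token boundary
for i in range(len(tokens) + 1):
    idx = str(i)
    timeline.append(E('tli', {'id': 'T' + idx, 'time': idx}))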
sethmlarson/virtualbox-python
virtualbox/library.py
IGuestSession.file_copy
def file_copy(self, source, destination, flags): """Copies a file from one guest location to another. Will overwrite the destination file unless :py:attr:`FileCopyFlag.no_replace` is specified. in source of type str The path to the file to copy (in the guest). Guest path style. in destination of type str The path to the target file (in the guest). This cannot be a directory. Guest path style. in flags of type :class:`FileCopyFlag` Zero or more :py:class:`FileCopyFlag` values. return progress of type :class:`IProgress` Progress object to track the operation to completion. raises :class:`OleErrorNotimpl` Not yet implemented. """ if not isinstance(source, basestring): raise TypeError("source can only be an instance of type basestring") if not isinstance(destination, basestring): raise TypeError("destination can only be an instance of type basestring") if not isinstance(flags, list): raise TypeError("flags can only be an instance of type list") for a in flags[:10]: if not isinstance(a, FileCopyFlag): raise TypeError( "array can only contain objects of type FileCopyFlag") progress = self._call("fileCopy", in_p=[source, destination, flags]) progress = IProgress(progress) return progress
python
def file_copy(self, source, destination, flags): """Copies a file from one guest location to another. Will overwrite the destination file unless :py:attr:`FileCopyFlag.no_replace` is specified. in source of type str The path to the file to copy (in the guest). Guest path style. in destination of type str The path to the target file (in the guest). This cannot be a directory. Guest path style. in flags of type :class:`FileCopyFlag` Zero or more :py:class:`FileCopyFlag` values. return progress of type :class:`IProgress` Progress object to track the operation to completion. raises :class:`OleErrorNotimpl` Not yet implemented. """ if not isinstance(source, basestring): raise TypeError("source can only be an instance of type basestring") if not isinstance(destination, basestring): raise TypeError("destination can only be an instance of type basestring") if not isinstance(flags, list): raise TypeError("flags can only be an instance of type list") for a in flags[:10]: if not isinstance(a, FileCopyFlag): raise TypeError( "array can only contain objects of type FileCopyFlag") progress = self._call("fileCopy", in_p=[source, destination, flags]) progress = IProgress(progress) return progress
[ "def", "file_copy", "(", "self", ",", "source", ",", "destination", ",", "flags", ")", ":", "if", "not", "isinstance", "(", "source", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"source can only be an instance of type basestring\"", ")", "if", "not", "isinstance", "(", "destination", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"destination can only be an instance of type basestring\"", ")", "if", "not", "isinstance", "(", "flags", ",", "list", ")", ":", "raise", "TypeError", "(", "\"flags can only be an instance of type list\"", ")", "for", "a", "in", "flags", "[", ":", "10", "]", ":", "if", "not", "isinstance", "(", "a", ",", "FileCopyFlag", ")", ":", "raise", "TypeError", "(", "\"array can only contain objects of type FileCopyFlag\"", ")", "progress", "=", "self", ".", "_call", "(", "\"fileCopy\"", ",", "in_p", "=", "[", "source", ",", "destination", ",", "flags", "]", ")", "progress", "=", "IProgress", "(", "progress", ")", "return", "progress" ]
Copies a file from one guest location to another. Will overwrite the destination file unless :py:attr:`FileCopyFlag.no_replace` is specified. in source of type str The path to the file to copy (in the guest). Guest path style. in destination of type str The path to the target file (in the guest). This cannot be a directory. Guest path style. in flags of type :class:`FileCopyFlag` Zero or more :py:class:`FileCopyFlag` values. return progress of type :class:`IProgress` Progress object to track the operation to completion. raises :class:`OleErrorNotimpl` Not yet implemented.
[ "Copies", "a", "file", "from", "one", "guest", "location", "to", "another", ".", "Will", "overwrite", "the", "destination", "file", "unless", ":", "py", ":", "attr", ":", "FileCopyFlag", ".", "no_replace", "is", "specified", "." ]
train
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L19446-L19482
0.004557
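Hypothetical usage sketch: guest_session stands for an already established IGuestSession, and the guest paths are placeholders.

# guest_session: an established IGuestSession (assumed)
progress = guest_session.file_copy('/tmp/source.txt', '/tmp/dest.txt', [])
progress.wait_for_completion(-1)  # block until the guest-side copy finishes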
elastic/elasticsearch-dsl-py
elasticsearch_dsl/index.py
Index.exists
def exists(self, using=None, **kwargs): """ Returns ``True`` if the index already exists in elasticsearch. Any additional keyword arguments will be passed to ``Elasticsearch.indices.exists`` unchanged. """ return self._get_connection(using).indices.exists(index=self._name, **kwargs)
python
def exists(self, using=None, **kwargs): """ Returns ``True`` if the index already exists in elasticsearch. Any additional keyword arguments will be passed to ``Elasticsearch.indices.exists`` unchanged. """ return self._get_connection(using).indices.exists(index=self._name, **kwargs)
[ "def", "exists", "(", "self", ",", "using", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_get_connection", "(", "using", ")", ".", "indices", ".", "exists", "(", "index", "=", "self", ".", "_name", ",", "*", "*", "kwargs", ")" ]
Returns ``True`` if the index already exists in elasticsearch. Any additional keyword arguments will be passed to ``Elasticsearch.indices.exists`` unchanged.
[ "Returns", "True", "if", "the", "index", "already", "exists", "in", "elasticsearch", "." ]
train
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/index.py#L373-L380
0.009036
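Typical usage sketch, assuming a reachable local cluster and a registered default connection:

from elasticsearch_dsl import Index, connections

connections.create_connection(hosts=['localhost'])  # assumed local cluster
if not Index('blog').exists():
    print('index is missing')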
goldsborough/ecstasy
ecstasy/parser.py
Parser.handle_arguments
def handle_arguments(self, string, root, opening, closing):
        """
        Handles phrase-arguments.

        Sets the override and increment flags if found. Also
        makes sure that the argument sequence is at the start
        of the phrase and else warns about the unescaped
        meta characters. If the arguments are indeed at the
        start but do not match the arguments regular expression,
        an error is raised.

        Arguments:
                string (str): The string being parsed.
                root (str): The current root phrase.
                opening (int): The index of the opening parenthesis.
                closing (int): The index of the closing parenthesis.

        Returns:
                The (possibly escaped) string, the root phrase (if
                no escaping, then with arguments and flags) and the
                next meta match.

        Raises:
                errors.ParseError: If the arguments are invalid.
        """

        # The actual argument string (ignore whitespace)
        args = string[opening + 1 : closing].replace(" ", "")

        # The argument sequence must be at the start of the phrase
        # and must match the allowed argument regular expression
        if opening > 0 or not self.arguments.match(args):

                if opening == 0:
                        raise errors.ParseError("Invalid argument sequence!")

                # If escape_meta does indeed escape a character and removes
                # a backward slash, the positions 'opening' and 'closing' are no
                # longer valid. escape_meta does a search for the next meta
                # character though, which is then the closing parentheses,
                # so we can use its index value (in the now escaped string)
                string, meta = self.escape_meta(string, opening)
                string, meta = self.escape_meta(string, meta.start())

                return string, root, meta

        if "!" in args:
                root.override = True
                args = args.replace("!", "")

        if "+" in args:
                root.increment = True
                args = args.replace("+", "")

        root.arguments = [int(i) for i in args.split(",") if i]

        # Remove the argument string including parentheses
        string = string[closing + 1:]

        meta = self.meta.search(string)

        return string, root, meta
python
def handle_arguments(self, string, root, opening, closing):
        """
        Handles phrase-arguments.

        Sets the override and increment flags if found. Also
        makes sure that the argument sequence is at the start
        of the phrase and else warns about the unescaped
        meta characters. If the arguments are indeed at the
        start but do not match the arguments regular expression,
        an error is raised.

        Arguments:
                string (str): The string being parsed.
                root (str): The current root phrase.
                opening (int): The index of the opening parenthesis.
                closing (int): The index of the closing parenthesis.

        Returns:
                The (possibly escaped) string, the root phrase (if
                no escaping, then with arguments and flags) and the
                next meta match.

        Raises:
                errors.ParseError: If the arguments are invalid.
        """

        # The actual argument string (ignore whitespace)
        args = string[opening + 1 : closing].replace(" ", "")

        # The argument sequence must be at the start of the phrase
        # and must match the allowed argument regular expression
        if opening > 0 or not self.arguments.match(args):

                if opening == 0:
                        raise errors.ParseError("Invalid argument sequence!")

                # If escape_meta does indeed escape a character and removes
                # a backward slash, the positions 'opening' and 'closing' are no
                # longer valid. escape_meta does a search for the next meta
                # character though, which is then the closing parentheses,
                # so we can use its index value (in the now escaped string)
                string, meta = self.escape_meta(string, opening)
                string, meta = self.escape_meta(string, meta.start())

                return string, root, meta

        if "!" in args:
                root.override = True
                args = args.replace("!", "")

        if "+" in args:
                root.increment = True
                args = args.replace("+", "")

        root.arguments = [int(i) for i in args.split(",") if i]

        # Remove the argument string including parentheses
        string = string[closing + 1:]

        meta = self.meta.search(string)

        return string, root, meta
[ "def", "handle_arguments", "(", "self", ",", "string", ",", "root", ",", "opening", ",", "closing", ")", ":", "# The actual argument string (ignore whitespace)", "args", "=", "string", "[", "opening", "+", "1", ":", "closing", "]", ".", "replace", "(", "\" \"", ",", "\"\"", ")", "# The argument sequence must be at the start of the phrase", "# and must match the allowed argument regular expression", "if", "opening", ">", "0", "or", "not", "self", ".", "arguments", ".", "match", "(", "args", ")", ":", "if", "opening", "==", "0", ":", "raise", "errors", ".", "ParseError", "(", "\"Invalid argument sequence!\"", ")", "# If escape_meta does indeed escape a character and removes", "# a backward slash, the positions 'opening' and 'closing' are no", "# longer valid. escape_meta does a search for the next meta", "# character though, which is then the closing parantheses,", "# so we can use its index value (in the now escaped string)", "string", ",", "meta", "=", "self", ".", "escape_meta", "(", "string", ",", "opening", ")", "string", ",", "meta", "=", "self", ".", "escape_meta", "(", "string", ",", "meta", ".", "start", "(", ")", ")", "return", "string", ",", "root", ",", "meta", "if", "\"!\"", "in", "args", ":", "root", ".", "override", "=", "True", "args", "=", "args", ".", "replace", "(", "\"!\"", ",", "\"\"", ")", "if", "\"+\"", "in", "args", ":", "root", ".", "increment", "=", "True", "args", "=", "args", ".", "replace", "(", "\"+\"", ",", "\"\"", ")", "root", ".", "arguments", "=", "[", "int", "(", "i", ")", "for", "i", "in", "args", ".", "split", "(", "\",\"", ")", "if", "i", "]", "# Remove the argument string including parantheses", "string", "=", "string", "[", "closing", "+", "1", ":", "]", "meta", "=", "self", ".", "meta", ".", "search", "(", "string", ")", "return", "string", ",", "root", ",", "meta" ]
Handles phrase-arguments.

Sets the override and increment flags if found. Also
makes sure that the argument sequence is at the start
of the phrase and else warns about the unescaped
meta characters. If the arguments are indeed at the
start but do not match the arguments regular expression,
an error is raised.

Arguments:
        string (str): The string being parsed.
        root (str): The current root phrase.
        opening (int): The index of the opening parenthesis.
        closing (int): The index of the closing parenthesis.

Returns:
        The (possibly escaped) string, the root phrase (if
        no escaping, then with arguments and flags) and the
        next meta match.

Raises:
        errors.ParseError: If the arguments are invalid.
[ "Handles", "phrase", "-", "arguments", "." ]
train
https://github.com/goldsborough/ecstasy/blob/7faa54708d506696c2607ddb68866e66768072ad/ecstasy/parser.py#L471-L531
0.023713
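The flag handling above reduces to stripping '!' and '+' and parsing the rest as comma-separated integers; a standalone sketch with a made-up argument string:

args = '!+1,2'  # hypothetical contents between the parentheses
override = '!' in args   # -> True
increment = '+' in args  # -> True
args = args.replace('!', '').replace('+', '')
arguments = [int(i) for i in args.split(',') if i]  # -> [1, 2]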
openstack/python-scciclient
scciclient/irmc/ipmi.py
get_tpm_status
def get_tpm_status(d_info):
    """Get the TPM support status.

    Get the TPM support status of the node.

    :param d_info: the list of ipmitool parameters for accessing a node.
    :returns: TPM support status
    """
    # note:
    # Get TPM support status : ipmi cmd '0xF5', valid flags '0xC0'
    #
    # $ ipmitool raw 0x2E 0xF5 0x80 0x28 0x00 0x81 0xC0
    #
    # Raw response:
    # 80 28 00 C0 C0: True
    # 80 28 00 -- --: False (other values than 'C0 C0')

    ipmicmd = ipmi_command.Command(bmc=d_info['irmc_address'],
                                   userid=d_info['irmc_username'],
                                   password=d_info['irmc_password'])
    try:
        response = _send_raw_command(ipmicmd, GET_TPM_STATUS)
        if response['code'] != 0:
            raise IPMIFailure(
                "IPMI operation '%(operation)s' failed: %(error)s" %
                {'operation': "GET TPM status",
                 'error': response.get('error')})
        out = ' '.join('{:02X}'.format(x) for x in response['data'])
        return out is not None and out[-5:] == 'C0 C0'
    except ipmi_exception.IpmiException as e:
        raise IPMIFailure(
            "IPMI operation '%(operation)s' failed: %(error)s" %
            {'operation': "GET TPM status", 'error': e})
python
def get_tpm_status(d_info):
    """Get the TPM support status.

    Get the TPM support status of the node.

    :param d_info: the list of ipmitool parameters for accessing a node.
    :returns: TPM support status
    """
    # note:
    # Get TPM support status : ipmi cmd '0xF5', valid flags '0xC0'
    #
    # $ ipmitool raw 0x2E 0xF5 0x80 0x28 0x00 0x81 0xC0
    #
    # Raw response:
    # 80 28 00 C0 C0: True
    # 80 28 00 -- --: False (other values than 'C0 C0')

    ipmicmd = ipmi_command.Command(bmc=d_info['irmc_address'],
                                   userid=d_info['irmc_username'],
                                   password=d_info['irmc_password'])
    try:
        response = _send_raw_command(ipmicmd, GET_TPM_STATUS)
        if response['code'] != 0:
            raise IPMIFailure(
                "IPMI operation '%(operation)s' failed: %(error)s" %
                {'operation': "GET TPM status",
                 'error': response.get('error')})
        out = ' '.join('{:02X}'.format(x) for x in response['data'])
        return out is not None and out[-5:] == 'C0 C0'
    except ipmi_exception.IpmiException as e:
        raise IPMIFailure(
            "IPMI operation '%(operation)s' failed: %(error)s" %
            {'operation': "GET TPM status", 'error': e})
[ "def", "get_tpm_status", "(", "d_info", ")", ":", "# note:", "# Get TPM support status : ipmi cmd '0xF5', valid flags '0xC0'", "#", "# $ ipmitool raw 0x2E 0xF5 0x80 0x28 0x00 0x81 0xC0", "#", "# Raw response:", "# 80 28 00 C0 C0: True", "# 80 28 00 -- --: False (other values than 'C0 C0')", "ipmicmd", "=", "ipmi_command", ".", "Command", "(", "bmc", "=", "d_info", "[", "'irmc_address'", "]", ",", "userid", "=", "d_info", "[", "'irmc_username'", "]", ",", "password", "=", "d_info", "[", "'irmc_password'", "]", ")", "try", ":", "response", "=", "_send_raw_command", "(", "ipmicmd", ",", "GET_TPM_STATUS", ")", "if", "response", "[", "'code'", "]", "!=", "0", ":", "raise", "IPMIFailure", "(", "\"IPMI operation '%(operation)s' failed: %(error)s\"", "%", "{", "'operation'", ":", "\"GET TMP status\"", ",", "'error'", ":", "response", ".", "get", "(", "'error'", ")", "}", ")", "out", "=", "' '", ".", "join", "(", "'{:02X}'", ".", "format", "(", "x", ")", "for", "x", "in", "response", "[", "'data'", "]", ")", "return", "out", "is", "not", "None", "and", "out", "[", "-", "5", ":", "]", "==", "'C0 C0'", "except", "ipmi_exception", ".", "IpmiException", "as", "e", ":", "raise", "IPMIFailure", "(", "\"IPMI operation '%(operation)s' failed: %(error)s\"", "%", "{", "'operation'", ":", "\"GET TMP status\"", ",", "'error'", ":", "e", "}", ")" ]
Get the TPM support status. Get the TPM support status of the node. :param d_info: the list of ipmitool parameters for accessing a node. :returns: TPM support status
[ "Get", "the", "TPM", "support", "status", "." ]
train
https://github.com/openstack/python-scciclient/blob/4585ce2f76853b9773fb190ca0cfff0aa04a7cf8/scciclient/irmc/ipmi.py#L75-L109
0.000772
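Usage sketch; the BMC address and credentials are placeholders:

from scciclient.irmc.ipmi import get_tpm_status

d_info = {
    'irmc_address': '192.0.2.1',  # placeholder BMC address
    'irmc_username': 'admin',
    'irmc_password': 'secret',
}
print(get_tpm_status(d_info))  # True if the node reports TPM support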
spyder-ide/spyder
spyder/plugins/editor/utils/editor.py
TextHelper.set_line_text
def set_line_text(self, line_nbr, new_text): """ Replace an entire line with ``new_text``. :param line_nbr: line number of the line to change. :param new_text: The replacement text. """ editor = self._editor text_cursor = self._move_cursor_to(line_nbr) text_cursor.select(text_cursor.LineUnderCursor) text_cursor.insertText(new_text) editor.setTextCursor(text_cursor)
python
def set_line_text(self, line_nbr, new_text): """ Replace an entire line with ``new_text``. :param line_nbr: line number of the line to change. :param new_text: The replacement text. """ editor = self._editor text_cursor = self._move_cursor_to(line_nbr) text_cursor.select(text_cursor.LineUnderCursor) text_cursor.insertText(new_text) editor.setTextCursor(text_cursor)
[ "def", "set_line_text", "(", "self", ",", "line_nbr", ",", "new_text", ")", ":", "editor", "=", "self", ".", "_editor", "text_cursor", "=", "self", ".", "_move_cursor_to", "(", "line_nbr", ")", "text_cursor", ".", "select", "(", "text_cursor", ".", "LineUnderCursor", ")", "text_cursor", ".", "insertText", "(", "new_text", ")", "editor", ".", "setTextCursor", "(", "text_cursor", ")" ]
Replace an entire line with ``new_text``. :param line_nbr: line number of the line to change. :param new_text: The replacement text.
[ "Replace", "an", "entire", "line", "with", "new_text", "." ]
train
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/utils/editor.py#L342-L354
0.004454
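Illustrative call; editor stands for an existing Spyder CodeEditor instance, which is an assumption here:

from spyder.plugins.editor.utils.editor import TextHelper

helper = TextHelper(editor)  # editor: an existing CodeEditor (assumed)
helper.set_line_text(0, 'import numpy as np')  # replace the first line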
openid/JWTConnect-Python-CryptoJWT
src/cryptojwt/key_bundle.py
KeyBundle._parse_remote_response
def _parse_remote_response(self, response):
        """
        Parse JWKS from the HTTP response.

        Should be overridden by subclasses for adding support of e.g. signed
        JWKS.
        :param response: HTTP response from the 'jwks_uri' endpoint
        :return: response parsed as JSON
        """
        # Check if the content type is the right one.
        try:
            if response.headers["Content-Type"] != 'application/json':
                logger.warning('Wrong Content_type ({})'.format(
                    response.headers["Content-Type"]))
        except KeyError:
            pass

        logger.debug("Loaded JWKS: %s from %s" % (response.text, self.source))
        try:
            return json.loads(response.text)
        except ValueError:
            return None
python
def _parse_remote_response(self, response):
        """
        Parse JWKS from the HTTP response.

        Should be overridden by subclasses for adding support of e.g. signed
        JWKS.
        :param response: HTTP response from the 'jwks_uri' endpoint
        :return: response parsed as JSON
        """
        # Check if the content type is the right one.
        try:
            if response.headers["Content-Type"] != 'application/json':
                logger.warning('Wrong Content_type ({})'.format(
                    response.headers["Content-Type"]))
        except KeyError:
            pass

        logger.debug("Loaded JWKS: %s from %s" % (response.text, self.source))
        try:
            return json.loads(response.text)
        except ValueError:
            return None
[ "def", "_parse_remote_response", "(", "self", ",", "response", ")", ":", "# Check if the content type is the right one.", "try", ":", "if", "response", ".", "headers", "[", "\"Content-Type\"", "]", "!=", "'application/json'", ":", "logger", ".", "warning", "(", "'Wrong Content_type ({})'", ".", "format", "(", "response", ".", "headers", "[", "\"Content-Type\"", "]", ")", ")", "except", "KeyError", ":", "pass", "logger", ".", "debug", "(", "\"Loaded JWKS: %s from %s\"", "%", "(", "response", ".", "text", ",", "self", ".", "source", ")", ")", "try", ":", "return", "json", ".", "loads", "(", "response", ".", "text", ")", "except", "ValueError", ":", "return", "None" ]
Parse JWKS from the HTTP response.

Should be overridden by subclasses for adding support of e.g. signed
JWKS.
:param response: HTTP response from the 'jwks_uri' endpoint
:return: response parsed as JSON
[ "Parse", "JWKS", "from", "the", "HTTP", "response", "." ]
train
https://github.com/openid/JWTConnect-Python-CryptoJWT/blob/8863cfbfe77ca885084870b234a66b55bd52930c/src/cryptojwt/key_bundle.py#L300-L321
0.002503
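The method only touches response.headers and response.text, so it can be exercised with a stub; the stub class and the existing KeyBundle instance kb are assumptions:

class StubResponse:  # minimal stand-in for an HTTP response
    headers = {'Content-Type': 'application/json'}
    text = '{"keys": []}'

jwks = kb._parse_remote_response(StubResponse())  # -> {'keys': []}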
bokeh/bokeh
bokeh/protocol/message.py
Message.assemble_buffer
def assemble_buffer(self, buf_header, buf_payload): ''' Add a buffer header and payload that we read from the socket. This differs from add_buffer() because we're validating vs. the header's num_buffers, instead of filling in the header. Args: buf_header (``JSON``) : a buffer header buf_payload (``JSON`` or bytes) : a buffer payload Returns: None Raises: ProtocolError ''' if self.header.get('num_buffers', 0) <= len(self._buffers): raise ProtocolError("too many buffers received expecting " + str(self.header['num_buffers'])) self._buffers.append((buf_header, buf_payload))
python
def assemble_buffer(self, buf_header, buf_payload): ''' Add a buffer header and payload that we read from the socket. This differs from add_buffer() because we're validating vs. the header's num_buffers, instead of filling in the header. Args: buf_header (``JSON``) : a buffer header buf_payload (``JSON`` or bytes) : a buffer payload Returns: None Raises: ProtocolError ''' if self.header.get('num_buffers', 0) <= len(self._buffers): raise ProtocolError("too many buffers received expecting " + str(self.header['num_buffers'])) self._buffers.append((buf_header, buf_payload))
[ "def", "assemble_buffer", "(", "self", ",", "buf_header", ",", "buf_payload", ")", ":", "if", "self", ".", "header", ".", "get", "(", "'num_buffers'", ",", "0", ")", "<=", "len", "(", "self", ".", "_buffers", ")", ":", "raise", "ProtocolError", "(", "\"too many buffers received expecting \"", "+", "str", "(", "self", ".", "header", "[", "'num_buffers'", "]", ")", ")", "self", ".", "_buffers", ".", "append", "(", "(", "buf_header", ",", "buf_payload", ")", ")" ]
Add a buffer header and payload that we read from the socket. This differs from add_buffer() because we're validating vs. the header's num_buffers, instead of filling in the header. Args: buf_header (``JSON``) : a buffer header buf_payload (``JSON`` or bytes) : a buffer payload Returns: None Raises: ProtocolError
[ "Add", "a", "buffer", "header", "and", "payload", "that", "we", "read", "from", "the", "socket", "." ]
train
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/protocol/message.py#L183-L201
0.004213
martinblech/xmltodict
xmltodict.py
unparse
def unparse(input_dict, output=None, encoding='utf-8', full_document=True, short_empty_elements=False, **kwargs): """Emit an XML document for the given `input_dict` (reverse of `parse`). The resulting XML document is returned as a string, but if `output` (a file-like object) is specified, it is written there instead. Dictionary keys prefixed with `attr_prefix` (default=`'@'`) are interpreted as XML node attributes, whereas keys equal to `cdata_key` (default=`'#text'`) are treated as character data. The `pretty` parameter (default=`False`) enables pretty-printing. In this mode, lines are terminated with `'\n'` and indented with `'\t'`, but this can be customized with the `newl` and `indent` parameters. """ if full_document and len(input_dict) != 1: raise ValueError('Document must have exactly one root.') must_return = False if output is None: output = StringIO() must_return = True if short_empty_elements: content_handler = XMLGenerator(output, encoding, True) else: content_handler = XMLGenerator(output, encoding) if full_document: content_handler.startDocument() for key, value in input_dict.items(): _emit(key, value, content_handler, full_document=full_document, **kwargs) if full_document: content_handler.endDocument() if must_return: value = output.getvalue() try: # pragma no cover value = value.decode(encoding) except AttributeError: # pragma no cover pass return value
python
def unparse(input_dict, output=None, encoding='utf-8', full_document=True, short_empty_elements=False, **kwargs): """Emit an XML document for the given `input_dict` (reverse of `parse`). The resulting XML document is returned as a string, but if `output` (a file-like object) is specified, it is written there instead. Dictionary keys prefixed with `attr_prefix` (default=`'@'`) are interpreted as XML node attributes, whereas keys equal to `cdata_key` (default=`'#text'`) are treated as character data. The `pretty` parameter (default=`False`) enables pretty-printing. In this mode, lines are terminated with `'\n'` and indented with `'\t'`, but this can be customized with the `newl` and `indent` parameters. """ if full_document and len(input_dict) != 1: raise ValueError('Document must have exactly one root.') must_return = False if output is None: output = StringIO() must_return = True if short_empty_elements: content_handler = XMLGenerator(output, encoding, True) else: content_handler = XMLGenerator(output, encoding) if full_document: content_handler.startDocument() for key, value in input_dict.items(): _emit(key, value, content_handler, full_document=full_document, **kwargs) if full_document: content_handler.endDocument() if must_return: value = output.getvalue() try: # pragma no cover value = value.decode(encoding) except AttributeError: # pragma no cover pass return value
[ "def", "unparse", "(", "input_dict", ",", "output", "=", "None", ",", "encoding", "=", "'utf-8'", ",", "full_document", "=", "True", ",", "short_empty_elements", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "full_document", "and", "len", "(", "input_dict", ")", "!=", "1", ":", "raise", "ValueError", "(", "'Document must have exactly one root.'", ")", "must_return", "=", "False", "if", "output", "is", "None", ":", "output", "=", "StringIO", "(", ")", "must_return", "=", "True", "if", "short_empty_elements", ":", "content_handler", "=", "XMLGenerator", "(", "output", ",", "encoding", ",", "True", ")", "else", ":", "content_handler", "=", "XMLGenerator", "(", "output", ",", "encoding", ")", "if", "full_document", ":", "content_handler", ".", "startDocument", "(", ")", "for", "key", ",", "value", "in", "input_dict", ".", "items", "(", ")", ":", "_emit", "(", "key", ",", "value", ",", "content_handler", ",", "full_document", "=", "full_document", ",", "*", "*", "kwargs", ")", "if", "full_document", ":", "content_handler", ".", "endDocument", "(", ")", "if", "must_return", ":", "value", "=", "output", ".", "getvalue", "(", ")", "try", ":", "# pragma no cover", "value", "=", "value", ".", "decode", "(", "encoding", ")", "except", "AttributeError", ":", "# pragma no cover", "pass", "return", "value" ]
Emit an XML document for the given `input_dict` (reverse of `parse`). The resulting XML document is returned as a string, but if `output` (a file-like object) is specified, it is written there instead. Dictionary keys prefixed with `attr_prefix` (default=`'@'`) are interpreted as XML node attributes, whereas keys equal to `cdata_key` (default=`'#text'`) are treated as character data. The `pretty` parameter (default=`False`) enables pretty-printing. In this mode, lines are terminated with `'\n'` and indented with `'\t'`, but this can be customized with the `newl` and `indent` parameters.
[ "Emit", "an", "XML", "document", "for", "the", "given", "input_dict", "(", "reverse", "of", "parse", ")", "." ]
train
https://github.com/martinblech/xmltodict/blob/f3ab7e1740d37d585ffab0154edb4cb664afe4a9/xmltodict.py#L428-L468
0.000613
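The attribute and text conventions in action; the expected single-line output is shown in the comment:

import xmltodict

doc = {'root': {'@id': '7', 'child': 'hello'}}
print(xmltodict.unparse(doc))
# <?xml version="1.0" encoding="utf-8"?>
# <root id="7"><child>hello</child></root>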
saltstack/salt
salt/modules/incron.py
_write_incron_lines
def _write_incron_lines(user, lines): ''' Takes a list of lines to be committed to a user's incrontab and writes it ''' if user == 'system': ret = {} ret['retcode'] = _write_file(_INCRON_SYSTEM_TAB, 'salt', ''.join(lines)) return ret else: path = salt.utils.files.mkstemp() with salt.utils.files.fopen(path, 'wb') as fp_: fp_.writelines(salt.utils.data.encode(lines)) if __grains__['os_family'] == 'Solaris' and user != "root": __salt__['cmd.run']('chown {0} {1}'.format(user, path), python_shell=False) ret = __salt__['cmd.run_all'](_get_incron_cmdstr(path), runas=user, python_shell=False) os.remove(path) return ret
python
def _write_incron_lines(user, lines): ''' Takes a list of lines to be committed to a user's incrontab and writes it ''' if user == 'system': ret = {} ret['retcode'] = _write_file(_INCRON_SYSTEM_TAB, 'salt', ''.join(lines)) return ret else: path = salt.utils.files.mkstemp() with salt.utils.files.fopen(path, 'wb') as fp_: fp_.writelines(salt.utils.data.encode(lines)) if __grains__['os_family'] == 'Solaris' and user != "root": __salt__['cmd.run']('chown {0} {1}'.format(user, path), python_shell=False) ret = __salt__['cmd.run_all'](_get_incron_cmdstr(path), runas=user, python_shell=False) os.remove(path) return ret
[ "def", "_write_incron_lines", "(", "user", ",", "lines", ")", ":", "if", "user", "==", "'system'", ":", "ret", "=", "{", "}", "ret", "[", "'retcode'", "]", "=", "_write_file", "(", "_INCRON_SYSTEM_TAB", ",", "'salt'", ",", "''", ".", "join", "(", "lines", ")", ")", "return", "ret", "else", ":", "path", "=", "salt", ".", "utils", ".", "files", ".", "mkstemp", "(", ")", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "path", ",", "'wb'", ")", "as", "fp_", ":", "fp_", ".", "writelines", "(", "salt", ".", "utils", ".", "data", ".", "encode", "(", "lines", ")", ")", "if", "__grains__", "[", "'os_family'", "]", "==", "'Solaris'", "and", "user", "!=", "\"root\"", ":", "__salt__", "[", "'cmd.run'", "]", "(", "'chown {0} {1}'", ".", "format", "(", "user", ",", "path", ")", ",", "python_shell", "=", "False", ")", "ret", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "_get_incron_cmdstr", "(", "path", ")", ",", "runas", "=", "user", ",", "python_shell", "=", "False", ")", "os", ".", "remove", "(", "path", ")", "return", "ret" ]
Takes a list of lines to be committed to a user's incrontab and writes it
[ "Takes", "a", "list", "of", "lines", "to", "be", "committed", "to", "a", "user", "s", "incrontab", "and", "writes", "it" ]
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/incron.py#L98-L114
0.00545
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_zone.py
brocade_zone.zoning_defined_configuration_cfg_cfg_name
def zoning_defined_configuration_cfg_cfg_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") zoning = ET.SubElement(config, "zoning", xmlns="urn:brocade.com:mgmt:brocade-zone") defined_configuration = ET.SubElement(zoning, "defined-configuration") cfg = ET.SubElement(defined_configuration, "cfg") cfg_name = ET.SubElement(cfg, "cfg-name") cfg_name.text = kwargs.pop('cfg_name') callback = kwargs.pop('callback', self._callback) return callback(config)
python
def zoning_defined_configuration_cfg_cfg_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") zoning = ET.SubElement(config, "zoning", xmlns="urn:brocade.com:mgmt:brocade-zone") defined_configuration = ET.SubElement(zoning, "defined-configuration") cfg = ET.SubElement(defined_configuration, "cfg") cfg_name = ET.SubElement(cfg, "cfg-name") cfg_name.text = kwargs.pop('cfg_name') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "zoning_defined_configuration_cfg_cfg_name", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "zoning", "=", "ET", ".", "SubElement", "(", "config", ",", "\"zoning\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-zone\"", ")", "defined_configuration", "=", "ET", ".", "SubElement", "(", "zoning", ",", "\"defined-configuration\"", ")", "cfg", "=", "ET", ".", "SubElement", "(", "defined_configuration", ",", "\"cfg\"", ")", "cfg_name", "=", "ET", ".", "SubElement", "(", "cfg", ",", "\"cfg-name\"", ")", "cfg_name", ".", "text", "=", "kwargs", ".", "pop", "(", "'cfg_name'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
train
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_zone.py#L12-L23
0.005357
all-umass/graphs
graphs/mixins/transformation.py
TransformMixin.cycle_cut
def cycle_cut(self, cycle_len_thresh=12, directed=False, copy=True): '''CycleCut algorithm: removes bottleneck edges. Paper DOI: 10.1.1.225.5335 ''' symmetric = not directed adj = self.kernelize('binary').matrix('csr', 'dense', copy=True) if symmetric: adj = adj + adj.T removed_edges = [] while True: c = _atomic_cycle(adj, cycle_len_thresh, directed=directed) if c is None: break # remove edges in the cycle ii, jj = c.T adj[ii,jj] = 0 if symmetric: adj[jj,ii] = 0 removed_edges.extend(c) #XXX: if _atomic_cycle changes, may need to do this on each loop if ss.issparse(adj): adj.eliminate_zeros() # select only the necessary cuts ii, jj = _find_cycle_inducers(adj, removed_edges, cycle_len_thresh, directed=directed) # remove the bad edges return self.remove_edges(ii, jj, symmetric=symmetric, copy=copy)
python
def cycle_cut(self, cycle_len_thresh=12, directed=False, copy=True): '''CycleCut algorithm: removes bottleneck edges. Paper DOI: 10.1.1.225.5335 ''' symmetric = not directed adj = self.kernelize('binary').matrix('csr', 'dense', copy=True) if symmetric: adj = adj + adj.T removed_edges = [] while True: c = _atomic_cycle(adj, cycle_len_thresh, directed=directed) if c is None: break # remove edges in the cycle ii, jj = c.T adj[ii,jj] = 0 if symmetric: adj[jj,ii] = 0 removed_edges.extend(c) #XXX: if _atomic_cycle changes, may need to do this on each loop if ss.issparse(adj): adj.eliminate_zeros() # select only the necessary cuts ii, jj = _find_cycle_inducers(adj, removed_edges, cycle_len_thresh, directed=directed) # remove the bad edges return self.remove_edges(ii, jj, symmetric=symmetric, copy=copy)
[ "def", "cycle_cut", "(", "self", ",", "cycle_len_thresh", "=", "12", ",", "directed", "=", "False", ",", "copy", "=", "True", ")", ":", "symmetric", "=", "not", "directed", "adj", "=", "self", ".", "kernelize", "(", "'binary'", ")", ".", "matrix", "(", "'csr'", ",", "'dense'", ",", "copy", "=", "True", ")", "if", "symmetric", ":", "adj", "=", "adj", "+", "adj", ".", "T", "removed_edges", "=", "[", "]", "while", "True", ":", "c", "=", "_atomic_cycle", "(", "adj", ",", "cycle_len_thresh", ",", "directed", "=", "directed", ")", "if", "c", "is", "None", ":", "break", "# remove edges in the cycle", "ii", ",", "jj", "=", "c", ".", "T", "adj", "[", "ii", ",", "jj", "]", "=", "0", "if", "symmetric", ":", "adj", "[", "jj", ",", "ii", "]", "=", "0", "removed_edges", ".", "extend", "(", "c", ")", "#XXX: if _atomic_cycle changes, may need to do this on each loop", "if", "ss", ".", "issparse", "(", "adj", ")", ":", "adj", ".", "eliminate_zeros", "(", ")", "# select only the necessary cuts", "ii", ",", "jj", "=", "_find_cycle_inducers", "(", "adj", ",", "removed_edges", ",", "cycle_len_thresh", ",", "directed", "=", "directed", ")", "# remove the bad edges", "return", "self", ".", "remove_edges", "(", "ii", ",", "jj", ",", "symmetric", "=", "symmetric", ",", "copy", "=", "copy", ")" ]
CycleCut algorithm: removes bottleneck edges. Paper DOI: 10.1.1.225.5335
[ "CycleCut", "algorithm", ":", "removes", "bottleneck", "edges", ".", "Paper", "DOI", ":", "10", ".", "1", ".", "1", ".", "225", ".", "5335" ]
train
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/mixins/transformation.py#L169-L198
0.013416
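Hedged usage sketch; g stands for a previously built graphs.Graph instance:

# g: an existing graphs.Graph (assumed); returns a copy with
# cycle-inducing bottleneck edges removed
pruned = g.cycle_cut(cycle_len_thresh=12, directed=False, copy=True)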
saltstack/salt
salt/sdb/sqlite3.py
set_
def set_(key, value, profile=None): ''' Set a key/value pair in sqlite3 ''' if not profile: return False conn, cur, table = _connect(profile) if six.PY2: value = buffer(salt.utils.msgpack.packb(value)) else: value = memoryview(salt.utils.msgpack.packb(value)) q = profile.get('set_query', ('INSERT OR REPLACE INTO {0} VALUES ' '(:key, :value)').format(table)) conn.execute(q, {'key': key, 'value': value}) conn.commit() return True
python
def set_(key, value, profile=None): ''' Set a key/value pair in sqlite3 ''' if not profile: return False conn, cur, table = _connect(profile) if six.PY2: value = buffer(salt.utils.msgpack.packb(value)) else: value = memoryview(salt.utils.msgpack.packb(value)) q = profile.get('set_query', ('INSERT OR REPLACE INTO {0} VALUES ' '(:key, :value)').format(table)) conn.execute(q, {'key': key, 'value': value}) conn.commit() return True
[ "def", "set_", "(", "key", ",", "value", ",", "profile", "=", "None", ")", ":", "if", "not", "profile", ":", "return", "False", "conn", ",", "cur", ",", "table", "=", "_connect", "(", "profile", ")", "if", "six", ".", "PY2", ":", "value", "=", "buffer", "(", "salt", ".", "utils", ".", "msgpack", ".", "packb", "(", "value", ")", ")", "else", ":", "value", "=", "memoryview", "(", "salt", ".", "utils", ".", "msgpack", ".", "packb", "(", "value", ")", ")", "q", "=", "profile", ".", "get", "(", "'set_query'", ",", "(", "'INSERT OR REPLACE INTO {0} VALUES '", "'(:key, :value)'", ")", ".", "format", "(", "table", ")", ")", "conn", ".", "execute", "(", "q", ",", "{", "'key'", ":", "key", ",", "'value'", ":", "value", "}", ")", "conn", ".", "commit", "(", ")", "return", "True" ]
Set a key/value pair in sqlite3
[ "Set", "a", "key", "/", "value", "pair", "in", "sqlite3" ]
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/sdb/sqlite3.py#L119-L134
0.001876
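A standalone sketch of the pack-and-upsert pattern the module relies on, outside of Salt; the table name and key are made up:

import sqlite3
import msgpack

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE sdb (key TEXT PRIMARY KEY, value BLOB)')
packed = memoryview(msgpack.packb({'a': 1}))  # values stored as msgpack blobs
conn.execute('INSERT OR REPLACE INTO sdb VALUES (:key, :value)',
             {'key': 'k1', 'value': packed})
conn.commit()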
FutureSharks/invokust
invokust/settings.py
create_settings
def create_settings(from_environment=False, locustfile=None, classes=None, host=None, num_clients=None, hatch_rate=None, reset_stats=False, run_time="3m"): ''' Returns a settings object to be used by a LocalLocustRunner. Arguments from_environment: get settings from environment variables locustfile: locustfile to use for loadtest classes: locust classes to use for load test host: host for load testing num_clients: number of clients to simulate in load test hatch_rate: number of clients per second to start reset_stats: Whether to reset stats after all clients are hatched run_time: The length of time to run the test for. Cannot exceed the duration limit set by lambda If from_environment is set to True then this function will attempt to set the attributes from environment variables. The environment variables are named LOCUST_ + attribute name in upper case. ''' settings = type('', (), {})() settings.from_environment = from_environment settings.locustfile = locustfile settings.classes = classes settings.host = host settings.num_clients = num_clients settings.hatch_rate = hatch_rate settings.reset_stats = reset_stats settings.run_time = run_time # Default settings that are not to be changed settings.no_web = True settings.master = False settings.show_task_ratio_json = False settings.list_commands = False settings.loglevel = 'INFO' settings.slave = False settings.only_summary = True settings.logfile = None settings.show_task_ratio = False settings.print_stats = False if from_environment: for attribute in ['locustfile', 'classes', 'host', 'run_time', 'num_clients', 'hatch_rate']: var_name = 'LOCUST_{0}'.format(attribute.upper()) var_value = os.environ.get(var_name) if var_value: setattr(settings, attribute, var_value) if settings.locustfile is None and settings.classes is None: raise Exception('One of locustfile or classes must be specified') if settings.locustfile and settings.classes: raise Exception('Only one of locustfile or classes can be specified') if settings.locustfile: docstring, classes = load_locustfile(settings.locustfile) settings.classes = [classes[n] for n in classes] else: if isinstance(settings.classes, str): settings.classes = settings.classes.split(',') for idx, val in enumerate(settings.classes): # This needs fixing settings.classes[idx] = eval(val) for attribute in ['classes', 'host', 'num_clients', 'hatch_rate']: val = getattr(settings, attribute, None) if not val: raise Exception('configuration error, attribute not set: {0}'.format(attribute)) if isinstance(val, str) and val.isdigit(): setattr(settings, attribute, int(val)) return settings
python
def create_settings(from_environment=False, locustfile=None, classes=None, host=None, num_clients=None, hatch_rate=None, reset_stats=False, run_time="3m"): ''' Returns a settings object to be used by a LocalLocustRunner. Arguments from_environment: get settings from environment variables locustfile: locustfile to use for loadtest classes: locust classes to use for load test host: host for load testing num_clients: number of clients to simulate in load test hatch_rate: number of clients per second to start reset_stats: Whether to reset stats after all clients are hatched run_time: The length of time to run the test for. Cannot exceed the duration limit set by lambda If from_environment is set to True then this function will attempt to set the attributes from environment variables. The environment variables are named LOCUST_ + attribute name in upper case. ''' settings = type('', (), {})() settings.from_environment = from_environment settings.locustfile = locustfile settings.classes = classes settings.host = host settings.num_clients = num_clients settings.hatch_rate = hatch_rate settings.reset_stats = reset_stats settings.run_time = run_time # Default settings that are not to be changed settings.no_web = True settings.master = False settings.show_task_ratio_json = False settings.list_commands = False settings.loglevel = 'INFO' settings.slave = False settings.only_summary = True settings.logfile = None settings.show_task_ratio = False settings.print_stats = False if from_environment: for attribute in ['locustfile', 'classes', 'host', 'run_time', 'num_clients', 'hatch_rate']: var_name = 'LOCUST_{0}'.format(attribute.upper()) var_value = os.environ.get(var_name) if var_value: setattr(settings, attribute, var_value) if settings.locustfile is None and settings.classes is None: raise Exception('One of locustfile or classes must be specified') if settings.locustfile and settings.classes: raise Exception('Only one of locustfile or classes can be specified') if settings.locustfile: docstring, classes = load_locustfile(settings.locustfile) settings.classes = [classes[n] for n in classes] else: if isinstance(settings.classes, str): settings.classes = settings.classes.split(',') for idx, val in enumerate(settings.classes): # This needs fixing settings.classes[idx] = eval(val) for attribute in ['classes', 'host', 'num_clients', 'hatch_rate']: val = getattr(settings, attribute, None) if not val: raise Exception('configuration error, attribute not set: {0}'.format(attribute)) if isinstance(val, str) and val.isdigit(): setattr(settings, attribute, int(val)) return settings
[ "def", "create_settings", "(", "from_environment", "=", "False", ",", "locustfile", "=", "None", ",", "classes", "=", "None", ",", "host", "=", "None", ",", "num_clients", "=", "None", ",", "hatch_rate", "=", "None", ",", "reset_stats", "=", "False", ",", "run_time", "=", "\"3m\"", ")", ":", "settings", "=", "type", "(", "''", ",", "(", ")", ",", "{", "}", ")", "(", ")", "settings", ".", "from_environment", "=", "from_environment", "settings", ".", "locustfile", "=", "locustfile", "settings", ".", "classes", "=", "classes", "settings", ".", "host", "=", "host", "settings", ".", "num_clients", "=", "num_clients", "settings", ".", "hatch_rate", "=", "hatch_rate", "settings", ".", "reset_stats", "=", "reset_stats", "settings", ".", "run_time", "=", "run_time", "# Default settings that are not to be changed", "settings", ".", "no_web", "=", "True", "settings", ".", "master", "=", "False", "settings", ".", "show_task_ratio_json", "=", "False", "settings", ".", "list_commands", "=", "False", "settings", ".", "loglevel", "=", "'INFO'", "settings", ".", "slave", "=", "False", "settings", ".", "only_summary", "=", "True", "settings", ".", "logfile", "=", "None", "settings", ".", "show_task_ratio", "=", "False", "settings", ".", "print_stats", "=", "False", "if", "from_environment", ":", "for", "attribute", "in", "[", "'locustfile'", ",", "'classes'", ",", "'host'", ",", "'run_time'", ",", "'num_clients'", ",", "'hatch_rate'", "]", ":", "var_name", "=", "'LOCUST_{0}'", ".", "format", "(", "attribute", ".", "upper", "(", ")", ")", "var_value", "=", "os", ".", "environ", ".", "get", "(", "var_name", ")", "if", "var_value", ":", "setattr", "(", "settings", ",", "attribute", ",", "var_value", ")", "if", "settings", ".", "locustfile", "is", "None", "and", "settings", ".", "classes", "is", "None", ":", "raise", "Exception", "(", "'One of locustfile or classes must be specified'", ")", "if", "settings", ".", "locustfile", "and", "settings", ".", "classes", ":", "raise", "Exception", "(", "'Only one of locustfile or classes can be specified'", ")", "if", "settings", ".", "locustfile", ":", "docstring", ",", "classes", "=", "load_locustfile", "(", "settings", ".", "locustfile", ")", "settings", ".", "classes", "=", "[", "classes", "[", "n", "]", "for", "n", "in", "classes", "]", "else", ":", "if", "isinstance", "(", "settings", ".", "classes", ",", "str", ")", ":", "settings", ".", "classes", "=", "settings", ".", "classes", ".", "split", "(", "','", ")", "for", "idx", ",", "val", "in", "enumerate", "(", "settings", ".", "classes", ")", ":", "# This needs fixing", "settings", ".", "classes", "[", "idx", "]", "=", "eval", "(", "val", ")", "for", "attribute", "in", "[", "'classes'", ",", "'host'", ",", "'num_clients'", ",", "'hatch_rate'", "]", ":", "val", "=", "getattr", "(", "settings", ",", "attribute", ",", "None", ")", "if", "not", "val", ":", "raise", "Exception", "(", "'configuration error, attribute not set: {0}'", ".", "format", "(", "attribute", ")", ")", "if", "isinstance", "(", "val", ",", "str", ")", "and", "val", ".", "isdigit", "(", ")", ":", "setattr", "(", "settings", ",", "attribute", ",", "int", "(", "val", ")", ")", "return", "settings" ]
Returns a settings object to be used by a LocalLocustRunner. Arguments from_environment: get settings from environment variables locustfile: locustfile to use for loadtest classes: locust classes to use for load test host: host for load testing num_clients: number of clients to simulate in load test hatch_rate: number of clients per second to start reset_stats: Whether to reset stats after all clients are hatched run_time: The length of time to run the test for. Cannot exceed the duration limit set by lambda If from_environment is set to True then this function will attempt to set the attributes from environment variables. The environment variables are named LOCUST_ + attribute name in upper case.
[ "Returns", "a", "settings", "object", "to", "be", "used", "by", "a", "LocalLocustRunner", "." ]
train
https://github.com/FutureSharks/invokust/blob/af0830ade8b08ebdd6ec2a0ea3188dee2b123a33/invokust/settings.py#L8-L84
0.002303
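Direct usage sketch; the locustfile path and host are placeholders:

from invokust.settings import create_settings

settings = create_settings(
    locustfile='locustfile.py',  # placeholder path
    host='http://example.com',
    num_clients=10,
    hatch_rate=2,
    run_time='1m',
)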
SergeySatskiy/cdm-pythonparser
legacy/src/cdmbriefparser.py
ModuleInfoBase._getLPA
def _getLPA( self ):
        " Provides line, pos and absPosition as a string "
        return str( self.line ) + ":" + \
               str( self.pos ) + ":" + \
               str( self.absPosition )
python
def _getLPA( self ):
        " Provides line, pos and absPosition as a string "
        return str( self.line ) + ":" + \
               str( self.pos ) + ":" + \
               str( self.absPosition )
[ "def", "_getLPA", "(", "self", ")", ":", "return", "str", "(", "self", ".", "line", ")", "+", "\":\"", "+", "str", "(", "self", ".", "pos", ")", "+", "\":\"", "+", "str", "(", "self", ".", "absPosition", ")" ]
Provides line, pos and absPosition as a string
[ "Provides", "line", "pos", "and", "absPosition", "line", "as", "string" ]
train
https://github.com/SergeySatskiy/cdm-pythonparser/blob/7e933aca899b1853d744082313ffc3a8b1154505/legacy/src/cdmbriefparser.py#L91-L95
0.058824
jwkvam/plotlywrapper
plotlywrapper.py
spark_shape
def spark_shape(points, shapes, fill=None, color='blue', width=5, yindex=0, heights=None): """TODO: Docstring for spark. Parameters ---------- points : array-like shapes : array-like fill : array-like, optional Returns ------- Chart """ assert len(points) == len(shapes) + 1 data = [{'marker': {'color': 'white'}, 'x': [points[0], points[-1]], 'y': [yindex, yindex]}] if fill is None: fill = [False] * len(shapes) if heights is None: heights = [0.4] * len(shapes) lays = [] for i, (shape, height) in enumerate(zip(shapes, heights)): if shape is None: continue if fill[i]: fillcolor = color else: fillcolor = 'white' lays.append( dict( type=shape, x0=points[i], x1=points[i + 1], y0=yindex - height, y1=yindex + height, xref='x', yref='y', fillcolor=fillcolor, line=dict(color=color, width=width), ) ) layout = dict(shapes=lays) return Chart(data=data, layout=layout)
python
def spark_shape(points, shapes, fill=None, color='blue', width=5, yindex=0, heights=None): """TODO: Docstring for spark. Parameters ---------- points : array-like shapes : array-like fill : array-like, optional Returns ------- Chart """ assert len(points) == len(shapes) + 1 data = [{'marker': {'color': 'white'}, 'x': [points[0], points[-1]], 'y': [yindex, yindex]}] if fill is None: fill = [False] * len(shapes) if heights is None: heights = [0.4] * len(shapes) lays = [] for i, (shape, height) in enumerate(zip(shapes, heights)): if shape is None: continue if fill[i]: fillcolor = color else: fillcolor = 'white' lays.append( dict( type=shape, x0=points[i], x1=points[i + 1], y0=yindex - height, y1=yindex + height, xref='x', yref='y', fillcolor=fillcolor, line=dict(color=color, width=width), ) ) layout = dict(shapes=lays) return Chart(data=data, layout=layout)
[ "def", "spark_shape", "(", "points", ",", "shapes", ",", "fill", "=", "None", ",", "color", "=", "'blue'", ",", "width", "=", "5", ",", "yindex", "=", "0", ",", "heights", "=", "None", ")", ":", "assert", "len", "(", "points", ")", "==", "len", "(", "shapes", ")", "+", "1", "data", "=", "[", "{", "'marker'", ":", "{", "'color'", ":", "'white'", "}", ",", "'x'", ":", "[", "points", "[", "0", "]", ",", "points", "[", "-", "1", "]", "]", ",", "'y'", ":", "[", "yindex", ",", "yindex", "]", "}", "]", "if", "fill", "is", "None", ":", "fill", "=", "[", "False", "]", "*", "len", "(", "shapes", ")", "if", "heights", "is", "None", ":", "heights", "=", "[", "0.4", "]", "*", "len", "(", "shapes", ")", "lays", "=", "[", "]", "for", "i", ",", "(", "shape", ",", "height", ")", "in", "enumerate", "(", "zip", "(", "shapes", ",", "heights", ")", ")", ":", "if", "shape", "is", "None", ":", "continue", "if", "fill", "[", "i", "]", ":", "fillcolor", "=", "color", "else", ":", "fillcolor", "=", "'white'", "lays", ".", "append", "(", "dict", "(", "type", "=", "shape", ",", "x0", "=", "points", "[", "i", "]", ",", "x1", "=", "points", "[", "i", "+", "1", "]", ",", "y0", "=", "yindex", "-", "height", ",", "y1", "=", "yindex", "+", "height", ",", "xref", "=", "'x'", ",", "yref", "=", "'y'", ",", "fillcolor", "=", "fillcolor", ",", "line", "=", "dict", "(", "color", "=", "color", ",", "width", "=", "width", ")", ",", ")", ")", "layout", "=", "dict", "(", "shapes", "=", "lays", ")", "return", "Chart", "(", "data", "=", "data", ",", "layout", "=", "layout", ")" ]
TODO: Docstring for spark. Parameters ---------- points : array-like shapes : array-like fill : array-like, optional Returns ------- Chart
[ "TODO", ":", "Docstring", "for", "spark", "." ]
train
https://github.com/jwkvam/plotlywrapper/blob/762b42912e824fecb1212c186900f2ebdd0ab12b/plotlywrapper.py#L472-L519
0.002488
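Usage sketch; shapes are per-interval, so three shapes span the four points (all values made up):

from plotlywrapper import spark_shape

points = [0, 1, 2, 3]
shapes = ['rect', None, 'circle']  # one shape per interval, None skips it
chart = spark_shape(points, shapes, fill=[True, False, False])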
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_1/git/git_client_base.py
GitClientBase.get_ref_favorite
def get_ref_favorite(self, project, favorite_id): """GetRefFavorite. [Preview API] Gets the refs favorite for a favorite Id. :param str project: Project ID or project name :param int favorite_id: The Id of the requested ref favorite. :rtype: :class:`<GitRefFavorite> <azure.devops.v5_1.git.models.GitRefFavorite>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if favorite_id is not None: route_values['favoriteId'] = self._serialize.url('favorite_id', favorite_id, 'int') response = self._send(http_method='GET', location_id='876f70af-5792-485a-a1c7-d0a7b2f42bbb', version='5.1-preview.1', route_values=route_values) return self._deserialize('GitRefFavorite', response)
python
def get_ref_favorite(self, project, favorite_id): """GetRefFavorite. [Preview API] Gets the refs favorite for a favorite Id. :param str project: Project ID or project name :param int favorite_id: The Id of the requested ref favorite. :rtype: :class:`<GitRefFavorite> <azure.devops.v5_1.git.models.GitRefFavorite>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if favorite_id is not None: route_values['favoriteId'] = self._serialize.url('favorite_id', favorite_id, 'int') response = self._send(http_method='GET', location_id='876f70af-5792-485a-a1c7-d0a7b2f42bbb', version='5.1-preview.1', route_values=route_values) return self._deserialize('GitRefFavorite', response)
[ "def", "get_ref_favorite", "(", "self", ",", "project", ",", "favorite_id", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'project'", ",", "project", ",", "'str'", ")", "if", "favorite_id", "is", "not", "None", ":", "route_values", "[", "'favoriteId'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'favorite_id'", ",", "favorite_id", ",", "'int'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'GET'", ",", "location_id", "=", "'876f70af-5792-485a-a1c7-d0a7b2f42bbb'", ",", "version", "=", "'5.1-preview.1'", ",", "route_values", "=", "route_values", ")", "return", "self", ".", "_deserialize", "(", "'GitRefFavorite'", ",", "response", ")" ]
GetRefFavorite. [Preview API] Gets the refs favorite for a favorite Id. :param str project: Project ID or project name :param int favorite_id: The Id of the requested ref favorite. :rtype: :class:`<GitRefFavorite> <azure.devops.v5_1.git.models.GitRefFavorite>`
[ "GetRefFavorite", ".", "[", "Preview", "API", "]", "Gets", "the", "refs", "favorite", "for", "a", "favorite", "Id", ".", ":", "param", "str", "project", ":", "Project", "ID", "or", "project", "name", ":", "param", "int", "favorite_id", ":", "The", "Id", "of", "the", "requested", "ref", "favorite", ".", ":", "rtype", ":", ":", "class", ":", "<GitRefFavorite", ">", "<azure", ".", "devops", ".", "v5_1", ".", "git", ".", "models", ".", "GitRefFavorite", ">" ]
train
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/git/git_client_base.py#L2852-L2868
0.006356
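End-to-end usage sketch; the organization URL, token, project and favorite id are placeholders:

from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

connection = Connection(base_url='https://dev.azure.com/myorg',  # placeholder
                        creds=BasicAuthentication('', 'personal-access-token'))
git_client = connection.clients.get_git_client()
favorite = git_client.get_ref_favorite('MyProject', 42)  # placeholder ids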
razorpay/razorpay-python
razorpay/resources/virtual_account.py
VirtualAccount.close
def close(self, virtual_account_id, data={}, **kwargs):
        """
        Close Virtual Account for the given Id

        Args:
            virtual_account_id : Id for which
                Virtual Account object has to be closed
        """
        url = "{}/{}".format(self.base_url, virtual_account_id)
        data['status'] = 'closed'

        return self.patch_url(url, data, **kwargs)
python
def close(self, virtual_account_id, data={}, **kwargs):
        """
        Close Virtual Account for the given Id

        Args:
            virtual_account_id : Id for which
                Virtual Account object has to be closed
        """
        url = "{}/{}".format(self.base_url, virtual_account_id)
        data['status'] = 'closed'

        return self.patch_url(url, data, **kwargs)
[ "def", "close", "(", "self", ",", "virtual_account_id", ",", "data", "=", "{", "}", ",", "*", "*", "kwargs", ")", ":", "url", "=", "\"{}/{}\"", ".", "format", "(", "self", ".", "base_url", ",", "virtual_account_id", ")", "data", "[", "'status'", "]", "=", "'closed'", "return", "self", ".", "patch_url", "(", "url", ",", "data", ",", "*", "*", "kwargs", ")" ]
Close Virtual Account for the given Id

Args:
    virtual_account_id : Id for which
        Virtual Account object has to be closed
[ "Close", "Virtual", "Account", "from", "given", "Id" ]
train
https://github.com/razorpay/razorpay-python/blob/5bc63fd8452165a4b54556888492e555222c8afe/razorpay/resources/virtual_account.py#L48-L58
0.005115
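Typical usage sketch; the credentials and virtual account id are placeholders:

import razorpay

client = razorpay.Client(auth=('key_id', 'key_secret'))  # placeholder auth
client.virtual_account.close('va_0000000000')  # placeholder id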
jalmeroth/pymusiccast
pymusiccast/zone.py
Zone.set_volume
def set_volume(self, volume): """Send Volume command.""" req_url = ENDPOINTS["setVolume"].format(self.ip_address, self.zone_id) params = {"volume": int(volume)} return request(req_url, params=params)
python
def set_volume(self, volume): """Send Volume command.""" req_url = ENDPOINTS["setVolume"].format(self.ip_address, self.zone_id) params = {"volume": int(volume)} return request(req_url, params=params)
[ "def", "set_volume", "(", "self", ",", "volume", ")", ":", "req_url", "=", "ENDPOINTS", "[", "\"setVolume\"", "]", ".", "format", "(", "self", ".", "ip_address", ",", "self", ".", "zone_id", ")", "params", "=", "{", "\"volume\"", ":", "int", "(", "volume", ")", "}", "return", "request", "(", "req_url", ",", "params", "=", "params", ")" ]
Send Volume command.
[ "Send", "Volume", "command", "." ]
train
https://github.com/jalmeroth/pymusiccast/blob/616379ae22d6b518c61042d58be6d18a46242168/pymusiccast/zone.py#L145-L149
0.008658
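The method above wraps Yamaha's Extended Control setVolume endpoint. A hedged raw-HTTP sketch of the equivalent request; the device IP, zone id, and endpoint path are assumptions based on the standard Yamaha Extended Control API, not values from this record.

import requests

ip_address, zone_id = '192.168.1.50', 'main'  # placeholders
url = 'http://{}/YamahaExtendedControl/v1/{}/setVolume'.format(ip_address, zone_id)
response = requests.get(url, params={'volume': int(35)})
print(response.json())  # a MusicCast device answers {'response_code': 0} on success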
samjabrahams/anchorhub
anchorhub/validation/validate_files.py
validate
def validate(file_paths, opts): """ Client facing validate function. Runs _validate() and returns True if the file_paths pass all of the validations. Handles exceptions automatically if _validate() throws any and exits the program. :param file_paths: List of string file paths to test :param opts: command-line arguments namespace - used for creating useful messages to the client if any tests fail :return: True if the files pass all validation tests """ try: return _validate(file_paths) except ValidationException as e: if str(e) == "No files found": messages.print_no_files_found(opts) else: print(e) sys.exit(0)
python
def validate(file_paths, opts): """ Client facing validate function. Runs _validate() and returns True if the file_paths pass all of the validations. Handles exceptions automatically if _validate() throws any and exits the program. :param file_paths: List of string file paths to test :param opts: command-line arguments namespace - used for creating useful messages to the client if any tests fail :return: True if the files pass all validation tests """ try: return _validate(file_paths) except ValidationException as e: if str(e) == "No files found": messages.print_no_files_found(opts) else: print(e) sys.exit(0)
[ "def", "validate", "(", "file_paths", ",", "opts", ")", ":", "try", ":", "return", "_validate", "(", "file_paths", ")", "except", "ValidationException", "as", "e", ":", "if", "str", "(", "e", ")", "==", "\"No files found\"", ":", "messages", ".", "print_no_files_found", "(", "opts", ")", "else", ":", "print", "(", "e", ")", "sys", ".", "exit", "(", "0", ")" ]
Client facing validate function. Runs _validate() and returns True if the file_paths pass all of the validations. Handles exceptions automatically if _validate() throws any and exits the program. :param file_paths: List of string file paths to test :param opts: command-line arguments namespace - used for creating useful messages to the client if any tests fail :return: True if the files pass all validation tests
[ "Client", "facing", "validate", "function", ".", "Runs", "_validate", "()", "and", "returns", "True", "if", "the", "the", "file_paths", "pass", "all", "of", "the", "validations", ".", "Handles", "exceptions", "automatically", "if", "_validate", "()", "throws", "any", "and", "exits", "the", "program", "." ]
train
https://github.com/samjabrahams/anchorhub/blob/5ade359b08297d4003a5f477389c01de9e634b54/anchorhub/validation/validate_files.py#L10-L28
0.001377
JoelBender/bacpypes
py25/bacpypes/primitivedata.py
Enumerated.is_valid
def is_valid(cls, arg): """Return True if arg is valid value for the class. If the string value is wrong for the enumeration, the encoding will fail. """ return (isinstance(arg, (int, long)) and (arg >= 0)) or \ isinstance(arg, basestring)
python
def is_valid(cls, arg): """Return True if arg is valid value for the class. If the string value is wrong for the enumeration, the encoding will fail. """ return (isinstance(arg, (int, long)) and (arg >= 0)) or \ isinstance(arg, basestring)
[ "def", "is_valid", "(", "cls", ",", "arg", ")", ":", "return", "(", "isinstance", "(", "arg", ",", "(", "int", ",", "long", ")", ")", "and", "(", "arg", ">=", "0", ")", ")", "or", "isinstance", "(", "arg", ",", "basestring", ")" ]
Return True if arg is valid value for the class. If the string value is wrong for the enumeration, the encoding will fail.
[ "Return", "True", "if", "arg", "is", "valid", "value", "for", "the", "class", ".", "If", "the", "string", "value", "is", "wrong", "for", "the", "enumeration", "the", "encoding", "will", "fail", "." ]
train
https://github.com/JoelBender/bacpypes/blob/4111b8604a16fa2b7f80d8104a43b9f3e28dfc78/py25/bacpypes/primitivedata.py#L1211-L1216
0.007042
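A behavioral sketch of the validity rule above, ported from the Python 2 names (int/long, basestring) to Python 3:

def is_valid_enumerated(arg):
    # Non-negative integers and strings pass this pre-check; an unknown
    # string still fails later, when the enumeration is encoded.
    return (isinstance(arg, int) and arg >= 0) or isinstance(arg, str)

assert is_valid_enumerated(3)
assert is_valid_enumerated('active')
assert not is_valid_enumerated(-1)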
MacHu-GWU/pymongo_mate-project
pymongo_mate/pkg/pandas_mate/pkg/rolex/__init__.py
Rolex.from_utctimestamp
def from_utctimestamp(self, timestamp): """Create a **UTC datetime** object that is the given number of seconds after UTC 1970-01-01 00:00:00. If you want local time, use :meth:`Rolex.from_timestamp` Because python doesn't support converting negative timestamps to datetime, we have to implement our own method. **Chinese documentation** Returns the time that is #timestamp seconds after UTC 1970-01-01 00:00:00. UTC by default; the returned datetime carries no tzinfo. """ if timestamp >= 0: return datetime.utcfromtimestamp(timestamp) else: return datetime(1970, 1, 1) + timedelta(seconds=timestamp)
python
def from_utctimestamp(self, timestamp): """Create a **UTC datetime** object that is the given number of seconds after UTC 1970-01-01 00:00:00. If you want local time, use :meth:`Rolex.from_timestamp` Because python doesn't support converting negative timestamps to datetime, we have to implement our own method. **Chinese documentation** Returns the time that is #timestamp seconds after UTC 1970-01-01 00:00:00. UTC by default; the returned datetime carries no tzinfo. """ if timestamp >= 0: return datetime.utcfromtimestamp(timestamp) else: return datetime(1970, 1, 1) + timedelta(seconds=timestamp)
[ "def", "from_utctimestamp", "(", "self", ",", "timestamp", ")", ":", "if", "timestamp", ">=", "0", ":", "return", "datetime", ".", "utcfromtimestamp", "(", "timestamp", ")", "else", ":", "return", "datetime", "(", "1970", ",", "1", ",", "1", ")", "+", "timedelta", "(", "seconds", "=", "timestamp", ")" ]
Create a **UTC datetime** object that is the given number of seconds after UTC 1970-01-01 00:00:00. If you want local time, use :meth:`Rolex.from_timestamp` Because python doesn't support converting negative timestamps to datetime, we have to implement our own method. **Chinese documentation** Returns the time that is #timestamp seconds after UTC 1970-01-01 00:00:00. UTC by default; the returned datetime carries no tzinfo.
[ "Create", "a", "**", "UTC", "datetime", "**", "object", "that", "number", "of", "seconds", "after", "UTC", "1970", "-", "01", "-", "01", "00", ":", "00", ":", "00", ".", "If", "you", "want", "local", "time", "use", ":", "meth", ":", "Rolex", ".", "from_timestamp" ]
train
https://github.com/MacHu-GWU/pymongo_mate-project/blob/be53170c2db54cb705b9e548d32ef26c773ff7f3/pymongo_mate/pkg/pandas_mate/pkg/rolex/__init__.py#L242-L258
0.00321
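A standalone sketch of the same pre-epoch handling: datetime.utcfromtimestamp rejects negative values on some platforms (notably Windows), hence the timedelta branch.

from datetime import datetime, timedelta

def from_utctimestamp(timestamp):
    # Mirror of the method above, free of the Rolex class for demonstration.
    if timestamp >= 0:
        return datetime.utcfromtimestamp(timestamp)
    return datetime(1970, 1, 1) + timedelta(seconds=timestamp)

print(from_utctimestamp(0))       # 1970-01-01 00:00:00
print(from_utctimestamp(-86400))  # 1969-12-31 00:00:00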
suds-community/suds
suds/xsd/sxbase.py
SchemaObject.find
def find(self, qref, classes=[], ignore=None): """ Find a referenced type in self or children. Return None if not found. Qualified references for all schema objects checked in this search will be added to the set of ignored qualified references to avoid the find operation going into an infinite loop in case of recursively defined structures. @param qref: A qualified reference. @type qref: qref @param classes: A collection of classes used to qualify the match. @type classes: Collection(I{class},...), e.g. [I(class),...] @param ignore: A set of qualified references to ignore in this search. @type ignore: {qref,...} @return: The referenced type. @rtype: L{SchemaObject} @see: L{qualify()} """ if not len(classes): classes = (self.__class__,) if ignore is None: ignore = set() if self.qname in ignore: return ignore.add(self.qname) if self.qname == qref and self.__class__ in classes: return self for c in self.rawchildren: p = c.find(qref, classes, ignore=ignore) if p is not None: return p
python
def find(self, qref, classes=[], ignore=None): """ Find a referenced type in self or children. Return None if not found. Qualified references for all schema objects checked in this search will be added to the set of ignored qualified references to avoid the find operation going into an infinite loop in case of recursively defined structures. @param qref: A qualified reference. @type qref: qref @param classes: A collection of classes used to qualify the match. @type classes: Collection(I{class},...), e.g. [I(class),...] @param ignore: A set of qualified references to ignore in this search. @type ignore: {qref,...} @return: The referenced type. @rtype: L{SchemaObject} @see: L{qualify()} """ if not len(classes): classes = (self.__class__,) if ignore is None: ignore = set() if self.qname in ignore: return ignore.add(self.qname) if self.qname == qref and self.__class__ in classes: return self for c in self.rawchildren: p = c.find(qref, classes, ignore=ignore) if p is not None: return p
[ "def", "find", "(", "self", ",", "qref", ",", "classes", "=", "[", "]", ",", "ignore", "=", "None", ")", ":", "if", "not", "len", "(", "classes", ")", ":", "classes", "=", "(", "self", ".", "__class__", ",", ")", "if", "ignore", "is", "None", ":", "ignore", "=", "set", "(", ")", "if", "self", ".", "qname", "in", "ignore", ":", "return", "ignore", ".", "add", "(", "self", ".", "qname", ")", "if", "self", ".", "qname", "==", "qref", "and", "self", ".", "__class__", "in", "classes", ":", "return", "self", "for", "c", "in", "self", ".", "rawchildren", ":", "p", "=", "c", ".", "find", "(", "qref", ",", "classes", ",", "ignore", "=", "ignore", ")", "if", "p", "is", "not", "None", ":", "return", "p" ]
Find a referenced type in self or children. Return None if not found. Qualified references for all schema objects checked in this search will be added to the set of ignored qualified references to avoid the find operation going into an infinite loop in case of recursively defined structures. @param qref: A qualified reference. @type qref: qref @param classes: A collection of classes used to qualify the match. @type classes: Collection(I{class},...), e.g. [I(class),...] @param ignore: A set of qualified references to ignore in this search. @type ignore: {qref,...} @return: The referenced type. @rtype: L{SchemaObject} @see: L{qualify()}
[ "Find", "a", "referenced", "type", "in", "self", "or", "children", ".", "Return", "None", "if", "not", "found", "." ]
train
https://github.com/suds-community/suds/blob/6fb0a829337b5037a66c20aae6f89b41acd77e40/suds/xsd/sxbase.py#L342-L374
0.001585
ilblackdragon/django-misc
misc/views.py
handler404
def handler404(request, template_name='404.html'): """ 404 error handler. Templates: `404.html` Context: MEDIA_URL Path of static media (e.g. "media.example.org") STATIC_URL """ t = loader.get_template(template_name) # You need to create a 404.html template. return http.HttpResponseNotFound(t.render(Context({ 'MEDIA_URL': settings.MEDIA_URL, 'STATIC_URL': settings.STATIC_URL })))
python
def handler404(request, template_name='404.html'): """ 404 error handler. Templates: `404.html` Context: MEDIA_URL Path of static media (e.g. "media.example.org") STATIC_URL """ t = loader.get_template(template_name) # You need to create a 404.html template. return http.HttpResponseNotFound(t.render(Context({ 'MEDIA_URL': settings.MEDIA_URL, 'STATIC_URL': settings.STATIC_URL })))
[ "def", "handler404", "(", "request", ",", "template_name", "=", "'404.html'", ")", ":", "t", "=", "loader", ".", "get_template", "(", "template_name", ")", "# You need to create a 404.html template.", "return", "http", ".", "HttpResponseNotFound", "(", "t", ".", "render", "(", "Context", "(", "{", "'MEDIA_URL'", ":", "settings", ".", "MEDIA_URL", ",", "'STATIC_URL'", ":", "settings", ".", "STATIC_URL", "}", ")", ")", ")" ]
404 error handler. Templates: `404.html` Context: MEDIA_URL Path of static media (e.g. "media.example.org") STATIC_URL
[ "404", "error", "handler", "." ]
train
https://github.com/ilblackdragon/django-misc/blob/0accd2dc97de656a1c9e275be81e817f78a2eb9d/misc/views.py#L38-L52
0.006211
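Wiring sketch: Django picks this handler up from the root URLconf. The dotted path below is inferred from the record's module location (misc.views), and the project must provide a 404.html template, as the view's loader.get_template() call expects.

# In the project's root urls.py
handler404 = 'misc.views.handler404'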
spyder-ide/spyder
spyder/plugins/editor/panels/manager.py
PanelsManager._compute_zones_sizes
def _compute_zones_sizes(self): """Compute panel zone sizes.""" # Left panels left = 0 for panel in self.panels_for_zone(Panel.Position.LEFT): if not panel.isVisible(): continue size_hint = panel.sizeHint() left += size_hint.width() # Right panels right = 0 for panel in self.panels_for_zone(Panel.Position.RIGHT): if not panel.isVisible(): continue size_hint = panel.sizeHint() right += size_hint.width() # Top panels top = 0 for panel in self.panels_for_zone(Panel.Position.TOP): if not panel.isVisible(): continue size_hint = panel.sizeHint() top += size_hint.height() # Bottom panels bottom = 0 for panel in self.panels_for_zone(Panel.Position.BOTTOM): if not panel.isVisible(): continue size_hint = panel.sizeHint() bottom += size_hint.height() self._top, self._left, self._right, self._bottom = ( top, left, right, bottom) return bottom, left, right, top
python
def _compute_zones_sizes(self): """Compute panel zone sizes.""" # Left panels left = 0 for panel in self.panels_for_zone(Panel.Position.LEFT): if not panel.isVisible(): continue size_hint = panel.sizeHint() left += size_hint.width() # Right panels right = 0 for panel in self.panels_for_zone(Panel.Position.RIGHT): if not panel.isVisible(): continue size_hint = panel.sizeHint() right += size_hint.width() # Top panels top = 0 for panel in self.panels_for_zone(Panel.Position.TOP): if not panel.isVisible(): continue size_hint = panel.sizeHint() top += size_hint.height() # Bottom panels bottom = 0 for panel in self.panels_for_zone(Panel.Position.BOTTOM): if not panel.isVisible(): continue size_hint = panel.sizeHint() bottom += size_hint.height() self._top, self._left, self._right, self._bottom = ( top, left, right, bottom) return bottom, left, right, top
[ "def", "_compute_zones_sizes", "(", "self", ")", ":", "# Left panels", "left", "=", "0", "for", "panel", "in", "self", ".", "panels_for_zone", "(", "Panel", ".", "Position", ".", "LEFT", ")", ":", "if", "not", "panel", ".", "isVisible", "(", ")", ":", "continue", "size_hint", "=", "panel", ".", "sizeHint", "(", ")", "left", "+=", "size_hint", ".", "width", "(", ")", "# Right panels", "right", "=", "0", "for", "panel", "in", "self", ".", "panels_for_zone", "(", "Panel", ".", "Position", ".", "RIGHT", ")", ":", "if", "not", "panel", ".", "isVisible", "(", ")", ":", "continue", "size_hint", "=", "panel", ".", "sizeHint", "(", ")", "right", "+=", "size_hint", ".", "width", "(", ")", "# Top panels", "top", "=", "0", "for", "panel", "in", "self", ".", "panels_for_zone", "(", "Panel", ".", "Position", ".", "TOP", ")", ":", "if", "not", "panel", ".", "isVisible", "(", ")", ":", "continue", "size_hint", "=", "panel", ".", "sizeHint", "(", ")", "top", "+=", "size_hint", ".", "height", "(", ")", "# Bottom panels", "bottom", "=", "0", "for", "panel", "in", "self", ".", "panels_for_zone", "(", "Panel", ".", "Position", ".", "BOTTOM", ")", ":", "if", "not", "panel", ".", "isVisible", "(", ")", ":", "continue", "size_hint", "=", "panel", ".", "sizeHint", "(", ")", "bottom", "+=", "size_hint", ".", "height", "(", ")", "self", ".", "_top", ",", "self", ".", "_left", ",", "self", ".", "_right", ",", "self", ".", "_bottom", "=", "(", "top", ",", "left", ",", "right", ",", "bottom", ")", "return", "bottom", ",", "left", ",", "right", ",", "top" ]
Compute panel zone sizes.
[ "Compute", "panel", "zone", "sizes", "." ]
train
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/panels/manager.py#L291-L323
0.001667
twilio/twilio-python
twilio/rest/flex_api/v1/configuration.py
ConfigurationInstance.fetch
def fetch(self, ui_version=values.unset): """ Fetch a ConfigurationInstance :param unicode ui_version: Pinned UI version :returns: Fetched ConfigurationInstance :rtype: twilio.rest.flex_api.v1.configuration.ConfigurationInstance """ return self._proxy.fetch(ui_version=ui_version, )
python
def fetch(self, ui_version=values.unset): """ Fetch a ConfigurationInstance :param unicode ui_version: Pinned UI version :returns: Fetched ConfigurationInstance :rtype: twilio.rest.flex_api.v1.configuration.ConfigurationInstance """ return self._proxy.fetch(ui_version=ui_version, )
[ "def", "fetch", "(", "self", ",", "ui_version", "=", "values", ".", "unset", ")", ":", "return", "self", ".", "_proxy", ".", "fetch", "(", "ui_version", "=", "ui_version", ",", ")" ]
Fetch a ConfigurationInstance :param unicode ui_version: Pinned UI version :returns: Fetched ConfigurationInstance :rtype: twilio.rest.flex_api.v1.configuration.ConfigurationInstance
[ "Fetch", "a", "ConfigurationInstance" ]
train
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/flex_api/v1/configuration.py#L500-L509
0.005882
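Client-side usage sketch; the credentials are placeholders and the accessor path follows twilio-python's usual singleton-resource pattern.

from twilio.rest import Client

client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')  # placeholders
configuration = client.flex_api.configuration().fetch()
print(configuration.ui_version)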
iotile/coretools
iotilecore/iotile/core/hw/transport/adapter/async_wrapper.py
_on_scan
def _on_scan(_loop, adapter, _adapter_id, info, expiration_time): """Callback when a new device is seen.""" info['validity_period'] = expiration_time adapter.notify_event_nowait(info.get('connection_string'), 'device_seen', info)
python
def _on_scan(_loop, adapter, _adapter_id, info, expiration_time): """Callback when a new device is seen.""" info['validity_period'] = expiration_time adapter.notify_event_nowait(info.get('connection_string'), 'device_seen', info)
[ "def", "_on_scan", "(", "_loop", ",", "adapter", ",", "_adapter_id", ",", "info", ",", "expiration_time", ")", ":", "info", "[", "'validity_period'", "]", "=", "expiration_time", "adapter", ".", "notify_event_nowait", "(", "info", ".", "get", "(", "'connection_string'", ")", ",", "'device_seen'", ",", "info", ")" ]
Callback when a new device is seen.
[ "Callback", "when", "a", "new", "device", "is", "seen", "." ]
train
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/hw/transport/adapter/async_wrapper.py#L227-L231
0.008264
a1ezzz/wasp-general
wasp_general/network/web/service.py
WWebService.create_presenter
def create_presenter(self, request, target_route): """ Create a presenter from the given request and target route :param request: client request :param target_route: route to use :return: WWebPresenter """ presenter_name = target_route.presenter_name() if self.presenter_collection().has(presenter_name) is False: raise RuntimeError('No such presenter: %s' % presenter_name) presenter_class = self.presenter_collection().presenter(presenter_name) return self.presenter_factory().instantiate(presenter_class, request, target_route, self)
python
def create_presenter(self, request, target_route): """ Create a presenter from the given request and target route :param request: client request :param target_route: route to use :return: WWebPresenter """ presenter_name = target_route.presenter_name() if self.presenter_collection().has(presenter_name) is False: raise RuntimeError('No such presenter: %s' % presenter_name) presenter_class = self.presenter_collection().presenter(presenter_name) return self.presenter_factory().instantiate(presenter_class, request, target_route, self)
[ "def", "create_presenter", "(", "self", ",", "request", ",", "target_route", ")", ":", "presenter_name", "=", "target_route", ".", "presenter_name", "(", ")", "if", "self", ".", "presenter_collection", "(", ")", ".", "has", "(", "presenter_name", ")", "is", "False", ":", "raise", "RuntimeError", "(", "'No such presenter: %s'", "%", "presenter_name", ")", "presenter_class", "=", "self", ".", "presenter_collection", "(", ")", ".", "presenter", "(", "presenter_name", ")", "return", "self", ".", "presenter_factory", "(", ")", ".", "instantiate", "(", "presenter_class", ",", "request", ",", "target_route", ",", "self", ")" ]
Create a presenter from the given request and target route :param request: client request :param target_route: route to use :return: WWebPresenter
[ "Create", "presenter", "from", "the", "given", "requests", "and", "target", "routes" ]
train
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/network/web/service.py#L721-L732
0.023297
python-fedex-devs/python-fedex
fedex/tools/conversion.py
basic_sobject_to_dict
def basic_sobject_to_dict(obj): """Converts suds object to dict very quickly. Does not serialize date time or normalize key case. :param obj: suds object :return: dict object """ if not hasattr(obj, '__keylist__'): return obj data = {} fields = obj.__keylist__ for field in fields: val = getattr(obj, field) if isinstance(val, list): data[field] = [] for item in val: data[field].append(basic_sobject_to_dict(item)) else: data[field] = basic_sobject_to_dict(val) return data
python
def basic_sobject_to_dict(obj): """Converts suds object to dict very quickly. Does not serialize date time or normalize key case. :param obj: suds object :return: dict object """ if not hasattr(obj, '__keylist__'): return obj data = {} fields = obj.__keylist__ for field in fields: val = getattr(obj, field) if isinstance(val, list): data[field] = [] for item in val: data[field].append(basic_sobject_to_dict(item)) else: data[field] = basic_sobject_to_dict(val) return data
[ "def", "basic_sobject_to_dict", "(", "obj", ")", ":", "if", "not", "hasattr", "(", "obj", ",", "'__keylist__'", ")", ":", "return", "obj", "data", "=", "{", "}", "fields", "=", "obj", ".", "__keylist__", "for", "field", "in", "fields", ":", "val", "=", "getattr", "(", "obj", ",", "field", ")", "if", "isinstance", "(", "val", ",", "list", ")", ":", "data", "[", "field", "]", "=", "[", "]", "for", "item", "in", "val", ":", "data", "[", "field", "]", ".", "append", "(", "basic_sobject_to_dict", "(", "item", ")", ")", "else", ":", "data", "[", "field", "]", "=", "basic_sobject_to_dict", "(", "val", ")", "return", "data" ]
Converts suds object to dict very quickly. Does not serialize date time or normalize key case. :param obj: suds object :return: dict object
[ "Converts", "suds", "object", "to", "dict", "very", "quickly", ".", "Does", "not", "serialize", "date", "time", "or", "normalize", "key", "case", ".", ":", "param", "obj", ":", "suds", "object", ":", "return", ":", "dict", "object" ]
train
https://github.com/python-fedex-devs/python-fedex/blob/7ea2ca80c362f5dbbc8d959ab47648c7a4ab24eb/fedex/tools/conversion.py#L33-L51
0.001672
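Self-contained demo: a stand-in class that exposes its field names through __keylist__, which is exactly the shape the converter above expects from a suds object. The tracking data is made up.

class FakeSObject(object):
    def __init__(self, **fields):
        # suds objects list their field names in __keylist__
        self.__keylist__ = list(fields)
        for name, value in fields.items():
            setattr(self, name, value)

shipment = FakeSObject(TrackingNumber='794644790131',
                       Events=[FakeSObject(EventType='PU'),
                               FakeSObject(EventType='DL')])
print(basic_sobject_to_dict(shipment))
# {'TrackingNumber': '794644790131', 'Events': [{'EventType': 'PU'}, {'EventType': 'DL'}]}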
inveniosoftware-contrib/invenio-classifier
invenio_classifier/find.py
find_end_of_reference_section
def find_end_of_reference_section(docbody, ref_start_line, ref_line_marker, ref_line_marker_ptn): """Find end of reference section. Given that the start of a document's reference section has already been recognised, this function is tasked with finding the line-number in the document of the last line of the reference section. @param docbody: (list) of strings - the entire plain-text document body. @param ref_start_line: (integer) - the index in docbody of the first line of the reference section. @param ref_line_marker: (string) - the line marker of the first reference line. @param ref_line_marker_ptn: (string) - the pattern used to search for a reference line marker. @return: (integer) - index in docbody of the last reference line -- OR -- (None) - if ref_start_line was invalid. """ section_ended = False x = ref_start_line if type(x) is not int or x < 0 or x > len(docbody) or len(docbody) < 1: # The provided 'first line' of the reference section was invalid. # Either it was out of bounds in the document body, or it was not a # valid integer. # Can't safely find end of refs with this info - quit. return None # Get patterns for testing line: t_patterns = get_post_reference_section_title_patterns() kw_patterns = get_post_reference_section_keyword_patterns() if None not in (ref_line_marker, ref_line_marker_ptn): mk_patterns = [re.compile(ref_line_marker_ptn, re.I | re.UNICODE)] else: mk_patterns = get_reference_line_numeration_marker_patterns() current_reference_count = 0 while x < len(docbody) and not section_ended: # save the reference count num_match = regex_match_list(docbody[x].strip(), mk_patterns) if num_match: try: current_reference_count = int(num_match.group('marknum')) except (ValueError, IndexError): # non numerical references marking pass # look for a likely section title that would follow a reference section end_match = regex_match_list(docbody[x].strip(), t_patterns) if not end_match: # didn't match a section title - try looking for keywords that # suggest the end of a reference section: end_match = regex_match_list(docbody[x].strip(), kw_patterns) else: # Is it really the end of the reference section? Check within the # next 5 lines for other reference numeration markers: y = x + 1 line_found = False while y < x + 200 and y < len(docbody) and not line_found: num_match = regex_match_list(docbody[y].strip(), mk_patterns) if num_match and not num_match.group(0).isdigit(): try: num = int(num_match.group('marknum')) if current_reference_count + 1 == num: line_found = True except ValueError: # We have the marknum index so it is # numeric pattern for references like # [1], [2] but this match is not a number pass except IndexError: # We have a non numerical references marking # we don't check for a number continuity line_found = True y += 1 if not line_found: # No ref line found-end section section_ended = True if not section_ended: # Does this & the next 5 lines simply contain numbers? If yes, it's # probably the axis scale of a graph in a fig. End refs section digit_test_str = docbody[x].replace(" ", "").\ replace(".", "").\ replace("-", "").\ replace("+", "").\ replace(u"\u00D7", "").\ replace(u"\u2212", "").\ strip() if len(digit_test_str) > 10 and digit_test_str.isdigit(): # The line contains only digits and is longer than 10 chars: y = x + 1 digit_lines = 4 num_digit_lines = 1 while y < x + digit_lines and y < len(docbody): digit_test_str = docbody[y].replace(" ", "").\ replace(".", "").\ replace("-", "").\ replace("+", "").\ replace(u"\u00D7", "").\ replace(u"\u2212", "").\ strip() if len(digit_test_str) > 10 and digit_test_str.isdigit(): num_digit_lines += 1 elif len(digit_test_str) == 0: # This is a blank line. Don't count it, to accommodate # documents that are double-line spaced: digit_lines += 1 y = y + 1 if num_digit_lines == digit_lines: section_ended = True x += 1 return x - 1
python
def find_end_of_reference_section(docbody, ref_start_line, ref_line_marker, ref_line_marker_ptn): """Find end of reference section. Given that the start of a document's reference section has already been recognised, this function is tasked with finding the line-number in the document of the last line of the reference section. @param docbody: (list) of strings - the entire plain-text document body. @param ref_start_line: (integer) - the index in docbody of the first line of the reference section. @param ref_line_marker: (string) - the line marker of the first reference line. @param ref_line_marker_ptn: (string) - the pattern used to search for a reference line marker. @return: (integer) - index in docbody of the last reference line -- OR -- (None) - if ref_start_line was invalid. """ section_ended = False x = ref_start_line if type(x) is not int or x < 0 or x > len(docbody) or len(docbody) < 1: # The provided 'first line' of the reference section was invalid. # Either it was out of bounds in the document body, or it was not a # valid integer. # Can't safely find end of refs with this info - quit. return None # Get patterns for testing line: t_patterns = get_post_reference_section_title_patterns() kw_patterns = get_post_reference_section_keyword_patterns() if None not in (ref_line_marker, ref_line_marker_ptn): mk_patterns = [re.compile(ref_line_marker_ptn, re.I | re.UNICODE)] else: mk_patterns = get_reference_line_numeration_marker_patterns() current_reference_count = 0 while x < len(docbody) and not section_ended: # save the reference count num_match = regex_match_list(docbody[x].strip(), mk_patterns) if num_match: try: current_reference_count = int(num_match.group('marknum')) except (ValueError, IndexError): # non numerical references marking pass # look for a likely section title that would follow a reference section end_match = regex_match_list(docbody[x].strip(), t_patterns) if not end_match: # didn't match a section title - try looking for keywords that # suggest the end of a reference section: end_match = regex_match_list(docbody[x].strip(), kw_patterns) else: # Is it really the end of the reference section? Check within the # next 5 lines for other reference numeration markers: y = x + 1 line_found = False while y < x + 200 and y < len(docbody) and not line_found: num_match = regex_match_list(docbody[y].strip(), mk_patterns) if num_match and not num_match.group(0).isdigit(): try: num = int(num_match.group('marknum')) if current_reference_count + 1 == num: line_found = True except ValueError: # We have the marknum index so it is # numeric pattern for references like # [1], [2] but this match is not a number pass except IndexError: # We have a non numerical references marking # we don't check for a number continuity line_found = True y += 1 if not line_found: # No ref line found-end section section_ended = True if not section_ended: # Does this & the next 5 lines simply contain numbers? If yes, it's # probably the axis scale of a graph in a fig. End refs section digit_test_str = docbody[x].replace(" ", "").\ replace(".", "").\ replace("-", "").\ replace("+", "").\ replace(u"\u00D7", "").\ replace(u"\u2212", "").\ strip() if len(digit_test_str) > 10 and digit_test_str.isdigit(): # The line contains only digits and is longer than 10 chars: y = x + 1 digit_lines = 4 num_digit_lines = 1 while y < x + digit_lines and y < len(docbody): digit_test_str = docbody[y].replace(" ", "").\ replace(".", "").\ replace("-", "").\ replace("+", "").\ replace(u"\u00D7", "").\ replace(u"\u2212", "").\ strip() if len(digit_test_str) > 10 and digit_test_str.isdigit(): num_digit_lines += 1 elif len(digit_test_str) == 0: # This is a blank line. Don't count it, to accommodate # documents that are double-line spaced: digit_lines += 1 y = y + 1 if num_digit_lines == digit_lines: section_ended = True x += 1 return x - 1
[ "def", "find_end_of_reference_section", "(", "docbody", ",", "ref_start_line", ",", "ref_line_marker", ",", "ref_line_marker_ptn", ")", ":", "section_ended", "=", "False", "x", "=", "ref_start_line", "if", "type", "(", "x", ")", "is", "not", "int", "or", "x", "<", "0", "or", "x", ">", "len", "(", "docbody", ")", "or", "len", "(", "docbody", ")", "<", "1", ":", "# The provided 'first line' of the reference section was invalid.", "# Either it was out of bounds in the document body, or it was not a", "# valid integer.", "# Can't safely find end of refs with this info - quit.", "return", "None", "# Get patterns for testing line:", "t_patterns", "=", "get_post_reference_section_title_patterns", "(", ")", "kw_patterns", "=", "get_post_reference_section_keyword_patterns", "(", ")", "if", "None", "not", "in", "(", "ref_line_marker", ",", "ref_line_marker_ptn", ")", ":", "mk_patterns", "=", "[", "re", ".", "compile", "(", "ref_line_marker_ptn", ",", "re", ".", "I", "|", "re", ".", "UNICODE", ")", "]", "else", ":", "mk_patterns", "=", "get_reference_line_numeration_marker_patterns", "(", ")", "current_reference_count", "=", "0", "while", "x", "<", "len", "(", "docbody", ")", "and", "not", "section_ended", ":", "# save the reference count", "num_match", "=", "regex_match_list", "(", "docbody", "[", "x", "]", ".", "strip", "(", ")", ",", "mk_patterns", ")", "if", "num_match", ":", "try", ":", "current_reference_count", "=", "int", "(", "num_match", ".", "group", "(", "'marknum'", ")", ")", "except", "(", "ValueError", ",", "IndexError", ")", ":", "# non numerical references marking", "pass", "# look for a likely section title that would follow a reference section", "end_match", "=", "regex_match_list", "(", "docbody", "[", "x", "]", ".", "strip", "(", ")", ",", "t_patterns", ")", "if", "not", "end_match", ":", "# didn't match a section title - try looking for keywords that", "# suggest the end of a reference section:", "end_match", "=", "regex_match_list", "(", "docbody", "[", "x", "]", ".", "strip", "(", ")", ",", "kw_patterns", ")", "else", ":", "# Is it really the end of the reference section? Check within the", "# next 5 lines for other reference numeration markers:", "y", "=", "x", "+", "1", "line_found", "=", "False", "while", "y", "<", "x", "+", "200", "and", "y", "<", "len", "(", "docbody", ")", "and", "not", "line_found", ":", "num_match", "=", "regex_match_list", "(", "docbody", "[", "y", "]", ".", "strip", "(", ")", ",", "mk_patterns", ")", "if", "num_match", "and", "not", "num_match", ".", "group", "(", "0", ")", ".", "isdigit", "(", ")", ":", "try", ":", "num", "=", "int", "(", "num_match", ".", "group", "(", "'marknum'", ")", ")", "if", "current_reference_count", "+", "1", "==", "num", ":", "line_found", "=", "True", "except", "ValueError", ":", "# We have the marknum index so it is", "# numeric pattern for references like", "# [1], [2] but this match is not a number", "pass", "except", "IndexError", ":", "# We have a non numerical references marking", "# we don't check for a number continuity", "line_found", "=", "True", "y", "+=", "1", "if", "not", "line_found", ":", "# No ref line found-end section", "section_ended", "=", "True", "if", "not", "section_ended", ":", "# Does this & the next 5 lines simply contain numbers? If yes, it's", "# probably the axis scale of a graph in a fig. 
End refs section", "digit_test_str", "=", "docbody", "[", "x", "]", ".", "replace", "(", "\" \"", ",", "\"\"", ")", ".", "replace", "(", "\".\"", ",", "\"\"", ")", ".", "replace", "(", "\"-\"", ",", "\"\"", ")", ".", "replace", "(", "\"+\"", ",", "\"\"", ")", ".", "replace", "(", "u\"\\u00D7\"", ",", "\"\"", ")", ".", "replace", "(", "u\"\\u2212\"", ",", "\"\"", ")", ".", "strip", "(", ")", "if", "len", "(", "digit_test_str", ")", ">", "10", "and", "digit_test_str", ".", "isdigit", "(", ")", ":", "# The line contains only digits and is longer than 10 chars:", "y", "=", "x", "+", "1", "digit_lines", "=", "4", "num_digit_lines", "=", "1", "while", "y", "<", "x", "+", "digit_lines", "and", "y", "<", "len", "(", "docbody", ")", ":", "digit_test_str", "=", "docbody", "[", "y", "]", ".", "replace", "(", "\" \"", ",", "\"\"", ")", ".", "replace", "(", "\".\"", ",", "\"\"", ")", ".", "replace", "(", "\"-\"", ",", "\"\"", ")", ".", "replace", "(", "\"+\"", ",", "\"\"", ")", ".", "replace", "(", "u\"\\u00D7\"", ",", "\"\"", ")", ".", "replace", "(", "u\"\\u2212\"", ",", "\"\"", ")", ".", "strip", "(", ")", "if", "len", "(", "digit_test_str", ")", ">", "10", "and", "digit_test_str", ".", "isdigit", "(", ")", ":", "num_digit_lines", "+=", "1", "elif", "len", "(", "digit_test_str", ")", "==", "0", ":", "# This is a blank line. Don't count it, to accommodate", "# documents that are double-line spaced:", "digit_lines", "+=", "1", "y", "=", "y", "+", "1", "if", "num_digit_lines", "==", "digit_lines", ":", "section_ended", "=", "True", "x", "+=", "1", "return", "x", "-", "1" ]
Find end of reference section. Given that the start of a document's reference section has already been recognised, this function is tasked with finding the line-number in the document of the last line of the reference section. @param docbody: (list) of strings - the entire plain-text document body. @param ref_start_line: (integer) - the index in docbody of the first line of the reference section. @param ref_line_marker: (string) - the line marker of the first reference line. @param ref_line_marker_ptn: (string) - the pattern used to search for a reference line marker. @return: (integer) - index in docbody of the last reference line -- OR -- (None) - if ref_start_line was invalid.
[ "Find", "end", "of", "reference", "section", "." ]
train
https://github.com/inveniosoftware-contrib/invenio-classifier/blob/3c758cf34dca6bf0548e7da5de34e5f72e3b255e/invenio_classifier/find.py#L361-L473
0.000187
tuxpiper/cloudcast
cloudcast/iscm/phased.py
PhasedISCM.get_possible_builds
def get_possible_builds(self, purpose=PURPOSE_RUN): """ Returns a list of possible status ids that are valid entry points into this ISCM. For each status, there is a list of phases that need to be executed, i.e.: [ { status_id: "ykyk...", run_phases: [ ] }, { status_id: "znzn...", run_phases: [ <phase3> ] }, { status_id: "xyxy...", run_phases: [ <phase2>, <phase3> ] }, { status_id: "", run_phases: [ <phase1>, <phase2>, <phase3> ] }, ] the array is sorted in such a way that entries with the least pending phases are found first. When purpose = PURPOSE_BUILD, each runnable path contains a list of the buildable targets and the phases that need to be run to achieve them: [ { status_id: "ykyk...", targets = [] }, { status_id: "znzn...", targets=[ { target_id: "ykyk...", run_phases: [ <phase3> ] }, ]}, { status_id: "xyxy...", targets=[ { target_id: "ykyk...", run_phases: [ <phase2>, <phase3> ] }, { target_id: "znzn...", run_phases: [ <phase2> ] }, ]}, { status_id: "", targets=[ { target_id: "ykyk...", run_phases: [ <phase1>, <phase2>, <phase3> ] }, { target_id: "znzn...", run_phases: [ <phase1>, <phase2> ] }, { target_id: "xyxy...", run_phases: [ <phase1> ] }, ]} ] """ import hashlib from copy import copy phases = self.phases pending_list = copy(phases) must_run_list = [] stages = [ dict(status_id="", must_run=copy(must_run_list), pending=copy(pending_list)) ] hashsum = hashlib.sha256() status_id_after = {} # after phase_name # for p in copy(pending_list): hashsum.update(_dict_to_stable_str(p.get_dict_repr())) status_id_after[p.phase_name] = hashsum.hexdigest() pending_list = pending_list[1:] if p.phase_type == RUN_EVERY_TIME: must_run_list.append(p) continue elif p.phase_type == RUN_ON_UPDATE and purpose == PURPOSE_BUILD: must_run_list.append(p) continue elif p.phase_type == RUN_ONCE: # possible point of entry for AMIs stages.insert(0, dict(status_id=hashsum.hexdigest(), must_run=copy(must_run_list), pending=copy(pending_list))) elif p.phase_type == RUN_ON_DEPLOY: # no more saved entry points possible break # # If we are building images, add possible builds from each entry point if purpose == PURPOSE_BUILD: for rp in stages: targets = [] must_run = rp["must_run"] pending = rp["pending"] del rp["must_run"] del rp["pending"] iterated = [] for p in pending: iterated.append(p) if p.phase_type == RUN_ONCE: # this makes a new target target = dict(target_id=status_id_after[p.phase_name], run_phases=must_run + iterated) targets.insert(0, target) rp["targets"] = targets else: for rp in stages: rp["run_phases"] = rp["must_run"] + rp["pending"] del rp["must_run"] del rp["pending"] # return stages
python
def get_possible_builds(self, purpose=PURPOSE_RUN): """ Returns a list of possible status ids that are valid entry points into this ISCM. For each status, there is a list of phases that need to be executed, i.e.: [ { status_id: "ykyk...", run_phases: [ ] }, { status_id: "znzn...", run_phases: [ <phase3> ] }, { status_id: "xyxy...", run_phases: [ <phase2>, <phase3> ] }, { status_id: "", run_phases: [ <phase1>, <phase2>, <phase3> ] }, ] the array is sorted in such a way that entries with the least pending phases are found first. When purpose = PURPOSE_BUILD, each runnable path contains a list of the buildable targets and the phases that need to be run to achieve them: [ { status_id: "ykyk...", targets = [] }, { status_id: "znzn...", targets=[ { target_id: "ykyk...", run_phases: [ <phase3> ] }, ]}, { status_id: "xyxy...", targets=[ { target_id: "ykyk...", run_phases: [ <phase2>, <phase3> ] }, { target_id: "znzn...", run_phases: [ <phase2> ] }, ]}, { status_id: "", targets=[ { target_id: "ykyk...", run_phases: [ <phase1>, <phase2>, <phase3> ] }, { target_id: "znzn...", run_phases: [ <phase1>, <phase2> ] }, { target_id: "xyxy...", run_phases: [ <phase1> ] }, ]} ] """ import hashlib from copy import copy phases = self.phases pending_list = copy(phases) must_run_list = [] stages = [ dict(status_id="", must_run=copy(must_run_list), pending=copy(pending_list)) ] hashsum = hashlib.sha256() status_id_after = {} # after phase_name # for p in copy(pending_list): hashsum.update(_dict_to_stable_str(p.get_dict_repr())) status_id_after[p.phase_name] = hashsum.hexdigest() pending_list = pending_list[1:] if p.phase_type == RUN_EVERY_TIME: must_run_list.append(p) continue elif p.phase_type == RUN_ON_UPDATE and purpose == PURPOSE_BUILD: must_run_list.append(p) continue elif p.phase_type == RUN_ONCE: # possible point of entry for AMIs stages.insert(0, dict(status_id=hashsum.hexdigest(), must_run=copy(must_run_list), pending=copy(pending_list))) elif p.phase_type == RUN_ON_DEPLOY: # no more saved entry points possible break # # If we are building images, add possible builds from each entry point if purpose == PURPOSE_BUILD: for rp in stages: targets = [] must_run = rp["must_run"] pending = rp["pending"] del rp["must_run"] del rp["pending"] iterated = [] for p in pending: iterated.append(p) if p.phase_type == RUN_ONCE: # this makes a new target target = dict(target_id=status_id_after[p.phase_name], run_phases=must_run + iterated) targets.insert(0, target) rp["targets"] = targets else: for rp in stages: rp["run_phases"] = rp["must_run"] + rp["pending"] del rp["must_run"] del rp["pending"] # return stages
[ "def", "get_possible_builds", "(", "self", ",", "purpose", "=", "PURPOSE_RUN", ")", ":", "import", "hashlib", "from", "copy", "import", "copy", "phases", "=", "self", ".", "phases", "pending_list", "=", "copy", "(", "phases", ")", "must_run_list", "=", "[", "]", "stages", "=", "[", "dict", "(", "status_id", "=", "\"\"", ",", "must_run", "=", "copy", "(", "must_run_list", ")", ",", "pending", "=", "copy", "(", "pending_list", ")", ")", "]", "hashsum", "=", "hashlib", ".", "sha256", "(", ")", "status_id_after", "=", "{", "}", "# after phase_name", "#", "for", "p", "in", "copy", "(", "pending_list", ")", ":", "hashsum", ".", "update", "(", "_dict_to_stable_str", "(", "p", ".", "get_dict_repr", "(", ")", ")", ")", "status_id_after", "[", "p", ".", "phase_name", "]", "=", "hashsum", ".", "hexdigest", "(", ")", "pending_list", "=", "pending_list", "[", "1", ":", "]", "if", "p", ".", "phase_type", "==", "RUN_EVERY_TIME", ":", "must_run_list", ".", "append", "(", "p", ")", "continue", "elif", "p", ".", "phase_type", "==", "RUN_ON_UPDATE", "and", "purpose", "==", "PURPOSE_BUILD", ":", "must_run_list", ".", "append", "(", "p", ")", "continue", "elif", "p", ".", "phase_type", "==", "RUN_ONCE", ":", "# possible point of entry for AMIs", "stages", ".", "insert", "(", "0", ",", "dict", "(", "status_id", "=", "hashsum", ".", "hexdigest", "(", ")", ",", "must_run", "=", "copy", "(", "must_run_list", ")", ",", "pending", "=", "copy", "(", "pending_list", ")", ")", ")", "elif", "p", ".", "phase_type", "==", "RUN_ON_DEPLOY", ":", "# no more saved entry points possible", "break", "#", "# If we are building images, add possible builds from each entry point", "if", "purpose", "==", "PURPOSE_BUILD", ":", "for", "rp", "in", "stages", ":", "targets", "=", "[", "]", "must_run", "=", "rp", "[", "\"must_run\"", "]", "pending", "=", "rp", "[", "\"pending\"", "]", "del", "rp", "[", "\"must_run\"", "]", "del", "rp", "[", "\"pending\"", "]", "iterated", "=", "[", "]", "for", "p", "in", "pending", ":", "iterated", ".", "append", "(", "p", ")", "if", "p", ".", "phase_type", "==", "RUN_ONCE", ":", "# this makes a new target", "target", "=", "dict", "(", "target_id", "=", "status_id_after", "[", "p", ".", "phase_name", "]", ",", "run_phases", "=", "must_run", "+", "iterated", ")", "targets", ".", "insert", "(", "0", ",", "target", ")", "rp", "[", "\"targets\"", "]", "=", "targets", "else", ":", "for", "rp", "in", "stages", ":", "rp", "[", "\"run_phases\"", "]", "=", "rp", "[", "\"must_run\"", "]", "+", "rp", "[", "\"pending\"", "]", "del", "rp", "[", "\"must_run\"", "]", "del", "rp", "[", "\"pending\"", "]", "#", "return", "stages" ]
Returns a list of possible status ids that are valid entry points into this ISCM. For each status, there is a list of phases that need to be executed, i.e.: [ { status_id: "ykyk...", run_phases: [ ] }, { status_id: "znzn...", run_phases: [ <phase3> ] }, { status_id: "xyxy...", run_phases: [ <phase2>, <phase3> ] }, { status_id: "", run_phases: [ <phase1>, <phase2>, <phase3> ] }, ] the array is sorted in such a way that entries with the least pending phases are found first. When purpose = PURPOSE_BUILD, each runnable path contains a list of the buildable targets and the phases that need to be run to achieve them: [ { status_id: "ykyk...", targets = [] }, { status_id: "znzn...", targets=[ { target_id: "ykyk...", run_phases: [ <phase3> ] }, ]}, { status_id: "xyxy...", targets=[ { target_id: "ykyk...", run_phases: [ <phase2>, <phase3> ] }, { target_id: "znzn...", run_phases: [ <phase2> ] }, ]}, { status_id: "", targets=[ { target_id: "ykyk...", run_phases: [ <phase1>, <phase2>, <phase3> ] }, { target_id: "znzn...", run_phases: [ <phase1>, <phase2> ] }, { target_id: "xyxy...", run_phases: [ <phase1> ] }, ]} ]
[ "Returns", "a", "list", "of", "possible", "status", "ids", "that", "are", "valid", "entry", "points", "into", "this", "ISCM", ".", "For", "each", "status", "there", "is", "a", "list", "of", "phases", "that", "need", "to", "be", "executed", "i", ".", "e", ".", ":" ]
train
https://github.com/tuxpiper/cloudcast/blob/06ca62045c483e9c3e7ee960ba70d90ea6a13776/cloudcast/iscm/phased.py#L31-L115
0.002806
sosy-lab/benchexec
benchexec/check_cgroups.py
main
def main(argv=None): """ A simple command-line interface for the cgroups check of BenchExec. """ if argv is None: argv = sys.argv parser = argparse.ArgumentParser( fromfile_prefix_chars='@', description= """Check whether cgroups are available and can be used for BenchExec. Part of BenchExec: https://github.com/sosy-lab/benchexec/""") parser.add_argument("--wait", type=int, default=1, metavar="SECONDS", help='wait some time to ensure no process interferes with cgroups in the meantime (default: 1s)') parser.add_argument("--no-thread", action="store_true", help='run check on the main thread instead of a separate thread ' + '(behavior of cgrulesengd differs depending on this)') options = parser.parse_args(argv[1:]) if options.no_thread: check_cgroup_availability(options.wait) else: check_cgroup_availability_in_thread(options)
python
def main(argv=None): """ A simple command-line interface for the cgroups check of BenchExec. """ if argv is None: argv = sys.argv parser = argparse.ArgumentParser( fromfile_prefix_chars='@', description= """Check whether cgroups are available and can be used for BenchExec. Part of BenchExec: https://github.com/sosy-lab/benchexec/""") parser.add_argument("--wait", type=int, default=1, metavar="SECONDS", help='wait some time to ensure no process interferes with cgroups in the meantime (default: 1s)') parser.add_argument("--no-thread", action="store_true", help='run check on the main thread instead of a separate thread ' + '(behavior of cgrulesengd differs depending on this)') options = parser.parse_args(argv[1:]) if options.no_thread: check_cgroup_availability(options.wait) else: check_cgroup_availability_in_thread(options)
[ "def", "main", "(", "argv", "=", "None", ")", ":", "if", "argv", "is", "None", ":", "argv", "=", "sys", ".", "argv", "parser", "=", "argparse", ".", "ArgumentParser", "(", "fromfile_prefix_chars", "=", "'@'", ",", "description", "=", "\"\"\"Check whether cgroups are available and can be used for BenchExec.\n Part of BenchExec: https://github.com/sosy-lab/benchexec/\"\"\"", ")", "parser", ".", "add_argument", "(", "\"--wait\"", ",", "type", "=", "int", ",", "default", "=", "1", ",", "metavar", "=", "\"SECONDS\"", ",", "help", "=", "'wait some time to ensure no process interferes with cgroups in the meantime (default: 1s)'", ")", "parser", ".", "add_argument", "(", "\"--no-thread\"", ",", "action", "=", "\"store_true\"", ",", "help", "=", "'run check on the main thread instead of a separate thread'", "+", "'(behavior of cgrulesengd differs depending on this)'", ")", "options", "=", "parser", ".", "parse_args", "(", "argv", "[", "1", ":", "]", ")", "if", "options", ".", "no_thread", ":", "check_cgroup_availability", "(", "options", ".", "wait", ")", "else", ":", "check_cgroup_availability_in_thread", "(", "options", ")" ]
A simple command-line interface for the cgroups check of BenchExec.
[ "A", "simple", "command", "-", "line", "interface", "for", "the", "cgroups", "check", "of", "BenchExec", "." ]
train
https://github.com/sosy-lab/benchexec/blob/44428f67f41384c03aea13e7e25f884764653617/benchexec/check_cgroups.py#L112-L135
0.005941
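The module is normally run as python3 -m benchexec.check_cgroups; main() also accepts arguments programmatically. Since main() slices off argv[0], a dummy program name is passed first.

# Programmatic invocation sketch (the first element stands in for the program name):
main(['check_cgroups', '--wait', '2', '--no-thread'])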
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py
NeuralNetworkBuilder.set_output
def set_output(self, output_names, output_dims): """ Set the outputs of the network spec. Parameters ---------- output_names: [str] List of output names of the network. output_dims: [tuple] List of output dimensions of the network. The ordering of output_dims is the same as output_names. Examples -------- .. sourcecode:: python # Set the neural network spec outputs to be 3 dimensional vector feature1 and # 4 dimensional vector feature2. >>> builder.set_output(output_names=['feature1', 'feature2'], output_dims=[(3,), (4,)]) See Also -------- set_input, set_class_labels """ spec = self.spec nn_spec = self.nn_spec for idx, dim in enumerate(output_dims): spec.description.output[idx].type.multiArrayType.ClearField("shape") spec.description.output[idx].type.multiArrayType.shape.extend(dim) spec.description.output[idx].type.multiArrayType.dataType = \ _Model_pb2.ArrayFeatureType.DOUBLE
python
def set_output(self, output_names, output_dims): """ Set the outputs of the network spec. Parameters ---------- output_names: [str] List of output names of the network. output_dims: [tuple] List of output dimensions of the network. The ordering of output_dims is the same as output_names. Examples -------- .. sourcecode:: python # Set the neural network spec outputs to be 3 dimensional vector feature1 and # 4 dimensional vector feature2. >>> builder.set_output(output_names=['feature1', 'feature2'], output_dims=[(3,), (4,)]) See Also -------- set_input, set_class_labels """ spec = self.spec nn_spec = self.nn_spec for idx, dim in enumerate(output_dims): spec.description.output[idx].type.multiArrayType.ClearField("shape") spec.description.output[idx].type.multiArrayType.shape.extend(dim) spec.description.output[idx].type.multiArrayType.dataType = \ _Model_pb2.ArrayFeatureType.DOUBLE
[ "def", "set_output", "(", "self", ",", "output_names", ",", "output_dims", ")", ":", "spec", "=", "self", ".", "spec", "nn_spec", "=", "self", ".", "nn_spec", "for", "idx", ",", "dim", "in", "enumerate", "(", "output_dims", ")", ":", "spec", ".", "description", ".", "output", "[", "idx", "]", ".", "type", ".", "multiArrayType", ".", "ClearField", "(", "\"shape\"", ")", "spec", ".", "description", ".", "output", "[", "idx", "]", ".", "type", ".", "multiArrayType", ".", "shape", ".", "extend", "(", "dim", ")", "spec", ".", "description", ".", "output", "[", "idx", "]", ".", "type", ".", "multiArrayType", ".", "dataType", "=", "_Model_pb2", ".", "ArrayFeatureType", ".", "DOUBLE" ]
Set the outputs of the network spec. Parameters ---------- output_names: [str] List of output names of the network. output_dims: [tuple] List of output dimensions of the network. The ordering of output_dims is the same as output_names. Examples -------- .. sourcecode:: python # Set the neural network spec outputs to be 3 dimensional vector feature1 and # 4 dimensional vector feature2. >>> builder.set_output(output_names=['feature1', 'feature2'], output_dims=[(3,), (4,)]) See Also -------- set_input, set_class_labels
[ "Set", "the", "outputs", "of", "the", "network", "spec", "." ]
train
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L204-L235
0.00613
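A hedged sketch mirroring the docstring's own example; the builder construction (input/output feature declarations) follows the standard coremltools pattern and is otherwise an assumption.

from coremltools.models import datatypes
from coremltools.models.neural_network import NeuralNetworkBuilder

input_features = [('data', datatypes.Array(3))]
output_features = [('feature1', datatypes.Array(3)), ('feature2', datatypes.Array(4))]
builder = NeuralNetworkBuilder(input_features, output_features)
# Re-declare the output shapes, as documented above.
builder.set_output(output_names=['feature1', 'feature2'], output_dims=[(3,), (4,)])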
bram85/topydo
topydo/lib/Filter.py
HiddenTagFilter.match
def match(self, p_todo): """ Returns True when p_todo doesn't have a tag to mark it as hidden. """ for my_tag in config().hidden_item_tags(): my_values = p_todo.tag_values(my_tag) for my_value in my_values: if not my_value in (0, '0', False, 'False'): return False return True
python
def match(self, p_todo): """ Returns True when p_todo doesn't have a tag to mark it as hidden. """ for my_tag in config().hidden_item_tags(): my_values = p_todo.tag_values(my_tag) for my_value in my_values: if not my_value in (0, '0', False, 'False'): return False return True
[ "def", "match", "(", "self", ",", "p_todo", ")", ":", "for", "my_tag", "in", "config", "(", ")", ".", "hidden_item_tags", "(", ")", ":", "my_values", "=", "p_todo", ".", "tag_values", "(", "my_tag", ")", "for", "my_value", "in", "my_values", ":", "if", "not", "my_value", "in", "(", "0", ",", "'0'", ",", "False", ",", "'False'", ")", ":", "return", "False", "return", "True" ]
Returns True when p_todo doesn't have a tag to mark it as hidden.
[ "Returns", "True", "when", "p_todo", "doesn", "t", "have", "a", "tag", "to", "mark", "it", "as", "hidden", "." ]
train
https://github.com/bram85/topydo/blob/b59fcfca5361869a6b78d4c9808c7c6cd0a18b58/topydo/lib/Filter.py#L186-L196
0.007958
chrisjsewell/jsonextended
jsonextended/edict.py
to_json
def to_json(dct, jfile, overwrite=False, dirlevel=0, sort_keys=True, indent=2, default_name='root.json', **kwargs): """ output dict to json Parameters ---------- dct : dict jfile : str or file_like if file_like, must have write method overwrite : bool whether to overwrite existing files dirlevel : int if jfile is path to folder, defines how many key levels to set as sub-folders sort_keys : bool if true then the output of dictionaries will be sorted by key indent : int if non-negative integer, then JSON array elements and object members will be pretty-printed on new lines with that indent level spacing. kwargs : dict keywords for json.dump Examples -------- >>> from jsonextended.utils import MockPath >>> file_obj = MockPath('test.json',is_file=True,exists=False) >>> dct = {'a':{'b':1}} >>> to_json(dct, file_obj) >>> print(file_obj.to_string()) File("test.json") Contents: { "a": { "b": 1 } } >>> from jsonextended.utils import MockPath >>> folder_obj = MockPath() >>> dct = {'x':{'a':{'b':1},'c':{'d':3}}} >>> to_json(dct, folder_obj, dirlevel=0,indent=None) >>> print(folder_obj.to_string(file_content=True)) Folder("root") File("x.json") Contents: {"a": {"b": 1}, "c": {"d": 3}} >>> folder_obj = MockPath() >>> to_json(dct, folder_obj, dirlevel=1,indent=None) >>> print(folder_obj.to_string(file_content=True)) Folder("root") Folder("x") File("a.json") Contents: {"b": 1} File("c.json") Contents: {"d": 3} """ if hasattr(jfile, 'write'): json.dump(dct, jfile, sort_keys=sort_keys, indent=indent, default=encode) return if isinstance(jfile, basestring): path = pathlib.Path(jfile) else: path = jfile file_attrs = ['exists', 'is_dir', 'is_file', 'touch', 'open'] if not all([hasattr(path, attr) for attr in file_attrs]): raise ValueError( 'jfile should be a str or file_like object: {}'.format(jfile)) if path.is_file() and path.exists() and not overwrite: raise IOError('jfile already exists and ' 'overwrite is set to false: {}'.format(jfile)) if not path.is_dir() and dirlevel <= 0: path.touch() # try to create file if doesn't already exist with path.open('w') as outfile: outfile.write(unicode(json.dumps( dct, sort_keys=sort_keys, indent=indent, default=encode, **kwargs))) return if not path.is_dir(): path.mkdir() dirlevel -= 1 # if one or more values is not a nested dict if not all([hasattr(v, 'items') for v in dct.values()]): newpath = path.joinpath(default_name) newpath.touch() with newpath.open('w') as outfile: outfile.write(unicode(json.dumps( dct, sort_keys=sort_keys, indent=indent, default=encode, **kwargs))) return for key, val in dct.items(): if dirlevel <= 0: newpath = path.joinpath('{}.json'.format(key)) newpath.touch() with newpath.open('w') as outfile: outfile.write(unicode(json.dumps( val, ensure_ascii=False, sort_keys=sort_keys, indent=indent, default=encode, **kwargs))) else: newpath = path.joinpath('{}'.format(key)) if not newpath.exists(): newpath.mkdir() to_json(val, newpath, overwrite=overwrite, dirlevel=dirlevel - 1, sort_keys=sort_keys, indent=indent, default_name='{}.json'.format(key), **kwargs)
python
def to_json(dct, jfile, overwrite=False, dirlevel=0, sort_keys=True, indent=2, default_name='root.json', **kwargs): """ output dict to json Parameters ---------- dct : dict jfile : str or file_like if file_like, must have write method overwrite : bool whether to overwrite existing files dirlevel : int if jfile is path to folder, defines how many key levels to set as sub-folders sort_keys : bool if true then the output of dictionaries will be sorted by key indent : int if non-negative integer, then JSON array elements and object members will be pretty-printed on new lines with that indent level spacing. kwargs : dict keywords for json.dump Examples -------- >>> from jsonextended.utils import MockPath >>> file_obj = MockPath('test.json',is_file=True,exists=False) >>> dct = {'a':{'b':1}} >>> to_json(dct, file_obj) >>> print(file_obj.to_string()) File("test.json") Contents: { "a": { "b": 1 } } >>> from jsonextended.utils import MockPath >>> folder_obj = MockPath() >>> dct = {'x':{'a':{'b':1},'c':{'d':3}}} >>> to_json(dct, folder_obj, dirlevel=0,indent=None) >>> print(folder_obj.to_string(file_content=True)) Folder("root") File("x.json") Contents: {"a": {"b": 1}, "c": {"d": 3}} >>> folder_obj = MockPath() >>> to_json(dct, folder_obj, dirlevel=1,indent=None) >>> print(folder_obj.to_string(file_content=True)) Folder("root") Folder("x") File("a.json") Contents: {"b": 1} File("c.json") Contents: {"d": 3} """ if hasattr(jfile, 'write'): json.dump(dct, jfile, sort_keys=sort_keys, indent=indent, default=encode) return if isinstance(jfile, basestring): path = pathlib.Path(jfile) else: path = jfile file_attrs = ['exists', 'is_dir', 'is_file', 'touch', 'open'] if not all([hasattr(path, attr) for attr in file_attrs]): raise ValueError( 'jfile should be a str or file_like object: {}'.format(jfile)) if path.is_file() and path.exists() and not overwrite: raise IOError('jfile already exists and ' 'overwrite is set to false: {}'.format(jfile)) if not path.is_dir() and dirlevel <= 0: path.touch() # try to create file if doesn't already exist with path.open('w') as outfile: outfile.write(unicode(json.dumps( dct, sort_keys=sort_keys, indent=indent, default=encode, **kwargs))) return if not path.is_dir(): path.mkdir() dirlevel -= 1 # if one or more values is not a nested dict if not all([hasattr(v, 'items') for v in dct.values()]): newpath = path.joinpath(default_name) newpath.touch() with newpath.open('w') as outfile: outfile.write(unicode(json.dumps( dct, sort_keys=sort_keys, indent=indent, default=encode, **kwargs))) return for key, val in dct.items(): if dirlevel <= 0: newpath = path.joinpath('{}.json'.format(key)) newpath.touch() with newpath.open('w') as outfile: outfile.write(unicode(json.dumps( val, ensure_ascii=False, sort_keys=sort_keys, indent=indent, default=encode, **kwargs))) else: newpath = path.joinpath('{}'.format(key)) if not newpath.exists(): newpath.mkdir() to_json(val, newpath, overwrite=overwrite, dirlevel=dirlevel - 1, sort_keys=sort_keys, indent=indent, default_name='{}.json'.format(key), **kwargs)
[ "def", "to_json", "(", "dct", ",", "jfile", ",", "overwrite", "=", "False", ",", "dirlevel", "=", "0", ",", "sort_keys", "=", "True", ",", "indent", "=", "2", ",", "default_name", "=", "'root.json'", ",", "*", "*", "kwargs", ")", ":", "if", "hasattr", "(", "jfile", ",", "'write'", ")", ":", "json", ".", "dump", "(", "dct", ",", "jfile", ",", "sort_keys", "=", "sort_keys", ",", "indent", "=", "indent", ",", "default", "=", "encode", ")", "return", "if", "isinstance", "(", "jfile", ",", "basestring", ")", ":", "path", "=", "pathlib", ".", "Path", "(", "jfile", ")", "else", ":", "path", "=", "jfile", "file_attrs", "=", "[", "'exists'", ",", "'is_dir'", ",", "'is_file'", ",", "'touch'", ",", "'open'", "]", "if", "not", "all", "(", "[", "hasattr", "(", "path", ",", "attr", ")", "for", "attr", "in", "file_attrs", "]", ")", ":", "raise", "ValueError", "(", "'jfile should be a str or file_like object: {}'", ".", "format", "(", "jfile", ")", ")", "if", "path", ".", "is_file", "(", ")", "and", "path", ".", "exists", "(", ")", "and", "not", "overwrite", ":", "raise", "IOError", "(", "'jfile already exists and '", "'overwrite is set to false: {}'", ".", "format", "(", "jfile", ")", ")", "if", "not", "path", ".", "is_dir", "(", ")", "and", "dirlevel", "<=", "0", ":", "path", ".", "touch", "(", ")", "# try to create file if doesn't already exist", "with", "path", ".", "open", "(", "'w'", ")", "as", "outfile", ":", "outfile", ".", "write", "(", "unicode", "(", "json", ".", "dumps", "(", "dct", ",", "sort_keys", "=", "sort_keys", ",", "indent", "=", "indent", ",", "default", "=", "encode", ",", "*", "*", "kwargs", ")", ")", ")", "return", "if", "not", "path", ".", "is_dir", "(", ")", ":", "path", ".", "mkdir", "(", ")", "dirlevel", "-=", "1", "# if one or more values if not a nested dict", "if", "not", "all", "(", "[", "hasattr", "(", "v", ",", "'items'", ")", "for", "v", "in", "dct", ".", "values", "(", ")", "]", ")", ":", "newpath", "=", "path", ".", "joinpath", "(", "default_name", ")", "newpath", ".", "touch", "(", ")", "with", "newpath", ".", "open", "(", "'w'", ")", "as", "outfile", ":", "outfile", ".", "write", "(", "unicode", "(", "json", ".", "dumps", "(", "dct", ",", "sort_keys", "=", "sort_keys", ",", "indent", "=", "indent", ",", "default", "=", "encode", ",", "*", "*", "kwargs", ")", ")", ")", "return", "for", "key", ",", "val", "in", "dct", ".", "items", "(", ")", ":", "if", "dirlevel", "<=", "0", ":", "newpath", "=", "path", ".", "joinpath", "(", "'{}.json'", ".", "format", "(", "key", ")", ")", "newpath", ".", "touch", "(", ")", "with", "newpath", ".", "open", "(", "'w'", ")", "as", "outfile", ":", "outfile", ".", "write", "(", "unicode", "(", "json", ".", "dumps", "(", "val", ",", "ensure_ascii", "=", "False", ",", "sort_keys", "=", "sort_keys", ",", "indent", "=", "indent", ",", "default", "=", "encode", ",", "*", "*", "kwargs", ")", ")", ")", "else", ":", "newpath", "=", "path", ".", "joinpath", "(", "'{}'", ".", "format", "(", "key", ")", ")", "if", "not", "newpath", ".", "exists", "(", ")", ":", "newpath", ".", "mkdir", "(", ")", "to_json", "(", "val", ",", "newpath", ",", "overwrite", "=", "overwrite", ",", "dirlevel", "=", "dirlevel", "-", "1", ",", "sort_keys", "=", "sort_keys", ",", "indent", "=", "indent", ",", "default_name", "=", "'{}.json'", ".", "format", "(", "key", ")", ",", "*", "*", "kwargs", ")" ]
output dict to json Parameters ---------- dct : dict jfile : str or file_like if file_like, must have write method overwrite : bool whether to overwrite existing files dirlevel : int if jfile is path to folder, defines how many key levels to set as sub-folders sort_keys : bool if true then the output of dictionaries will be sorted by key indent : int if non-negative integer, then JSON array elements and object members will be pretty-printed on new lines with that indent level spacing. kwargs : dict keywords for json.dump Examples -------- >>> from jsonextended.utils import MockPath >>> file_obj = MockPath('test.json',is_file=True,exists=False) >>> dct = {'a':{'b':1}} >>> to_json(dct, file_obj) >>> print(file_obj.to_string()) File("test.json") Contents: { "a": { "b": 1 } } >>> from jsonextended.utils import MockPath >>> folder_obj = MockPath() >>> dct = {'x':{'a':{'b':1},'c':{'d':3}}} >>> to_json(dct, folder_obj, dirlevel=0,indent=None) >>> print(folder_obj.to_string(file_content=True)) Folder("root") File("x.json") Contents: {"a": {"b": 1}, "c": {"d": 3}} >>> folder_obj = MockPath() >>> to_json(dct, folder_obj, dirlevel=1,indent=None) >>> print(folder_obj.to_string(file_content=True)) Folder("root") Folder("x") File("a.json") Contents: {"b": 1} File("c.json") Contents: {"d": 3}
[ "output", "dict", "to", "json" ]
train
https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L1865-L1978
0.00026
UCL-INGI/INGInious
inginious/frontend/pages/api/_api_page.py
APIPage.GET
def GET(self, *args, **kwargs): """ GET request """ return self._handle_api(self.API_GET, args, kwargs)
python
def GET(self, *args, **kwargs): """ GET request """ return self._handle_api(self.API_GET, args, kwargs)
[ "def", "GET", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_handle_api", "(", "self", ".", "API_GET", ",", "args", ",", "kwargs", ")" ]
GET request
[ "GET", "request" ]
train
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/pages/api/_api_page.py#L19-L21
0.016807
stan-dev/pystan
pystan/misc.py
_pars_total_indexes
def _pars_total_indexes(names, dims, fnames, pars): """Obtain all the indexes for parameters `pars` in the sequence of names. `names` references variables that are in column-major order Parameters ---------- names : sequence of str All the parameter names. dim : sequence of list of int Dimensions, in same order as `names`. fnames : sequence of str All the scalar parameter names pars : sequence of str The parameters of interest. It is assumed all elements in `pars` are in `names`. Returns ------- indexes : OrderedDict of list of int Dictionary uses parameter names as keys. Indexes are column-major order. For each parameter there is also a key `par`+'_rowmajor' that stores the row-major indexing. Note ---- Inside each parameter (vector or array), the sequence uses column-major ordering. For example, if we have parameters alpha and beta, having dimensions [2, 2] and [2, 3] respectively, the whole parameter sequence is alpha[0,0], alpha[1,0], alpha[0, 1], alpha[1, 1], beta[0, 0], beta[1, 0], beta[0, 1], beta[1, 1], beta[0, 2], beta[1, 2]. In short, like R matrix(..., bycol=TRUE). Example ------- >>> pars_oi = ['mu', 'tau', 'eta', 'theta', 'lp__'] >>> dims_oi = [[], [], [8], [8], []] >>> fnames_oi = ['mu', 'tau', 'eta[1]', 'eta[2]', 'eta[3]', 'eta[4]', ... 'eta[5]', 'eta[6]', 'eta[7]', 'eta[8]', 'theta[1]', 'theta[2]', ... 'theta[3]', 'theta[4]', 'theta[5]', 'theta[6]', 'theta[7]', ... 'theta[8]', 'lp__'] >>> pars = ['mu', 'tau', 'eta', 'theta', 'lp__'] >>> _pars_total_indexes(pars_oi, dims_oi, fnames_oi, pars) ... # doctest: +ELLIPSIS OrderedDict([('mu', (0,)), ('tau', (1,)), ('eta', (2, 3, ... """ starts = _calc_starts(dims) def par_total_indexes(par): # if `par` is a scalar, it will match one of `fnames` if par in fnames: p = fnames.index(par) idx = tuple([p]) return OrderedDict([(par, idx), (par+'_rowmajor', idx)]) else: p = names.index(par) idx = starts[p] + np.arange(np.prod(dims[p])) idx_rowmajor = starts[p] + _idx_col2rowm(dims[p]) return OrderedDict([(par, tuple(idx)), (par+'_rowmajor', tuple(idx_rowmajor))]) indexes = OrderedDict() for par in pars: indexes.update(par_total_indexes(par)) return indexes
python
def _pars_total_indexes(names, dims, fnames, pars): """Obtain all the indexes for parameters `pars` in the sequence of names. `names` references variables that are in column-major order Parameters ---------- names : sequence of str All the parameter names. dim : sequence of list of int Dimensions, in same order as `names`. fnames : sequence of str All the scalar parameter names pars : sequence of str The parameters of interest. It is assumed all elements in `pars` are in `names`. Returns ------- indexes : OrderedDict of list of int Dictionary uses parameter names as keys. Indexes are column-major order. For each parameter there is also a key `par`+'_rowmajor' that stores the row-major indexing. Note ---- Inside each parameter (vector or array), the sequence uses column-major ordering. For example, if we have parameters alpha and beta, having dimensions [2, 2] and [2, 3] respectively, the whole parameter sequence is alpha[0,0], alpha[1,0], alpha[0, 1], alpha[1, 1], beta[0, 0], beta[1, 0], beta[0, 1], beta[1, 1], beta[0, 2], beta[1, 2]. In short, like R matrix(..., bycol=TRUE). Example ------- >>> pars_oi = ['mu', 'tau', 'eta', 'theta', 'lp__'] >>> dims_oi = [[], [], [8], [8], []] >>> fnames_oi = ['mu', 'tau', 'eta[1]', 'eta[2]', 'eta[3]', 'eta[4]', ... 'eta[5]', 'eta[6]', 'eta[7]', 'eta[8]', 'theta[1]', 'theta[2]', ... 'theta[3]', 'theta[4]', 'theta[5]', 'theta[6]', 'theta[7]', ... 'theta[8]', 'lp__'] >>> pars = ['mu', 'tau', 'eta', 'theta', 'lp__'] >>> _pars_total_indexes(pars_oi, dims_oi, fnames_oi, pars) ... # doctest: +ELLIPSIS OrderedDict([('mu', (0,)), ('tau', (1,)), ('eta', (2, 3, ... """ starts = _calc_starts(dims) def par_total_indexes(par): # if `par` is a scalar, it will match one of `fnames` if par in fnames: p = fnames.index(par) idx = tuple([p]) return OrderedDict([(par, idx), (par+'_rowmajor', idx)]) else: p = names.index(par) idx = starts[p] + np.arange(np.prod(dims[p])) idx_rowmajor = starts[p] + _idx_col2rowm(dims[p]) return OrderedDict([(par, tuple(idx)), (par+'_rowmajor', tuple(idx_rowmajor))]) indexes = OrderedDict() for par in pars: indexes.update(par_total_indexes(par)) return indexes
[ "def", "_pars_total_indexes", "(", "names", ",", "dims", ",", "fnames", ",", "pars", ")", ":", "starts", "=", "_calc_starts", "(", "dims", ")", "def", "par_total_indexes", "(", "par", ")", ":", "# if `par` is a scalar, it will match one of `fnames`", "if", "par", "in", "fnames", ":", "p", "=", "fnames", ".", "index", "(", "par", ")", "idx", "=", "tuple", "(", "[", "p", "]", ")", "return", "OrderedDict", "(", "[", "(", "par", ",", "idx", ")", ",", "(", "par", "+", "'_rowmajor'", ",", "idx", ")", "]", ")", "else", ":", "p", "=", "names", ".", "index", "(", "par", ")", "idx", "=", "starts", "[", "p", "]", "+", "np", ".", "arange", "(", "np", ".", "prod", "(", "dims", "[", "p", "]", ")", ")", "idx_rowmajor", "=", "starts", "[", "p", "]", "+", "_idx_col2rowm", "(", "dims", "[", "p", "]", ")", "return", "OrderedDict", "(", "[", "(", "par", ",", "tuple", "(", "idx", ")", ")", ",", "(", "par", "+", "'_rowmajor'", ",", "tuple", "(", "idx_rowmajor", ")", ")", "]", ")", "indexes", "=", "OrderedDict", "(", ")", "for", "par", "in", "pars", ":", "indexes", ".", "update", "(", "par_total_indexes", "(", "par", ")", ")", "return", "indexes" ]
Obtain all the indexes for parameters `pars` in the sequence of names. `names` references variables that are in column-major order Parameters ---------- names : sequence of str All the parameter names. dim : sequence of list of int Dimensions, in same order as `names`. fnames : sequence of str All the scalar parameter names pars : sequence of str The parameters of interest. It is assumed all elements in `pars` are in `names`. Returns ------- indexes : OrderedDict of list of int Dictionary uses parameter names as keys. Indexes are column-major order. For each parameter there is also a key `par`+'_rowmajor' that stores the row-major indexing. Note ---- Inside each parameter (vector or array), the sequence uses column-major ordering. For example, if we have parameters alpha and beta, having dimensions [2, 2] and [2, 3] respectively, the whole parameter sequence is alpha[0,0], alpha[1,0], alpha[0, 1], alpha[1, 1], beta[0, 0], beta[1, 0], beta[0, 1], beta[1, 1], beta[0, 2], beta[1, 2]. In short, like R matrix(..., bycol=TRUE). Example ------- >>> pars_oi = ['mu', 'tau', 'eta', 'theta', 'lp__'] >>> dims_oi = [[], [], [8], [8], []] >>> fnames_oi = ['mu', 'tau', 'eta[1]', 'eta[2]', 'eta[3]', 'eta[4]', ... 'eta[5]', 'eta[6]', 'eta[7]', 'eta[8]', 'theta[1]', 'theta[2]', ... 'theta[3]', 'theta[4]', 'theta[5]', 'theta[6]', 'theta[7]', ... 'theta[8]', 'lp__'] >>> pars = ['mu', 'tau', 'eta', 'theta', 'lp__'] >>> _pars_total_indexes(pars_oi, dims_oi, fnames_oi, pars) ... # doctest: +ELLIPSIS OrderedDict([('mu', (0,)), ('tau', (1,)), ('eta', (2, 3, ...
[ "Obtain", "all", "the", "indexes", "for", "parameters", "pars", "in", "the", "sequence", "of", "names", "." ]
train
https://github.com/stan-dev/pystan/blob/57bdccea11888157e7aaafba083003080a934805/pystan/misc.py#L811-L875
0.001616
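A minimal sketch (not from pystan) of the column-major/row-major relationship the docstring above describes; `dims`, `colmajor`, and `rowmajor` are illustrative names:

import numpy as np

# A parameter of shape [2, 3] fills 6 consecutive flat positions column by
# column (column-major, as in R/Fortran).
dims = [2, 3]
n = int(np.prod(dims))
colmajor = np.arange(n)
# Reading the same elements row by row yields the permutation stored under
# the `par + '_rowmajor'` key.
rowmajor = np.arange(n).reshape(dims, order='F').ravel(order='C')
print(colmajor)  # [0 1 2 3 4 5]
print(rowmajor)  # [0 2 4 1 3 5]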
user-cont/conu
conu/apidefs/filesystem.py
Filesystem.file_is_present
def file_is_present(self, file_path): """ check if file 'file_path' is present, raises IOError if file_path is not a file :param file_path: str, path to the file :return: True if file exists, False if file does not exist """ p = self.p(file_path) if not os.path.exists(p): return False if not os.path.isfile(p): raise IOError("%s is not a file" % file_path) return True
python
def file_is_present(self, file_path): """ check if file 'file_path' is present, raises IOError if file_path is not a file :param file_path: str, path to the file :return: True if file exists, False if file does not exist """ p = self.p(file_path) if not os.path.exists(p): return False if not os.path.isfile(p): raise IOError("%s is not a file" % file_path) return True
[ "def", "file_is_present", "(", "self", ",", "file_path", ")", ":", "p", "=", "self", ".", "p", "(", "file_path", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "p", ")", ":", "return", "False", "if", "not", "os", ".", "path", ".", "isfile", "(", "p", ")", ":", "raise", "IOError", "(", "\"%s is not a file\"", "%", "file_path", ")", "return", "True" ]
check if file 'file_path' is present, raises IOError if file_path is not a file :param file_path: str, path to the file :return: True if file exists, False if file does not exist
[ "check", "if", "file", "file_path", "is", "present", "raises", "IOError", "if", "file_path", "is", "not", "a", "file" ]
train
https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/apidefs/filesystem.py#L131-L144
0.004219
projectatomic/atomic-reactor
atomic_reactor/plugins/post_compare_components.py
CompareComponentsPlugin.get_component_list_from_workers
def get_component_list_from_workers(self, worker_metadatas): """ Find the component lists from each worker build. The components that are interesting are under the 'output' key. The buildhost's components are ignored. Inside the 'output' key are various 'instances'. The only 'instance' with a 'component list' is the 'docker-image' instance. The 'log' instances are ignored for now. Reference plugin post_koji_upload for details on how this is created. :return: list of component lists """ comp_list = [] for platform in sorted(worker_metadatas.keys()): for instance in worker_metadatas[platform]['output']: if instance['type'] == 'docker-image': if 'components' not in instance or not instance['components']: self.log.warn("Missing 'components' key in 'output' metadata instance: %s", instance) continue comp_list.append(instance['components']) return comp_list
python
def get_component_list_from_workers(self, worker_metadatas): """ Find the component lists from each worker build. The components that are interesting are under the 'output' key. The buildhost's components are ignored. Inside the 'output' key are various 'instances'. The only 'instance' with a 'component list' is the 'docker-image' instance. The 'log' instances are ignored for now. Reference plugin post_koji_upload for details on how this is created. :return: list of component lists """ comp_list = [] for platform in sorted(worker_metadatas.keys()): for instance in worker_metadatas[platform]['output']: if instance['type'] == 'docker-image': if 'components' not in instance or not instance['components']: self.log.warn("Missing 'components' key in 'output' metadata instance: %s", instance) continue comp_list.append(instance['components']) return comp_list
[ "def", "get_component_list_from_workers", "(", "self", ",", "worker_metadatas", ")", ":", "comp_list", "=", "[", "]", "for", "platform", "in", "sorted", "(", "worker_metadatas", ".", "keys", "(", ")", ")", ":", "for", "instance", "in", "worker_metadatas", "[", "platform", "]", "[", "'output'", "]", ":", "if", "instance", "[", "'type'", "]", "==", "'docker-image'", ":", "if", "'components'", "not", "in", "instance", "or", "not", "instance", "[", "'components'", "]", ":", "self", ".", "log", ".", "warn", "(", "\"Missing 'components' key in 'output' metadata instance: %s\"", ",", "instance", ")", "continue", "comp_list", ".", "append", "(", "instance", "[", "'components'", "]", ")", "return", "comp_list" ]
Find the component lists from each worker build. The components that are interesting are under the 'output' key. The buildhost's components are ignored. Inside the 'output' key are various 'instances'. The only 'instance' with a 'component list' is the 'docker-image' instance. The 'log' instances are ignored for now. Reference plugin post_koji_upload for details on how this is created. :return: list of component lists
[ "Find", "the", "component", "lists", "from", "each", "worker", "build", "." ]
train
https://github.com/projectatomic/atomic-reactor/blob/fd31c01b964097210bf169960d051e5f04019a80/atomic_reactor/plugins/post_compare_components.py#L51-L77
0.00354
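A hedged sketch of the worker metadata shape this method walks; the dictionary below is invented for illustration, not real plugin output:

worker_metadatas = {
    'x86_64': {
        'output': [
            {'type': 'log'},  # log instances are skipped
            {'type': 'docker-image',
             'components': [{'name': 'bash', 'version': '4.4'}]},
        ],
    },
}
# get_component_list_from_workers(worker_metadatas) would collect
# [[{'name': 'bash', 'version': '4.4'}]]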
openid/python-openid
openid/extensions/draft/pape5.py
Response.parseExtensionArgs
def parseExtensionArgs(self, args, is_openid1, strict=False): """Parse the provider authentication policy arguments into the internal state of this object @param args: unqualified provider authentication policy arguments @param strict: Whether to raise an exception when bad data is encountered @returns: None. The data is parsed into the internal fields of this object. """ policies_str = args.get('auth_policies') if policies_str: auth_policies = policies_str.split(' ') elif strict: raise ValueError('Missing auth_policies') else: auth_policies = [] if (len(auth_policies) > 1 and strict and AUTH_NONE in auth_policies): raise ValueError('Got some auth policies, as well as the special ' '"none" URI: %r' % (auth_policies,)) if 'none' in auth_policies: msg = '"none" used as a policy URI (see PAPE draft < 5)' if strict: raise ValueError(msg) else: warnings.warn(msg, stacklevel=2) auth_policies = [u for u in auth_policies if u not in ['none', AUTH_NONE]] self.auth_policies = auth_policies for (key, val) in args.iteritems(): if key.startswith('auth_level.'): alias = key[11:] # skip the already-processed namespace declarations if alias.startswith('ns.'): continue try: uri = args['auth_level.ns.%s' % (alias,)] except KeyError: if is_openid1: uri = self._default_auth_level_aliases.get(alias) else: uri = None if uri is None: if strict: raise ValueError( 'Undefined auth level alias: %r' % (alias,)) else: self.setAuthLevel(uri, val, alias) auth_time = args.get('auth_time') if auth_time: if TIME_VALIDATOR.match(auth_time): self.auth_time = auth_time elif strict: raise ValueError("auth_time must be in RFC3339 format")
python
def parseExtensionArgs(self, args, is_openid1, strict=False): """Parse the provider authentication policy arguments into the internal state of this object @param args: unqualified provider authentication policy arguments @param strict: Whether to raise an exception when bad data is encountered @returns: None. The data is parsed into the internal fields of this object. """ policies_str = args.get('auth_policies') if policies_str: auth_policies = policies_str.split(' ') elif strict: raise ValueError('Missing auth_policies') else: auth_policies = [] if (len(auth_policies) > 1 and strict and AUTH_NONE in auth_policies): raise ValueError('Got some auth policies, as well as the special ' '"none" URI: %r' % (auth_policies,)) if 'none' in auth_policies: msg = '"none" used as a policy URI (see PAPE draft < 5)' if strict: raise ValueError(msg) else: warnings.warn(msg, stacklevel=2) auth_policies = [u for u in auth_policies if u not in ['none', AUTH_NONE]] self.auth_policies = auth_policies for (key, val) in args.iteritems(): if key.startswith('auth_level.'): alias = key[11:] # skip the already-processed namespace declarations if alias.startswith('ns.'): continue try: uri = args['auth_level.ns.%s' % (alias,)] except KeyError: if is_openid1: uri = self._default_auth_level_aliases.get(alias) else: uri = None if uri is None: if strict: raise ValueError( 'Undefined auth level alias: %r' % (alias,)) else: self.setAuthLevel(uri, val, alias) auth_time = args.get('auth_time') if auth_time: if TIME_VALIDATOR.match(auth_time): self.auth_time = auth_time elif strict: raise ValueError("auth_time must be in RFC3339 format")
[ "def", "parseExtensionArgs", "(", "self", ",", "args", ",", "is_openid1", ",", "strict", "=", "False", ")", ":", "policies_str", "=", "args", ".", "get", "(", "'auth_policies'", ")", "if", "policies_str", ":", "auth_policies", "=", "policies_str", ".", "split", "(", "' '", ")", "elif", "strict", ":", "raise", "ValueError", "(", "'Missing auth_policies'", ")", "else", ":", "auth_policies", "=", "[", "]", "if", "(", "len", "(", "auth_policies", ")", ">", "1", "and", "strict", "and", "AUTH_NONE", "in", "auth_policies", ")", ":", "raise", "ValueError", "(", "'Got some auth policies, as well as the special '", "'\"none\" URI: %r'", "%", "(", "auth_policies", ",", ")", ")", "if", "'none'", "in", "auth_policies", ":", "msg", "=", "'\"none\" used as a policy URI (see PAPE draft < 5)'", "if", "strict", ":", "raise", "ValueError", "(", "msg", ")", "else", ":", "warnings", ".", "warn", "(", "msg", ",", "stacklevel", "=", "2", ")", "auth_policies", "=", "[", "u", "for", "u", "in", "auth_policies", "if", "u", "not", "in", "[", "'none'", ",", "AUTH_NONE", "]", "]", "self", ".", "auth_policies", "=", "auth_policies", "for", "(", "key", ",", "val", ")", "in", "args", ".", "iteritems", "(", ")", ":", "if", "key", ".", "startswith", "(", "'auth_level.'", ")", ":", "alias", "=", "key", "[", "11", ":", "]", "# skip the already-processed namespace declarations", "if", "alias", ".", "startswith", "(", "'ns.'", ")", ":", "continue", "try", ":", "uri", "=", "args", "[", "'auth_level.ns.%s'", "%", "(", "alias", ",", ")", "]", "except", "KeyError", ":", "if", "is_openid1", ":", "uri", "=", "self", ".", "_default_auth_level_aliases", ".", "get", "(", "alias", ")", "else", ":", "uri", "=", "None", "if", "uri", "is", "None", ":", "if", "strict", ":", "raise", "ValueError", "(", "'Undefined auth level alias: %r'", "%", "(", "alias", ",", ")", ")", "else", ":", "self", ".", "setAuthLevel", "(", "uri", ",", "val", ",", "alias", ")", "auth_time", "=", "args", ".", "get", "(", "'auth_time'", ")", "if", "auth_time", ":", "if", "TIME_VALIDATOR", ".", "match", "(", "auth_time", ")", ":", "self", ".", "auth_time", "=", "auth_time", "elif", "strict", ":", "raise", "ValueError", "(", "\"auth_time must be in RFC3339 format\"", ")" ]
Parse the provider authentication policy arguments into the internal state of this object @param args: unqualified provider authentication policy arguments @param strict: Whether to raise an exception when bad data is encountered @returns: None. The data is parsed into the internal fields of this object.
[ "Parse", "the", "provider", "authentication", "policy", "arguments", "into", "the", "internal", "state", "of", "this", "object" ]
train
https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/extensions/draft/pape5.py#L379-L444
0.00084
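An illustrative argument dict for parseExtensionArgs; the key names follow the code above, while the URIs and timestamp are example values taken loosely from the PAPE draft:

args = {
    'auth_policies':
        'http://schemas.openid.net/pape/policies/2007/06/phishing-resistant',
    'auth_level.ns.nist': 'http://csrc.nist.gov/publication/nistpubs/'
                          '800-63/SP800-63V1_0_2.pdf',
    'auth_level.nist': '1',
    'auth_time': '2010-01-01T00:00:00Z',
}
# With is_openid1=False this would record one auth policy URI, register the
# 'nist' alias with level '1', and set auth_time to the RFC3339 string.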
jtwhite79/pyemu
pyemu/pst/pst_handler.py
Pst.nnz_obs_groups
def nnz_obs_groups(self): """ get the observation groups that contain at least one non-zero weighted observation Returns ------- nnz_obs_groups : list a list of observation groups that contain at least one non-zero weighted observation """ og = [] obs = self.observation_data for g in self.obs_groups: if obs.loc[obs.obgnme==g,"weight"].sum() > 0.0: og.append(g) return og
python
def nnz_obs_groups(self): """ get the observation groups that contain at least one non-zero weighted observation Returns ------- nnz_obs_groups : list a list of observation groups that contain at least one non-zero weighted observation """ og = [] obs = self.observation_data for g in self.obs_groups: if obs.loc[obs.obgnme==g,"weight"].sum() > 0.0: og.append(g) return og
[ "def", "nnz_obs_groups", "(", "self", ")", ":", "og", "=", "[", "]", "obs", "=", "self", ".", "observation_data", "for", "g", "in", "self", ".", "obs_groups", ":", "if", "obs", ".", "loc", "[", "obs", ".", "obgnme", "==", "g", ",", "\"weight\"", "]", ".", "sum", "(", ")", ">", "0.0", ":", "og", ".", "append", "(", "g", ")", "return", "og" ]
get the observation groups that contain at least one non-zero weighted observation Returns ------- nnz_obs_groups : list a list of observation groups that contain at least one non-zero weighted observation
[ "get", "the", "observation", "groups", "that", "contain", "at", "least", "one", "non", "-", "zero", "weighted", "observation" ]
train
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/pst/pst_handler.py#L359-L375
0.009862
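The loop above amounts to a grouped weight sum; a standalone pandas sketch with made-up data:

import pandas as pd

obs = pd.DataFrame({'obgnme': ['g1', 'g1', 'g2'],
                    'weight': [0.0, 2.0, 0.0]})
sums = obs.groupby('obgnme')['weight'].sum()
print([g for g in ['g1', 'g2'] if sums.get(g, 0.0) > 0.0])  # ['g1']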
juju/charm-helpers
charmhelpers/contrib/hardening/utils.py
_get_defaults
def _get_defaults(modules): """Load the default config for the provided modules. :param modules: stack modules config defaults to lookup. :returns: modules default config dictionary. """ default = os.path.join(os.path.dirname(__file__), 'defaults/%s.yaml' % (modules)) return yaml.safe_load(open(default))
python
def _get_defaults(modules): """Load the default config for the provided modules. :param modules: stack modules config defaults to lookup. :returns: modules default config dictionary. """ default = os.path.join(os.path.dirname(__file__), 'defaults/%s.yaml' % (modules)) return yaml.safe_load(open(default))
[ "def", "_get_defaults", "(", "modules", ")", ":", "default", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "'defaults/%s.yaml'", "%", "(", "modules", ")", ")", "return", "yaml", ".", "safe_load", "(", "open", "(", "default", ")", ")" ]
Load the default config for the provided modules. :param modules: stack modules config defaults to lookup. :returns: modules default config dictionary.
[ "Load", "the", "default", "config", "for", "the", "provided", "modules", "." ]
train
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/hardening/utils.py#L37-L45
0.002801
datastax/python-driver
cassandra/cqlengine/query.py
DMLQuery._delete_null_columns
def _delete_null_columns(self, conditionals=None): """ executes a delete query to remove columns that have changed to null """ ds = DeleteStatement(self.column_family_name, conditionals=conditionals, if_exists=self._if_exists) deleted_fields = False static_only = True for _, v in self.instance._values.items(): col = v.column if v.deleted: ds.add_field(col.db_field_name) deleted_fields = True static_only &= col.static elif isinstance(col, columns.Map): uc = MapDeleteClause(col.db_field_name, v.value, v.previous_value) if uc.get_context_size() > 0: ds.add_field(uc) deleted_fields = True static_only |= col.static if deleted_fields: keys = self.model._partition_keys if static_only else self.model._primary_keys for name, col in keys.items(): ds.add_where(col, EqualsOperator(), getattr(self.instance, name)) self._execute(ds)
python
def _delete_null_columns(self, conditionals=None): """ executes a delete query to remove columns that have changed to null """ ds = DeleteStatement(self.column_family_name, conditionals=conditionals, if_exists=self._if_exists) deleted_fields = False static_only = True for _, v in self.instance._values.items(): col = v.column if v.deleted: ds.add_field(col.db_field_name) deleted_fields = True static_only &= col.static elif isinstance(col, columns.Map): uc = MapDeleteClause(col.db_field_name, v.value, v.previous_value) if uc.get_context_size() > 0: ds.add_field(uc) deleted_fields = True static_only |= col.static if deleted_fields: keys = self.model._partition_keys if static_only else self.model._primary_keys for name, col in keys.items(): ds.add_where(col, EqualsOperator(), getattr(self.instance, name)) self._execute(ds)
[ "def", "_delete_null_columns", "(", "self", ",", "conditionals", "=", "None", ")", ":", "ds", "=", "DeleteStatement", "(", "self", ".", "column_family_name", ",", "conditionals", "=", "conditionals", ",", "if_exists", "=", "self", ".", "_if_exists", ")", "deleted_fields", "=", "False", "static_only", "=", "True", "for", "_", ",", "v", "in", "self", ".", "instance", ".", "_values", ".", "items", "(", ")", ":", "col", "=", "v", ".", "column", "if", "v", ".", "deleted", ":", "ds", ".", "add_field", "(", "col", ".", "db_field_name", ")", "deleted_fields", "=", "True", "static_only", "&=", "col", ".", "static", "elif", "isinstance", "(", "col", ",", "columns", ".", "Map", ")", ":", "uc", "=", "MapDeleteClause", "(", "col", ".", "db_field_name", ",", "v", ".", "value", ",", "v", ".", "previous_value", ")", "if", "uc", ".", "get_context_size", "(", ")", ">", "0", ":", "ds", ".", "add_field", "(", "uc", ")", "deleted_fields", "=", "True", "static_only", "|=", "col", ".", "static", "if", "deleted_fields", ":", "keys", "=", "self", ".", "model", ".", "_partition_keys", "if", "static_only", "else", "self", ".", "model", ".", "_primary_keys", "for", "name", ",", "col", "in", "keys", ".", "items", "(", ")", ":", "ds", ".", "add_where", "(", "col", ",", "EqualsOperator", "(", ")", ",", "getattr", "(", "self", ".", "instance", ",", "name", ")", ")", "self", ".", "_execute", "(", "ds", ")" ]
executes a delete query to remove columns that have changed to null
[ "executes", "a", "delete", "query", "to", "remove", "columns", "that", "have", "changed", "to", "null" ]
train
https://github.com/datastax/python-driver/blob/30a80d0b798b1f45f8cb77163b1fa791f3e3ca29/cassandra/cqlengine/query.py#L1387-L1411
0.005343
yyuu/botornado
boto/cloudfront/distribution.py
Distribution.add_object
def add_object(self, name, content, headers=None, replace=True): """ Adds a new content object to the Distribution. The content for the object will be copied to a new Key in the S3 Bucket and the permissions will be set appropriately for the type of Distribution. :type name: str or unicode :param name: The name or key of the new object. :type content: file-like object :param content: A file-like object that contains the content for the new object. :type headers: dict :param headers: A dictionary containing additional headers you would like associated with the new object in S3. :rtype: :class:`boto.cloudfront.object.Object` :return: The newly created object. """ if self.config.origin.origin_access_identity: policy = 'private' else: policy = 'public-read' bucket = self._get_bucket() object = bucket.new_key(name) object.set_contents_from_file(content, headers=headers, policy=policy) if self.config.origin.origin_access_identity: self.set_permissions(object, replace) return object
python
def add_object(self, name, content, headers=None, replace=True): """ Adds a new content object to the Distribution. The content for the object will be copied to a new Key in the S3 Bucket and the permissions will be set appropriately for the type of Distribution. :type name: str or unicode :param name: The name or key of the new object. :type content: file-like object :param content: A file-like object that contains the content for the new object. :type headers: dict :param headers: A dictionary containing additional headers you would like associated with the new object in S3. :rtype: :class:`boto.cloudfront.object.Object` :return: The newly created object. """ if self.config.origin.origin_access_identity: policy = 'private' else: policy = 'public-read' bucket = self._get_bucket() object = bucket.new_key(name) object.set_contents_from_file(content, headers=headers, policy=policy) if self.config.origin.origin_access_identity: self.set_permissions(object, replace) return object
[ "def", "add_object", "(", "self", ",", "name", ",", "content", ",", "headers", "=", "None", ",", "replace", "=", "True", ")", ":", "if", "self", ".", "config", ".", "origin", ".", "origin_access_identity", ":", "policy", "=", "'private'", "else", ":", "policy", "=", "'public-read'", "bucket", "=", "self", ".", "_get_bucket", "(", ")", "object", "=", "bucket", ".", "new_key", "(", "name", ")", "object", ".", "set_contents_from_file", "(", "content", ",", "headers", "=", "headers", ",", "policy", "=", "policy", ")", "if", "self", ".", "config", ".", "origin", ".", "origin_access_identity", ":", "self", ".", "set_permissions", "(", "object", ",", "replace", ")", "return", "object" ]
Adds a new content object to the Distribution. The content for the object will be copied to a new Key in the S3 Bucket and the permissions will be set appropriately for the type of Distribution. :type name: str or unicode :param name: The name or key of the new object. :type content: file-like object :param content: A file-like object that contains the content for the new object. :type headers: dict :param headers: A dictionary containing additional headers you would like associated with the new object in S3. :rtype: :class:`boto.cloudfront.object.Object` :return: The newly created object.
[ "Adds", "a", "new", "content", "object", "to", "the", "Distribution", ".", "The", "content", "for", "the", "object", "will", "be", "copied", "to", "a", "new", "Key", "in", "the", "S3", "Bucket", "and", "the", "permissions", "will", "be", "set", "appropriately", "for", "the", "type", "of", "Distribution", "." ]
train
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/cloudfront/distribution.py#L465-L496
0.001574
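A usage sketch under stated assumptions: the connection helpers below come from boto's documented CloudFront API, and the file and key names are hypothetical:

import boto

cf = boto.connect_cloudfront()
dist = cf.get_all_distributions()[0].get_distribution()
with open('logo.png', 'rb') as fp:
    key = dist.add_object('images/logo.png', fp,
                          headers={'Content-Type': 'image/png'})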
vtkiorg/vtki
vtki/common.py
Common.cell_arrays
def cell_arrays(self): """ Returns all the cell arrays """ cdata = self.GetCellData() narr = cdata.GetNumberOfArrays() # Update data if necessary if hasattr(self, '_cell_arrays'): keys = list(self._cell_arrays.keys()) if narr == len(keys): if keys: if self._cell_arrays[keys[0]].size == self.n_cells: return self._cell_arrays else: return self._cell_arrays # dictionary with callbacks self._cell_arrays = CellScalarsDict(self) for i in range(narr): name = cdata.GetArrayName(i) self._cell_arrays[name] = self._cell_scalar(name) self._cell_arrays.enable_callback() return self._cell_arrays
python
def cell_arrays(self): """ Returns all the cell arrays """ cdata = self.GetCellData() narr = cdata.GetNumberOfArrays() # Update data if necessary if hasattr(self, '_cell_arrays'): keys = list(self._cell_arrays.keys()) if narr == len(keys): if keys: if self._cell_arrays[keys[0]].size == self.n_cells: return self._cell_arrays else: return self._cell_arrays # dictionary with callbacks self._cell_arrays = CellScalarsDict(self) for i in range(narr): name = cdata.GetArrayName(i) self._cell_arrays[name] = self._cell_scalar(name) self._cell_arrays.enable_callback() return self._cell_arrays
[ "def", "cell_arrays", "(", "self", ")", ":", "cdata", "=", "self", ".", "GetCellData", "(", ")", "narr", "=", "cdata", ".", "GetNumberOfArrays", "(", ")", "# Update data if necessary", "if", "hasattr", "(", "self", ",", "'_cell_arrays'", ")", ":", "keys", "=", "list", "(", "self", ".", "_cell_arrays", ".", "keys", "(", ")", ")", "if", "narr", "==", "len", "(", "keys", ")", ":", "if", "keys", ":", "if", "self", ".", "_cell_arrays", "[", "keys", "[", "0", "]", "]", ".", "size", "==", "self", ".", "n_cells", ":", "return", "self", ".", "_cell_arrays", "else", ":", "return", "self", ".", "_cell_arrays", "# dictionary with callbacks", "self", ".", "_cell_arrays", "=", "CellScalarsDict", "(", "self", ")", "for", "i", "in", "range", "(", "narr", ")", ":", "name", "=", "cdata", ".", "GetArrayName", "(", "i", ")", "self", ".", "_cell_arrays", "[", "name", "]", "=", "self", ".", "_cell_scalar", "(", "name", ")", "self", ".", "_cell_arrays", ".", "enable_callback", "(", ")", "return", "self", ".", "_cell_arrays" ]
Returns all the cell arrays
[ "Returns", "the", "all", "cell", "arrays" ]
train
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/common.py#L602-L625
0.002451
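A short usage sketch; the file name is hypothetical and `vtki.read` is assumed from the package's reader helpers:

import vtki

mesh = vtki.read('mesh.vtk')
# cell_arrays behaves like a dict mapping array names to numpy arrays
for name, arr in mesh.cell_arrays.items():
    print(name, arr.shape)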
maxfischer2781/include
include/inhibit.py
DisabledIncludeTypes.enable
def enable(self, identifier, exclude_children=False): """ Enable a previously disabled include type :param identifier: module or name of the include type :param exclude_children: disable the include type only for child processes, not the current process The ``identifier`` can be specified in multiple ways to disable an include type. See :py:meth:`~.DisabledIncludeTypes.disable` for details. """ import_path = self._identifier2import_path(identifier=identifier) if import_path in self._disabled: self._enable_path(import_path) self._disabled.remove(import_path) if not exclude_children and import_path in self._children_disabled: self._children_disabled.remove(import_path) self._write_child_disabled()
python
def enable(self, identifier, exclude_children=False): """ Enable a previously disabled include type :param identifier: module or name of the include type :param exclude_children: disable the include type only for child processes, not the current process The ``identifier`` can be specified in multiple ways to disable an include type. See :py:meth:`~.DisabledIncludeTypes.disable` for details. """ import_path = self._identifier2import_path(identifier=identifier) if import_path in self._disabled: self._enable_path(import_path) self._disabled.remove(import_path) if not exclude_children and import_path in self._children_disabled: self._children_disabled.remove(import_path) self._write_child_disabled()
[ "def", "enable", "(", "self", ",", "identifier", ",", "exclude_children", "=", "False", ")", ":", "import_path", "=", "self", ".", "_identifier2import_path", "(", "identifier", "=", "identifier", ")", "if", "import_path", "in", "self", ".", "_disabled", ":", "self", ".", "_enable_path", "(", "import_path", ")", "self", ".", "_disabled", ".", "remove", "(", "import_path", ")", "if", "not", "exclude_children", "and", "import_path", "in", "self", ".", "_children_disabled", ":", "self", ".", "_children_disabled", ".", "remove", "(", "import_path", ")", "self", ".", "_write_child_disabled", "(", ")" ]
Enable a previously disabled include type :param identifier: module or name of the include type :param exclude_children: disable the include type only for child processes, not the current process The ``identifier`` can be specified in multiple ways to disable an include type. See :py:meth:`~.DisabledIncludeTypes.disable` for details.
[ "Enable", "a", "previously", "disabled", "include", "type" ]
train
https://github.com/maxfischer2781/include/blob/d8b0404f4996b6abcd39fdebf282b31fad8bb6f5/include/inhibit.py#L96-L112
0.004796
disqus/overseer
overseer/views.py
verify_subscription
def verify_subscription(request, ident): """ Verifies an unverified subscription and creates or appends to an existing subscription. """ try: unverified = UnverifiedSubscription.objects.get(ident=ident) except UnverifiedSubscription.DoesNotExist: return respond('overseer/invalid_subscription_token.html', {}, request) subscription = Subscription.objects.get_or_create(email=unverified.email, defaults={ 'ident': unverified.ident, })[0] subscription.services = unverified.services.all() unverified.delete() return respond('overseer/subscription_confirmed.html', { 'subscription': subscription, }, request)
python
def verify_subscription(request, ident): """ Verifies an unverified subscription and creates or appends to an existing subscription. """ try: unverified = UnverifiedSubscription.objects.get(ident=ident) except UnverifiedSubscription.DoesNotExist: return respond('overseer/invalid_subscription_token.html', {}, request) subscription = Subscription.objects.get_or_create(email=unverified.email, defaults={ 'ident': unverified.ident, })[0] subscription.services = unverified.services.all() unverified.delete() return respond('overseer/subscription_confirmed.html', { 'subscription': subscription, }, request)
[ "def", "verify_subscription", "(", "request", ",", "ident", ")", ":", "try", ":", "unverified", "=", "UnverifiedSubscription", ".", "objects", ".", "get", "(", "ident", "=", "ident", ")", "except", "UnverifiedSubscription", ".", "DoesNotExist", ":", "return", "respond", "(", "'overseer/invalid_subscription_token.html'", ",", "{", "}", ",", "request", ")", "subscription", "=", "Subscription", ".", "objects", ".", "get_or_create", "(", "email", "=", "unverified", ".", "email", ",", "defaults", "=", "{", "'ident'", ":", "unverified", ".", "ident", ",", "}", ")", "[", "0", "]", "subscription", ".", "services", "=", "unverified", ".", "services", ".", "all", "(", ")", "unverified", ".", "delete", "(", ")", "return", "respond", "(", "'overseer/subscription_confirmed.html'", ",", "{", "'subscription'", ":", "subscription", ",", "}", ",", "request", ")" ]
Verifies an unverified subscription and creates or appends to an existing subscription.
[ "Verifies", "an", "unverified", "subscription", "and", "create", "or", "appends", "to", "an", "existing", "subscription", "." ]
train
https://github.com/disqus/overseer/blob/b37573aba33b20aa86f89eb0c7e6f4d9905bedef/overseer/views.py#L192-L213
0.008511
spacetelescope/drizzlepac
drizzlepac/staticMask.py
staticMask.addMember
def addMember(self, imagePtr=None): """ Combines the input image with the static mask that has the same signature. Parameters ---------- imagePtr : object An imageObject reference Notes ----- The signature parameter consists of the tuple:: (instrument/detector, (nx,ny), chip_id) The signature is defined in the image object for each chip """ numchips=imagePtr._numchips log.info("Computing static mask:\n") chips = imagePtr.group if chips is None: chips = imagePtr.getExtensions() #for chip in range(1,numchips+1,1): for chip in chips: chipid=imagePtr.scienceExt + ','+ str(chip) chipimage=imagePtr.getData(chipid) signature=imagePtr[chipid].signature # If this is a new signature, create a new Static Mask file which is empty # only create a new mask if one doesn't already exist if ((signature not in self.masklist) or (len(self.masklist) == 0)): self.masklist[signature] = self._buildMaskArray(signature) maskname = constructFilename(signature) self.masknames[signature] = maskname else: chip_sig = buildSignatureKey(signature) for s in self.masknames: if chip_sig in self.masknames[s]: maskname = self.masknames[s] break imagePtr[chipid].outputNames['staticMask'] = maskname stats = ImageStats(chipimage,nclip=3,fields='mode') mode = stats.mode rms = stats.stddev nbins = len(stats.histogram) del stats log.info(' mode = %9f; rms = %7f; static_sig = %0.2f' % (mode, rms, self.static_sig)) if nbins >= 2: # only combine data from new image if enough data to mask sky_rms_diff = mode - (self.static_sig*rms) np.bitwise_and(self.masklist[signature], np.logical_not(np.less(chipimage, sky_rms_diff)), self.masklist[signature]) del chipimage
python
def addMember(self, imagePtr=None): """ Combines the input image with the static mask that has the same signature. Parameters ---------- imagePtr : object An imageObject reference Notes ----- The signature parameter consists of the tuple:: (instrument/detector, (nx,ny), chip_id) The signature is defined in the image object for each chip """ numchips=imagePtr._numchips log.info("Computing static mask:\n") chips = imagePtr.group if chips is None: chips = imagePtr.getExtensions() #for chip in range(1,numchips+1,1): for chip in chips: chipid=imagePtr.scienceExt + ','+ str(chip) chipimage=imagePtr.getData(chipid) signature=imagePtr[chipid].signature # If this is a new signature, create a new Static Mask file which is empty # only create a new mask if one doesn't already exist if ((signature not in self.masklist) or (len(self.masklist) == 0)): self.masklist[signature] = self._buildMaskArray(signature) maskname = constructFilename(signature) self.masknames[signature] = maskname else: chip_sig = buildSignatureKey(signature) for s in self.masknames: if chip_sig in self.masknames[s]: maskname = self.masknames[s] break imagePtr[chipid].outputNames['staticMask'] = maskname stats = ImageStats(chipimage,nclip=3,fields='mode') mode = stats.mode rms = stats.stddev nbins = len(stats.histogram) del stats log.info(' mode = %9f; rms = %7f; static_sig = %0.2f' % (mode, rms, self.static_sig)) if nbins >= 2: # only combine data from new image if enough data to mask sky_rms_diff = mode - (self.static_sig*rms) np.bitwise_and(self.masklist[signature], np.logical_not(np.less(chipimage, sky_rms_diff)), self.masklist[signature]) del chipimage
[ "def", "addMember", "(", "self", ",", "imagePtr", "=", "None", ")", ":", "numchips", "=", "imagePtr", ".", "_numchips", "log", ".", "info", "(", "\"Computing static mask:\\n\"", ")", "chips", "=", "imagePtr", ".", "group", "if", "chips", "is", "None", ":", "chips", "=", "imagePtr", ".", "getExtensions", "(", ")", "#for chip in range(1,numchips+1,1):", "for", "chip", "in", "chips", ":", "chipid", "=", "imagePtr", ".", "scienceExt", "+", "','", "+", "str", "(", "chip", ")", "chipimage", "=", "imagePtr", ".", "getData", "(", "chipid", ")", "signature", "=", "imagePtr", "[", "chipid", "]", ".", "signature", "# If this is a new signature, create a new Static Mask file which is empty", "# only create a new mask if one doesn't already exist", "if", "(", "(", "signature", "not", "in", "self", ".", "masklist", ")", "or", "(", "len", "(", "self", ".", "masklist", ")", "==", "0", ")", ")", ":", "self", ".", "masklist", "[", "signature", "]", "=", "self", ".", "_buildMaskArray", "(", "signature", ")", "maskname", "=", "constructFilename", "(", "signature", ")", "self", ".", "masknames", "[", "signature", "]", "=", "maskname", "else", ":", "chip_sig", "=", "buildSignatureKey", "(", "signature", ")", "for", "s", "in", "self", ".", "masknames", ":", "if", "chip_sig", "in", "self", ".", "masknames", "[", "s", "]", ":", "maskname", "=", "self", ".", "masknames", "[", "s", "]", "break", "imagePtr", "[", "chipid", "]", ".", "outputNames", "[", "'staticMask'", "]", "=", "maskname", "stats", "=", "ImageStats", "(", "chipimage", ",", "nclip", "=", "3", ",", "fields", "=", "'mode'", ")", "mode", "=", "stats", ".", "mode", "rms", "=", "stats", ".", "stddev", "nbins", "=", "len", "(", "stats", ".", "histogram", ")", "del", "stats", "log", ".", "info", "(", "' mode = %9f; rms = %7f; static_sig = %0.2f'", "%", "(", "mode", ",", "rms", ",", "self", ".", "static_sig", ")", ")", "if", "nbins", ">=", "2", ":", "# only combine data from new image if enough data to mask", "sky_rms_diff", "=", "mode", "-", "(", "self", ".", "static_sig", "*", "rms", ")", "np", ".", "bitwise_and", "(", "self", ".", "masklist", "[", "signature", "]", ",", "np", ".", "logical_not", "(", "np", ".", "less", "(", "chipimage", ",", "sky_rms_diff", ")", ")", ",", "self", ".", "masklist", "[", "signature", "]", ")", "del", "chipimage" ]
Combines the input image with the static mask that has the same signature. Parameters ---------- imagePtr : object An imageObject reference Notes ----- The signature parameter consists of the tuple:: (instrument/detector, (nx,ny), chip_id) The signature is defined in the image object for each chip
[ "Combines", "the", "input", "image", "with", "the", "static", "mask", "that", "has", "the", "same", "signature", "." ]
train
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/staticMask.py#L145-L205
0.007433
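The masking step above reduces to a numpy threshold against the sky mode; a standalone sketch with made-up statistics:

import numpy as np

chipimage = np.array([[10., 50.], [49., 51.]])
mode, rms, static_sig = 50.0, 1.0, 4.0
sky_rms_diff = mode - static_sig * rms           # 46.0
mask = np.ones_like(chipimage, dtype=np.int16)
# pixels well below the sky mode are flagged bad (0) in the static mask
np.bitwise_and(mask, np.logical_not(np.less(chipimage, sky_rms_diff)), mask)
print(mask)                                      # [[0 1] [1 1]]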
molmod/molmod
molmod/transformations.py
Rotation.from_properties
def from_properties(cls, angle, axis, invert): """Initialize a rotation based on the properties""" norm = np.linalg.norm(axis) if norm > 0: x = axis[0] / norm y = axis[1] / norm z = axis[2] / norm c = np.cos(angle) s = np.sin(angle) r = (1-2*invert) * np.array([ [x*x*(1-c)+c , x*y*(1-c)-z*s, x*z*(1-c)+y*s], [x*y*(1-c)+z*s, y*y*(1-c)+c , y*z*(1-c)-x*s], [x*z*(1-c)-y*s, y*z*(1-c)+x*s, z*z*(1-c)+c ] ]) else: r = np.identity(3) * (1-2*invert) return cls(r)
python
def from_properties(cls, angle, axis, invert): """Initialize a rotation based on the properties""" norm = np.linalg.norm(axis) if norm > 0: x = axis[0] / norm y = axis[1] / norm z = axis[2] / norm c = np.cos(angle) s = np.sin(angle) r = (1-2*invert) * np.array([ [x*x*(1-c)+c , x*y*(1-c)-z*s, x*z*(1-c)+y*s], [x*y*(1-c)+z*s, y*y*(1-c)+c , y*z*(1-c)-x*s], [x*z*(1-c)-y*s, y*z*(1-c)+x*s, z*z*(1-c)+c ] ]) else: r = np.identity(3) * (1-2*invert) return cls(r)
[ "def", "from_properties", "(", "cls", ",", "angle", ",", "axis", ",", "invert", ")", ":", "norm", "=", "np", ".", "linalg", ".", "norm", "(", "axis", ")", "if", "norm", ">", "0", ":", "x", "=", "axis", "[", "0", "]", "/", "norm", "y", "=", "axis", "[", "1", "]", "/", "norm", "z", "=", "axis", "[", "2", "]", "/", "norm", "c", "=", "np", ".", "cos", "(", "angle", ")", "s", "=", "np", ".", "sin", "(", "angle", ")", "r", "=", "(", "1", "-", "2", "*", "invert", ")", "*", "np", ".", "array", "(", "[", "[", "x", "*", "x", "*", "(", "1", "-", "c", ")", "+", "c", ",", "x", "*", "y", "*", "(", "1", "-", "c", ")", "-", "z", "*", "s", ",", "x", "*", "z", "*", "(", "1", "-", "c", ")", "+", "y", "*", "s", "]", ",", "[", "x", "*", "y", "*", "(", "1", "-", "c", ")", "+", "z", "*", "s", ",", "y", "*", "y", "*", "(", "1", "-", "c", ")", "+", "c", ",", "y", "*", "z", "*", "(", "1", "-", "c", ")", "-", "x", "*", "s", "]", ",", "[", "x", "*", "z", "*", "(", "1", "-", "c", ")", "-", "y", "*", "s", ",", "y", "*", "z", "*", "(", "1", "-", "c", ")", "+", "x", "*", "s", ",", "z", "*", "z", "*", "(", "1", "-", "c", ")", "+", "c", "]", "]", ")", "else", ":", "r", "=", "np", ".", "identity", "(", "3", ")", "*", "(", "1", "-", "2", "*", "invert", ")", "return", "cls", "(", "r", ")" ]
Initialize a rotation based on the properties
[ "Initialize", "a", "rotation", "based", "on", "the", "properties" ]
train
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/transformations.py#L194-L210
0.007776
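The matrix assembled above is the Rodrigues rotation formula; a quick numerical check with numpy only:

import numpy as np

angle = np.pi / 2
x, y, z = 0.0, 0.0, 1.0                          # unit rotation axis (z)
c, s = np.cos(angle), np.sin(angle)
r = np.array([
    [x*x*(1-c)+c,   x*y*(1-c)-z*s, x*z*(1-c)+y*s],
    [x*y*(1-c)+z*s, y*y*(1-c)+c,   y*z*(1-c)-x*s],
    [x*z*(1-c)-y*s, y*z*(1-c)+x*s, z*z*(1-c)+c  ],
])
print(np.round(r.dot([1.0, 0.0, 0.0]), 12))      # [0. 1. 0.]: x maps to y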
pandas-dev/pandas
pandas/core/computation/align.py
_any_pandas_objects
def _any_pandas_objects(terms): """Check a sequence of terms for instances of PandasObject.""" return any(isinstance(term.value, pd.core.generic.PandasObject) for term in terms)
python
def _any_pandas_objects(terms): """Check a sequence of terms for instances of PandasObject.""" return any(isinstance(term.value, pd.core.generic.PandasObject) for term in terms)
[ "def", "_any_pandas_objects", "(", "terms", ")", ":", "return", "any", "(", "isinstance", "(", "term", ".", "value", ",", "pd", ".", "core", ".", "generic", ".", "PandasObject", ")", "for", "term", "in", "terms", ")" ]
Check a sequence of terms for instances of PandasObject.
[ "Check", "a", "sequence", "of", "terms", "for", "instances", "of", "PandasObject", "." ]
train
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/align.py#L36-L39
0.005
ray-project/ray
python/ray/tune/suggest/suggestion.py
SuggestionAlgorithm.add_configurations
def add_configurations(self, experiments): """Chains generator given experiment specifications. Arguments: experiments (Experiment | list | dict): Experiments to run. """ experiment_list = convert_to_experiment_list(experiments) for experiment in experiment_list: self._trial_generator = itertools.chain( self._trial_generator, self._generate_trials(experiment.spec, experiment.name))
python
def add_configurations(self, experiments): """Chains generator given experiment specifications. Arguments: experiments (Experiment | list | dict): Experiments to run. """ experiment_list = convert_to_experiment_list(experiments) for experiment in experiment_list: self._trial_generator = itertools.chain( self._trial_generator, self._generate_trials(experiment.spec, experiment.name))
[ "def", "add_configurations", "(", "self", ",", "experiments", ")", ":", "experiment_list", "=", "convert_to_experiment_list", "(", "experiments", ")", "for", "experiment", "in", "experiment_list", ":", "self", ".", "_trial_generator", "=", "itertools", ".", "chain", "(", "self", ".", "_trial_generator", ",", "self", ".", "_generate_trials", "(", "experiment", ".", "spec", ",", "experiment", ".", "name", ")", ")" ]
Chains generator given experiment specifications. Arguments: experiments (Experiment | list | dict): Experiments to run.
[ "Chains", "generator", "given", "experiment", "specifications", "." ]
train
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/suggest/suggestion.py#L43-L53
0.004158
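The itertools.chain trick above keeps trial generation lazy across experiments; a minimal standalone illustration:

import itertools

def gen(spec, name):
    for i in range(2):
        yield '%s-trial-%d' % (name, i)

trials = iter(())                     # empty, like the initial generator
for name in ('exp_a', 'exp_b'):       # add_configurations, in miniature
    trials = itertools.chain(trials, gen({}, name))
print(list(trials))  # ['exp_a-trial-0', 'exp_a-trial-1', 'exp_b-trial-0', 'exp_b-trial-1']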
calmjs/calmjs.parse
src/calmjs/parse/unparsers/es5.py
minify_printer
def minify_printer( obfuscate=False, obfuscate_globals=False, shadow_funcname=False, drop_semi=False): """ Construct a minimum printer. Arguments obfuscate If True, obfuscate identifiers nested in each scope with a shortened identifier name to further reduce output size. Defaults to False. obfuscate_globals Also do the same to identifiers nested on the global scope; do not enable unless the renaming of global variables in a not fully deterministic manner into something else is guaranteed to not cause problems with the generated code and other code that in the same environment that it will be executed in. Defaults to False for the reason above. drop_semi Drop semicolons whenever possible (e.g. the final semicolons of a given block). """ active_rules = [rules.minify(drop_semi=drop_semi)] if obfuscate: active_rules.append(rules.obfuscate( obfuscate_globals=obfuscate_globals, shadow_funcname=shadow_funcname, reserved_keywords=(Lexer.keywords_dict.keys()) )) return Unparser(rules=active_rules)
python
def minify_printer( obfuscate=False, obfuscate_globals=False, shadow_funcname=False, drop_semi=False): """ Construct a minimum printer. Arguments obfuscate If True, obfuscate identifiers nested in each scope with a shortened identifier name to further reduce output size. Defaults to False. obfuscate_globals Also do the same to identifiers nested on the global scope; do not enable unless the renaming of global variables in a not fully deterministic manner into something else is guaranteed to not cause problems with the generated code and other code that in the same environment that it will be executed in. Defaults to False for the reason above. drop_semi Drop semicolons whenever possible (e.g. the final semicolons of a given block). """ active_rules = [rules.minify(drop_semi=drop_semi)] if obfuscate: active_rules.append(rules.obfuscate( obfuscate_globals=obfuscate_globals, shadow_funcname=shadow_funcname, reserved_keywords=(Lexer.keywords_dict.keys()) )) return Unparser(rules=active_rules)
[ "def", "minify_printer", "(", "obfuscate", "=", "False", ",", "obfuscate_globals", "=", "False", ",", "shadow_funcname", "=", "False", ",", "drop_semi", "=", "False", ")", ":", "active_rules", "=", "[", "rules", ".", "minify", "(", "drop_semi", "=", "drop_semi", ")", "]", "if", "obfuscate", ":", "active_rules", ".", "append", "(", "rules", ".", "obfuscate", "(", "obfuscate_globals", "=", "obfuscate_globals", ",", "shadow_funcname", "=", "shadow_funcname", ",", "reserved_keywords", "=", "(", "Lexer", ".", "keywords_dict", ".", "keys", "(", ")", ")", ")", ")", "return", "Unparser", "(", "rules", "=", "active_rules", ")" ]
Construct a minimum printer. Arguments obfuscate If True, obfuscate identifiers nested in each scope with a shortened identifier name to further reduce output size. Defaults to False. obfuscate_globals Also do the same to identifiers nested on the global scope; do not enable unless the renaming of global variables in a not fully deterministic manner into something else is guaranteed to not cause problems with the generated code and other code that in the same environment that it will be executed in. Defaults to False for the reason above. drop_semi Drop semicolons whenever possible (e.g. the final semicolons of a given block).
[ "Construct", "a", "minimum", "printer", "." ]
train
https://github.com/calmjs/calmjs.parse/blob/369f0ee346c5a84c4d5c35a7733a0e63b02eac59/src/calmjs/parse/unparsers/es5.py#L345-L380
0.00082
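A usage sketch; the `es5` parser and the `minify_print` convenience wrapper are assumed from the package's documented helpers rather than shown in this record:

from calmjs.parse import es5
from calmjs.parse.unparsers.es5 import minify_print

program = es5('var long_name = 1; function f() { return long_name; }')
# obfuscate_globals renames `long_name` too; safe only when nothing else
# in the execution environment refers to it
print(minify_print(program, obfuscate=True, obfuscate_globals=True))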
pbrisk/timewave
timewave/engine.py
Engine._run_parallel_process
def _run_parallel_process(self, start_path, stop_path, queue): """ The function calls _run_process and puts the results produced by the consumer at observations of the topmost consumer into the queue """ process_num = int(current_process().name.split('-', 2)[1]) self._run_process(start_path, stop_path, process_num) queue.put(self.consumer.put())
python
def _run_parallel_process(self, start_path, stop_path, queue):
    """
    The function calls _run_process and puts results produced by consumer
    at observations of top most consumer into the queue
    """
    process_num = int(current_process().name.split('-', 2)[1])
    self._run_process(start_path, stop_path, process_num)
    queue.put(self.consumer.put())
[ "def", "_run_parallel_process", "(", "self", ",", "start_path", ",", "stop_path", ",", "queue", ")", ":", "process_num", "=", "int", "(", "current_process", "(", ")", ".", "name", ".", "split", "(", "'-'", ",", "2", ")", "[", "1", "]", ")", "self", ".", "_run_process", "(", "start_path", ",", "stop_path", ",", "process_num", ")", "queue", ".", "put", "(", "self", ".", "consumer", ".", "put", "(", ")", ")" ]
The function calls _run_process and puts results produced by consumer
at observations of top most consumer into the queue
[ "The", "function", "calls", "_run_process", "and", "puts", "results", "produced", "by", "consumer", "at", "observations", "of", "top", "most", "consumer", "into", "the", "queue" ]
train
https://github.com/pbrisk/timewave/blob/cf641391d1607a424042724c8b990d43ee270ef6/timewave/engine.py#L165-L172
0.005089
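A minimal, self-contained sketch of the pattern _run_parallel_process implements: each worker derives its index from the process name, runs its slice of paths, and pushes the consumer's partial result through a Queue. Plain stdlib multiprocessing stands in for timewave's Engine, so everything here is illustrative.

# Stdlib illustration of the worker/queue pattern; not timewave's real API.
from multiprocessing import Process, Queue, current_process

def run_slice(start_path, stop_path, queue):
    # derive a worker index from the process name, as the method above does
    process_num = int(current_process().name.split('-', 2)[1])
    partial = sum(range(start_path, stop_path))  # stand-in for consumer output
    queue.put((process_num, partial))

if __name__ == '__main__':
    queue = Queue()
    workers = [Process(target=run_slice, args=(i * 10, (i + 1) * 10, queue))
               for i in range(4)]
    for worker in workers:
        worker.start()
    results = [queue.get() for _ in workers]
    for worker in workers:
        worker.join()
    print(sorted(results))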
HDI-Project/BTB
examples/selection_example.py
tune_pipeline
def tune_pipeline(X, y, X_val, y_val, generate_model, tuner):
    """
    Tunes a specified pipeline with the specified tuner for
    TUNING_BUDGET_PER_ITER (3) iterations.

    Params:
        X: np.array of X training data
        y: np.array of y training data
        X_val: np.array of X validation data
        y_val: np.array of y validation data
        generate_model: function that returns an sklearn model to fit
        tuner: BTB tuner object for tuning hyperparameters
    """
    print("Tuning with GP tuner for %s iterations" % TUNING_BUDGET_PER_ITER)
    for i in range(TUNING_BUDGET_PER_ITER):
        params = tuner.propose()
        # create model using proposed hyperparams from tuner
        model = generate_model(params)
        model.fit(X, y)
        predicted = model.predict(X_val)
        score = accuracy_score(predicted, y_val)
        # record hyper-param combination and score for tuning
        tuner.add(params, score)

    print("Final score:", tuner._best_score)
python
def tune_pipeline(X, y, X_val, y_val, generate_model, tuner):
    """
    Tunes a specified pipeline with the specified tuner for
    TUNING_BUDGET_PER_ITER (3) iterations.

    Params:
        X: np.array of X training data
        y: np.array of y training data
        X_val: np.array of X validation data
        y_val: np.array of y validation data
        generate_model: function that returns an sklearn model to fit
        tuner: BTB tuner object for tuning hyperparameters
    """
    print("Tuning with GP tuner for %s iterations" % TUNING_BUDGET_PER_ITER)
    for i in range(TUNING_BUDGET_PER_ITER):
        params = tuner.propose()
        # create model using proposed hyperparams from tuner
        model = generate_model(params)
        model.fit(X, y)
        predicted = model.predict(X_val)
        score = accuracy_score(predicted, y_val)
        # record hyper-param combination and score for tuning
        tuner.add(params, score)

    print("Final score:", tuner._best_score)
[ "def", "tune_pipeline", "(", "X", ",", "y", ",", "X_val", ",", "y_val", ",", "generate_model", ",", "tuner", ")", ":", "print", "(", "\"Tuning with GP tuner for %s iterations\"", "%", "TUNING_BUDGET_PER_ITER", ")", "for", "i", "in", "range", "(", "TUNING_BUDGET_PER_ITER", ")", ":", "params", "=", "tuner", ".", "propose", "(", ")", "# create model using proposed hyperparams from tuner", "model", "=", "generate_model", "(", "params", ")", "model", ".", "fit", "(", "X", ",", "y", ")", "predicted", "=", "model", ".", "predict", "(", "X_val", ")", "score", "=", "accuracy_score", "(", "predicted", ",", "y_val", ")", "# record hyper-param combination and score for tuning", "tuner", ".", "add", "(", "params", ",", "score", ")", "print", "(", "\"Final score:\"", ",", "tuner", ".", "_best_score", ")" ]
Tunes a specified pipeline with the specified tuner for
TUNING_BUDGET_PER_ITER (3) iterations.

Params:
    X: np.array of X training data
    y: np.array of y training data
    X_val: np.array of X validation data
    y_val: np.array of y validation data
    generate_model: function that returns an sklearn model to fit
    tuner: BTB tuner object for tuning hyperparameters
[ "Tunes", "a", "specified", "pipeline", "with", "the", "specified", "tuner", "for", "TUNING_BUDGET_PER_ITER", "(", "3", ")", "iterations", "." ]
train
https://github.com/HDI-Project/BTB/blob/7f489ebc5591bd0886652ef743098c022d7f7460/examples/selection_example.py#L23-L46
0.001002
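A sketch of driving tune_pipeline end to end. The tuner construction below reflects the pre-1.0 BTB API (HyperParameter/ParamTypes and a GP tuner over a tunables list), which is an assumption based on this example's era; the scikit-learn parts are standard.

# Hedged sketch: the BTB imports and GP(tunables) call are assumptions.
from btb import HyperParameter, ParamTypes
from btb.tuning import GP
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC

def generate_svm(params):
    # build a fresh model from a proposed hyperparameter dict
    return SVC(C=params['C'], gamma=params['gamma'])

X, y = load_digits(return_X_y=True)
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.3)
tunables = [('C', HyperParameter(ParamTypes.FLOAT, [0.01, 10.0])),
            ('gamma', HyperParameter(ParamTypes.FLOAT, [1e-4, 1.0]))]
tune_pipeline(X_train, y_train, X_val, y_val, generate_svm, GP(tunables))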
arne-cl/discoursegraphs
src/discoursegraphs/readwrite/decour.py
DecourDocumentGraph._add_spanning_relation
def _add_spanning_relation(self, source, target):
    """add a spanning relation to this docgraph"""
    self.add_edge(source, target, layers={self.ns, self.ns+':unit'},
                  edge_type=EdgeTypes.spanning_relation)
python
def _add_spanning_relation(self, source, target):
    """add a spanning relation to this docgraph"""
    self.add_edge(source, target, layers={self.ns, self.ns+':unit'},
                  edge_type=EdgeTypes.spanning_relation)
[ "def", "_add_spanning_relation", "(", "self", ",", "source", ",", "target", ")", ":", "self", ".", "add_edge", "(", "source", ",", "target", ",", "layers", "=", "{", "self", ".", "ns", ",", "self", ".", "ns", "+", "':unit'", "}", ",", "edge_type", "=", "EdgeTypes", ".", "spanning_relation", ")" ]
add a spanning relation to this docgraph
[ "add", "a", "spanning", "relation", "to", "this", "docgraph" ]
train
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/decour.py#L151-L154
0.008403
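What the helper records, reproduced with plain networkx. The attribute names mirror the call above; the string 'spans' stands in for EdgeTypes.spanning_relation, and the graph setup is illustrative only.

# Illustrative only: a bare networkx graph instead of a real docgraph.
import networkx as nx

docgraph = nx.MultiDiGraph()
ns = 'decour'
docgraph.add_edge('utterance_1', 'token_1',
                  layers={ns, ns + ':unit'},
                  edge_type='spans')  # stand-in for EdgeTypes.spanning_relation
print(docgraph.get_edge_data('utterance_1', 'token_1'))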
KnowledgeLinks/rdfframework
rdfframework/connections/rdflibconn.py
RdflibConn.query
def query(self, sparql, mode="get", namespace=None, rtn_format="json",
          **kwargs):
    """ runs a sparql query and returns the results

    args:
        sparql: the sparql query to run
        namespace: the namespace to run the sparql query against
        mode: ['get'(default), 'update'] the type of sparql query
        rtn_format: ['json'(default), 'xml'] format of query results
    """
    if kwargs.get("debug"):
        log.setLevel(logging.DEBUG)
    conn = self.conn
    if namespace and namespace != self.namespace:
        conn = self.tstore.get_namespace(namespace)
    else:
        namespace = self.namespace
    if rtn_format not in self.qry_results_formats:
        raise KeyError("rtn_format was '%s'. Allowed values are %s" %
                       (rtn_format, self.qry_results_formats))
    if not sparql.strip().lower().startswith("prefix"):
        sparql = "%s\n%s" % (NSM.prefix(), sparql)
    start = datetime.datetime.now()
    if mode == "get":
        try:
            result = json.loads(
                conn.query(sparql).serialize(
                    format=rtn_format).decode()).get('results',
                                                     {}).get('bindings', [])
        except:
            print(sparql)
            raise
    if mode == "update":
        try:
            result = conn.update(sparql)
        except:
            print(sparql)
            raise
    log.debug("\nmode='%s', namespace='%s', rtn_format='%s'\n**** SPARQL QUERY \n%s\nQuery Time: %s",
              mode, namespace, rtn_format, sparql,
              (datetime.datetime.now() - start))
    return result
python
def query(self, sparql, mode="get", namespace=None, rtn_format="json",
          **kwargs):
    """ runs a sparql query and returns the results

    args:
        sparql: the sparql query to run
        namespace: the namespace to run the sparql query against
        mode: ['get'(default), 'update'] the type of sparql query
        rtn_format: ['json'(default), 'xml'] format of query results
    """
    if kwargs.get("debug"):
        log.setLevel(logging.DEBUG)
    conn = self.conn
    if namespace and namespace != self.namespace:
        conn = self.tstore.get_namespace(namespace)
    else:
        namespace = self.namespace
    if rtn_format not in self.qry_results_formats:
        raise KeyError("rtn_format was '%s'. Allowed values are %s" %
                       (rtn_format, self.qry_results_formats))
    if not sparql.strip().lower().startswith("prefix"):
        sparql = "%s\n%s" % (NSM.prefix(), sparql)
    start = datetime.datetime.now()
    if mode == "get":
        try:
            result = json.loads(
                conn.query(sparql).serialize(
                    format=rtn_format).decode()).get('results',
                                                     {}).get('bindings', [])
        except:
            print(sparql)
            raise
    if mode == "update":
        try:
            result = conn.update(sparql)
        except:
            print(sparql)
            raise
    log.debug("\nmode='%s', namespace='%s', rtn_format='%s'\n**** SPARQL QUERY \n%s\nQuery Time: %s",
              mode, namespace, rtn_format, sparql,
              (datetime.datetime.now() - start))
    return result
[ "def", "query", "(", "self", ",", "sparql", ",", "mode", "=", "\"get\"", ",", "namespace", "=", "None", ",", "rtn_format", "=", "\"json\"", ",", "*", "*", "kwargs", ")", ":", "if", "kwargs", ".", "get", "(", "\"debug\"", ")", ":", "log", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "conn", "=", "self", ".", "conn", "if", "namespace", "and", "namespace", "!=", "self", ".", "namespace", ":", "conn", "=", "self", ".", "tstore", ".", "get_namespace", "(", "namespace", ")", "else", ":", "namespace", "=", "self", ".", "namespace", "if", "rtn_format", "not", "in", "self", ".", "qry_results_formats", ":", "raise", "KeyError", "(", "\"rtn_format was '%s'. Allowed values are %s\"", "%", "(", "rtn_format", ",", "self", ".", "qry_results_formats", ")", ")", "if", "not", "sparql", ".", "strip", "(", ")", ".", "lower", "(", ")", ".", "startswith", "(", "\"prefix\"", ")", ":", "sparql", "=", "\"%s\\n%s\"", "%", "(", "NSM", ".", "prefix", "(", ")", ",", "sparql", ")", "start", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "if", "mode", "==", "\"get\"", ":", "try", ":", "result", "=", "json", ".", "loads", "(", "conn", ".", "query", "(", "sparql", ")", ".", "serialize", "(", "format", "=", "rtn_format", ")", ".", "decode", "(", ")", ")", ".", "get", "(", "'results'", ",", "{", "}", ")", ".", "get", "(", "'bindings'", ",", "[", "]", ")", "except", ":", "print", "(", "sparql", ")", "raise", "if", "mode", "==", "\"update\"", ":", "try", ":", "result", "=", "conn", ".", "update", "(", "sparql", ")", "except", ":", "print", "(", "sparql", ")", "raise", "log", ".", "debug", "(", "\"\\nmode='%s', namespace='%s', rtn_format='%s'\\n**** SPARQL QUERY \\n%s\\nQuery Time: %s\"", ",", "mode", ",", "namespace", ",", "rtn_format", ",", "sparql", ",", "(", "datetime", ".", "datetime", ".", "now", "(", ")", "-", "start", ")", ")", "return", "result" ]
runs a sparql query and returns the results

args:
    sparql: the sparql query to run
    namespace: the namespace to run the sparql query against
    mode: ['get'(default), 'update'] the type of sparql query
    rtn_format: ['json'(default), 'xml'] format of query results
[ "runs", "a", "sparql", "query", "and", "returns", "the", "results", "args", ":", "sparql", ":", "the", "sparql", "query", "to", "run", "namespace", ":", "the", "namespace", "to", "run", "the", "sparql", "query", "against", "mode", ":", "[", "get", "(", "default", ")", "update", "]", "the", "type", "of", "sparql", "query", "rtn_format", ":", "[", "json", "(", "default", ")", "xml", "]", "format", "of", "query", "results" ]
train
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/connections/rdflibconn.py#L161-L209
0.008573
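A standalone sketch of what query() returns in "get" mode: the bindings list from SPARQL JSON results. It uses rdflib directly because constructing an RdflibConn is deployment-specific; the serialize/decode round trip mirrors the method above and assumes an rdflib version whose result serialization returns bytes.

# Mirrors the get-mode body above with a plain rdflib Graph.
import json
from rdflib import Graph

g = Graph()
g.parse(data='<http://ex/a> <http://ex/b> <http://ex/c> .', format='nt')
raw = g.query('SELECT ?s ?p ?o WHERE { ?s ?p ?o }').serialize(format='json')
bindings = json.loads(raw.decode())['results']['bindings']  # bytes assumed
for row in bindings:
    print(row['s']['value'], '->', row['o']['value'])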
jazzband/inflect
inflect.py
engine.defadj
def defadj(self, singular, plural):
    """
    Set the adjective plural of singular to plural.
    """
    self.checkpat(singular)
    self.checkpatplural(plural)
    self.pl_adj_user_defined.extend((singular, plural))
    return 1
python
def defadj(self, singular, plural):
    """
    Set the adjective plural of singular to plural.
    """
    self.checkpat(singular)
    self.checkpatplural(plural)
    self.pl_adj_user_defined.extend((singular, plural))
    return 1
[ "def", "defadj", "(", "self", ",", "singular", ",", "plural", ")", ":", "self", ".", "checkpat", "(", "singular", ")", "self", ".", "checkpatplural", "(", "plural", ")", "self", ".", "pl_adj_user_defined", ".", "extend", "(", "(", "singular", ",", "plural", ")", ")", "return", "1" ]
Set the adjective plural of singular to plural.
[ "Set", "the", "adjective", "plural", "of", "singular", "to", "plural", "." ]
train
https://github.com/jazzband/inflect/blob/c2a3df74725990c195a5d7f37199de56873962e9/inflect.py#L1978-L1986
0.007663
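Usage sketch: register a user-defined adjective plural, then look it up with plural_adj, inflect's documented entry point for adjective plurals.

import inflect

p = inflect.engine()
p.defadj('gauche', 'gauches')   # custom singular -> plural mapping
print(p.plural_adj('gauche'))   # -> 'gauches'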
gbowerman/azurerm
azurerm/acs.py
list_container_services_sub
def list_container_services_sub(access_token, subscription_id):
    '''List the container services in a subscription.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.

    Returns:
        HTTP response. JSON model.
    '''
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/providers/Microsoft.ContainerService/ContainerServices',
                        '?api-version=', ACS_API])
    return do_get(endpoint, access_token)
python
def list_container_services_sub(access_token, subscription_id):
    '''List the container services in a subscription.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.

    Returns:
        HTTP response. JSON model.
    '''
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/providers/Microsoft.ContainerService/ContainerServices',
                        '?api-version=', ACS_API])
    return do_get(endpoint, access_token)
[ "def", "list_container_services_sub", "(", "access_token", ",", "subscription_id", ")", ":", "endpoint", "=", "''", ".", "join", "(", "[", "get_rm_endpoint", "(", ")", ",", "'/subscriptions/'", ",", "subscription_id", ",", "'/providers/Microsoft.ContainerService/ContainerServices'", ",", "'?api-version='", ",", "ACS_API", "]", ")", "return", "do_get", "(", "endpoint", ",", "access_token", ")" ]
List the container services in a subscription.

Args:
    access_token (str): A valid Azure authentication token.
    subscription_id (str): Azure subscription id.

Returns:
    HTTP response. JSON model.
[ "List", "the", "container", "services", "in", "a", "subscription", "." ]
train
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/acs.py#L138-L152
0.003442
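A usage sketch; get_access_token is azurerm's service-principal helper, the IDs are placeholders, and the assumption that the call hands back parsed JSON (a dict with a 'value' list) reflects how most azurerm wrappers behave rather than anything stated in this record.

# Placeholders throughout; do_get returning parsed JSON is an assumption.
import azurerm

token = azurerm.get_access_token('<tenant-id>', '<app-id>', '<app-secret>')
clusters = azurerm.list_container_services_sub(token, '<subscription-id>')
for acs in clusters.get('value', []):
    print(acs['name'], acs['location'])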
mlperf/training
translation/tensorflow/transformer/utils/metrics.py
padded_accuracy
def padded_accuracy(logits, labels):
    """Percentage of times that predictions match labels on non-0s."""
    with tf.variable_scope("padded_accuracy", values=[logits, labels]):
        logits, labels = _pad_tensors_to_same_length(logits, labels)
        weights = tf.to_float(tf.not_equal(labels, 0))
        outputs = tf.to_int32(tf.argmax(logits, axis=-1))
        padded_labels = tf.to_int32(labels)
        return tf.to_float(tf.equal(outputs, padded_labels)), weights
python
def padded_accuracy(logits, labels):
    """Percentage of times that predictions match labels on non-0s."""
    with tf.variable_scope("padded_accuracy", values=[logits, labels]):
        logits, labels = _pad_tensors_to_same_length(logits, labels)
        weights = tf.to_float(tf.not_equal(labels, 0))
        outputs = tf.to_int32(tf.argmax(logits, axis=-1))
        padded_labels = tf.to_int32(labels)
        return tf.to_float(tf.equal(outputs, padded_labels)), weights
[ "def", "padded_accuracy", "(", "logits", ",", "labels", ")", ":", "with", "tf", ".", "variable_scope", "(", "\"padded_accuracy\"", ",", "values", "=", "[", "logits", ",", "labels", "]", ")", ":", "logits", ",", "labels", "=", "_pad_tensors_to_same_length", "(", "logits", ",", "labels", ")", "weights", "=", "tf", ".", "to_float", "(", "tf", ".", "not_equal", "(", "labels", ",", "0", ")", ")", "outputs", "=", "tf", ".", "to_int32", "(", "tf", ".", "argmax", "(", "logits", ",", "axis", "=", "-", "1", ")", ")", "padded_labels", "=", "tf", ".", "to_int32", "(", "labels", ")", "return", "tf", ".", "to_float", "(", "tf", ".", "equal", "(", "outputs", ",", "padded_labels", ")", ")", ",", "weights" ]
Percentage of times that predictions match labels on non-0s.
[ "Percentage", "of", "times", "that", "predictions", "match", "labels", "on", "non", "-", "0s", "." ]
train
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/translation/tensorflow/transformer/utils/metrics.py#L133-L140
0.006623
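Turning the (per-token accuracy, weights) pair into a scalar is the usual weighted-mean reduction; the TF1-style session below matches the tf.to_float era of the code and assumes padded_accuracy from the module above is in scope.

# TF1-style sketch; shapes are arbitrary, 0 is the padding id.
import tensorflow as tf

logits = tf.random_uniform([2, 5, 30])                    # batch x length x vocab
labels = tf.constant([[3, 5, 0, 0, 0], [1, 2, 4, 0, 0]])  # zero-padded targets
acc, weights = padded_accuracy(logits, labels)
padded_acc = tf.reduce_sum(acc * weights) / tf.reduce_sum(weights)
with tf.Session() as sess:
    print(sess.run(padded_acc))  # fraction of correct non-padding tokens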
DusanMadar/TorIpChanger
toripchanger/changer.py
TorIpChanger.get_current_ip
def get_current_ip(self):
    """
    Get the current IP Tor is using.

    :returns str
    :raises TorIpError
    """
    response = get(ICANHAZIP, proxies={"http": self.local_http_proxy})

    if response.ok:
        return self._get_response_text(response)

    raise TorIpError("Failed to get the current Tor IP")
python
def get_current_ip(self):
    """
    Get the current IP Tor is using.

    :returns str
    :raises TorIpError
    """
    response = get(ICANHAZIP, proxies={"http": self.local_http_proxy})

    if response.ok:
        return self._get_response_text(response)

    raise TorIpError("Failed to get the current Tor IP")
[ "def", "get_current_ip", "(", "self", ")", ":", "response", "=", "get", "(", "ICANHAZIP", ",", "proxies", "=", "{", "\"http\"", ":", "self", ".", "local_http_proxy", "}", ")", "if", "response", ".", "ok", ":", "return", "self", ".", "_get_response_text", "(", "response", ")", "raise", "TorIpError", "(", "\"Failed to get the current Tor IP\"", ")" ]
Get the current IP Tor is using.

:returns str
:raises TorIpError
[ "Get", "the", "current", "IP", "Tor", "is", "using", "." ]
train
https://github.com/DusanMadar/TorIpChanger/blob/c2c4371e16f239b01ea36b82d971612bae00526d/toripchanger/changer.py#L82-L94
0.00565
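Usage sketch; it assumes a Tor instance with a local HTTP proxy (e.g. Privoxy) already running, and that TorIpChanger's constructor accepts a reuse_threshold keyword, which matches the project's README-era API but is not guaranteed by this record.

# Assumes Tor plus a local HTTP proxy are up; constructor kwargs are hedged.
from toripchanger import TorIpChanger

changer = TorIpChanger(reuse_threshold=1)
print(changer.get_current_ip())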
alexa/alexa-skills-kit-sdk-for-python
ask-sdk-runtime/ask_sdk_runtime/dispatch_components/request_components.py
GenericRequestMapper.get_request_handler_chain
def get_request_handler_chain(self, handler_input):
    # type: (Input) -> Union[GenericRequestHandlerChain, None]
    """Get the request handler chain that can handle the dispatch input.

    :param handler_input: Generic input passed to the dispatcher.
    :type handler_input: Input
    :return: Handler Chain that can handle the input.
    :rtype: Union[None, GenericRequestHandlerChain]
    """
    for chain in self.request_handler_chains:
        handler = chain.request_handler  # type: AbstractRequestHandler
        if handler.can_handle(handler_input=handler_input):
            return chain
    return None
python
def get_request_handler_chain(self, handler_input):
    # type: (Input) -> Union[GenericRequestHandlerChain, None]
    """Get the request handler chain that can handle the dispatch input.

    :param handler_input: Generic input passed to the dispatcher.
    :type handler_input: Input
    :return: Handler Chain that can handle the input.
    :rtype: Union[None, GenericRequestHandlerChain]
    """
    for chain in self.request_handler_chains:
        handler = chain.request_handler  # type: AbstractRequestHandler
        if handler.can_handle(handler_input=handler_input):
            return chain
    return None
[ "def", "get_request_handler_chain", "(", "self", ",", "handler_input", ")", ":", "# type: (Input) -> Union[GenericRequestHandlerChain, None]", "for", "chain", "in", "self", ".", "request_handler_chains", ":", "handler", "=", "chain", ".", "request_handler", "# type: AbstractRequestHandler", "if", "handler", ".", "can_handle", "(", "handler_input", "=", "handler_input", ")", ":", "return", "chain", "return", "None" ]
Get the request handler chain that can handle the dispatch input.

:param handler_input: Generic input passed to the dispatcher.
:type handler_input: Input
:return: Handler Chain that can handle the input.
:rtype: Union[None, GenericRequestHandlerChain]
[ "Get", "the", "request", "handler", "chain", "that", "can", "handle", "the", "dispatch", "input", "." ]
train
https://github.com/alexa/alexa-skills-kit-sdk-for-python/blob/097b6406aa12d5ca0b825b00c936861b530cbf39/ask-sdk-runtime/ask_sdk_runtime/dispatch_components/request_components.py#L348-L363
0.004373
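A sketch of how a mapper resolves a chain. The handler subclasses the SDK's AbstractRequestHandler so the chain accepts it; the duck-typed handler_input and the keyword names on the chain/mapper constructors are assumptions about this SDK version.

# Hedged sketch of mapper dispatch; handler_input is a stand-in object.
from ask_sdk_runtime.dispatch_components import (
    AbstractRequestHandler, GenericRequestHandlerChain, GenericRequestMapper)

class LaunchHandler(AbstractRequestHandler):
    def can_handle(self, handler_input):
        return getattr(handler_input, 'request_type', None) == 'LaunchRequest'

    def handle(self, handler_input):
        return 'Welcome!'

mapper = GenericRequestMapper(
    request_handler_chains=[GenericRequestHandlerChain(LaunchHandler())])
# mapper.get_request_handler_chain(some_input) returns the chain, or None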
portfors-lab/sparkle
sparkle/gui/plotting/viewbox.py
SpikeyViewBox.setZeroWheel
def setZeroWheel(self):
    """Sets the zoom locus of the mouse wheel to the point 0,0
    (instead of the coordinates under the cursor)"""
    self._zeroWheel = True  # want padding in this case
    self.menu.viewAll.triggered.disconnect()
    self.menu.viewAll.triggered.connect(self.autoRange)
python
def setZeroWheel(self):
    """Sets the zoom locus of the mouse wheel to the point 0,0
    (instead of the coordinates under the cursor)"""
    self._zeroWheel = True  # want padding in this case
    self.menu.viewAll.triggered.disconnect()
    self.menu.viewAll.triggered.connect(self.autoRange)
[ "def", "setZeroWheel", "(", "self", ")", ":", "self", ".", "_zeroWheel", "=", "True", "# want padding in this case", "self", ".", "menu", ".", "viewAll", ".", "triggered", ".", "disconnect", "(", ")", "self", ".", "menu", ".", "viewAll", ".", "triggered", ".", "connect", "(", "self", ".", "autoRange", ")" ]
Sets the zoom locus of the mouse wheel to the point 0,0 (instead of the coordinates under the cursor)
[ "Sets", "the", "zoom", "locus", "of", "the", "mouse", "wheel", "to", "the", "point", "0", "0", "(", "instead", "of", "the", "coordinates", "under", "the", "cursor", ")" ]
train
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/plotting/viewbox.py#L25-L31
0.006192
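An illustration of the "zero wheel" idea: a pyqtgraph ViewBox whose wheel zoom is always centred on (0, 0) instead of the cursor. This is not sparkle's own implementation, and ev.delta() plus wheelScaleFactor follow the older pyqtgraph API.

# Concept sketch only; older pyqtgraph wheel-event API assumed.
import pyqtgraph as pg

class ZeroWheelViewBox(pg.ViewBox):
    def wheelEvent(self, ev, axis=None):
        # same scale formula as ViewBox, but pinned to the origin
        factor = 1.02 ** (ev.delta() * self.state['wheelScaleFactor'])
        self.scaleBy((factor, factor), center=pg.Point(0, 0))
        ev.accept()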
phac-nml/sistr_cmd
sistr/misc/reduce_to_centroid_alleles.py
parse_cgmlst_alleles
def parse_cgmlst_alleles(cgmlst_fasta):
    """Parse cgMLST alleles from fasta file

    cgMLST FASTA file must have a header format of
    ">{marker name}|{allele name}"

    Args:
        cgmlst_fasta (str): cgMLST fasta file path

    Returns:
        dict of list: Marker name to list of allele sequences
    """
    out = defaultdict(list)
    for header, seq in parse_fasta(cgmlst_fasta):
        if not '|' in header:
            raise Exception('Unexpected format for cgMLST fasta file header. No "|" (pipe) delimiter present! Header="{}"'.format(header))
        marker_name, allele_name = header.split('|')
        out[marker_name].append(seq)
    return out
python
def parse_cgmlst_alleles(cgmlst_fasta):
    """Parse cgMLST alleles from fasta file

    cgMLST FASTA file must have a header format of
    ">{marker name}|{allele name}"

    Args:
        cgmlst_fasta (str): cgMLST fasta file path

    Returns:
        dict of list: Marker name to list of allele sequences
    """
    out = defaultdict(list)
    for header, seq in parse_fasta(cgmlst_fasta):
        if not '|' in header:
            raise Exception('Unexpected format for cgMLST fasta file header. No "|" (pipe) delimiter present! Header="{}"'.format(header))
        marker_name, allele_name = header.split('|')
        out[marker_name].append(seq)
    return out
[ "def", "parse_cgmlst_alleles", "(", "cgmlst_fasta", ")", ":", "out", "=", "defaultdict", "(", "list", ")", "for", "header", ",", "seq", "in", "parse_fasta", "(", "cgmlst_fasta", ")", ":", "if", "not", "'|'", "in", "header", ":", "raise", "Exception", "(", "'Unexpected format for cgMLST fasta file header. No \"|\" (pipe) delimiter present! Header=\"{}\"'", ".", "format", "(", "header", ")", ")", "marker_name", ",", "allele_name", "=", "header", ".", "split", "(", "'|'", ")", "out", "[", "marker_name", "]", ".", "append", "(", "seq", ")", "return", "out" ]
Parse cgMLST alleles from fasta file

cgMLST FASTA file must have a header format of ">{marker name}|{allele name}"

Args:
    cgmlst_fasta (str): cgMLST fasta file path

Returns:
    dict of list: Marker name to list of allele sequences
[ "Parse", "cgMLST", "alleles", "from", "fasta", "file", "cgMLST", "FASTA", "file", "must", "have", "a", "header", "format", "of", ">", "{", "marker", "name", "}", "|", "{", "allele", "name", "}" ]
train
https://github.com/phac-nml/sistr_cmd/blob/4630fae72439723b354a94b94fbe76ad2f9f6295/sistr/misc/reduce_to_centroid_alleles.py#L44-L60
0.006033
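A self-contained sketch of the header contract; parse_fasta is assumed to yield (header, sequence) pairs, so a tiny in-memory stand-in replaces it here.

# Stand-in records instead of a real FASTA file.
from collections import defaultdict

records = [('aroC|1', 'ATGGCT'), ('aroC|2', 'ATGGCA'), ('dnaN|1', 'TTGACC')]

alleles = defaultdict(list)
for header, seq in records:
    marker_name, allele_name = header.split('|')
    alleles[marker_name].append(seq)

print(dict(alleles))  # {'aroC': ['ATGGCT', 'ATGGCA'], 'dnaN': ['TTGACC']}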
ynop/audiomate
audiomate/corpus/base.py
CorpusView.stats_per_utterance
def stats_per_utterance(self):
    """
    Return statistics calculated for all samples of each utterance in the corpus.

    Returns:
        dict: A dictionary containing a DataStats object for each utt.
    """

    all_stats = {}

    for utterance in self.utterances.values():
        data = utterance.read_samples()
        all_stats[utterance.idx] = stats.DataStats(float(np.mean(data)),
                                                   float(np.var(data)),
                                                   np.min(data),
                                                   np.max(data),
                                                   data.size)

    return all_stats
python
def stats_per_utterance(self):
    """
    Return statistics calculated for all samples of each utterance in the corpus.

    Returns:
        dict: A dictionary containing a DataStats object for each utt.
    """

    all_stats = {}

    for utterance in self.utterances.values():
        data = utterance.read_samples()
        all_stats[utterance.idx] = stats.DataStats(float(np.mean(data)),
                                                   float(np.var(data)),
                                                   np.min(data),
                                                   np.max(data),
                                                   data.size)

    return all_stats
[ "def", "stats_per_utterance", "(", "self", ")", ":", "all_stats", "=", "{", "}", "for", "utterance", "in", "self", ".", "utterances", ".", "values", "(", ")", ":", "data", "=", "utterance", ".", "read_samples", "(", ")", "all_stats", "[", "utterance", ".", "idx", "]", "=", "stats", ".", "DataStats", "(", "float", "(", "np", ".", "mean", "(", "data", ")", ")", ",", "float", "(", "np", ".", "var", "(", "data", ")", ")", ",", "np", ".", "min", "(", "data", ")", ",", "np", ".", "max", "(", "data", ")", ",", "data", ".", "size", ")", "return", "all_stats" ]
Return statistics calculated for all samples of each utterance in the corpus.

Returns:
    dict: A dictionary containing a DataStats object for each utt.
[ "Return", "statistics", "calculated", "for", "all", "samples", "of", "each", "utterance", "in", "the", "corpus", "." ]
train
https://github.com/ynop/audiomate/blob/61727920b23a708293c3d526fa3000d4de9c6c21/audiomate/corpus/base.py#L236-L254
0.004076
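Usage sketch; Corpus.load with a bare path and the mean/var attribute names on DataStats are assumptions about the surrounding audiomate API.

# Path and attribute names are assumptions; see lead-in above.
import audiomate

corpus = audiomate.Corpus.load('/data/my-corpus')
for utt_idx, utt_stats in corpus.stats_per_utterance().items():
    print(utt_idx, utt_stats.mean, utt_stats.var)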