Dataset columns:
- repo: string (length 7 to 54)
- path: string (length 4 to 192)
- url: string (length 87 to 284)
- code: string (length 78 to 104k)
- code_tokens: sequence
- docstring: string (length 1 to 46.9k)
- docstring_tokens: sequence
- language: string (1 distinct value)
- partition: string (3 distinct values)
PixelwarStudio/PyTree
Tree/core.py
https://github.com/PixelwarStudio/PyTree/blob/f14b25ea145da6b00d836e34251d2a4c823766dc/Tree/core.py#L92-L101
def get_node_age_sum(self, age=None):
    """Get the sum of branches grown in a specific age.

    Returns:
        int: The sum of all nodes grown in an age.
    """
    if age is None:
        age = self.age
    return pow(self.comp, age)
[ "def", "get_node_age_sum", "(", "self", ",", "age", "=", "None", ")", ":", "if", "age", "is", "None", ":", "age", "=", "self", ".", "age", "return", "pow", "(", "self", ".", "comp", ",", "age", ")" ]
Get the sum of branches grown in a specific age. Returns: int: The sum of all nodes grown in an age.
[ "Get", "the", "sum", "of", "branches", "grown", "in", "an", "specific", "age", "." ]
python
train
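A minimal standalone sketch of the relationship encoded above: if every branch splits into `comp` children per age step, the nodes grown at a given age number `comp ** age`. The `Tree` class itself is not part of this record, so the helpers below are illustrative only.

def node_age_sum(comp, age):
    # Same arithmetic as get_node_age_sum(), detached from the class.
    return pow(comp, age)

def grown_levels(comp, ages):
    # Brute-force check by actually growing the levels.
    level = 1  # one trunk at age 0
    counts = []
    for _ in range(ages + 1):
        counts.append(level)
        level *= comp
    return counts

assert grown_levels(2, 4) == [node_age_sum(2, a) for a in range(5)]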
evhub/coconut
coconut/command/util.py
https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/command/util.py#L493-L512
def run(self, code, use_eval=None, path=None, all_errors_exit=False, store=True):
    """Execute Python code."""
    if use_eval is None:
        run_func = interpret
    elif use_eval is True:
        run_func = eval
    else:
        run_func = exec_func
    with self.handling_errors(all_errors_exit):
        if path is None:
            result = run_func(code, self.vars)
        else:
            use_vars = self.build_vars(path)
            try:
                result = run_func(code, use_vars)
            finally:
                self.vars.update(use_vars)
        if store:
            self.store(code)
        return result
[ "def", "run", "(", "self", ",", "code", ",", "use_eval", "=", "None", ",", "path", "=", "None", ",", "all_errors_exit", "=", "False", ",", "store", "=", "True", ")", ":", "if", "use_eval", "is", "None", ":", "run_func", "=", "interpret", "elif", "use_eval", "is", "True", ":", "run_func", "=", "eval", "else", ":", "run_func", "=", "exec_func", "with", "self", ".", "handling_errors", "(", "all_errors_exit", ")", ":", "if", "path", "is", "None", ":", "result", "=", "run_func", "(", "code", ",", "self", ".", "vars", ")", "else", ":", "use_vars", "=", "self", ".", "build_vars", "(", "path", ")", "try", ":", "result", "=", "run_func", "(", "code", ",", "use_vars", ")", "finally", ":", "self", ".", "vars", ".", "update", "(", "use_vars", ")", "if", "store", ":", "self", ".", "store", "(", "code", ")", "return", "result" ]
Execute Python code.
[ "Execute", "Python", "code", "." ]
python
train
google-research/batch-ppo
agents/tools/loop.py
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/loop.py#L170-L187
def _find_current_phase(self, global_step):
    """Determine the current phase based on the global step.

    This ensures continuing the correct phase after restoring checkpoints.

    Args:
        global_step: The global number of steps performed across all phases.

    Returns:
        Tuple of phase object, epoch number, and phase steps within the epoch.
    """
    epoch_size = sum(phase.steps for phase in self._phases)
    epoch = int(global_step // epoch_size)
    steps_in = global_step % epoch_size
    for phase in self._phases:
        if steps_in < phase.steps:
            return phase, epoch, steps_in
        steps_in -= phase.steps
[ "def", "_find_current_phase", "(", "self", ",", "global_step", ")", ":", "epoch_size", "=", "sum", "(", "phase", ".", "steps", "for", "phase", "in", "self", ".", "_phases", ")", "epoch", "=", "int", "(", "global_step", "//", "epoch_size", ")", "steps_in", "=", "global_step", "%", "epoch_size", "for", "phase", "in", "self", ".", "_phases", ":", "if", "steps_in", "<", "phase", ".", "steps", ":", "return", "phase", ",", "epoch", ",", "steps_in", "steps_in", "-=", "phase", ".", "steps" ]
Determine the current phase based on the global step. This ensures continuing the correct phase after restoring checkpoints. Args: global_step: The global number of steps performed across all phases. Returns: Tuple of phase object, epoch number, and phase steps within the epoch.
[ "Determine", "the", "current", "phase", "based", "on", "the", "global", "step", "." ]
python
train
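The phase lookup above is plain modular arithmetic over one epoch. A self-contained sketch with a hypothetical `Phase` stand-in (the real loop's phase objects carry more fields; only `steps` matters here):

from collections import namedtuple

Phase = namedtuple('Phase', 'name steps')

def find_current_phase(phases, global_step):
    # Same arithmetic as the method above, detached from the class.
    epoch_size = sum(phase.steps for phase in phases)
    epoch = int(global_step // epoch_size)
    steps_in = global_step % epoch_size
    for phase in phases:
        if steps_in < phase.steps:
            return phase, epoch, steps_in
        steps_in -= phase.steps

phases = [Phase('train', 100), Phase('eval', 10)]
# Step 215 falls in epoch 1 (epoch size 110), 5 steps into the eval phase.
assert find_current_phase(phases, 215) == (phases[1], 1, 5)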
kobejohn/PQHelper
pqhelper/easy.py
https://github.com/kobejohn/PQHelper/blob/d2b78a22dcb631794295e6a159b06f39c3f10db6/pqhelper/easy.py#L8-L56
def versus_summaries(turns=2, sims_to_average=2, async_results_q=None):
    """Return summaries of the likely results of each available action.

    Arguments:
    - turns: how many turns to simulate.
        - in 2013, 1 is fast (seconds), 2 is slow (seconds), 3 who knows
    - sims_to_average: how many times to run the simulation to get more
        representative average results of each action.
    - async_results_q: provide a multiprocessing Queue on which the
        summaries of each turn will be placed. this is an asynchronous
        alternative to waiting for the final return value
    """
    board, player, opponent, extra_actions = _state_investigator.get_versus()
    if extra_actions:
        extra_actions = 1  # limit value for realistic time
    if board is None:
        return tuple()
    averaged_summaries = list()  # default return value is empty
    # keep a separate advisor for each simulation to average
    advisors = list()
    for i in range(sims_to_average):
        advisor = versus.Advisor()
        advisor.reset(board, player, opponent, extra_actions)
        advisors.append(advisor)
    # provide async sim results per turn; final results as return value
    for turn in range(turns):
        # store {action: list of results from each simulation}
        summaries_by_action = dict()
        for i in range(sims_to_average):
            advisor = advisors[i]
            advisor.simulate_next_turn()
            for s in advisor.sorted_current_summaries():
                summaries_by_action.setdefault(s.action, list()).append(s)
        # now all sims and analysis for this turn have been completed
        averaged_summaries = list()
        for action, summaries in summaries_by_action.items():
            board = summaries[0].board  # any board. they are all the same
            action = summaries[0].action  # any action. they are all the same
            score_sum = sum(s.score for s in summaries)
            score_avg = score_sum / len(summaries)
            manadrain_sum = sum(s.mana_drain_leaves for s in summaries)
            leaves_sum = sum(s.total_leaves for s in summaries)
            avg_summary = base.Summary(board, action, score_avg,
                                       manadrain_sum, leaves_sum)
            averaged_summaries.append(avg_summary)
        averaged_summaries.sort(key=lambda s: s.score, reverse=True)
        # option to provide the results asynchronously
        if async_results_q is not None:
            async_results_q.put(averaged_summaries)
    return averaged_summaries
[ "def", "versus_summaries", "(", "turns", "=", "2", ",", "sims_to_average", "=", "2", ",", "async_results_q", "=", "None", ")", ":", "board", ",", "player", ",", "opponent", ",", "extra_actions", "=", "_state_investigator", ".", "get_versus", "(", ")", "if", "extra_actions", ":", "extra_actions", "=", "1", "# limit value for realistic time", "if", "board", "is", "None", ":", "return", "tuple", "(", ")", "averaged_summaries", "=", "list", "(", ")", "# default return value is empty", "# keep a separate advisor for each simulation to average", "advisors", "=", "list", "(", ")", "for", "i", "in", "range", "(", "sims_to_average", ")", ":", "advisor", "=", "versus", ".", "Advisor", "(", ")", "advisor", ".", "reset", "(", "board", ",", "player", ",", "opponent", ",", "extra_actions", ")", "advisors", ".", "append", "(", "advisor", ")", "# provide async sim results per turn; final results as return value", "for", "turn", "in", "range", "(", "turns", ")", ":", "# store {action: list of results from each simulation}", "summaries_by_action", "=", "dict", "(", ")", "for", "i", "in", "range", "(", "sims_to_average", ")", ":", "advisor", "=", "advisors", "[", "i", "]", "advisor", ".", "simulate_next_turn", "(", ")", "for", "s", "in", "advisor", ".", "sorted_current_summaries", "(", ")", ":", "summaries_by_action", ".", "setdefault", "(", "s", ".", "action", ",", "list", "(", ")", ")", ".", "append", "(", "s", ")", "# now all sims and analysis for this turn have been completed", "averaged_summaries", "=", "list", "(", ")", "for", "action", ",", "summaries", "in", "summaries_by_action", ".", "items", "(", ")", ":", "board", "=", "summaries", "[", "0", "]", ".", "board", "# any board. they are all the same", "action", "=", "summaries", "[", "0", "]", ".", "action", "# any action. they are all the same", "score_sum", "=", "sum", "(", "s", ".", "score", "for", "s", "in", "summaries", ")", "score_avg", "=", "score_sum", "/", "len", "(", "summaries", ")", "manadrain_sum", "=", "sum", "(", "s", ".", "mana_drain_leaves", "for", "s", "in", "summaries", ")", "leaves_sum", "=", "sum", "(", "s", ".", "total_leaves", "for", "s", "in", "summaries", ")", "avg_summary", "=", "base", ".", "Summary", "(", "board", ",", "action", ",", "score_avg", ",", "manadrain_sum", ",", "leaves_sum", ")", "averaged_summaries", ".", "append", "(", "avg_summary", ")", "averaged_summaries", ".", "sort", "(", "key", "=", "lambda", "s", ":", "s", ".", "score", ",", "reverse", "=", "True", ")", "# option to provide the results asynchronouslys", "if", "not", "async_results_q", "is", "None", ":", "async_results_q", ".", "put", "(", "averaged_summaries", ")", "return", "averaged_summaries" ]
Return summaries of the likely results of each available action. Arguments: - turns: how many turns to simulate. - in 2013, 1 is fast (seconds), 2 is slow (seconds), 3 who knows - sims_to_average: how many times to run the simulation to get more representative average results of each action. - async_results_q: provide a multiprocessing Queue on which the summaries of each turn will be placed. this is an asynchronous alternative to waiting for the final return value
[ "Return", "summaries", "of", "the", "likely", "resutls", "of", "each", "available", "action", ".." ]
python
train
ellmetha/django-machina
machina/apps/forum_conversation/abstract_models.py
https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/apps/forum_conversation/abstract_models.py#L269-L271
def is_topic_head(self):
    """ Returns ``True`` if the post is the first post of the topic. """
    return self.topic.first_post.id == self.id if self.topic.first_post else False
[ "def", "is_topic_head", "(", "self", ")", ":", "return", "self", ".", "topic", ".", "first_post", ".", "id", "==", "self", ".", "id", "if", "self", ".", "topic", ".", "first_post", "else", "False" ]
Returns ``True`` if the post is the first post of the topic.
[ "Returns", "True", "if", "the", "post", "is", "the", "first", "post", "of", "the", "topic", "." ]
python
train
f213/rumetr-client
rumetr/roometr.py
https://github.com/f213/rumetr-client/blob/5180152bcb2eed8246b88035db7c0bb1fe603166/rumetr/roometr.py#L61-L70
def appt_exists(self, complex: str, house: str, appt: str) -> bool:
    """Shortcut to check if appt exists in our database."""
    try:
        self.check_appt(complex, house, appt)
    except exceptions.RumetrApptNotFound:
        return False
    return True
[ "def", "appt_exists", "(", "self", ",", "complex", ":", "str", ",", "house", ":", "str", ",", "appt", ":", "str", ")", "->", "bool", ":", "try", ":", "self", ".", "check_appt", "(", "complex", ",", "house", ",", "appt", ")", "except", "exceptions", ".", "RumetrApptNotFound", ":", "return", "False", "return", "True" ]
Shortcut to check if appt exists in our database.
[ "Shortcut", "to", "check", "if", "appt", "exists", "in", "our", "database", "." ]
python
train
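appt_exists is the EAFP idiom: call the stricter checker and translate its "not found" exception into a boolean. A generic sketch of the same pattern (the names below are illustrative, not from the rumetr client):

class NotFound(Exception):
    pass

def check(key, store):
    # Raises instead of returning False, like check_appt above.
    if key not in store:
        raise NotFound(key)

def exists(key, store):
    try:
        check(key, store)
    except NotFound:
        return False
    return True

assert exists('a', {'a': 1}) and not exists('b', {'a': 1})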
SuryaSankar/flask-sqlalchemy-booster
flask_sqlalchemy_booster/custom_collections.py
https://github.com/SuryaSankar/flask-sqlalchemy-booster/blob/444048d167ab7718f758e943665ef32d101423a5/flask_sqlalchemy_booster/custom_collections.py#L36-L58
def _convert(self, dictlike):
    """Validate and convert a dict-like object into values for set()ing.

    This is called behind the scenes when a MappedCollection is replaced
    entirely by another collection, as in::

        myobj.mappedcollection = {'a':obj1, 'b': obj2} # ...

    Raises a TypeError if the key in any (key, value) pair in the dictlike
    object does not match the key that this collection's keyfunc would
    have assigned for that value.
    """
    for incoming_key, valuelist in util.dictlike_iteritems(dictlike):
        for value in valuelist:
            new_key = self.keyfunc(value)
            if incoming_key != new_key:
                raise TypeError(
                    "Found incompatible key %r for value %r; this "
                    "collection's "
                    "keying function requires a key of %r for this value." % (
                        incoming_key, value, new_key))
            yield value
[ "def", "_convert", "(", "self", ",", "dictlike", ")", ":", "for", "incoming_key", ",", "valuelist", "in", "util", ".", "dictlike_iteritems", "(", "dictlike", ")", ":", "for", "value", "in", "valuelist", ":", "new_key", "=", "self", ".", "keyfunc", "(", "value", ")", "if", "incoming_key", "!=", "new_key", ":", "raise", "TypeError", "(", "\"Found incompatible key %r for value %r; this \"", "\"collection's \"", "\"keying function requires a key of %r for this value.\"", "%", "(", "incoming_key", ",", "value", ",", "new_key", ")", ")", "yield", "value" ]
Validate and convert a dict-like object into values for set()ing. This is called behind the scenes when a MappedCollection is replaced entirely by another collection, as in:: myobj.mappedcollection = {'a':obj1, 'b': obj2} # ... Raises a TypeError if the key in any (key, value) pair in the dictlike object does not match the key that this collection's keyfunc would have assigned for that value.
[ "Validate", "and", "convert", "a", "dict", "-", "like", "object", "into", "values", "for", "set", "()", "ing", "." ]
python
train
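A stripped-down sketch of the validation loop above: the collection's `keyfunc` derives the canonical key for each value, and any (key, value) pair whose key disagrees is rejected. The real method also unpacks value lists via `util.dictlike_iteritems`; this simplified version assumes one value per key.

def convert(dictlike, keyfunc):
    for incoming_key, value in dictlike.items():
        new_key = keyfunc(value)
        if incoming_key != new_key:
            raise TypeError(
                "Found incompatible key %r for value %r; the keying "
                "function requires a key of %r for this value."
                % (incoming_key, value, new_key))
        yield value

values = {'a': {'id': 'a'}, 'b': {'id': 'b'}}
assert list(convert(values, lambda v: v['id'])) == [{'id': 'a'}, {'id': 'b'}]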
tensorflow/tensor2tensor
tensor2tensor/data_generators/video_utils.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/video_utils.py#L573-L630
def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split):
    """Generate samples of the encoded frames with possible extra data.

    By default this function just encodes the numpy array returned as "frame"
    from `self.generate_samples` into a PNG image. Override this function to
    get other encodings on disk.

    Args:
        data_dir: final data directory. Typically only used in this method to
            copy over user-supplied vocab files if there are extra fields
            needing them.
        tmp_dir: temporary directory that you can use for downloading and
            scratch.
        dataset_split: problem.DatasetSplit, which data split to generate
            samples for (for example, training and evaluation).

    Yields:
        Sample: dict<str feature_name, feature value> which is in disk encoding.

    Raises:
        ValueError: if the frame has a different number of channels than
            required.
    """
    writer = None

    with tf.Graph().as_default():
        image_t = tf.placeholder(dtype=tf.uint8, shape=(None, None, None))
        encoded_image_t = tf.image.encode_png(image_t)
        with tf.Session() as sess:
            for features in self.generate_samples(data_dir, tmp_dir, dataset_split):
                unencoded_frame = features.pop("frame")
                self.validate_frame(unencoded_frame)
                height, width, _ = unencoded_frame.shape
                encoded_frame = sess.run(
                    encoded_image_t, feed_dict={image_t: unencoded_frame})
                features["image/encoded"] = [encoded_frame]
                features["image/format"] = ["png"]
                features["image/height"] = [height]
                features["image/width"] = [width]

                has_debug_image = "image/debug" in features
                if has_debug_image:
                    unencoded_debug = features.pop("image/debug")
                    encoded_debug = sess.run(
                        encoded_image_t, feed_dict={image_t: unencoded_debug})
                    features["image/encoded_debug"] = [encoded_debug]

                if self.debug_dump_frames_path:
                    # Defer creating debug writer until we know debug_dump_frames_path.
                    if writer is None:
                        if not tf.gfile.Exists(self.debug_dump_frames_path):
                            tf.gfile.MkDir(self.debug_dump_frames_path)
                        writer = debug_video_writer_factory(self.debug_dump_frames_path)
                    img = unencoded_debug if has_debug_image else unencoded_frame
                    encoded_img = encoded_debug if has_debug_image else encoded_frame
                    writer.write(img, encoded_img)

                yield features

    if self.debug_dump_frames_path:
        writer.finish_to_disk()
[ "def", "generate_encoded_samples", "(", "self", ",", "data_dir", ",", "tmp_dir", ",", "dataset_split", ")", ":", "writer", "=", "None", "with", "tf", ".", "Graph", "(", ")", ".", "as_default", "(", ")", ":", "image_t", "=", "tf", ".", "placeholder", "(", "dtype", "=", "tf", ".", "uint8", ",", "shape", "=", "(", "None", ",", "None", ",", "None", ")", ")", "encoded_image_t", "=", "tf", ".", "image", ".", "encode_png", "(", "image_t", ")", "with", "tf", ".", "Session", "(", ")", "as", "sess", ":", "for", "features", "in", "self", ".", "generate_samples", "(", "data_dir", ",", "tmp_dir", ",", "dataset_split", ")", ":", "unencoded_frame", "=", "features", ".", "pop", "(", "\"frame\"", ")", "self", ".", "validate_frame", "(", "unencoded_frame", ")", "height", ",", "width", ",", "_", "=", "unencoded_frame", ".", "shape", "encoded_frame", "=", "sess", ".", "run", "(", "encoded_image_t", ",", "feed_dict", "=", "{", "image_t", ":", "unencoded_frame", "}", ")", "features", "[", "\"image/encoded\"", "]", "=", "[", "encoded_frame", "]", "features", "[", "\"image/format\"", "]", "=", "[", "\"png\"", "]", "features", "[", "\"image/height\"", "]", "=", "[", "height", "]", "features", "[", "\"image/width\"", "]", "=", "[", "width", "]", "has_debug_image", "=", "\"image/debug\"", "in", "features", "if", "has_debug_image", ":", "unencoded_debug", "=", "features", ".", "pop", "(", "\"image/debug\"", ")", "encoded_debug", "=", "sess", ".", "run", "(", "encoded_image_t", ",", "feed_dict", "=", "{", "image_t", ":", "unencoded_debug", "}", ")", "features", "[", "\"image/encoded_debug\"", "]", "=", "[", "encoded_debug", "]", "if", "self", ".", "debug_dump_frames_path", ":", "# Defer creating debug writer until we know debug_dump_frames_path.", "if", "writer", "is", "None", ":", "if", "not", "tf", ".", "gfile", ".", "Exists", "(", "self", ".", "debug_dump_frames_path", ")", ":", "tf", ".", "gfile", ".", "MkDir", "(", "self", ".", "debug_dump_frames_path", ")", "writer", "=", "debug_video_writer_factory", "(", "self", ".", "debug_dump_frames_path", ")", "img", "=", "unencoded_debug", "if", "has_debug_image", "else", "unencoded_frame", "encoded_img", "=", "encoded_debug", "if", "has_debug_image", "else", "encoded_frame", "writer", ".", "write", "(", "img", ",", "encoded_img", ")", "yield", "features", "if", "self", ".", "debug_dump_frames_path", ":", "writer", ".", "finish_to_disk", "(", ")" ]
Generate samples of the encoded frames with possible extra data. By default this function just encodes the numpy array returned as "frame" from `self.generate_samples` into a PNG image. Override this function to get other encodings on disk. Args: data_dir: final data directory. Typically only used in this method to copy over user-supplied vocab files if there are extra fields needing them. tmp_dir: temporary directory that you can use for downloading and scratch. dataset_split: problem.DatasetSplit, which data split to generate samples for (for example, training and evaluation). Yields: Sample: dict<str feature_name, feature value> which is in disk encoding. Raises: ValueError: if the frame has a different number of channels than required.
[ "Generate", "samples", "of", "the", "encoded", "frames", "with", "possible", "extra", "data", "." ]
python
train
SuryaSankar/flask-sqlalchemy-booster
flask_sqlalchemy_booster/model_booster/queryable_mixin.py
https://github.com/SuryaSankar/flask-sqlalchemy-booster/blob/444048d167ab7718f758e943665ef32d101423a5/flask_sqlalchemy_booster/model_booster/queryable_mixin.py#L580-L615
def find_or_create(cls, **kwargs):
    """Checks if an instance already exists by filtering with the kwargs.

    If yes, returns that instance. If not, creates a new instance
    with kwargs and returns it

    Args:
        **kwargs: The keyword arguments which are used for filtering and
            initialization.
        keys(list, optional): A special keyword argument. If passed, only
            the set of keys mentioned here will be used for filtering.
            Useful when we want to 'find' based on a subset of the keys
            and create with all the keys

    Examples:
        >>> customer = Customer.find_or_create(
        ...     name="vicky", email="vicky@h.com", country="India")
        >>> customer.id
        45
        >>> customer1 = Customer.find_or_create(
        ...     name="vicky", email="vicky@h.com", country="India")
        >>> customer1==customer
        True
        >>> customer2 = Customer.find_or_create(
        ...     name="vicky", email="vicky@h.com", country="Russia")
        >>> customer2==customer
        False
        >>> customer3 = Customer.find_or_create(
        ...     name="vicky", email="vicky@h.com", country="Russia",
        ...     keys=['name', 'email'])
        >>> customer3==customer
        True
    """
    keys = kwargs.pop('keys') if 'keys' in kwargs else []
    return cls.first(**subdict(kwargs, keys)) or cls.create(**kwargs)
[ "def", "find_or_create", "(", "cls", ",", "*", "*", "kwargs", ")", ":", "keys", "=", "kwargs", ".", "pop", "(", "'keys'", ")", "if", "'keys'", "in", "kwargs", "else", "[", "]", "return", "cls", ".", "first", "(", "*", "*", "subdict", "(", "kwargs", ",", "keys", ")", ")", "or", "cls", ".", "create", "(", "*", "*", "kwargs", ")" ]
Checks if an instance already exists by filtering with the kwargs. If yes, returns that instance. If not, creates a new instance with kwargs and returns it Args: **kwargs: The keyword arguments which are used for filtering and initialization. keys(list, optional): A special keyword argument. If passed, only the set of keys mentioned here will be used for filtering. Useful when we want to 'find' based on a subset of the keys and create with all the keys Examples: >>> customer = Customer.find_or_create( ... name="vicky", email="vicky@h.com", country="India") >>> customer.id 45 >>> customer1 = Customer.find_or_create( ... name="vicky", email="vicky@h.com", country="India") >>> customer1==customer True >>> customer2 = Customer.find_or_create( ... name="vicky", email="vicky@h.com", country="Russia") >>> customer2==customer False >>> customer3 = Customer.find_or_create( ... name="vicky", email="vicky@h.com", country="Russia", ... keys=['name', 'email']) >>> customer3==customer True
[ "Checks", "if", "an", "instance", "already", "exists", "by", "filtering", "with", "the", "kwargs", ".", "If", "yes", "returns", "that", "instance", ".", "If", "not", "creates", "a", "new", "instance", "with", "kwargs", "and", "returns", "it" ]
python
train
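`subdict` is imported from elsewhere in the library and not shown in this record. A plausible implementation consistent with how find_or_create uses it: an empty `keys` list has to mean "filter on everything", otherwise cls.first(**subdict(kwargs, [])) would match an arbitrary row.

def subdict(d, keys):
    # Hypothetical sketch: restrict d to `keys`, or pass it through untouched.
    if not keys:
        return d
    return {k: v for k, v in d.items() if k in keys}

assert subdict({'name': 'vicky', 'country': 'India'}, ['name']) == {'name': 'vicky'}
assert subdict({'name': 'vicky'}, []) == {'name': 'vicky'}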
hawkular/hawkular-client-python
hawkular/alerts/triggers.py
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/alerts/triggers.py#L301-L309
def conditions(self, trigger_id):
    """
    Get all conditions for a specific trigger.

    :param trigger_id: Trigger definition id to be retrieved
    :return: list of condition objects
    """
    response = self._get(self._service_url(['triggers', trigger_id, 'conditions']))
    return Condition.list_to_object_list(response)
[ "def", "conditions", "(", "self", ",", "trigger_id", ")", ":", "response", "=", "self", ".", "_get", "(", "self", ".", "_service_url", "(", "[", "'triggers'", ",", "trigger_id", ",", "'conditions'", "]", ")", ")", "return", "Condition", ".", "list_to_object_list", "(", "response", ")" ]
Get all conditions for a specific trigger. :param trigger_id: Trigger definition id to be retrieved :return: list of condition objects
[ "Get", "all", "conditions", "for", "a", "specific", "trigger", "." ]
python
train
kiwi0fruit/sugartex
sugartex/sugartex_pandoc_filter.py
https://github.com/kiwi0fruit/sugartex/blob/9eb13703cb02d3e2163c9c5f29df280f6bf49cec/sugartex/sugartex_pandoc_filter.py#L25-L43
def cli():
    """
    Usage: sugartex [OPTIONS] [TO]

      Reads from stdin and writes to stdout.
      Can have single argument/option only.
      When no args or the arg is not from options
      then run Pandoc SugarTeX filter that iterates
      over math blocks.

    Options:
      --kiwi  Same as above but with kiwi flavor.
      --help  Show this message and exit.
    """
    if len(sys.argv) > 1:
        if sys.argv[1] == '--kiwi':
            kiwi_hack()
        elif sys.argv[1].lower() == '--help':
            print(str(cli.__doc__).replace('\n    ', '\n'))
            return None
    main()
[ "def", "cli", "(", ")", ":", "if", "len", "(", "sys", ".", "argv", ")", ">", "1", ":", "if", "sys", ".", "argv", "[", "1", "]", "==", "'--kiwi'", ":", "kiwi_hack", "(", ")", "elif", "sys", ".", "argv", "[", "1", "]", ".", "lower", "(", ")", "==", "'--help'", ":", "print", "(", "str", "(", "cli", ".", "__doc__", ")", ".", "replace", "(", "'\\n '", ",", "'\\n'", ")", ")", "return", "None", "main", "(", ")" ]
Usage: sugartex [OPTIONS] [TO] Reads from stdin and writes to stdout. Can have single argument/option only. When no args or the arg is not from options then run Pandoc SugarTeX filter that iterates over math blocks. Options: --kiwi Same as above but with kiwi flavor. --help Show this message and exit.
[ "Usage", ":", "sugartex", "[", "OPTIONS", "]", "[", "TO", "]" ]
python
train
sorgerlab/indra
indra/assemblers/pysb/assembler.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/assembler.py#L650-L694
def set_expression(self, expression_dict):
    """Set protein expression amounts as initial conditions

    Parameters
    ----------
    expression_dict : dict
        A dictionary in which the keys are gene names and the values are
        numbers representing the absolute amount (count per cell) of
        proteins expressed. Proteins that are not expressed can be
        represented as nan. Entries that are not in the dict or are in
        there but resolve to None, are set to the default initial amount.
        Example: {'EGFR': 12345, 'BRAF': 4567, 'ESR1': nan}
    """
    if self.model is None:
        return
    monomers_found = []
    monomers_notfound = []
    # Iterate over all the monomers
    for m in self.model.monomers:
        if (m.name in expression_dict and
                expression_dict[m.name] is not None):
            # Try to get the expression amount from the dict
            init = expression_dict[m.name]
            # We interpret nan and None as not expressed
            if math.isnan(init):
                init = 0
            init_round = round(init)
            set_base_initial_condition(self.model, m, init_round)
            monomers_found.append(m.name)
        else:
            set_base_initial_condition(self.model, m,
                                       self.default_initial_amount)
            monomers_notfound.append(m.name)
    logger.info('Monomers set to given context')
    logger.info('-----------------------------')
    for m in monomers_found:
        logger.info('%s' % m)
    if monomers_notfound:
        logger.info('')
        logger.info('Monomers not found in given context')
        logger.info('-----------------------------------')
        for m in monomers_notfound:
            logger.info('%s' % m)
[ "def", "set_expression", "(", "self", ",", "expression_dict", ")", ":", "if", "self", ".", "model", "is", "None", ":", "return", "monomers_found", "=", "[", "]", "monomers_notfound", "=", "[", "]", "# Iterate over all the monomers", "for", "m", "in", "self", ".", "model", ".", "monomers", ":", "if", "(", "m", ".", "name", "in", "expression_dict", "and", "expression_dict", "[", "m", ".", "name", "]", "is", "not", "None", ")", ":", "# Try to get the expression amount from the dict", "init", "=", "expression_dict", "[", "m", ".", "name", "]", "# We interpret nan and None as not expressed", "if", "math", ".", "isnan", "(", "init", ")", ":", "init", "=", "0", "init_round", "=", "round", "(", "init", ")", "set_base_initial_condition", "(", "self", ".", "model", ",", "m", ",", "init_round", ")", "monomers_found", ".", "append", "(", "m", ".", "name", ")", "else", ":", "set_base_initial_condition", "(", "self", ".", "model", ",", "m", ",", "self", ".", "default_initial_amount", ")", "monomers_notfound", ".", "append", "(", "m", ".", "name", ")", "logger", ".", "info", "(", "'Monomers set to given context'", ")", "logger", ".", "info", "(", "'-----------------------------'", ")", "for", "m", "in", "monomers_found", ":", "logger", ".", "info", "(", "'%s'", "%", "m", ")", "if", "monomers_notfound", ":", "logger", ".", "info", "(", "''", ")", "logger", ".", "info", "(", "'Monomers not found in given context'", ")", "logger", ".", "info", "(", "'-----------------------------------'", ")", "for", "m", "in", "monomers_notfound", ":", "logger", ".", "info", "(", "'%s'", "%", "m", ")" ]
Set protein expression amounts as initial conditions Parameters ---------- expression_dict : dict A dictionary in which the keys are gene names and the values are numbers representing the absolute amount (count per cell) of proteins expressed. Proteins that are not expressed can be represented as nan. Entries that are not in the dict or are in there but resolve to None, are set to the default initial amount. Example: {'EGFR': 12345, 'BRAF': 4567, 'ESR1': nan}
[ "Set", "protein", "expression", "amounts", "as", "initial", "conditions" ]
python
train
jim-easterbrook/pywws
src/pywws/filedata.py
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/filedata.py#L282-L298
def before(self, idx):
    """Return datetime of newest existing data record whose datetime is < idx.

    Might not even be in the same year! If no such record exists,
    return None."""
    if not isinstance(idx, datetime):
        raise TypeError("'%s' is not %s" % (idx, datetime))
    day = min(idx.date(), self._hi_limit - DAY)
    while day >= self._lo_limit:
        if day < self._rd_cache.lo or day >= self._rd_cache.hi:
            self._load(self._rd_cache, day)
        self._rd_cache.set_ptr(idx)
        if self._rd_cache.ptr > 0:
            return self._rd_cache.data[self._rd_cache.ptr - 1]['idx']
        day = self._rd_cache.lo - DAY
    return None
[ "def", "before", "(", "self", ",", "idx", ")", ":", "if", "not", "isinstance", "(", "idx", ",", "datetime", ")", ":", "raise", "TypeError", "(", "\"'%s' is not %s\"", "%", "(", "idx", ",", "datetime", ")", ")", "day", "=", "min", "(", "idx", ".", "date", "(", ")", ",", "self", ".", "_hi_limit", "-", "DAY", ")", "while", "day", ">=", "self", ".", "_lo_limit", ":", "if", "day", "<", "self", ".", "_rd_cache", ".", "lo", "or", "day", ">=", "self", ".", "_rd_cache", ".", "hi", ":", "self", ".", "_load", "(", "self", ".", "_rd_cache", ",", "day", ")", "self", ".", "_rd_cache", ".", "set_ptr", "(", "idx", ")", "if", "self", ".", "_rd_cache", ".", "ptr", ">", "0", ":", "return", "self", ".", "_rd_cache", ".", "data", "[", "self", ".", "_rd_cache", ".", "ptr", "-", "1", "]", "[", "'idx'", "]", "day", "=", "self", ".", "_rd_cache", ".", "lo", "-", "DAY", "return", "None" ]
Return datetime of newest existing data record whose datetime is < idx. Might not even be in the same year! If no such record exists, return None.
[ "Return", "datetime", "of", "newest", "existing", "data", "record", "whose", "datetime", "is", "<", "idx", "." ]
python
train
alorence/pysvg-py3
pysvg/shape.py
https://github.com/alorence/pysvg-py3/blob/ce217a4da3ada44a71d3e2f391d37c67d95c724e/pysvg/shape.py#L145-L150
def getBottomLeft(self):
    """
    Retrieves a tuple with the x,y coordinates of the lower left point of the circle.
    Requires the radius and the coordinates to be numbers
    """
    return (float(self.get_cx()) - float(self.get_r()),
            float(self.get_cy()) - float(self.get_r()))
[ "def", "getBottomLeft", "(", "self", ")", ":", "return", "(", "float", "(", "self", ".", "get_cx", "(", ")", ")", "-", "float", "(", "self", ".", "get_r", "(", ")", ")", ",", "float", "(", "self", ".", "get_cy", "(", ")", ")", "-", "float", "(", "self", ".", "get_r", "(", ")", ")", ")" ]
Retrieves a tuple with the x,y coordinates of the lower left point of the circle. Requires the radius and the coordinates to be numbers
[ "Retrieves", "a", "tuple", "with", "the", "x", "y", "coordinates", "of", "the", "lower", "left", "point", "of", "the", "circle", ".", "Requires", "the", "radius", "and", "the", "coordinates", "to", "be", "numbers" ]
python
train
eddyxu/cpp-coveralls
cpp_coveralls/coverage.py
https://github.com/eddyxu/cpp-coveralls/blob/ff7af7eea2a23828f6ab2541667ea04f94344dce/cpp_coveralls/coverage.py#L101-L107
def exclude_paths(args):
    """Returns the absolute paths for excluded path."""
    results = []
    if args.exclude:
        for excl_path in args.exclude:
            results.append(os.path.abspath(os.path.join(args.root, excl_path)))
    return results
[ "def", "exclude_paths", "(", "args", ")", ":", "results", "=", "[", "]", "if", "args", ".", "exclude", ":", "for", "excl_path", "in", "args", ".", "exclude", ":", "results", ".", "append", "(", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "args", ".", "root", ",", "excl_path", ")", ")", ")", "return", "results" ]
Returns the absolute paths for excluded path.
[ "Returns", "the", "absolute", "paths", "for", "excluded", "path", "." ]
python
train
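One subtlety worth noting about the join above: os.path.join discards `root` when the exclude entry is already absolute, so both relative and absolute exclude paths come out right. A quick check (POSIX-style paths, so this holds on Linux/macOS):

import os

root = '/project'
for excl, expected in [('src/gen', '/project/src/gen'), ('/tmp/out', '/tmp/out')]:
    # join() keeps root for relative entries and drops it for absolute ones.
    assert os.path.abspath(os.path.join(root, excl)) == expected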
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/visuals/visual.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/visuals/visual.py#L338-L349
def set_gl_state(self, preset=None, **kwargs):
    """Define the set of GL state parameters to use when drawing

    Parameters
    ----------
    preset : str
        Preset to use.
    **kwargs : dict
        Keyword arguments to `gloo.set_state`.
    """
    self._vshare.gl_state = kwargs
    self._vshare.gl_state['preset'] = preset
[ "def", "set_gl_state", "(", "self", ",", "preset", "=", "None", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_vshare", ".", "gl_state", "=", "kwargs", "self", ".", "_vshare", ".", "gl_state", "[", "'preset'", "]", "=", "preset" ]
Define the set of GL state parameters to use when drawing Parameters ---------- preset : str Preset to use. **kwargs : dict Keyword arguments to `gloo.set_state`.
[ "Define", "the", "set", "of", "GL", "state", "parameters", "to", "use", "when", "drawing" ]
python
train
AguaClara/aguaclara
aguaclara/design/sed_tank.py
https://github.com/AguaClara/aguaclara/blob/8dd4e734768b166a7fc2b60388a24df2f93783fc/aguaclara/design/sed_tank.py#L505-L524
def n_tanks(Q_plant, sed_inputs=sed_dict):
    """Return the number of sedimentation tanks required for a given flow rate.

    Parameters
    ----------
    Q_plant : float
        Total plant flow rate
    sed_inputs : dict
        A dictionary of all of the constant inputs needed for sedimentation
        tank calculations can be found in sed.yaml

    Returns
    -------
    int
        Number of sedimentation tanks required for a given flow rate.

    Examples
    --------
    >>> from aide_design.play import*
    >>>
    """
    q = q_tank(sed_inputs).magnitude
    return (int(np.ceil(Q_plant / q)))
[ "def", "n_tanks", "(", "Q_plant", ",", "sed_inputs", "=", "sed_dict", ")", ":", "q", "=", "q_tank", "(", "sed_inputs", ")", ".", "magnitude", "return", "(", "int", "(", "np", ".", "ceil", "(", "Q_plant", "/", "q", ")", ")", ")" ]
Return the number of sedimentation tanks required for a given flow rate. Parameters ---------- Q_plant : float Total plant flow rate sed_inputs : dict A dictionary of all of the constant inputs needed for sedimentation tank calculations can be found in sed.yaml Returns ------- int Number of sedimentation tanks required for a given flow rate. Examples -------- >>> from aide_design.play import* >>>
[ "Return", "the", "number", "of", "sedimentation", "tanks", "required", "for", "a", "given", "flow", "rate", ".", "Parameters", "----------", "Q_plant", ":", "float", "Total", "plant", "flow", "rate", "sed_inputs", ":", "dict", "A", "dictionary", "of", "all", "of", "the", "constant", "inputs", "needed", "for", "sedimentation", "tank", "calculations", "can", "be", "found", "in", "sed", ".", "yaml", "Returns", "-------", "int", "Number", "of", "sedimentation", "tanks", "required", "for", "a", "given", "flow", "rate", ".", "Examples", "--------", ">>>", "from", "aide_design", ".", "play", "import", "*", ">>>" ]
python
train
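The tank count above is a plain ceiling division: any fractional tank rounds up to a whole one. A quick numeric check with an assumed per-tank capacity (q = 60 L/s is illustrative only, not a value from sed.yaml):

import numpy as np

def n_tanks_from_q(Q_plant, q):
    # Same rule as above, with the per-tank flow passed in directly.
    return int(np.ceil(Q_plant / q))

assert n_tanks_from_q(150, 60) == 3  # 2.5 tanks rounds up to 3
assert n_tanks_from_q(120, 60) == 2  # an exact fit stays at 2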
mozilla-b2g/fxos-certsuite
mcts/certsuite/cert.py
https://github.com/mozilla-b2g/fxos-certsuite/blob/152a76c7c4c9d908524cf6e6fc25a498058f363d/mcts/certsuite/cert.py#L572-L590
def set_permission(permission, value, app):
    """Set a permission for the specified app

    Value should be 'deny' or 'allow'
    """
    # The object created to wrap PermissionSettingsModule is to work around
    # an intermittent bug where it will sometimes be undefined.
    script = """
    const {classes: Cc, interfaces: Ci, utils: Cu, results: Cr} = Components;
    var a = {b: Cu.import("resource://gre/modules/PermissionSettings.jsm")};
    return a.b.PermissionSettingsModule.addPermission({
      type: '%s',
      origin: '%s',
      manifestURL: '%s/manifest.webapp',
      value: '%s',
      browserFlag: false
    });
    """
    app_url = 'app://' + app
    run_marionette_script(script % (permission, app_url, app_url, value), True)
[ "def", "set_permission", "(", "permission", ",", "value", ",", "app", ")", ":", "# The object created to wrap PermissionSettingsModule is to work around", "# an intermittent bug where it will sometimes be undefined.", "script", "=", "\"\"\"\n const {classes: Cc, interfaces: Ci, utils: Cu, results: Cr} = Components;\n var a = {b: Cu.import(\"resource://gre/modules/PermissionSettings.jsm\")};\n return a.b.PermissionSettingsModule.addPermission({\n type: '%s',\n origin: '%s',\n manifestURL: '%s/manifest.webapp',\n value: '%s',\n browserFlag: false\n });\n \"\"\"", "app_url", "=", "'app://'", "+", "app", "run_marionette_script", "(", "script", "%", "(", "permission", ",", "app_url", ",", "app_url", ",", "value", ")", ",", "True", ")" ]
Set a permission for the specified app Value should be 'deny' or 'allow'
[ "Set", "a", "permission", "for", "the", "specified", "app", "Value", "should", "be", "deny", "or", "allow" ]
python
train
saltstack/salt
salt/modules/win_groupadd.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_groupadd.py#L203-L238
def getent(refresh=False):
    '''
    Return info on all groups

    Args:

        refresh (bool):
            Refresh the info for all groups in ``__context__``. If False only
            the groups in ``__context__`` will be returned. If True the
            ``__context__`` will be refreshed with current data and returned.
            Default is False

    Returns:
        A list of groups and their information

    CLI Example:

    .. code-block:: bash

        salt '*' group.getent
    '''
    if 'group.getent' in __context__ and not refresh:
        return __context__['group.getent']

    ret = []
    results = _get_all_groups()

    for result in results:
        group = {'gid': __salt__['file.group_to_gid'](result.Name),
                 'members': [_get_username(x) for x in result.members()],
                 'name': result.Name,
                 'passwd': 'x'}
        ret.append(group)
    __context__['group.getent'] = ret
    return ret
[ "def", "getent", "(", "refresh", "=", "False", ")", ":", "if", "'group.getent'", "in", "__context__", "and", "not", "refresh", ":", "return", "__context__", "[", "'group.getent'", "]", "ret", "=", "[", "]", "results", "=", "_get_all_groups", "(", ")", "for", "result", "in", "results", ":", "group", "=", "{", "'gid'", ":", "__salt__", "[", "'file.group_to_gid'", "]", "(", "result", ".", "Name", ")", ",", "'members'", ":", "[", "_get_username", "(", "x", ")", "for", "x", "in", "result", ".", "members", "(", ")", "]", ",", "'name'", ":", "result", ".", "Name", ",", "'passwd'", ":", "'x'", "}", "ret", ".", "append", "(", "group", ")", "__context__", "[", "'group.getent'", "]", "=", "ret", "return", "ret" ]
Return info on all groups Args: refresh (bool): Refresh the info for all groups in ``__context__``. If False only the groups in ``__context__`` will be returned. If True the ``__context__`` will be refreshed with current data and returned. Default is False Returns: A list of groups and their information CLI Example: .. code-block:: bash salt '*' group.getent
[ "Return", "info", "on", "all", "groups" ]
python
train
tarbell-project/tarbell
tarbell/cli.py
https://github.com/tarbell-project/tarbell/blob/818b3d3623dcda5a08a5bf45550219719b0f0365/tarbell/cli.py#L448-L465
def tarbell_switch(command, args):
    """
    Switch to a project.
    """
    with ensure_settings(command, args) as settings:
        projects_path = settings.config.get("projects_path")
        if not projects_path:
            show_error("{0} does not exist".format(projects_path))
            sys.exit()
        project = args.get(0)
        args.remove(project)
        project_path = os.path.join(projects_path, project)
        if os.path.isdir(project_path):
            os.chdir(project_path)
            puts("\nSwitching to {0}".format(colored.red(project)))
            tarbell_serve(command, args)
        else:
            show_error("{0} isn't a tarbell project".format(project_path))
[ "def", "tarbell_switch", "(", "command", ",", "args", ")", ":", "with", "ensure_settings", "(", "command", ",", "args", ")", "as", "settings", ":", "projects_path", "=", "settings", ".", "config", ".", "get", "(", "\"projects_path\"", ")", "if", "not", "projects_path", ":", "show_error", "(", "\"{0} does not exist\"", ".", "format", "(", "projects_path", ")", ")", "sys", ".", "exit", "(", ")", "project", "=", "args", ".", "get", "(", "0", ")", "args", ".", "remove", "(", "project", ")", "project_path", "=", "os", ".", "path", ".", "join", "(", "projects_path", ",", "project", ")", "if", "os", ".", "path", ".", "isdir", "(", "project_path", ")", ":", "os", ".", "chdir", "(", "project_path", ")", "puts", "(", "\"\\nSwitching to {0}\"", ".", "format", "(", "colored", ".", "red", "(", "project", ")", ")", ")", "tarbell_serve", "(", "command", ",", "args", ")", "else", ":", "show_error", "(", "\"{0} isn't a tarbell project\"", ".", "format", "(", "project_path", ")", ")" ]
Switch to a project.
[ "Switch", "to", "a", "project", "." ]
python
train
nwilming/ocupy
ocupy/fixmat.py
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/fixmat.py#L283-L325
def FixmatFactory(fixmatfile, categories=None, var_name='fixmat', field_name='x'):
    """
    Loads a single fixmat (fixmatfile).

    Parameters:
        fixmatfile : string
            The matlab fixmat that should be loaded.
        categories : instance of stimuli.Categories, optional
            Links data in categories to data in fixmat.
    """
    try:
        data = loadmat(fixmatfile, struct_as_record=False)
        keys = list(data.keys())
        data = data[var_name][0][0]
    except KeyError:
        raise RuntimeError('%s is not a field of the matlab structure. '
                           'Possible keys are %s' % (var_name, str(keys)))

    num_fix = data.__getattribute__(field_name).size

    # Get a list with fieldnames and a list with parameters
    fields = {}
    parameters = {}
    for field in data._fieldnames:
        if data.__getattribute__(field).size == num_fix:
            fields[field] = data.__getattribute__(field)
        else:
            parameters[field] = data.__getattribute__(field)[0].tolist()
            if len(parameters[field]) == 1:
                parameters[field] = parameters[field][0]

    # Generate FixMat
    fixmat = FixMat(categories=categories)
    fixmat._fields = list(fields.keys())
    for (field, value) in list(fields.items()):
        fixmat.__dict__[field] = value.reshape(-1,)
    fixmat._parameters = parameters
    fixmat._subjects = None
    for (field, value) in list(parameters.items()):
        fixmat.__dict__[field] = value
    fixmat._num_fix = num_fix
    return fixmat
[ "def", "FixmatFactory", "(", "fixmatfile", ",", "categories", "=", "None", ",", "var_name", "=", "'fixmat'", ",", "field_name", "=", "'x'", ")", ":", "try", ":", "data", "=", "loadmat", "(", "fixmatfile", ",", "struct_as_record", "=", "False", ")", "keys", "=", "list", "(", "data", ".", "keys", "(", ")", ")", "data", "=", "data", "[", "var_name", "]", "[", "0", "]", "[", "0", "]", "except", "KeyError", ":", "raise", "RuntimeError", "(", "'%s is not a field of the matlab structure. Possible'", "+", "'Keys are %s'", "%", "str", "(", "keys", ")", ")", "num_fix", "=", "data", ".", "__getattribute__", "(", "field_name", ")", ".", "size", "# Get a list with fieldnames and a list with parameters", "fields", "=", "{", "}", "parameters", "=", "{", "}", "for", "field", "in", "data", ".", "_fieldnames", ":", "if", "data", ".", "__getattribute__", "(", "field", ")", ".", "size", "==", "num_fix", ":", "fields", "[", "field", "]", "=", "data", ".", "__getattribute__", "(", "field", ")", "else", ":", "parameters", "[", "field", "]", "=", "data", ".", "__getattribute__", "(", "field", ")", "[", "0", "]", ".", "tolist", "(", ")", "if", "len", "(", "parameters", "[", "field", "]", ")", "==", "1", ":", "parameters", "[", "field", "]", "=", "parameters", "[", "field", "]", "[", "0", "]", "# Generate FixMat", "fixmat", "=", "FixMat", "(", "categories", "=", "categories", ")", "fixmat", ".", "_fields", "=", "list", "(", "fields", ".", "keys", "(", ")", ")", "for", "(", "field", ",", "value", ")", "in", "list", "(", "fields", ".", "items", "(", ")", ")", ":", "fixmat", ".", "__dict__", "[", "field", "]", "=", "value", ".", "reshape", "(", "-", "1", ",", ")", "fixmat", ".", "_parameters", "=", "parameters", "fixmat", ".", "_subjects", "=", "None", "for", "(", "field", ",", "value", ")", "in", "list", "(", "parameters", ".", "items", "(", ")", ")", ":", "fixmat", ".", "__dict__", "[", "field", "]", "=", "value", "fixmat", ".", "_num_fix", "=", "num_fix", "return", "fixmat" ]
Loads a single fixmat (fixmatfile). Parameters: fixmatfile : string The matlab fixmat that should be loaded. categories : instance of stimuli.Categories, optional Links data in categories to data in fixmat.
[ "Loads", "a", "single", "fixmat", "(", "fixmatfile", ")", ".", "Parameters", ":", "fixmatfile", ":", "string", "The", "matlab", "fixmat", "that", "should", "be", "loaded", ".", "categories", ":", "instance", "of", "stimuli", ".", "Categories", "optional", "Links", "data", "in", "categories", "to", "data", "in", "fixmat", "." ]
python
train
jessevdk/cldoc
cldoc/clang/cindex.py
https://github.com/jessevdk/cldoc/blob/fc7f59405c4a891b8367c80a700f5aa3c5c9230c/cldoc/clang/cindex.py#L1528-L1533
def spelling(self):
    """Return the spelling of the entity pointed at by the cursor."""
    if not hasattr(self, '_spelling'):
        self._spelling = conf.lib.clang_getCursorSpelling(self)

    return self._spelling
[ "def", "spelling", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_spelling'", ")", ":", "self", ".", "_spelling", "=", "conf", ".", "lib", ".", "clang_getCursorSpelling", "(", "self", ")", "return", "self", ".", "_spelling" ]
Return the spelling of the entity pointed at by the cursor.
[ "Return", "the", "spelling", "of", "the", "entity", "pointed", "at", "by", "the", "cursor", "." ]
python
train
shoebot/shoebot
shoebot/gui/gtk_drawingarea.py
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/shoebot/gui/gtk_drawingarea.py#L78-L98
def scale_context_and_center(self, cr):
    """
    Scale context based on difference between bot size and widget
    """
    bot_width, bot_height = self.bot_size
    if self.width != bot_width or self.height != bot_height:
        # Scale up by largest dimension
        if self.width < self.height:
            scale_x = float(self.width) / float(bot_width)
            scale_y = scale_x
            cr.translate(0, (self.height - (bot_height * scale_y)) / 2.0)
        elif self.width > self.height:
            scale_y = float(self.height) / float(bot_height)
            scale_x = scale_y
            cr.translate((self.width - (bot_width * scale_x)) / 2.0, 0)
        else:
            scale_x = 1.0
            scale_y = 1.0
        cr.scale(scale_x, scale_y)
        self.input_device.scale_x = scale_y
        self.input_device.scale_y = scale_y
[ "def", "scale_context_and_center", "(", "self", ",", "cr", ")", ":", "bot_width", ",", "bot_height", "=", "self", ".", "bot_size", "if", "self", ".", "width", "!=", "bot_width", "or", "self", ".", "height", "!=", "bot_height", ":", "# Scale up by largest dimension", "if", "self", ".", "width", "<", "self", ".", "height", ":", "scale_x", "=", "float", "(", "self", ".", "width", ")", "/", "float", "(", "bot_width", ")", "scale_y", "=", "scale_x", "cr", ".", "translate", "(", "0", ",", "(", "self", ".", "height", "-", "(", "bot_height", "*", "scale_y", ")", ")", "/", "2.0", ")", "elif", "self", ".", "width", ">", "self", ".", "height", ":", "scale_y", "=", "float", "(", "self", ".", "height", ")", "/", "float", "(", "bot_height", ")", "scale_x", "=", "scale_y", "cr", ".", "translate", "(", "(", "self", ".", "width", "-", "(", "bot_width", "*", "scale_x", ")", ")", "/", "2.0", ",", "0", ")", "else", ":", "scale_x", "=", "1.0", "scale_y", "=", "1.0", "cr", ".", "scale", "(", "scale_x", ",", "scale_y", ")", "self", ".", "input_device", ".", "scale_x", "=", "scale_y", "self", ".", "input_device", ".", "scale_y", "=", "scale_y" ]
Scale context based on difference between bot size and widget
[ "Scale", "context", "based", "on", "difference", "between", "bot", "size", "and", "widget" ]
python
valid
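The widget method above scales by a single factor keyed on the widget's own width/height comparison, then translates to center the leftover dimension. A standalone version of the math (mirroring the original's branch logic, including the equal-size passthrough):

def fit_transform(widget_w, widget_h, bot_w, bot_h):
    """Return (scale, dx, dy) matching the scaling logic above."""
    if widget_w < widget_h:
        scale = float(widget_w) / float(bot_w)
        return scale, 0.0, (widget_h - bot_h * scale) / 2.0
    elif widget_w > widget_h:
        scale = float(widget_h) / float(bot_h)
        return scale, (widget_w - bot_w * scale) / 2.0, 0.0
    return 1.0, 0.0, 0.0

# An 800x600 widget showing a 400x300 bot: scale 2x from the height;
# the width then fits exactly, so no centering offset is needed.
assert fit_transform(800, 600, 400, 300) == (2.0, 0.0, 0.0)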
pallets/werkzeug
src/werkzeug/filesystem.py
https://github.com/pallets/werkzeug/blob/a220671d66755a94630a212378754bb432811158/src/werkzeug/filesystem.py#L42-L64
def get_filesystem_encoding():
    """Returns the filesystem encoding that should be used. Note that this is
    different from the Python understanding of the filesystem encoding which
    might be deeply flawed. Do not use this value against Python's unicode APIs
    because it might be different. See :ref:`filesystem-encoding` for the exact
    behavior.

    The concept of a filesystem encoding in general is not something you
    should rely on. As such if you ever need to use this function except for
    writing wrapper code reconsider.
    """
    global _warned_about_filesystem_encoding
    rv = sys.getfilesystemencoding()
    if has_likely_buggy_unicode_filesystem and not rv or _is_ascii_encoding(rv):
        if not _warned_about_filesystem_encoding:
            warnings.warn(
                "Detected a misconfigured UNIX filesystem: Will use"
                " UTF-8 as filesystem encoding instead of {0!r}".format(rv),
                BrokenFilesystemWarning,
            )
            _warned_about_filesystem_encoding = True
        return "utf-8"
    return rv
[ "def", "get_filesystem_encoding", "(", ")", ":", "global", "_warned_about_filesystem_encoding", "rv", "=", "sys", ".", "getfilesystemencoding", "(", ")", "if", "has_likely_buggy_unicode_filesystem", "and", "not", "rv", "or", "_is_ascii_encoding", "(", "rv", ")", ":", "if", "not", "_warned_about_filesystem_encoding", ":", "warnings", ".", "warn", "(", "\"Detected a misconfigured UNIX filesystem: Will use\"", "\" UTF-8 as filesystem encoding instead of {0!r}\"", ".", "format", "(", "rv", ")", ",", "BrokenFilesystemWarning", ",", ")", "_warned_about_filesystem_encoding", "=", "True", "return", "\"utf-8\"", "return", "rv" ]
Returns the filesystem encoding that should be used. Note that this is different from the Python understanding of the filesystem encoding which might be deeply flawed. Do not use this value against Python's unicode APIs because it might be different. See :ref:`filesystem-encoding` for the exact behavior. The concept of a filesystem encoding in general is not something you should rely on. As such if you ever need to use this function except for writing wrapper code reconsider.
[ "Returns", "the", "filesystem", "encoding", "that", "should", "be", "used", ".", "Note", "that", "this", "is", "different", "from", "the", "Python", "understanding", "of", "the", "filesystem", "encoding", "which", "might", "be", "deeply", "flawed", ".", "Do", "not", "use", "this", "value", "against", "Python", "s", "unicode", "APIs", "because", "it", "might", "be", "different", ".", "See", ":", "ref", ":", "filesystem", "-", "encoding", "for", "the", "exact", "behavior", "." ]
python
train
PmagPy/PmagPy
pmagpy/pmag.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L5222-L5242
def vfunc(pars_1, pars_2):
    """
    Calculate the Watson Vw test statistic. Calculated as 2*(Sw-Rw)

    Parameters
    ----------
    pars_1 : dictionary of Fisher statistics from population 1
    pars_2 : dictionary of Fisher statistics from population 2

    Returns
    -------
    Vw : Watson's Vw statistic
    """
    cart_1 = dir2cart([pars_1["dec"], pars_1["inc"], pars_1["r"]])
    cart_2 = dir2cart([pars_2['dec'], pars_2['inc'], pars_2["r"]])
    Sw = pars_1['k'] * pars_1['r'] + pars_2['k'] * pars_2['r']  # k1*r1+k2*r2
    xhat_1 = pars_1['k'] * cart_1[0] + pars_2['k'] * cart_2[0]  # k1*x1+k2*x2
    xhat_2 = pars_1['k'] * cart_1[1] + pars_2['k'] * cart_2[1]  # k1*y1+k2*y2
    xhat_3 = pars_1['k'] * cart_1[2] + pars_2['k'] * cart_2[2]  # k1*z1+k2*z2
    Rw = np.sqrt(xhat_1**2 + xhat_2**2 + xhat_3**2)
    return 2 * (Sw - Rw)
[ "def", "vfunc", "(", "pars_1", ",", "pars_2", ")", ":", "cart_1", "=", "dir2cart", "(", "[", "pars_1", "[", "\"dec\"", "]", ",", "pars_1", "[", "\"inc\"", "]", ",", "pars_1", "[", "\"r\"", "]", "]", ")", "cart_2", "=", "dir2cart", "(", "[", "pars_2", "[", "'dec'", "]", ",", "pars_2", "[", "'inc'", "]", ",", "pars_2", "[", "\"r\"", "]", "]", ")", "Sw", "=", "pars_1", "[", "'k'", "]", "*", "pars_1", "[", "'r'", "]", "+", "pars_2", "[", "'k'", "]", "*", "pars_2", "[", "'r'", "]", "# k1*r1+k2*r2", "xhat_1", "=", "pars_1", "[", "'k'", "]", "*", "cart_1", "[", "0", "]", "+", "pars_2", "[", "'k'", "]", "*", "cart_2", "[", "0", "]", "# k1*x1+k2*x2", "xhat_2", "=", "pars_1", "[", "'k'", "]", "*", "cart_1", "[", "1", "]", "+", "pars_2", "[", "'k'", "]", "*", "cart_2", "[", "1", "]", "# k1*y1+k2*y2", "xhat_3", "=", "pars_1", "[", "'k'", "]", "*", "cart_1", "[", "2", "]", "+", "pars_2", "[", "'k'", "]", "*", "cart_2", "[", "2", "]", "# k1*z1+k2*z2", "Rw", "=", "np", ".", "sqrt", "(", "xhat_1", "**", "2", "+", "xhat_2", "**", "2", "+", "xhat_3", "**", "2", ")", "return", "2", "*", "(", "Sw", "-", "Rw", ")" ]
Calculate the Watson Vw test statistic. Calculated as 2*(Sw-Rw) Parameters ---------- pars_1 : dictionary of Fisher statistics from population 1 pars_2 : dictionary of Fisher statistics from population 2 Returns ------- Vw : Watson's Vw statistic
[ "Calculate", "the", "Watson", "Vw", "test", "statistic", ".", "Calculated", "as", "2", "*", "(", "Sw", "-", "Rw", ")" ]
python
train
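A self-contained run of the Vw computation, with `dir2cart` replaced by an inline declination/inclination to Cartesian conversion in the usual paleomagnetic convention (x north, y east, z down; vector length r):

import numpy as np

def to_cart(dec, inc, r):
    d, i = np.radians(dec), np.radians(inc)
    return r * np.array([np.cos(i) * np.cos(d), np.cos(i) * np.sin(d), np.sin(i)])

def watson_vw(p1, p2):
    # k-weighted resultant of the two population mean vectors.
    x1 = p1['k'] * to_cart(p1['dec'], p1['inc'], p1['r'])
    x2 = p2['k'] * to_cart(p2['dec'], p2['inc'], p2['r'])
    Sw = p1['k'] * p1['r'] + p2['k'] * p2['r']
    Rw = np.linalg.norm(x1 + x2)
    return 2 * (Sw - Rw)

# Identical mean directions give Vw = 0; Vw grows as the directions diverge.
p = {'dec': 350.0, 'inc': 30.0, 'r': 9.8, 'k': 50.0}
assert abs(watson_vw(p, p)) < 1e-9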
tensorflow/tensor2tensor
tensor2tensor/trax/layers/attention.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/layers/attention.py#L97-L111
def _positional_encoding_new_params(input_shape, rng, max_len=2048):  # pylint: disable=invalid-name
    """Helper: create positional encoding parameters."""
    del rng
    # Check if we are operating on chunked inputs by checking if the first
    # shape is a list/tuple of shapes (otherwise it's an int or numpy array).
    is_chunked = isinstance(input_shape[0], (list, tuple))
    feature_depth = input_shape[0][-1] if is_chunked else input_shape[-1]
    pe = onp.zeros((max_len, feature_depth), dtype=onp.float32)
    position = onp.arange(0, max_len)[:, onp.newaxis]
    div_term = onp.exp(
        onp.arange(0, feature_depth, 2) * -(onp.log(10000.0) / feature_depth))
    pe[:, 0::2] = onp.sin(position * div_term)
    pe[:, 1::2] = onp.cos(position * div_term)
    pe = pe[onp.newaxis, :, :]  # [1, max_len, feature_depth]
    return np.array(pe)
[ "def", "_positional_encoding_new_params", "(", "input_shape", ",", "rng", ",", "max_len", "=", "2048", ")", ":", "# pylint: disable=invalid-name", "del", "rng", "# Check if we are operating on chunked inputs by checking if the first", "# shape is a list/tuple of shapes (otherwise it's an int or numpy array).", "is_chunked", "=", "isinstance", "(", "input_shape", "[", "0", "]", ",", "(", "list", ",", "tuple", ")", ")", "feature_depth", "=", "input_shape", "[", "0", "]", "[", "-", "1", "]", "if", "is_chunked", "else", "input_shape", "[", "-", "1", "]", "pe", "=", "onp", ".", "zeros", "(", "(", "max_len", ",", "feature_depth", ")", ",", "dtype", "=", "onp", ".", "float32", ")", "position", "=", "onp", ".", "arange", "(", "0", ",", "max_len", ")", "[", ":", ",", "onp", ".", "newaxis", "]", "div_term", "=", "onp", ".", "exp", "(", "onp", ".", "arange", "(", "0", ",", "feature_depth", ",", "2", ")", "*", "-", "(", "onp", ".", "log", "(", "10000.0", ")", "/", "feature_depth", ")", ")", "pe", "[", ":", ",", "0", ":", ":", "2", "]", "=", "onp", ".", "sin", "(", "position", "*", "div_term", ")", "pe", "[", ":", ",", "1", ":", ":", "2", "]", "=", "onp", ".", "cos", "(", "position", "*", "div_term", ")", "pe", "=", "pe", "[", "onp", ".", "newaxis", ",", ":", ",", ":", "]", "# [1, max_len, feature_depth]", "return", "np", ".", "array", "(", "pe", ")" ]
Helper: create positional encoding parameters.
[ "Helper", ":", "create", "positional", "encoding", "parameters", "." ]
python
train
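A standalone run of the sinusoidal table construction above (plain NumPy, no chunking), checking the classic properties: position 0 alternates sin(0) = 0 and cos(0) = 1, and every entry stays within [-1, 1]:

import numpy as onp

max_len, feature_depth = 16, 8
pe = onp.zeros((max_len, feature_depth), dtype=onp.float32)
position = onp.arange(0, max_len)[:, onp.newaxis]
div_term = onp.exp(
    onp.arange(0, feature_depth, 2) * -(onp.log(10000.0) / feature_depth))
pe[:, 0::2] = onp.sin(position * div_term)
pe[:, 1::2] = onp.cos(position * div_term)

assert pe.shape == (16, 8)
assert onp.allclose(pe[0, 0::2], 0.0) and onp.allclose(pe[0, 1::2], 1.0)
assert onp.all(onp.abs(pe) <= 1.0)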
scott-maddox/openbandparams
src/openbandparams/iii_v_zinc_blende_alloy.py
https://github.com/scott-maddox/openbandparams/blob/bc24e59187326bcb8948117434536082c9055777/src/openbandparams/iii_v_zinc_blende_alloy.py#L132-L141
def F(self, **kwargs):
    '''
    Returns the Kane remote-band parameter, `F`, calculated from
    `Eg_Gamma_0`, `Delta_SO`, `Ep`, and `meff_e_Gamma_0`.
    '''
    Eg = self.Eg_Gamma_0(**kwargs)
    Delta_SO = self.Delta_SO(**kwargs)
    Ep = self.Ep(**kwargs)
    meff = self.meff_e_Gamma_0(**kwargs)
    return (1. / meff - 1 - (Ep * (Eg + 2. * Delta_SO / 3.)) / (Eg * (Eg + Delta_SO))) / 2
[ "def", "F", "(", "self", ",", "*", "*", "kwargs", ")", ":", "Eg", "=", "self", ".", "Eg_Gamma_0", "(", "*", "*", "kwargs", ")", "Delta_SO", "=", "self", ".", "Delta_SO", "(", "*", "*", "kwargs", ")", "Ep", "=", "self", ".", "Ep", "(", "*", "*", "kwargs", ")", "meff", "=", "self", ".", "meff_e_Gamma_0", "(", "*", "*", "kwargs", ")", "return", "(", "1.", "/", "meff", "-", "1", "-", "(", "Ep", "*", "(", "Eg", "+", "2.", "*", "Delta_SO", "/", "3.", ")", ")", "/", "(", "Eg", "*", "(", "Eg", "+", "Delta_SO", ")", ")", ")", "/", "2" ]
Returns the Kane remote-band parameter, `F`, calculated from `Eg_Gamma_0`, `Delta_SO`, `Ep`, and `meff_e_Gamma_0`.
[ "Returns", "the", "Kane", "remote", "-", "band", "parameter", "F", "calculated", "from", "Eg_Gamma_0", "Delta_SO", "Ep", "and", "meff_e_Gamma_0", "." ]
python
train
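Plugging widely tabulated GaAs band parameters into the same expression (Eg = 1.519 eV, Delta_SO = 0.341 eV, Ep = 28.8 eV, meff = 0.067; illustrative literature values, not read from this library) reproduces the standard F of about -1.94:

def kane_F(Eg, Delta_SO, Ep, meff):
    # Same expression as the method above, with the inputs passed in directly.
    return (1. / meff - 1 - (Ep * (Eg + 2. * Delta_SO / 3.)) / (Eg * (Eg + Delta_SO))) / 2

assert abs(kane_F(1.519, 0.341, 28.8, 0.067) - (-1.94)) < 0.01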
PyCQA/pylint
pylint/utils/file_state.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/utils/file_state.py#L103-L117
def handle_ignored_message(
    self, state_scope, msgid, line, node, args, confidence
):  # pylint: disable=unused-argument
    """Report an ignored message.

    state_scope is either MSG_STATE_SCOPE_MODULE or MSG_STATE_SCOPE_CONFIG,
    depending on whether the message was disabled locally in the module,
    or globally.

    The other arguments are the same as for add_message.
    """
    if state_scope == MSG_STATE_SCOPE_MODULE:
        try:
            orig_line = self._suppression_mapping[(msgid, line)]
            self._ignored_msgs[(msgid, orig_line)].add(line)
        except KeyError:
            pass
[ "def", "handle_ignored_message", "(", "self", ",", "state_scope", ",", "msgid", ",", "line", ",", "node", ",", "args", ",", "confidence", ")", ":", "# pylint: disable=unused-argument", "if", "state_scope", "==", "MSG_STATE_SCOPE_MODULE", ":", "try", ":", "orig_line", "=", "self", ".", "_suppression_mapping", "[", "(", "msgid", ",", "line", ")", "]", "self", ".", "_ignored_msgs", "[", "(", "msgid", ",", "orig_line", ")", "]", ".", "add", "(", "line", ")", "except", "KeyError", ":", "pass" ]
Report an ignored message.

state_scope is either MSG_STATE_SCOPE_MODULE or MSG_STATE_SCOPE_CONFIG,
depending on whether the message was disabled locally in the module,
or globally.

The other arguments are the same as for add_message.
[ "Report", "an", "ignored", "message", "." ]
python
test
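A standalone sketch of the bookkeeping this method performs (plain dicts standing in for pylint's internals): _suppression_mapping maps a (msgid, line) pair to the line of the disabling pragma, and _ignored_msgs collects which lines that pragma actually suppressed:

from collections import defaultdict

_suppression_mapping = {('W0612', 12): 10}  # W0612 on line 12 was disabled on line 10
_ignored_msgs = defaultdict(set)

msgid, line = 'W0612', 12
try:
    orig_line = _suppression_mapping[(msgid, line)]
    _ignored_msgs[(msgid, orig_line)].add(line)  # now {('W0612', 10): {12}}
except KeyError:
    pass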
allenai/allennlp
allennlp/common/util.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/common/util.py#L272-L306
def get_spacy_model(spacy_model_name: str, pos_tags: bool, parse: bool, ner: bool) -> SpacyModelType:
    """
    In order to avoid loading spacy models a whole bunch of times, we'll save references to them,
    keyed by the options we used to create the spacy model, so any particular configuration only
    gets loaded once.
    """
    options = (spacy_model_name, pos_tags, parse, ner)
    if options not in LOADED_SPACY_MODELS:
        disable = ['vectors', 'textcat']
        if not pos_tags:
            disable.append('tagger')
        if not parse:
            disable.append('parser')
        if not ner:
            disable.append('ner')
        try:
            spacy_model = spacy.load(spacy_model_name, disable=disable)
        except OSError:
            logger.warning(f"Spacy models '{spacy_model_name}' not found. Downloading and installing.")
            spacy_download(spacy_model_name)
            # NOTE(mattg): The following four lines are a workaround suggested by Ines for spacy
            # 2.1.0, which removed the linking that was done in spacy 2.0. importlib doesn't find
            # packages that were installed in the same python session, so the way `spacy_download`
            # works in 2.1.0 is broken for this use case. These four lines can probably be removed
            # at some point in the future, once spacy has figured out a better way to handle this.
            # See https://github.com/explosion/spaCy/issues/3435.
            from spacy.cli import link
            from spacy.util import get_package_path
            package_path = get_package_path(spacy_model_name)
            link(spacy_model_name, spacy_model_name, model_path=package_path)
            spacy_model = spacy.load(spacy_model_name, disable=disable)
        LOADED_SPACY_MODELS[options] = spacy_model
    return LOADED_SPACY_MODELS[options]
[ "def", "get_spacy_model", "(", "spacy_model_name", ":", "str", ",", "pos_tags", ":", "bool", ",", "parse", ":", "bool", ",", "ner", ":", "bool", ")", "->", "SpacyModelType", ":", "options", "=", "(", "spacy_model_name", ",", "pos_tags", ",", "parse", ",", "ner", ")", "if", "options", "not", "in", "LOADED_SPACY_MODELS", ":", "disable", "=", "[", "'vectors'", ",", "'textcat'", "]", "if", "not", "pos_tags", ":", "disable", ".", "append", "(", "'tagger'", ")", "if", "not", "parse", ":", "disable", ".", "append", "(", "'parser'", ")", "if", "not", "ner", ":", "disable", ".", "append", "(", "'ner'", ")", "try", ":", "spacy_model", "=", "spacy", ".", "load", "(", "spacy_model_name", ",", "disable", "=", "disable", ")", "except", "OSError", ":", "logger", ".", "warning", "(", "f\"Spacy models '{spacy_model_name}' not found. Downloading and installing.\"", ")", "spacy_download", "(", "spacy_model_name", ")", "# NOTE(mattg): The following four lines are a workaround suggested by Ines for spacy", "# 2.1.0, which removed the linking that was done in spacy 2.0. importlib doesn't find", "# packages that were installed in the same python session, so the way `spacy_download`", "# works in 2.1.0 is broken for this use case. These four lines can probably be removed", "# at some point in the future, once spacy has figured out a better way to handle this.", "# See https://github.com/explosion/spaCy/issues/3435.", "from", "spacy", ".", "cli", "import", "link", "from", "spacy", ".", "util", "import", "get_package_path", "package_path", "=", "get_package_path", "(", "spacy_model_name", ")", "link", "(", "spacy_model_name", ",", "spacy_model_name", ",", "model_path", "=", "package_path", ")", "spacy_model", "=", "spacy", ".", "load", "(", "spacy_model_name", ",", "disable", "=", "disable", ")", "LOADED_SPACY_MODELS", "[", "options", "]", "=", "spacy_model", "return", "LOADED_SPACY_MODELS", "[", "options", "]" ]
In order to avoid loading spacy models a whole bunch of times, we'll save references to them, keyed by the options we used to create the spacy model, so any particular configuration only gets loaded once.
[ "In", "order", "to", "avoid", "loading", "spacy", "models", "a", "whole", "bunch", "of", "times", "we", "ll", "save", "references", "to", "them", "keyed", "by", "the", "options", "we", "used", "to", "create", "the", "spacy", "model", "so", "any", "particular", "configuration", "only", "gets", "loaded", "once", "." ]
python
train
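A hedged usage sketch (the model name is an assumption and must be installed or downloadable); two calls with identical flags return the same cached pipeline object:

nlp = get_spacy_model('en_core_web_sm', pos_tags=False, parse=False, ner=False)
same = get_spacy_model('en_core_web_sm', pos_tags=False, parse=False, ner=False)
assert nlp is same  # served from LOADED_SPACY_MODELS, loaded only once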
wolverdude/GenSON
genson/schema/node.py
https://github.com/wolverdude/GenSON/blob/76552d23cf9202e8e7c262cb018eb3cb3df686b9/genson/schema/node.py#L18-L37
def add_schema(self, schema):
    """
    Merges in an existing schema.

    arguments:
    * `schema` (required - `dict` or `SchemaNode`):
      an existing JSON Schema to merge.
    """
    # serialize instances of SchemaNode before parsing
    if isinstance(schema, SchemaNode):
        schema = schema.to_schema()

    for subschema in self._get_subschemas(schema):
        # delegate to SchemaType object
        schema_generator = self._get_generator_for_schema(subschema)
        schema_generator.add_schema(subschema)

    # return self for easy method chaining
    return self
[ "def", "add_schema", "(", "self", ",", "schema", ")", ":", "# serialize instances of SchemaNode before parsing", "if", "isinstance", "(", "schema", ",", "SchemaNode", ")", ":", "schema", "=", "schema", ".", "to_schema", "(", ")", "for", "subschema", "in", "self", ".", "_get_subschemas", "(", "schema", ")", ":", "# delegate to SchemaType object", "schema_generator", "=", "self", ".", "_get_generator_for_schema", "(", "subschema", ")", "schema_generator", ".", "add_schema", "(", "subschema", ")", "# return self for easy method chaining", "return", "self" ]
Merges in an existing schema.

arguments:
* `schema` (required - `dict` or `SchemaNode`):
  an existing JSON Schema to merge.
[ "Merges", "in", "an", "existing", "schema", "." ]
python
train
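A hedged sketch against GenSON's public SchemaBuilder, which wraps this node API (the printed schema follows the project's documented example; the chaining that `return self` enables applies at the node level):

from genson import SchemaBuilder

builder = SchemaBuilder()
builder.add_schema({'type': 'object', 'properties': {}})
builder.add_object({'hi': 'there'})
print(builder.to_schema())
# expected: {'$schema': 'http://json-schema.org/schema#', 'type': 'object',
#            'properties': {'hi': {'type': 'string'}}, 'required': ['hi']}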
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_console.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_console.py#L191-L516
def mavlink_packet(self, msg):
    '''handle an incoming mavlink packet'''
    if not isinstance(self.console, wxconsole.MessageConsole):
        return
    if not self.console.is_alive():
        self.mpstate.console = textconsole.SimpleConsole()
        return
    type = msg.get_type()

    if type == 'HEARTBEAT':
        sysid = msg.get_srcSystem()
        if not sysid in self.vehicle_list:
            self.add_new_vehicle(msg)
        if sysid not in self.component_name:
            self.component_name[sysid] = {}
        compid = msg.get_srcComponent()
        if compid not in self.component_name[sysid]:
            self.component_name[sysid][compid] = self.component_type_string(msg)
            self.update_vehicle_menu()

    if self.last_param_sysid_timestamp != self.module('param').new_sysid_timestamp:
        '''a new component ID has appeared for parameters'''
        self.last_param_sysid_timestamp = self.module('param').new_sysid_timestamp
        self.update_vehicle_menu()

    if type in ['RADIO', 'RADIO_STATUS']:
        # handle RADIO msgs from all vehicles
        if msg.rssi < msg.noise + 10 or msg.remrssi < msg.remnoise + 10:
            fg = 'red'
        else:
            fg = 'black'
        self.console.set_status('Radio', 'Radio %u/%u %u/%u' % (msg.rssi, msg.noise, msg.remrssi, msg.remnoise), fg=fg)

    if not self.is_primary_vehicle(msg):
        # don't process msgs from other than primary vehicle, other than
        # updating vehicle list
        return

    master = self.master
    # add some status fields
    if type in ['GPS_RAW', 'GPS_RAW_INT']:
        if type == "GPS_RAW":
            num_sats1 = master.field('GPS_STATUS', 'satellites_visible', 0)
        else:
            num_sats1 = msg.satellites_visible
        num_sats2 = master.field('GPS2_RAW', 'satellites_visible', -1)
        if num_sats2 == -1:
            sats_string = "%u" % num_sats1
        else:
            sats_string = "%u/%u" % (num_sats1, num_sats2)
        if ((msg.fix_type >= 3 and master.mavlink10()) or
                (msg.fix_type == 2 and not master.mavlink10())):
            if (msg.fix_type >= 4):
                fix_type = "%u" % msg.fix_type
            else:
                fix_type = ""
            self.console.set_status('GPS', 'GPS: OK%s (%s)' % (fix_type, sats_string), fg='green')
        else:
            self.console.set_status('GPS', 'GPS: %u (%s)' % (msg.fix_type, sats_string), fg='red')
        if master.mavlink10():
            gps_heading = int(self.mpstate.status.msgs['GPS_RAW_INT'].cog * 0.01)
        else:
            gps_heading = self.mpstate.status.msgs['GPS_RAW'].hdg
        self.console.set_status('Heading', 'Hdg %s/%u' % (master.field('VFR_HUD', 'heading', '-'), gps_heading))

    elif type == 'VFR_HUD':
        if master.mavlink10():
            alt = master.field('GPS_RAW_INT', 'alt', 0) / 1.0e3
        else:
            alt = master.field('GPS_RAW', 'alt', 0)
        home = self.module('wp').get_home()
        if home is not None:
            home_lat = home.x
            home_lng = home.y
        else:
            home_lat = None
            home_lng = None
        lat = master.field('GLOBAL_POSITION_INT', 'lat', 0) * 1.0e-7
        lng = master.field('GLOBAL_POSITION_INT', 'lon', 0) * 1.0e-7
        rel_alt = master.field('GLOBAL_POSITION_INT', 'relative_alt', 0) * 1.0e-3
        agl_alt = None
        if self.settings.basealt != 0:
            agl_alt = self.console.ElevationMap.GetElevation(lat, lng)
            if agl_alt is not None:
                agl_alt = self.settings.basealt - agl_alt
        else:
            try:
                agl_alt_home = self.console.ElevationMap.GetElevation(home_lat, home_lng)
            except Exception as ex:
                print(ex)
                agl_alt_home = None
            if agl_alt_home is not None:
                agl_alt = self.console.ElevationMap.GetElevation(lat, lng)
            if agl_alt is not None:
                agl_alt = agl_alt_home - agl_alt
        if agl_alt is not None:
            agl_alt += rel_alt
            vehicle_agl = master.field('TERRAIN_REPORT', 'current_height', None)
            if vehicle_agl is None:
                vehicle_agl = '---'
            else:
                vehicle_agl = self.height_string(vehicle_agl)
            self.console.set_status('AGL', 'AGL %s/%s' % (self.height_string(agl_alt), vehicle_agl))
        self.console.set_status('Alt', 'Alt %s' % self.height_string(rel_alt))
        self.console.set_status('AirSpeed', 'AirSpeed %s' % self.speed_string(msg.airspeed))
        self.console.set_status('GPSSpeed', 'GPSSpeed %s' % self.speed_string(msg.groundspeed))
        self.console.set_status('Thr', 'Thr %u' % msg.throttle)
        t = time.localtime(msg._timestamp)
        flying = False
        if self.mpstate.vehicle_type == 'copter':
            flying = self.master.motors_armed()
        else:
            flying = msg.groundspeed > 3
        if flying and not self.in_air:
            self.in_air = True
            self.start_time = time.mktime(t)
        elif flying and self.in_air:
            self.total_time = time.mktime(t) - self.start_time
            self.console.set_status('FlightTime', 'FlightTime %u:%02u' % (int(self.total_time)/60, int(self.total_time)%60))
        elif not flying and self.in_air:
            self.in_air = False
            self.total_time = time.mktime(t) - self.start_time
            self.console.set_status('FlightTime', 'FlightTime %u:%02u' % (int(self.total_time)/60, int(self.total_time)%60))

    elif type == 'ATTITUDE':
        self.console.set_status('Roll', 'Roll %u' % math.degrees(msg.roll))
        self.console.set_status('Pitch', 'Pitch %u' % math.degrees(msg.pitch))

    elif type in ['SYS_STATUS']:
        sensors = {'AS': mavutil.mavlink.MAV_SYS_STATUS_SENSOR_DIFFERENTIAL_PRESSURE,
                   'MAG': mavutil.mavlink.MAV_SYS_STATUS_SENSOR_3D_MAG,
                   'INS': mavutil.mavlink.MAV_SYS_STATUS_SENSOR_3D_ACCEL | mavutil.mavlink.MAV_SYS_STATUS_SENSOR_3D_GYRO,
                   'AHRS': mavutil.mavlink.MAV_SYS_STATUS_AHRS,
                   'RC': mavutil.mavlink.MAV_SYS_STATUS_SENSOR_RC_RECEIVER,
                   'TERR': mavutil.mavlink.MAV_SYS_STATUS_TERRAIN,
                   'RNG': mavutil.mavlink.MAV_SYS_STATUS_SENSOR_LASER_POSITION,
                   'LOG': mavutil.mavlink.MAV_SYS_STATUS_LOGGING}
        announce = ['RC']
        for s in sensors.keys():
            bits = sensors[s]
            present = ((msg.onboard_control_sensors_present & bits) == bits)
            enabled = ((msg.onboard_control_sensors_enabled & bits) == bits)
            healthy = ((msg.onboard_control_sensors_health & bits) == bits)
            if not present:
                fg = 'black'
            elif not enabled:
                fg = 'grey'
            elif not healthy:
                fg = 'red'
            else:
                fg = 'green'
            # for terrain show yellow if still loading
            if s == 'TERR' and fg == 'green' and master.field('TERRAIN_REPORT', 'pending', 0) != 0:
                fg = 'yellow'
            self.console.set_status(s, s, fg=fg)
        for s in announce:
            bits = sensors[s]
            enabled = ((msg.onboard_control_sensors_enabled & bits) == bits)
            healthy = ((msg.onboard_control_sensors_health & bits) == bits)
            was_healthy = ((self.last_sys_status_health & bits) == bits)
            if enabled and not healthy and was_healthy:
                self.say("%s fail" % s)
        self.last_sys_status_health = msg.onboard_control_sensors_health

        # check for any error bits being set:
        now = time.time()
        if now - self.last_sys_status_errors_announce > self.mpstate.settings.sys_status_error_warn_interval:
            for field_num in range(1, 5):
                field = "errors_count%u" % field_num
                x = getattr(msg, field, None)
                if x is None:
                    self.console.writeln("Failed to get field %s" % field)
                    self.last_sys_status_errors_announce = now
                    break
                if x != 0:
                    self.last_sys_status_errors_announce = now
                    self.say("Critical failure")
                    break

    elif type == 'WIND':
        self.console.set_status('Wind', 'Wind %u/%.2f' % (msg.direction, msg.speed))

    elif type == 'EKF_STATUS_REPORT':
        highest = 0.0
        vars = ['velocity_variance',
                'pos_horiz_variance',
                'pos_vert_variance',
                'compass_variance',
                'terrain_alt_variance']
        for var in vars:
            v = getattr(msg, var, 0)
            highest = max(v, highest)
        if highest >= 1.0:
            fg = 'red'
        elif highest >= 0.5:
            fg = 'orange'
        else:
            fg = 'green'
        self.console.set_status('EKF', 'EKF', fg=fg)

    elif type == 'HWSTATUS':
        if msg.Vcc >= 4600 and msg.Vcc <= 5300:
            fg = 'green'
        else:
            fg = 'red'
        self.console.set_status('Vcc', 'Vcc %.2f' % (msg.Vcc * 0.001), fg=fg)

    elif type == 'POWER_STATUS':
        if msg.flags & mavutil.mavlink.MAV_POWER_STATUS_CHANGED:
            fg = 'red'
        else:
            fg = 'green'
        status = 'PWR:'
        if msg.flags & mavutil.mavlink.MAV_POWER_STATUS_USB_CONNECTED:
            status += 'U'
        if msg.flags & mavutil.mavlink.MAV_POWER_STATUS_BRICK_VALID:
            status += 'B'
        if msg.flags & mavutil.mavlink.MAV_POWER_STATUS_SERVO_VALID:
            status += 'S'
        if msg.flags & mavutil.mavlink.MAV_POWER_STATUS_PERIPH_OVERCURRENT:
            status += 'O1'
        if msg.flags & mavutil.mavlink.MAV_POWER_STATUS_PERIPH_HIPOWER_OVERCURRENT:
            status += 'O2'
        self.console.set_status('PWR', status, fg=fg)
        self.console.set_status('Srv', 'Srv %.2f' % (msg.Vservo * 0.001), fg='green')

    elif type == 'HEARTBEAT':
        fmode = master.flightmode
        if self.settings.vehicle_name:
            fmode = self.settings.vehicle_name + ':' + fmode
        self.console.set_status('Mode', '%s' % fmode, fg='blue')
        if len(self.vehicle_list) > 1:
            self.console.set_status('SysID', 'Sys:%u' % msg.get_srcSystem(), fg='blue')
        if self.master.motors_armed():
            arm_colour = 'green'
        else:
            arm_colour = 'red'
        armstring = 'ARM'
        # add safety switch state
        if 'SYS_STATUS' in self.mpstate.status.msgs:
            if (self.mpstate.status.msgs['SYS_STATUS'].onboard_control_sensors_enabled & mavutil.mavlink.MAV_SYS_STATUS_SENSOR_MOTOR_OUTPUTS) == 0:
                armstring += '(SAFE)'
        self.console.set_status('ARM', armstring, fg=arm_colour)
        if self.max_link_num != len(self.mpstate.mav_master):
            for i in range(self.max_link_num):
                self.console.set_status('Link%u' % (i+1), '', row=1)
            self.max_link_num = len(self.mpstate.mav_master)
        for m in self.mpstate.mav_master:
            if self.mpstate.settings.checkdelay:
                linkdelay = (self.mpstate.status.highest_msec - m.highest_msec) * 1.0e-3
            else:
                linkdelay = 0
            linkline = "Link %s " % (self.link_label(m))
            fg = 'dark green'
            if m.linkerror:
                linkline += "down"
                fg = 'red'
            else:
                packets_rcvd_percentage = 100
                if (m.mav_count + m.mav_loss) != 0:  # avoid divide-by-zero
                    packets_rcvd_percentage = (100.0 * m.mav_count) / (m.mav_count + m.mav_loss)
                linkbits = ["%u pkts" % m.mav_count,
                            "%u lost" % m.mav_loss,
                            "%.2fs delay" % linkdelay,
                            ]
                try:
                    if m.mav.signing.sig_count:
                        # other end is sending us signed packets
                        if not m.mav.signing.secret_key:
                            # we've received signed packets but
                            # can't verify them
                            fg = 'orange'
                            linkbits.append("!KEY")
                        elif not m.mav.signing.sign_outgoing:
                            # we've received signed packets but aren't
                            # signing outselves; this can lead to hairloss
                            fg = 'orange'
                            linkbits.append("!SIGNING")
                        if m.mav.signing.badsig_count:
                            fg = 'orange'
                            linkbits.append("%u badsigs" % m.mav.signing.badsig_count)
                except AttributeError as e:
                    # mav.signing.sig_count probably doesn't exist
                    pass
                linkline += "OK {rcv_pct:.1f}% ({bits})".format(
                    rcv_pct=packets_rcvd_percentage,
                    bits=", ".join(linkbits))
            if linkdelay > 1 and fg == 'dark green':
                fg = 'orange'
            self.console.set_status('Link%u' % m.linknum, linkline, row=1, fg=fg)

    elif type in ['WAYPOINT_CURRENT', 'MISSION_CURRENT']:
        wpmax = self.module('wp').wploader.count()
        if wpmax > 0:
            wpmax = "/%u" % wpmax
        else:
            wpmax = ""
        self.console.set_status('WP', 'WP %u%s' % (msg.seq, wpmax))
        lat = master.field('GLOBAL_POSITION_INT', 'lat', 0) * 1.0e-7
        lng = master.field('GLOBAL_POSITION_INT', 'lon', 0) * 1.0e-7
        if lat != 0 and lng != 0:
            airspeed = master.field('VFR_HUD', 'airspeed', 30)
            if abs(airspeed - self.speed) > 5:
                self.speed = airspeed
            else:
                self.speed = 0.98 * self.speed + 0.02 * airspeed
            self.speed = max(1, self.speed)
            time_remaining = int(self.estimated_time_remaining(lat, lng, msg.seq, self.speed))
            self.console.set_status('ETR', 'ETR %u:%02u' % (time_remaining/60, time_remaining%60))

    elif type == 'NAV_CONTROLLER_OUTPUT':
        self.console.set_status('WPDist', 'Distance %s' % self.dist_string(msg.wp_dist))
        self.console.set_status('WPBearing', 'Bearing %u' % msg.target_bearing)
        if msg.alt_error > 0:
            alt_error_sign = "L"
        else:
            alt_error_sign = "H"
        if msg.aspd_error > 0:
            aspd_error_sign = "L"
        else:
            aspd_error_sign = "H"
        if math.isnan(msg.alt_error):
            alt_error = "NaN"
        else:
            alt_error = "%d%s" % (msg.alt_error, alt_error_sign)
        self.console.set_status('AltError', 'AltError %s' % alt_error)
        self.console.set_status('AspdError', 'AspdError %.1f%s' % (msg.aspd_error * 0.01, aspd_error_sign))
[ "def", "mavlink_packet", "(", "self", ",", "msg", ")", ":", "if", "not", "isinstance", "(", "self", ".", "console", ",", "wxconsole", ".", "MessageConsole", ")", ":", "return", "if", "not", "self", ".", "console", ".", "is_alive", "(", ")", ":", "self", ".", "mpstate", ".", "console", "=", "textconsole", ".", "SimpleConsole", "(", ")", "return", "type", "=", "msg", ".", "get_type", "(", ")", "if", "type", "==", "'HEARTBEAT'", ":", "sysid", "=", "msg", ".", "get_srcSystem", "(", ")", "if", "not", "sysid", "in", "self", ".", "vehicle_list", ":", "self", ".", "add_new_vehicle", "(", "msg", ")", "if", "sysid", "not", "in", "self", ".", "component_name", ":", "self", ".", "component_name", "[", "sysid", "]", "=", "{", "}", "compid", "=", "msg", ".", "get_srcComponent", "(", ")", "if", "compid", "not", "in", "self", ".", "component_name", "[", "sysid", "]", ":", "self", ".", "component_name", "[", "sysid", "]", "[", "compid", "]", "=", "self", ".", "component_type_string", "(", "msg", ")", "self", ".", "update_vehicle_menu", "(", ")", "if", "self", ".", "last_param_sysid_timestamp", "!=", "self", ".", "module", "(", "'param'", ")", ".", "new_sysid_timestamp", ":", "'''a new component ID has appeared for parameters'''", "self", ".", "last_param_sysid_timestamp", "=", "self", ".", "module", "(", "'param'", ")", ".", "new_sysid_timestamp", "self", ".", "update_vehicle_menu", "(", ")", "if", "type", "in", "[", "'RADIO'", ",", "'RADIO_STATUS'", "]", ":", "# handle RADIO msgs from all vehicles", "if", "msg", ".", "rssi", "<", "msg", ".", "noise", "+", "10", "or", "msg", ".", "remrssi", "<", "msg", ".", "remnoise", "+", "10", ":", "fg", "=", "'red'", "else", ":", "fg", "=", "'black'", "self", ".", "console", ".", "set_status", "(", "'Radio'", ",", "'Radio %u/%u %u/%u'", "%", "(", "msg", ".", "rssi", ",", "msg", ".", "noise", ",", "msg", ".", "remrssi", ",", "msg", ".", "remnoise", ")", ",", "fg", "=", "fg", ")", "if", "not", "self", ".", "is_primary_vehicle", "(", "msg", ")", ":", "# don't process msgs from other than primary vehicle, other than", "# updating vehicle list", "return", "master", "=", "self", ".", "master", "# add some status fields", "if", "type", "in", "[", "'GPS_RAW'", ",", "'GPS_RAW_INT'", "]", ":", "if", "type", "==", "\"GPS_RAW\"", ":", "num_sats1", "=", "master", ".", "field", "(", "'GPS_STATUS'", ",", "'satellites_visible'", ",", "0", ")", "else", ":", "num_sats1", "=", "msg", ".", "satellites_visible", "num_sats2", "=", "master", ".", "field", "(", "'GPS2_RAW'", ",", "'satellites_visible'", ",", "-", "1", ")", "if", "num_sats2", "==", "-", "1", ":", "sats_string", "=", "\"%u\"", "%", "num_sats1", "else", ":", "sats_string", "=", "\"%u/%u\"", "%", "(", "num_sats1", ",", "num_sats2", ")", "if", "(", "(", "msg", ".", "fix_type", ">=", "3", "and", "master", ".", "mavlink10", "(", ")", ")", "or", "(", "msg", ".", "fix_type", "==", "2", "and", "not", "master", ".", "mavlink10", "(", ")", ")", ")", ":", "if", "(", "msg", ".", "fix_type", ">=", "4", ")", ":", "fix_type", "=", "\"%u\"", "%", "msg", ".", "fix_type", "else", ":", "fix_type", "=", "\"\"", "self", ".", "console", ".", "set_status", "(", "'GPS'", ",", "'GPS: OK%s (%s)'", "%", "(", "fix_type", ",", "sats_string", ")", ",", "fg", "=", "'green'", ")", "else", ":", "self", ".", "console", ".", "set_status", "(", "'GPS'", ",", "'GPS: %u (%s)'", "%", "(", "msg", ".", "fix_type", ",", "sats_string", ")", ",", "fg", "=", "'red'", ")", "if", "master", ".", "mavlink10", "(", ")", ":", "gps_heading", "=", "int", "(", "self", ".", 
"mpstate", ".", "status", ".", "msgs", "[", "'GPS_RAW_INT'", "]", ".", "cog", "*", "0.01", ")", "else", ":", "gps_heading", "=", "self", ".", "mpstate", ".", "status", ".", "msgs", "[", "'GPS_RAW'", "]", ".", "hdg", "self", ".", "console", ".", "set_status", "(", "'Heading'", ",", "'Hdg %s/%u'", "%", "(", "master", ".", "field", "(", "'VFR_HUD'", ",", "'heading'", ",", "'-'", ")", ",", "gps_heading", ")", ")", "elif", "type", "==", "'VFR_HUD'", ":", "if", "master", ".", "mavlink10", "(", ")", ":", "alt", "=", "master", ".", "field", "(", "'GPS_RAW_INT'", ",", "'alt'", ",", "0", ")", "/", "1.0e3", "else", ":", "alt", "=", "master", ".", "field", "(", "'GPS_RAW'", ",", "'alt'", ",", "0", ")", "home", "=", "self", ".", "module", "(", "'wp'", ")", ".", "get_home", "(", ")", "if", "home", "is", "not", "None", ":", "home_lat", "=", "home", ".", "x", "home_lng", "=", "home", ".", "y", "else", ":", "home_lat", "=", "None", "home_lng", "=", "None", "lat", "=", "master", ".", "field", "(", "'GLOBAL_POSITION_INT'", ",", "'lat'", ",", "0", ")", "*", "1.0e-7", "lng", "=", "master", ".", "field", "(", "'GLOBAL_POSITION_INT'", ",", "'lon'", ",", "0", ")", "*", "1.0e-7", "rel_alt", "=", "master", ".", "field", "(", "'GLOBAL_POSITION_INT'", ",", "'relative_alt'", ",", "0", ")", "*", "1.0e-3", "agl_alt", "=", "None", "if", "self", ".", "settings", ".", "basealt", "!=", "0", ":", "agl_alt", "=", "self", ".", "console", ".", "ElevationMap", ".", "GetElevation", "(", "lat", ",", "lng", ")", "if", "agl_alt", "is", "not", "None", ":", "agl_alt", "=", "self", ".", "settings", ".", "basealt", "-", "agl_alt", "else", ":", "try", ":", "agl_alt_home", "=", "self", ".", "console", ".", "ElevationMap", ".", "GetElevation", "(", "home_lat", ",", "home_lng", ")", "except", "Exception", "as", "ex", ":", "print", "(", "ex", ")", "agl_alt_home", "=", "None", "if", "agl_alt_home", "is", "not", "None", ":", "agl_alt", "=", "self", ".", "console", ".", "ElevationMap", ".", "GetElevation", "(", "lat", ",", "lng", ")", "if", "agl_alt", "is", "not", "None", ":", "agl_alt", "=", "agl_alt_home", "-", "agl_alt", "if", "agl_alt", "is", "not", "None", ":", "agl_alt", "+=", "rel_alt", "vehicle_agl", "=", "master", ".", "field", "(", "'TERRAIN_REPORT'", ",", "'current_height'", ",", "None", ")", "if", "vehicle_agl", "is", "None", ":", "vehicle_agl", "=", "'---'", "else", ":", "vehicle_agl", "=", "self", ".", "height_string", "(", "vehicle_agl", ")", "self", ".", "console", ".", "set_status", "(", "'AGL'", ",", "'AGL %s/%s'", "%", "(", "self", ".", "height_string", "(", "agl_alt", ")", ",", "vehicle_agl", ")", ")", "self", ".", "console", ".", "set_status", "(", "'Alt'", ",", "'Alt %s'", "%", "self", ".", "height_string", "(", "rel_alt", ")", ")", "self", ".", "console", ".", "set_status", "(", "'AirSpeed'", ",", "'AirSpeed %s'", "%", "self", ".", "speed_string", "(", "msg", ".", "airspeed", ")", ")", "self", ".", "console", ".", "set_status", "(", "'GPSSpeed'", ",", "'GPSSpeed %s'", "%", "self", ".", "speed_string", "(", "msg", ".", "groundspeed", ")", ")", "self", ".", "console", ".", "set_status", "(", "'Thr'", ",", "'Thr %u'", "%", "msg", ".", "throttle", ")", "t", "=", "time", ".", "localtime", "(", "msg", ".", "_timestamp", ")", "flying", "=", "False", "if", "self", ".", "mpstate", ".", "vehicle_type", "==", "'copter'", ":", "flying", "=", "self", ".", "master", ".", "motors_armed", "(", ")", "else", ":", "flying", "=", "msg", ".", "groundspeed", ">", "3", "if", "flying", "and", "not", "self", ".", "in_air", ":", "self", ".", 
"in_air", "=", "True", "self", ".", "start_time", "=", "time", ".", "mktime", "(", "t", ")", "elif", "flying", "and", "self", ".", "in_air", ":", "self", ".", "total_time", "=", "time", ".", "mktime", "(", "t", ")", "-", "self", ".", "start_time", "self", ".", "console", ".", "set_status", "(", "'FlightTime'", ",", "'FlightTime %u:%02u'", "%", "(", "int", "(", "self", ".", "total_time", ")", "/", "60", ",", "int", "(", "self", ".", "total_time", ")", "%", "60", ")", ")", "elif", "not", "flying", "and", "self", ".", "in_air", ":", "self", ".", "in_air", "=", "False", "self", ".", "total_time", "=", "time", ".", "mktime", "(", "t", ")", "-", "self", ".", "start_time", "self", ".", "console", ".", "set_status", "(", "'FlightTime'", ",", "'FlightTime %u:%02u'", "%", "(", "int", "(", "self", ".", "total_time", ")", "/", "60", ",", "int", "(", "self", ".", "total_time", ")", "%", "60", ")", ")", "elif", "type", "==", "'ATTITUDE'", ":", "self", ".", "console", ".", "set_status", "(", "'Roll'", ",", "'Roll %u'", "%", "math", ".", "degrees", "(", "msg", ".", "roll", ")", ")", "self", ".", "console", ".", "set_status", "(", "'Pitch'", ",", "'Pitch %u'", "%", "math", ".", "degrees", "(", "msg", ".", "pitch", ")", ")", "elif", "type", "in", "[", "'SYS_STATUS'", "]", ":", "sensors", "=", "{", "'AS'", ":", "mavutil", ".", "mavlink", ".", "MAV_SYS_STATUS_SENSOR_DIFFERENTIAL_PRESSURE", ",", "'MAG'", ":", "mavutil", ".", "mavlink", ".", "MAV_SYS_STATUS_SENSOR_3D_MAG", ",", "'INS'", ":", "mavutil", ".", "mavlink", ".", "MAV_SYS_STATUS_SENSOR_3D_ACCEL", "|", "mavutil", ".", "mavlink", ".", "MAV_SYS_STATUS_SENSOR_3D_GYRO", ",", "'AHRS'", ":", "mavutil", ".", "mavlink", ".", "MAV_SYS_STATUS_AHRS", ",", "'RC'", ":", "mavutil", ".", "mavlink", ".", "MAV_SYS_STATUS_SENSOR_RC_RECEIVER", ",", "'TERR'", ":", "mavutil", ".", "mavlink", ".", "MAV_SYS_STATUS_TERRAIN", ",", "'RNG'", ":", "mavutil", ".", "mavlink", ".", "MAV_SYS_STATUS_SENSOR_LASER_POSITION", ",", "'LOG'", ":", "mavutil", ".", "mavlink", ".", "MAV_SYS_STATUS_LOGGING", ",", "}", "announce", "=", "[", "'RC'", "]", "for", "s", "in", "sensors", ".", "keys", "(", ")", ":", "bits", "=", "sensors", "[", "s", "]", "present", "=", "(", "(", "msg", ".", "onboard_control_sensors_present", "&", "bits", ")", "==", "bits", ")", "enabled", "=", "(", "(", "msg", ".", "onboard_control_sensors_enabled", "&", "bits", ")", "==", "bits", ")", "healthy", "=", "(", "(", "msg", ".", "onboard_control_sensors_health", "&", "bits", ")", "==", "bits", ")", "if", "not", "present", ":", "fg", "=", "'black'", "elif", "not", "enabled", ":", "fg", "=", "'grey'", "elif", "not", "healthy", ":", "fg", "=", "'red'", "else", ":", "fg", "=", "'green'", "# for terrain show yellow if still loading", "if", "s", "==", "'TERR'", "and", "fg", "==", "'green'", "and", "master", ".", "field", "(", "'TERRAIN_REPORT'", ",", "'pending'", ",", "0", ")", "!=", "0", ":", "fg", "=", "'yellow'", "self", ".", "console", ".", "set_status", "(", "s", ",", "s", ",", "fg", "=", "fg", ")", "for", "s", "in", "announce", ":", "bits", "=", "sensors", "[", "s", "]", "enabled", "=", "(", "(", "msg", ".", "onboard_control_sensors_enabled", "&", "bits", ")", "==", "bits", ")", "healthy", "=", "(", "(", "msg", ".", "onboard_control_sensors_health", "&", "bits", ")", "==", "bits", ")", "was_healthy", "=", "(", "(", "self", ".", "last_sys_status_health", "&", "bits", ")", "==", "bits", ")", "if", "enabled", "and", "not", "healthy", "and", "was_healthy", ":", "self", ".", "say", "(", "\"%s fail\"", "%", "s", ")", "self", ".", 
"last_sys_status_health", "=", "msg", ".", "onboard_control_sensors_health", "# check for any error bits being set:", "now", "=", "time", ".", "time", "(", ")", "if", "now", "-", "self", ".", "last_sys_status_errors_announce", ">", "self", ".", "mpstate", ".", "settings", ".", "sys_status_error_warn_interval", ":", "for", "field_num", "in", "range", "(", "1", ",", "5", ")", ":", "field", "=", "\"errors_count%u\"", "%", "field_num", "x", "=", "getattr", "(", "msg", ",", "field", ",", "None", ")", "if", "x", "is", "None", ":", "self", ".", "console", ".", "writeln", "(", "\"Failed to get field %s\"", "%", "field", ")", "self", ".", "last_sys_status_errors_announce", "=", "now", "break", "if", "x", "!=", "0", ":", "self", ".", "last_sys_status_errors_announce", "=", "now", "self", ".", "say", "(", "\"Critical failure\"", ")", "break", "elif", "type", "==", "'WIND'", ":", "self", ".", "console", ".", "set_status", "(", "'Wind'", ",", "'Wind %u/%.2f'", "%", "(", "msg", ".", "direction", ",", "msg", ".", "speed", ")", ")", "elif", "type", "==", "'EKF_STATUS_REPORT'", ":", "highest", "=", "0.0", "vars", "=", "[", "'velocity_variance'", ",", "'pos_horiz_variance'", ",", "'pos_vert_variance'", ",", "'compass_variance'", ",", "'terrain_alt_variance'", "]", "for", "var", "in", "vars", ":", "v", "=", "getattr", "(", "msg", ",", "var", ",", "0", ")", "highest", "=", "max", "(", "v", ",", "highest", ")", "if", "highest", ">=", "1.0", ":", "fg", "=", "'red'", "elif", "highest", ">=", "0.5", ":", "fg", "=", "'orange'", "else", ":", "fg", "=", "'green'", "self", ".", "console", ".", "set_status", "(", "'EKF'", ",", "'EKF'", ",", "fg", "=", "fg", ")", "elif", "type", "==", "'HWSTATUS'", ":", "if", "msg", ".", "Vcc", ">=", "4600", "and", "msg", ".", "Vcc", "<=", "5300", ":", "fg", "=", "'green'", "else", ":", "fg", "=", "'red'", "self", ".", "console", ".", "set_status", "(", "'Vcc'", ",", "'Vcc %.2f'", "%", "(", "msg", ".", "Vcc", "*", "0.001", ")", ",", "fg", "=", "fg", ")", "elif", "type", "==", "'POWER_STATUS'", ":", "if", "msg", ".", "flags", "&", "mavutil", ".", "mavlink", ".", "MAV_POWER_STATUS_CHANGED", ":", "fg", "=", "'red'", "else", ":", "fg", "=", "'green'", "status", "=", "'PWR:'", "if", "msg", ".", "flags", "&", "mavutil", ".", "mavlink", ".", "MAV_POWER_STATUS_USB_CONNECTED", ":", "status", "+=", "'U'", "if", "msg", ".", "flags", "&", "mavutil", ".", "mavlink", ".", "MAV_POWER_STATUS_BRICK_VALID", ":", "status", "+=", "'B'", "if", "msg", ".", "flags", "&", "mavutil", ".", "mavlink", ".", "MAV_POWER_STATUS_SERVO_VALID", ":", "status", "+=", "'S'", "if", "msg", ".", "flags", "&", "mavutil", ".", "mavlink", ".", "MAV_POWER_STATUS_PERIPH_OVERCURRENT", ":", "status", "+=", "'O1'", "if", "msg", ".", "flags", "&", "mavutil", ".", "mavlink", ".", "MAV_POWER_STATUS_PERIPH_HIPOWER_OVERCURRENT", ":", "status", "+=", "'O2'", "self", ".", "console", ".", "set_status", "(", "'PWR'", ",", "status", ",", "fg", "=", "fg", ")", "self", ".", "console", ".", "set_status", "(", "'Srv'", ",", "'Srv %.2f'", "%", "(", "msg", ".", "Vservo", "*", "0.001", ")", ",", "fg", "=", "'green'", ")", "elif", "type", "==", "'HEARTBEAT'", ":", "fmode", "=", "master", ".", "flightmode", "if", "self", ".", "settings", ".", "vehicle_name", ":", "fmode", "=", "self", ".", "settings", ".", "vehicle_name", "+", "':'", "+", "fmode", "self", ".", "console", ".", "set_status", "(", "'Mode'", ",", "'%s'", "%", "fmode", ",", "fg", "=", "'blue'", ")", "if", "len", "(", "self", ".", "vehicle_list", ")", ">", "1", ":", "self", ".", "console", 
".", "set_status", "(", "'SysID'", ",", "'Sys:%u'", "%", "msg", ".", "get_srcSystem", "(", ")", ",", "fg", "=", "'blue'", ")", "if", "self", ".", "master", ".", "motors_armed", "(", ")", ":", "arm_colour", "=", "'green'", "else", ":", "arm_colour", "=", "'red'", "armstring", "=", "'ARM'", "# add safety switch state", "if", "'SYS_STATUS'", "in", "self", ".", "mpstate", ".", "status", ".", "msgs", ":", "if", "(", "self", ".", "mpstate", ".", "status", ".", "msgs", "[", "'SYS_STATUS'", "]", ".", "onboard_control_sensors_enabled", "&", "mavutil", ".", "mavlink", ".", "MAV_SYS_STATUS_SENSOR_MOTOR_OUTPUTS", ")", "==", "0", ":", "armstring", "+=", "'(SAFE)'", "self", ".", "console", ".", "set_status", "(", "'ARM'", ",", "armstring", ",", "fg", "=", "arm_colour", ")", "if", "self", ".", "max_link_num", "!=", "len", "(", "self", ".", "mpstate", ".", "mav_master", ")", ":", "for", "i", "in", "range", "(", "self", ".", "max_link_num", ")", ":", "self", ".", "console", ".", "set_status", "(", "'Link%u'", "%", "(", "i", "+", "1", ")", ",", "''", ",", "row", "=", "1", ")", "self", ".", "max_link_num", "=", "len", "(", "self", ".", "mpstate", ".", "mav_master", ")", "for", "m", "in", "self", ".", "mpstate", ".", "mav_master", ":", "if", "self", ".", "mpstate", ".", "settings", ".", "checkdelay", ":", "linkdelay", "=", "(", "self", ".", "mpstate", ".", "status", ".", "highest_msec", "-", "m", ".", "highest_msec", ")", "*", "1.0e-3", "else", ":", "linkdelay", "=", "0", "linkline", "=", "\"Link %s \"", "%", "(", "self", ".", "link_label", "(", "m", ")", ")", "fg", "=", "'dark green'", "if", "m", ".", "linkerror", ":", "linkline", "+=", "\"down\"", "fg", "=", "'red'", "else", ":", "packets_rcvd_percentage", "=", "100", "if", "(", "m", ".", "mav_count", "+", "m", ".", "mav_loss", ")", "!=", "0", ":", "#avoid divide-by-zero", "packets_rcvd_percentage", "=", "(", "100.0", "*", "m", ".", "mav_count", ")", "/", "(", "m", ".", "mav_count", "+", "m", ".", "mav_loss", ")", "linkbits", "=", "[", "\"%u pkts\"", "%", "m", ".", "mav_count", ",", "\"%u lost\"", "%", "m", ".", "mav_loss", ",", "\"%.2fs delay\"", "%", "linkdelay", ",", "]", "try", ":", "if", "m", ".", "mav", ".", "signing", ".", "sig_count", ":", "# other end is sending us signed packets", "if", "not", "m", ".", "mav", ".", "signing", ".", "secret_key", ":", "# we've received signed packets but", "# can't verify them", "fg", "=", "'orange'", "linkbits", ".", "append", "(", "\"!KEY\"", ")", "elif", "not", "m", ".", "mav", ".", "signing", ".", "sign_outgoing", ":", "# we've received signed packets but aren't", "# signing outselves; this can lead to hairloss", "fg", "=", "'orange'", "linkbits", ".", "append", "(", "\"!SIGNING\"", ")", "if", "m", ".", "mav", ".", "signing", ".", "badsig_count", ":", "fg", "=", "'orange'", "linkbits", ".", "append", "(", "\"%u badsigs\"", "%", "m", ".", "mav", ".", "signing", ".", "badsig_count", ")", "except", "AttributeError", "as", "e", ":", "# mav.signing.sig_count probably doesn't exist", "pass", "linkline", "+=", "\"OK {rcv_pct:.1f}% ({bits})\"", ".", "format", "(", "rcv_pct", "=", "packets_rcvd_percentage", ",", "bits", "=", "\", \"", ".", "join", "(", "linkbits", ")", ")", "if", "linkdelay", ">", "1", "and", "fg", "==", "'dark green'", ":", "fg", "=", "'orange'", "self", ".", "console", ".", "set_status", "(", "'Link%u'", "%", "m", ".", "linknum", ",", "linkline", ",", "row", "=", "1", ",", "fg", "=", "fg", ")", "elif", "type", "in", "[", "'WAYPOINT_CURRENT'", ",", "'MISSION_CURRENT'", "]", ":", "wpmax", "=", "self", 
".", "module", "(", "'wp'", ")", ".", "wploader", ".", "count", "(", ")", "if", "wpmax", ">", "0", ":", "wpmax", "=", "\"/%u\"", "%", "wpmax", "else", ":", "wpmax", "=", "\"\"", "self", ".", "console", ".", "set_status", "(", "'WP'", ",", "'WP %u%s'", "%", "(", "msg", ".", "seq", ",", "wpmax", ")", ")", "lat", "=", "master", ".", "field", "(", "'GLOBAL_POSITION_INT'", ",", "'lat'", ",", "0", ")", "*", "1.0e-7", "lng", "=", "master", ".", "field", "(", "'GLOBAL_POSITION_INT'", ",", "'lon'", ",", "0", ")", "*", "1.0e-7", "if", "lat", "!=", "0", "and", "lng", "!=", "0", ":", "airspeed", "=", "master", ".", "field", "(", "'VFR_HUD'", ",", "'airspeed'", ",", "30", ")", "if", "abs", "(", "airspeed", "-", "self", ".", "speed", ")", ">", "5", ":", "self", ".", "speed", "=", "airspeed", "else", ":", "self", ".", "speed", "=", "0.98", "*", "self", ".", "speed", "+", "0.02", "*", "airspeed", "self", ".", "speed", "=", "max", "(", "1", ",", "self", ".", "speed", ")", "time_remaining", "=", "int", "(", "self", ".", "estimated_time_remaining", "(", "lat", ",", "lng", ",", "msg", ".", "seq", ",", "self", ".", "speed", ")", ")", "self", ".", "console", ".", "set_status", "(", "'ETR'", ",", "'ETR %u:%02u'", "%", "(", "time_remaining", "/", "60", ",", "time_remaining", "%", "60", ")", ")", "elif", "type", "==", "'NAV_CONTROLLER_OUTPUT'", ":", "self", ".", "console", ".", "set_status", "(", "'WPDist'", ",", "'Distance %s'", "%", "self", ".", "dist_string", "(", "msg", ".", "wp_dist", ")", ")", "self", ".", "console", ".", "set_status", "(", "'WPBearing'", ",", "'Bearing %u'", "%", "msg", ".", "target_bearing", ")", "if", "msg", ".", "alt_error", ">", "0", ":", "alt_error_sign", "=", "\"L\"", "else", ":", "alt_error_sign", "=", "\"H\"", "if", "msg", ".", "aspd_error", ">", "0", ":", "aspd_error_sign", "=", "\"L\"", "else", ":", "aspd_error_sign", "=", "\"H\"", "if", "math", ".", "isnan", "(", "msg", ".", "alt_error", ")", ":", "alt_error", "=", "\"NaN\"", "else", ":", "alt_error", "=", "\"%d%s\"", "%", "(", "msg", ".", "alt_error", ",", "alt_error_sign", ")", "self", ".", "console", ".", "set_status", "(", "'AltError'", ",", "'AltError %s'", "%", "alt_error", ")", "self", ".", "console", ".", "set_status", "(", "'AspdError'", ",", "'AspdError %.1f%s'", "%", "(", "msg", ".", "aspd_error", "*", "0.01", ",", "aspd_error_sign", ")", ")" ]
handle an incoming mavlink packet
[ "handle", "an", "incoming", "mavlink", "packet" ]
python
train
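The link-status branch reduces to a small self-contained rule; a sketch with hypothetical counters (not MAVProxy's link objects):

def link_summary(mav_count, mav_loss, linkdelay):
    # packet-success percentage, guarding against divide-by-zero
    pct = 100.0 if (mav_count + mav_loss) == 0 else (100.0 * mav_count) / (mav_count + mav_loss)
    fg = 'orange' if linkdelay > 1 else 'dark green'
    return "OK %.1f%% (%u pkts, %u lost, %.2fs delay)" % (pct, mav_count, mav_loss, linkdelay), fg

print(link_summary(990, 10, 0.2))  # ('OK 99.0% (990 pkts, 10 lost, 0.20s delay)', 'dark green')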
LonamiWebs/Telethon
telethon/extensions/markdown.py
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/extensions/markdown.py#L132-L200
def unparse(text, entities, delimiters=None, url_fmt=None):
    """
    Performs the reverse operation to .parse(), effectively returning
    markdown-like syntax given a normal text and its MessageEntity's.

    :param text: the text to be reconverted into markdown.
    :param entities: the MessageEntity's applied to the text.
    :return: a markdown-like text representing the combination of both inputs.
    """
    if not text or not entities:
        return text

    if not delimiters:
        if delimiters is not None:
            return text
        delimiters = DEFAULT_DELIMITERS

    if url_fmt is None:
        url_fmt = DEFAULT_URL_FORMAT

    if isinstance(entities, TLObject):
        entities = (entities,)
    else:
        entities = tuple(sorted(entities, key=lambda e: e.offset, reverse=True))

    text = add_surrogate(text)
    delimiters = {v: k for k, v in delimiters.items()}
    for entity in entities:
        s = entity.offset
        e = entity.offset + entity.length
        delimiter = delimiters.get(type(entity), None)
        if delimiter:
            text = text[:s] + delimiter + text[s:e] + delimiter + text[e:]
        elif url_fmt:
            url = None
            if isinstance(entity, MessageEntityTextUrl):
                url = entity.url
            elif isinstance(entity, MessageEntityMentionName):
                url = 'tg://user?id={}'.format(entity.user_id)

            if url:
                # It's possible that entities are malformed and end up in the
                # middle of some character, like emoji, by using malformed
                # clients or bots. Try decoding the current one to check if
                # this is the case, and if it is, advance the entity.
                while e <= len(text):
                    try:
                        del_surrogate(text[s:e])
                        break
                    except UnicodeDecodeError:
                        e += 1
                else:
                    # Out of bounds, no luck going forward
                    while e > s:
                        try:
                            del_surrogate(text[s:e])
                            break
                        except UnicodeDecodeError:
                            e -= 1
                    else:
                        # No luck going backwards either, ignore entity
                        continue

                text = (
                    text[:s] +
                    add_surrogate(url_fmt.format(text[s:e], url)) +
                    text[e:]
                )

    return del_surrogate(text)
[ "def", "unparse", "(", "text", ",", "entities", ",", "delimiters", "=", "None", ",", "url_fmt", "=", "None", ")", ":", "if", "not", "text", "or", "not", "entities", ":", "return", "text", "if", "not", "delimiters", ":", "if", "delimiters", "is", "not", "None", ":", "return", "text", "delimiters", "=", "DEFAULT_DELIMITERS", "if", "url_fmt", "is", "None", ":", "url_fmt", "=", "DEFAULT_URL_FORMAT", "if", "isinstance", "(", "entities", ",", "TLObject", ")", ":", "entities", "=", "(", "entities", ",", ")", "else", ":", "entities", "=", "tuple", "(", "sorted", "(", "entities", ",", "key", "=", "lambda", "e", ":", "e", ".", "offset", ",", "reverse", "=", "True", ")", ")", "text", "=", "add_surrogate", "(", "text", ")", "delimiters", "=", "{", "v", ":", "k", "for", "k", ",", "v", "in", "delimiters", ".", "items", "(", ")", "}", "for", "entity", "in", "entities", ":", "s", "=", "entity", ".", "offset", "e", "=", "entity", ".", "offset", "+", "entity", ".", "length", "delimiter", "=", "delimiters", ".", "get", "(", "type", "(", "entity", ")", ",", "None", ")", "if", "delimiter", ":", "text", "=", "text", "[", ":", "s", "]", "+", "delimiter", "+", "text", "[", "s", ":", "e", "]", "+", "delimiter", "+", "text", "[", "e", ":", "]", "elif", "url_fmt", ":", "url", "=", "None", "if", "isinstance", "(", "entity", ",", "MessageEntityTextUrl", ")", ":", "url", "=", "entity", ".", "url", "elif", "isinstance", "(", "entity", ",", "MessageEntityMentionName", ")", ":", "url", "=", "'tg://user?id={}'", ".", "format", "(", "entity", ".", "user_id", ")", "if", "url", ":", "# It's possible that entities are malformed and end up in the", "# middle of some character, like emoji, by using malformed", "# clients or bots. Try decoding the current one to check if", "# this is the case, and if it is, advance the entity.", "while", "e", "<=", "len", "(", "text", ")", ":", "try", ":", "del_surrogate", "(", "text", "[", "s", ":", "e", "]", ")", "break", "except", "UnicodeDecodeError", ":", "e", "+=", "1", "else", ":", "# Out of bounds, no luck going forward", "while", "e", ">", "s", ":", "try", ":", "del_surrogate", "(", "text", "[", "s", ":", "e", "]", ")", "break", "except", "UnicodeDecodeError", ":", "e", "-=", "1", "else", ":", "# No luck going backwards either, ignore entity", "continue", "text", "=", "(", "text", "[", ":", "s", "]", "+", "add_surrogate", "(", "url_fmt", ".", "format", "(", "text", "[", "s", ":", "e", "]", ",", "url", ")", ")", "+", "text", "[", "e", ":", "]", ")", "return", "del_surrogate", "(", "text", ")" ]
Performs the reverse operation to .parse(), effectively returning
markdown-like syntax given a normal text and its MessageEntity's.

:param text: the text to be reconverted into markdown.
:param entities: the MessageEntity's applied to the text.
:return: a markdown-like text representing the combination of both inputs.
[ "Performs", "the", "reverse", "operation", "to", ".", "parse", "()", "effectively", "returning", "markdown", "-", "like", "syntax", "given", "a", "normal", "text", "and", "its", "MessageEntity", "s", "." ]
python
train
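A hedged round-trip sketch, assuming the sibling markdown.parse and its default '**' bold delimiter:

from telethon.extensions import markdown

stripped, entities = markdown.parse('hello **world**')
print(stripped)                              # 'hello world'
print(markdown.unparse(stripped, entities))  # 'hello **world**'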
genialis/resolwe
resolwe/flow/migrations/0006_add_total_size.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/migrations/0006_add_total_size.py#L11-L16
def calculate_total_size(apps, schema_editor):
    """Add ``total_size`` field to all file/dir-type outputs."""
    Data = apps.get_model('flow', 'Data')
    for data in Data.objects.all():
        hydrate_size(data, force=True)
        data.save()
[ "def", "calculate_total_size", "(", "apps", ",", "schema_editor", ")", ":", "Data", "=", "apps", ".", "get_model", "(", "'flow'", ",", "'Data'", ")", "for", "data", "in", "Data", ".", "objects", ".", "all", "(", ")", ":", "hydrate_size", "(", "data", ",", "force", "=", "True", ")", "data", ".", "save", "(", ")" ]
Add ``total_size`` field to all file/dir-type outputs.
[ "Add", "total_size", "field", "to", "all", "file", "/", "dir", "-", "type", "outputs", "." ]
python
train
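Such a function is typically wired in with migrations.RunPython; a minimal sketch of the enclosing Migration (standard Django API, with a hypothetical dependency name):

from django.db import migrations

class Migration(migrations.Migration):

    dependencies = [('flow', '0005_previous')]  # hypothetical predecessor

    operations = [
        migrations.RunPython(calculate_total_size),
    ]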
projectatomic/osbs-client
osbs/utils.py
https://github.com/projectatomic/osbs-client/blob/571fe035dab3a7c02e1dccd5d65ffd75be750458/osbs/utils.py#L270-L314
def reset_git_repo(target_dir, git_reference, retry_depth=None):
    """
    hard reset git clone in target_dir to given git_reference

    :param target_dir: str, filesystem path where the repo is cloned
    :param git_reference: str, any valid git reference
    :param retry_depth: int, if the repo was cloned with --shallow, this is the expected
                        depth of the commit
    :return: str and int, commit ID of HEAD and commit depth of git_reference
    """
    deepen = retry_depth or 0
    base_commit_depth = 0
    for _ in range(GIT_FETCH_RETRY):
        try:
            if not deepen:
                cmd = ['git', 'rev-list', '--count', git_reference]
                base_commit_depth = int(subprocess.check_output(cmd, cwd=target_dir)) - 1
            cmd = ["git", "reset", "--hard", git_reference]
            logger.debug("Resetting current HEAD: '%s'", cmd)
            subprocess.check_call(cmd, cwd=target_dir)
            break
        except subprocess.CalledProcessError:
            if not deepen:
                raise OsbsException('cannot find commit %s in repo %s' %
                                    (git_reference, target_dir))
            deepen *= 2
            cmd = ["git", "fetch", "--depth", str(deepen)]
            subprocess.check_call(cmd, cwd=target_dir)
            logger.debug("Couldn't find commit %s, increasing depth with '%s'",
                         git_reference, cmd)
    else:
        raise OsbsException('cannot find commit %s in repo %s' %
                            (git_reference, target_dir))

    cmd = ["git", "rev-parse", "HEAD"]
    logger.debug("getting SHA-1 of provided ref '%s'", git_reference)
    commit_id = subprocess.check_output(cmd, cwd=target_dir, universal_newlines=True)
    commit_id = commit_id.strip()
    logger.info("commit ID = %s", commit_id)

    final_commit_depth = None
    if not deepen:
        cmd = ['git', 'rev-list', '--count', 'HEAD']
        final_commit_depth = int(subprocess.check_output(cmd, cwd=target_dir)) - base_commit_depth

    return commit_id, final_commit_depth
[ "def", "reset_git_repo", "(", "target_dir", ",", "git_reference", ",", "retry_depth", "=", "None", ")", ":", "deepen", "=", "retry_depth", "or", "0", "base_commit_depth", "=", "0", "for", "_", "in", "range", "(", "GIT_FETCH_RETRY", ")", ":", "try", ":", "if", "not", "deepen", ":", "cmd", "=", "[", "'git'", ",", "'rev-list'", ",", "'--count'", ",", "git_reference", "]", "base_commit_depth", "=", "int", "(", "subprocess", ".", "check_output", "(", "cmd", ",", "cwd", "=", "target_dir", ")", ")", "-", "1", "cmd", "=", "[", "\"git\"", ",", "\"reset\"", ",", "\"--hard\"", ",", "git_reference", "]", "logger", ".", "debug", "(", "\"Resetting current HEAD: '%s'\"", ",", "cmd", ")", "subprocess", ".", "check_call", "(", "cmd", ",", "cwd", "=", "target_dir", ")", "break", "except", "subprocess", ".", "CalledProcessError", ":", "if", "not", "deepen", ":", "raise", "OsbsException", "(", "'cannot find commit %s in repo %s'", "%", "(", "git_reference", ",", "target_dir", ")", ")", "deepen", "*=", "2", "cmd", "=", "[", "\"git\"", ",", "\"fetch\"", ",", "\"--depth\"", ",", "str", "(", "deepen", ")", "]", "subprocess", ".", "check_call", "(", "cmd", ",", "cwd", "=", "target_dir", ")", "logger", ".", "debug", "(", "\"Couldn't find commit %s, increasing depth with '%s'\"", ",", "git_reference", ",", "cmd", ")", "else", ":", "raise", "OsbsException", "(", "'cannot find commit %s in repo %s'", "%", "(", "git_reference", ",", "target_dir", ")", ")", "cmd", "=", "[", "\"git\"", ",", "\"rev-parse\"", ",", "\"HEAD\"", "]", "logger", ".", "debug", "(", "\"getting SHA-1 of provided ref '%s'\"", ",", "git_reference", ")", "commit_id", "=", "subprocess", ".", "check_output", "(", "cmd", ",", "cwd", "=", "target_dir", ",", "universal_newlines", "=", "True", ")", "commit_id", "=", "commit_id", ".", "strip", "(", ")", "logger", ".", "info", "(", "\"commit ID = %s\"", ",", "commit_id", ")", "final_commit_depth", "=", "None", "if", "not", "deepen", ":", "cmd", "=", "[", "'git'", ",", "'rev-list'", ",", "'--count'", ",", "'HEAD'", "]", "final_commit_depth", "=", "int", "(", "subprocess", ".", "check_output", "(", "cmd", ",", "cwd", "=", "target_dir", ")", ")", "-", "base_commit_depth", "return", "commit_id", ",", "final_commit_depth" ]
hard reset git clone in target_dir to given git_reference

:param target_dir: str, filesystem path where the repo is cloned
:param git_reference: str, any valid git reference
:param retry_depth: int, if the repo was cloned with --shallow, this is the expected
                    depth of the commit
:return: str and int, commit ID of HEAD and commit depth of git_reference
[ "hard", "reset", "git", "clone", "in", "target_dir", "to", "given", "git_reference" ]
python
train
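The except branch doubles the fetch depth on each failed reset; a toy trace of the deepening schedule (pure arithmetic, no git involved):

deepen, GIT_FETCH_RETRY = 1, 6  # as if called with retry_depth=1 on a shallow clone
depths = []
for _ in range(GIT_FETCH_RETRY):
    deepen *= 2
    depths.append(deepen)
print(depths)  # [2, 4, 8, 16, 32, 64] -- each retry fetches twice as deep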
gopalkoduri/pypeaks
pypeaks/intervals.py
https://github.com/gopalkoduri/pypeaks/blob/59b1e4153e80c6a4c523dda241cc1713fd66161e/pypeaks/intervals.py#L23-L32
def next_interval(self, interval):
    """
    Given a value of an interval, this function returns the
    next interval value
    """
    index = np.where(self.intervals == interval)
    if index[0][0] + 1 < len(self.intervals):
        return self.intervals[index[0][0] + 1]
    else:
        raise IndexError("Ran out of intervals!")
[ "def", "next_interval", "(", "self", ",", "interval", ")", ":", "index", "=", "np", ".", "where", "(", "self", ".", "intervals", "==", "interval", ")", "if", "index", "[", "0", "]", "[", "0", "]", "+", "1", "<", "len", "(", "self", ".", "intervals", ")", ":", "return", "self", ".", "intervals", "[", "index", "[", "0", "]", "[", "0", "]", "+", "1", "]", "else", ":", "raise", "IndexError", "(", "\"Ran out of intervals!\"", ")" ]
Given a value of an interval, this function returns the next interval value
[ "Given", "a", "value", "of", "an", "interval", "this", "function", "returns", "the", "next", "interval", "value" ]
python
train
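A hedged usage sketch (assuming Intervals is constructed from a sorted numpy array, as in the pypeaks README):

import numpy as np
from pypeaks import Intervals

ji = Intervals(np.array([100, 200, 300]))
print(ji.next_interval(200))  # 300
ji.next_interval(300)         # raises IndexError: Ran out of intervals!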
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/setuptools/command/egg_info.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/setuptools/command/egg_info.py#L332-L354
def _add_egg_info(self, cmd):
    """
    Add paths for egg-info files for an external egg-base.

    The egg-info files are written to egg-base. If egg-base is
    outside the current working directory, this method searches the
    egg-base directory for files to include in the manifest. Uses
    distutils.filelist.findall (which is really the version
    monkeypatched in by setuptools/__init__.py) to perform the search.

    Since findall records relative paths, prefix the returned
    paths with cmd.egg_base, so add_default's include_pattern call
    (which is looking for the absolute cmd.egg_info) will match
    them.
    """
    if cmd.egg_base == os.curdir:
        # egg-info files were already added by something else
        return

    discovered = distutils.filelist.findall(cmd.egg_base)
    resolved = (os.path.join(cmd.egg_base, path) for path in discovered)
    self.filelist.allfiles.extend(resolved)
[ "def", "_add_egg_info", "(", "self", ",", "cmd", ")", ":", "if", "cmd", ".", "egg_base", "==", "os", ".", "curdir", ":", "# egg-info files were already added by something else", "return", "discovered", "=", "distutils", ".", "filelist", ".", "findall", "(", "cmd", ".", "egg_base", ")", "resolved", "=", "(", "os", ".", "path", ".", "join", "(", "cmd", ".", "egg_base", ",", "path", ")", "for", "path", "in", "discovered", ")", "self", ".", "filelist", ".", "allfiles", ".", "extend", "(", "resolved", ")" ]
Add paths for egg-info files for an external egg-base.

The egg-info files are written to egg-base. If egg-base is
outside the current working directory, this method searches the
egg-base directory for files to include in the manifest. Uses
distutils.filelist.findall (which is really the version
monkeypatched in by setuptools/__init__.py) to perform the search.

Since findall records relative paths, prefix the returned
paths with cmd.egg_base, so add_default's include_pattern call
(which is looking for the absolute cmd.egg_info) will match
them.
[ "Add", "paths", "for", "egg", "-", "info", "files", "for", "an", "external", "egg", "-", "base", "." ]
python
test
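The prefixing step in isolation (a sketch with a hypothetical external egg-base directory):

import os
import distutils.filelist

egg_base = 'build/egg-base'  # hypothetical external egg-base
discovered = distutils.filelist.findall(egg_base)           # findall records relative paths
resolved = [os.path.join(egg_base, p) for p in discovered]  # prefix so include_pattern matches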
saltstack/salt
salt/modules/redismod.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/redismod.py#L382-L399
def hmset(key, **fieldsvals):
    '''
    Sets multiple hash fields to multiple values.

    .. versionadded:: 2017.7.0

    CLI Example:

    .. code-block:: bash

        salt '*' redis.hmset foo_hash bar_field1=bar_value1 bar_field2=bar_value2
    '''
    host = fieldsvals.pop('host', None)
    port = fieldsvals.pop('port', None)
    database = fieldsvals.pop('db', None)
    password = fieldsvals.pop('password', None)
    server = _connect(host, port, database, password)
    return server.hmset(key, salt.utils.args.clean_kwargs(**fieldsvals))
[ "def", "hmset", "(", "key", ",", "*", "*", "fieldsvals", ")", ":", "host", "=", "fieldsvals", ".", "pop", "(", "'host'", ",", "None", ")", "port", "=", "fieldsvals", ".", "pop", "(", "'port'", ",", "None", ")", "database", "=", "fieldsvals", ".", "pop", "(", "'db'", ",", "None", ")", "password", "=", "fieldsvals", ".", "pop", "(", "'password'", ",", "None", ")", "server", "=", "_connect", "(", "host", ",", "port", ",", "database", ",", "password", ")", "return", "server", ".", "hmset", "(", "key", ",", "salt", ".", "utils", ".", "args", ".", "clean_kwargs", "(", "*", "*", "fieldsvals", ")", ")" ]
Sets multiple hash fields to multiple values.

.. versionadded:: 2017.7.0

CLI Example:

.. code-block:: bash

    salt '*' redis.hmset foo_hash bar_field1=bar_value1 bar_field2=bar_value2
[ "Sets", "multiple", "hash", "fields", "to", "multiple", "values", "." ]
python
train
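Once the connection kwargs are popped, the remaining fields are passed straight through; the equivalent plain redis-py call (hypothetical connection values; note hmset is deprecated in newer redis-py in favor of hset with mapping=):

import redis

server = redis.StrictRedis(host='127.0.0.1', port=6379, db=0)
server.hmset('foo_hash', {'bar_field1': 'bar_value1', 'bar_field2': 'bar_value2'})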
kaniblu/pytorch-text-utils
torchtextutils/vocab.py
https://github.com/kaniblu/pytorch-text-utils/blob/ab26b88b3e1ed8e777abf32dbfab900399e0cf08/torchtextutils/vocab.py#L54-L65
def reconstruct_indices(self):
    """
    Reconstruct word indices in case of word removals.
    Vocabulary does not handle empty indices when words are removed,
    hence it needs to be told explicitly when to reconstruct them.
    """
    del self.i2f, self.f2i
    self.f2i, self.i2f = {}, {}

    for i, w in enumerate(self.words):
        self.f2i[w] = i
        self.i2f[i] = w
[ "def", "reconstruct_indices", "(", "self", ")", ":", "del", "self", ".", "i2f", ",", "self", ".", "f2i", "self", ".", "f2i", ",", "self", ".", "i2f", "=", "{", "}", ",", "{", "}", "for", "i", ",", "w", "in", "enumerate", "(", "self", ".", "words", ")", ":", "self", ".", "f2i", "[", "w", "]", "=", "i", "self", ".", "i2f", "[", "i", "]", "=", "w" ]
Reconstruct word indices in case of word removals.
Vocabulary does not handle empty indices when words are removed,
hence it needs to be told explicitly when to reconstruct them.
[ "Reconstruct", "word", "indices", "in", "case", "of", "word", "removals", ".", "Vocabulary", "does", "not", "handle", "empty", "indices", "when", "words", "are", "removed", "hence", "it", "needs", "to", "be", "told", "explicitly", "when", "to", "reconstruct", "them", "." ]
python
train
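The effect of the rebuild in isolation: removing a word leaves stale indices, and re-enumeration compacts them. A standalone sketch of the same two dicts:

words = ['<pad>', 'hello', 'world']
words.remove('hello')  # index of 'world' is now stale

f2i, i2f = {}, {}
for i, w in enumerate(words):
    f2i[w] = i
    i2f[i] = w
print(f2i)  # {'<pad>': 0, 'world': 1} -- 'world' compacted from index 2 to 1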
airspeed-velocity/asv
asv/extern/asizeof.py
https://github.com/airspeed-velocity/asv/blob/d23bb8b74e8adacbfa3cf5724bda55fb39d56ba6/asv/extern/asizeof.py#L529-L540
def _repr(obj, clip=80):
    '''Clip long repr() string.
    '''
    try:  # safe repr()
        r = repr(obj)
    except TypeError:
        r = 'N/A'
    if 0 < clip < len(r):
        h = (clip // 2) - 2
        if h > 0:
            r = r[:h] + '....' + r[-h:]
    return r
[ "def", "_repr", "(", "obj", ",", "clip", "=", "80", ")", ":", "try", ":", "# safe repr()", "r", "=", "repr", "(", "obj", ")", "except", "TypeError", ":", "r", "=", "'N/A'", "if", "0", "<", "clip", "<", "len", "(", "r", ")", ":", "h", "=", "(", "clip", "//", "2", ")", "-", "2", "if", "h", ">", "0", ":", "r", "=", "r", "[", ":", "h", "]", "+", "'....'", "+", "r", "[", "-", "h", ":", "]", "return", "r" ]
Clip long repr() string.
[ "Clip", "long", "repr", "()", "string", "." ]
python
train
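For example, clipping keeps the head and tail of the repr around a '....' marker:

print(_repr(list(range(100)), clip=20))  # '[0, 1, 2.... 98, 99]' -- exactly 20 chars
print(_repr(object(), clip=0))           # clip <= 0 leaves the repr unclipped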
awslabs/sockeye
sockeye/encoder.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/encoder.py#L197-L220
def get_transformer_encoder(config: transformer.TransformerConfig, prefix: str) -> 'Encoder':
    """
    Returns a Transformer encoder, consisting of an embedding layer with
    positional encodings and a TransformerEncoder instance.

    :param config: Configuration for transformer encoder.
    :param prefix: Prefix for variable names.
    :return: Encoder instance.
    """
    encoder_seq = EncoderSequence([], dtype=config.dtype)
    cls, encoder_params = _get_positional_embedding_params(config.positional_embedding_type,
                                                           config.model_size,
                                                           config.max_seq_len_source,
                                                           fixed_pos_embed_scale_up_input=True,
                                                           fixed_pos_embed_scale_down_positions=False,
                                                           prefix=prefix + C.SOURCE_POSITIONAL_EMBEDDING_PREFIX)
    encoder_seq.append(cls, **encoder_params)
    if config.conv_config is not None:
        encoder_seq.append(ConvolutionalEmbeddingEncoder, config=config.conv_config,
                           prefix=prefix + C.CHAR_SEQ_ENCODER_PREFIX)
    encoder_seq.append(TransformerEncoder, config=config, prefix=prefix + C.TRANSFORMER_ENCODER_PREFIX)

    return encoder_seq
[ "def", "get_transformer_encoder", "(", "config", ":", "transformer", ".", "TransformerConfig", ",", "prefix", ":", "str", ")", "->", "'Encoder'", ":", "encoder_seq", "=", "EncoderSequence", "(", "[", "]", ",", "dtype", "=", "config", ".", "dtype", ")", "cls", ",", "encoder_params", "=", "_get_positional_embedding_params", "(", "config", ".", "positional_embedding_type", ",", "config", ".", "model_size", ",", "config", ".", "max_seq_len_source", ",", "fixed_pos_embed_scale_up_input", "=", "True", ",", "fixed_pos_embed_scale_down_positions", "=", "False", ",", "prefix", "=", "prefix", "+", "C", ".", "SOURCE_POSITIONAL_EMBEDDING_PREFIX", ")", "encoder_seq", ".", "append", "(", "cls", ",", "*", "*", "encoder_params", ")", "if", "config", ".", "conv_config", "is", "not", "None", ":", "encoder_seq", ".", "append", "(", "ConvolutionalEmbeddingEncoder", ",", "config", "=", "config", ".", "conv_config", ",", "prefix", "=", "prefix", "+", "C", ".", "CHAR_SEQ_ENCODER_PREFIX", ")", "encoder_seq", ".", "append", "(", "TransformerEncoder", ",", "config", "=", "config", ",", "prefix", "=", "prefix", "+", "C", ".", "TRANSFORMER_ENCODER_PREFIX", ")", "return", "encoder_seq" ]
Returns a Transformer encoder, consisting of an embedding layer with positional encodings and a TransformerEncoder instance. :param config: Configuration for transformer encoder. :param prefix: Prefix for variable names. :return: Encoder instance.
[ "Returns", "a", "Transformer", "encoder", "consisting", "of", "an", "embedding", "layer", "with", "positional", "encodings", "and", "a", "TransformerEncoder", "instance", "." ]
python
train
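A hedged construction sketch for `get_transformer_encoder`; `config` is assumed to be a fully populated `transformer.TransformerConfig` built elsewhere, and the prefix string is illustrative:

from sockeye.encoder import get_transformer_encoder

# config is an assumed, previously built transformer.TransformerConfig.
encoder = get_transformer_encoder(config, prefix='encoder_')
# The result is an EncoderSequence: positional embeddings, an optional
# convolutional character encoder, then the TransformerEncoder itself.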
kkroening/ffmpeg-python
ffmpeg/_probe.py
https://github.com/kkroening/ffmpeg-python/blob/ac111dc3a976ddbb872bc7d6d4fe24a267c1a956/ffmpeg/_probe.py#L7-L24
def probe(filename, cmd='ffprobe', **kwargs): """Run ffprobe on the specified file and return a JSON representation of the output. Raises: :class:`ffmpeg.Error`: if ffprobe returns a non-zero exit code, an :class:`Error` is raised with a generic error message. The stderr output can be retrieved by accessing the ``stderr`` property of the exception. """ args = [cmd, '-show_format', '-show_streams', '-of', 'json'] args += convert_kwargs_to_cmd_line_args(kwargs) args += [filename] p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() if p.returncode != 0: raise Error('ffprobe', out, err) return json.loads(out.decode('utf-8'))
[ "def", "probe", "(", "filename", ",", "cmd", "=", "'ffprobe'", ",", "*", "*", "kwargs", ")", ":", "args", "=", "[", "cmd", ",", "'-show_format'", ",", "'-show_streams'", ",", "'-of'", ",", "'json'", "]", "args", "+=", "convert_kwargs_to_cmd_line_args", "(", "kwargs", ")", "args", "+=", "[", "filename", "]", "p", "=", "subprocess", ".", "Popen", "(", "args", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "out", ",", "err", "=", "p", ".", "communicate", "(", ")", "if", "p", ".", "returncode", "!=", "0", ":", "raise", "Error", "(", "'ffprobe'", ",", "out", ",", "err", ")", "return", "json", ".", "loads", "(", "out", ".", "decode", "(", "'utf-8'", ")", ")" ]
Run ffprobe on the specified file and return a JSON representation of the output. Raises: :class:`ffmpeg.Error`: if ffprobe returns a non-zero exit code, an :class:`Error` is raised with a generic error message. The stderr output can be retrieved by accessing the ``stderr`` property of the exception.
[ "Run", "ffprobe", "on", "the", "specified", "file", "and", "return", "a", "JSON", "representation", "of", "the", "output", "." ]
python
train
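A short usage sketch for `probe`; the input path is illustrative:

import ffmpeg

info = ffmpeg.probe('input.mp4')  # raises ffmpeg.Error on a non-zero exit
duration = float(info['format']['duration'])
video = next(s for s in info['streams'] if s['codec_type'] == 'video')
print(duration, video['width'], video['height'])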
marcelcaraciolo/foursquare
pyfoursquare/foursquare.py
https://github.com/marcelcaraciolo/foursquare/blob/a8bda33cc2d61e25aa8df72011246269fd98aa13/pyfoursquare/foursquare.py#L208-L218
def get_authorization_url(self): """Get the authorization URL to redirect the user""" url = self._get_oauth_url('authenticate') query = { 'client_id': self._client_id, 'response_type': 'code', 'redirect_uri': self.callback } query_str = self.urlencode(query) return url + '?' + query_str
[ "def", "get_authorization_url", "(", "self", ")", ":", "url", "=", "self", ".", "_get_oauth_url", "(", "'authenticate'", ")", "query", "=", "{", "'client_id'", ":", "self", ".", "_client_id", ",", "'response_type'", ":", "'code'", ",", "'redirect_uri'", ":", "self", ".", "callback", "}", "query_str", "=", "self", ".", "urlencode", "(", "query", ")", "return", "url", "+", "'?'", "+", "query_str" ]
Get the authorization URL to redirect the user
[ "Get", "the", "authorization", "URL", "to", "redirect", "the", "user" ]
python
train
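A hedged sketch of the redirect step; `OAuthHandler` and its constructor arguments are assumptions based on this module's tweepy-style API, and the credentials are placeholders:

from pyfoursquare import foursquare

# OAuthHandler and its keyword arguments are assumed from this module's API.
auth = foursquare.OAuthHandler(client_id='CLIENT_ID',
                               client_secret='CLIENT_SECRET',
                               callback='https://example.com/callback')
print(auth.get_authorization_url())  # send the user to this URL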
iotile/coretools
iotilecore/iotile/core/dev/registry.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/dev/registry.py#L52-L58
def kvstore(self): """Lazily load the underlying key-value store backing this registry.""" if self._kvstore is None: self._kvstore = self.BackingType(self.BackingFileName, respect_venv=True) return self._kvstore
[ "def", "kvstore", "(", "self", ")", ":", "if", "self", ".", "_kvstore", "is", "None", ":", "self", ".", "_kvstore", "=", "self", ".", "BackingType", "(", "self", ".", "BackingFileName", ",", "respect_venv", "=", "True", ")", "return", "self", ".", "_kvstore" ]
Lazily load the underlying key-value store backing this registry.
[ "Lazily", "load", "the", "underlying", "key", "-", "value", "store", "backing", "this", "registry", "." ]
python
train
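`kvstore` is a lazy-initialization property: the backing store is built on first access and cached thereafter. A minimal generic sketch of the same pattern (names are illustrative):

class Registry:
    BackingFileName = 'registry.json'

    def __init__(self, backing_type):
        self.BackingType = backing_type
        self._kvstore = None

    @property
    def kvstore(self):
        # Construct the store once, on first access, then reuse it.
        if self._kvstore is None:
            self._kvstore = self.BackingType(self.BackingFileName)
        return self._kvstore

reg = Registry(dict.fromkeys)      # any callable taking the file name works
assert reg.kvstore is reg.kvstore  # same object on every access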
ga4gh/ga4gh-server
ga4gh/server/datarepo.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datarepo.py#L1217-L1237
def insertBiosample(self, biosample): """ Inserts the specified Biosample into this repository. """ try: models.Biosample.create( id=biosample.getId(), datasetid=biosample.getParentContainer().getId(), name=biosample.getLocalId(), description=biosample.getDescription(), disease=json.dumps(biosample.getDisease()), created=biosample.getCreated(), updated=biosample.getUpdated(), individualid=biosample.getIndividualId(), attributes=json.dumps(biosample.getAttributes()), individualAgeAtCollection=json.dumps( biosample.getIndividualAgeAtCollection())) except Exception: raise exceptions.DuplicateNameException( biosample.getLocalId(), biosample.getParentContainer().getLocalId())
[ "def", "insertBiosample", "(", "self", ",", "biosample", ")", ":", "try", ":", "models", ".", "Biosample", ".", "create", "(", "id", "=", "biosample", ".", "getId", "(", ")", ",", "datasetid", "=", "biosample", ".", "getParentContainer", "(", ")", ".", "getId", "(", ")", ",", "name", "=", "biosample", ".", "getLocalId", "(", ")", ",", "description", "=", "biosample", ".", "getDescription", "(", ")", ",", "disease", "=", "json", ".", "dumps", "(", "biosample", ".", "getDisease", "(", ")", ")", ",", "created", "=", "biosample", ".", "getCreated", "(", ")", ",", "updated", "=", "biosample", ".", "getUpdated", "(", ")", ",", "individualid", "=", "biosample", ".", "getIndividualId", "(", ")", ",", "attributes", "=", "json", ".", "dumps", "(", "biosample", ".", "getAttributes", "(", ")", ")", ",", "individualAgeAtCollection", "=", "json", ".", "dumps", "(", "biosample", ".", "getIndividualAgeAtCollection", "(", ")", ")", ")", "except", "Exception", ":", "raise", "exceptions", ".", "DuplicateNameException", "(", "biosample", ".", "getLocalId", "(", ")", ",", "biosample", ".", "getParentContainer", "(", ")", ".", "getLocalId", "(", ")", ")" ]
Inserts the specified Biosample into this repository.
[ "Inserts", "the", "specified", "Biosample", "into", "this", "repository", "." ]
python
train
wakatime/wakatime
wakatime/stats.py
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/stats.py#L288-L301
def get_file_head(file_name): """Returns the first 512000 bytes of the file's contents.""" text = None try: with open(file_name, 'r', encoding='utf-8') as fh: text = fh.read(512000) except: try: with open(file_name, 'r', encoding=sys.getfilesystemencoding()) as fh: text = fh.read(512000) # pragma: nocover except: log.traceback(logging.DEBUG) return text
[ "def", "get_file_head", "(", "file_name", ")", ":", "text", "=", "None", "try", ":", "with", "open", "(", "file_name", ",", "'r'", ",", "encoding", "=", "'utf-8'", ")", "as", "fh", ":", "text", "=", "fh", ".", "read", "(", "512000", ")", "except", ":", "try", ":", "with", "open", "(", "file_name", ",", "'r'", ",", "encoding", "=", "sys", ".", "getfilesystemencoding", "(", ")", ")", "as", "fh", ":", "text", "=", "fh", ".", "read", "(", "512000", ")", "# pragma: nocover", "except", ":", "log", ".", "traceback", "(", "logging", ".", "DEBUG", ")", "return", "text" ]
Returns the first 512000 bytes of the file's contents.
[ "Returns", "the", "first", "512000", "bytes", "of", "the", "file", "s", "contents", "." ]
python
train
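A usage sketch for `get_file_head`; the path is illustrative:

from wakatime.stats import get_file_head

head = get_file_head('setup.py')
if head is not None:  # None means both decode attempts failed
    print(len(head), head[:80])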
googleapis/google-cloud-python
securitycenter/google/cloud/securitycenter_v1/gapic/security_center_client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/securitycenter/google/cloud/securitycenter_v1/gapic/security_center_client.py#L122-L128
def source_path(cls, organization, source): """Return a fully-qualified source string.""" return google.api_core.path_template.expand( "organizations/{organization}/sources/{source}", organization=organization, source=source, )
[ "def", "source_path", "(", "cls", ",", "organization", ",", "source", ")", ":", "return", "google", ".", "api_core", ".", "path_template", ".", "expand", "(", "\"organizations/{organization}/sources/{source}\"", ",", "organization", "=", "organization", ",", "source", "=", "source", ",", ")" ]
Return a fully-qualified source string.
[ "Return", "a", "fully", "-", "qualified", "source", "string", "." ]
python
train
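Because `source_path` is a classmethod, the template expansion can be checked without credentials; the identifiers are illustrative:

from google.cloud import securitycenter_v1

path = securitycenter_v1.SecurityCenterClient.source_path('my-org', '1234')
print(path)  # organizations/my-org/sources/1234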
optimizely/python-sdk
optimizely/project_config.py
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/project_config.py#L478-L497
def get_variable_for_feature(self, feature_key, variable_key): """ Get the variable with the given variable key for the given feature. Args: feature_key: The key of the feature for which we are getting the variable. variable_key: The key of the variable we are getting. Returns: Variable with the given key in the given variation. """ feature = self.feature_key_map.get(feature_key) if not feature: self.logger.error('Feature with key "%s" not found in the datafile.' % feature_key) return None if variable_key not in feature.variables: self.logger.error('Variable with key "%s" not found in the datafile.' % variable_key) return None return feature.variables.get(variable_key)
[ "def", "get_variable_for_feature", "(", "self", ",", "feature_key", ",", "variable_key", ")", ":", "feature", "=", "self", ".", "feature_key_map", ".", "get", "(", "feature_key", ")", "if", "not", "feature", ":", "self", ".", "logger", ".", "error", "(", "'Feature with key \"%s\" not found in the datafile.'", "%", "feature_key", ")", "return", "None", "if", "variable_key", "not", "in", "feature", ".", "variables", ":", "self", ".", "logger", ".", "error", "(", "'Variable with key \"%s\" not found in the datafile.'", "%", "variable_key", ")", "return", "None", "return", "feature", ".", "variables", ".", "get", "(", "variable_key", ")" ]
Get the variable with the given variable key for the given feature. Args: feature_key: The key of the feature for which we are getting the variable. variable_key: The key of the variable we are getting. Returns: Variable with the given key in the given variation.
[ "Get", "the", "variable", "with", "the", "given", "variable", "key", "for", "the", "given", "feature", "." ]
python
train
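A hedged lookup sketch; `config` is assumed to be an `optimizely.project_config.ProjectConfig` built from a datafile, and both keys are illustrative:

# 'checkout_flow' and 'button_color' are illustrative keys.
variable = config.get_variable_for_feature('checkout_flow', 'button_color')
if variable is None:
    # The reason (missing feature vs. missing variable) was already logged.
    print('unknown feature or variable key')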
saltstack/salt
salt/states/cron.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/cron.py#L257-L383
def present(name, user='root', minute='*', hour='*', daymonth='*', month='*', dayweek='*', comment=None, commented=False, identifier=False, special=None): ''' Verifies that the specified cron job is present for the specified user. It is recommended to use `identifier`. Otherwise the cron job is installed twice if you change the name. For more advanced information about what exactly can be set in the cron timing parameters, check your cron system's documentation. Most Unix-like systems' cron documentation can be found via the crontab man page: ``man 5 crontab``. name The command that should be executed by the cron job. user The name of the user whose crontab needs to be modified, defaults to the root user minute The information to be set into the minute section, this can be any string supported by your cron system's the minute field. Default is ``*`` hour The information to be set in the hour section. Default is ``*`` daymonth The information to be set in the day of month section. Default is ``*`` month The information to be set in the month section. Default is ``*`` dayweek The information to be set in the day of week section. Default is ``*`` comment User comment to be added on line previous the cron job commented The cron job is set commented (prefixed with ``#DISABLED#``). Defaults to False. .. versionadded:: 2016.3.0 identifier Custom-defined identifier for tracking the cron line for future crontab edits. This defaults to the state name special A special keyword to specify periodicity (eg. @reboot, @hourly...). Quotes must be used, otherwise PyYAML will strip the '@' sign. .. versionadded:: 2016.3.0 ''' name = name.strip() if identifier is False: identifier = name ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} if __opts__['test']: status = _check_cron(user, cmd=name, minute=minute, hour=hour, daymonth=daymonth, month=month, dayweek=dayweek, comment=comment, commented=commented, identifier=identifier, special=special) ret['result'] = None if status == 'absent': ret['comment'] = 'Cron {0} is set to be added'.format(name) elif status == 'present': ret['result'] = True ret['comment'] = 'Cron {0} already present'.format(name) elif status == 'update': ret['comment'] = 'Cron {0} is set to be updated'.format(name) return ret if special is None: data = __salt__['cron.set_job'](user=user, minute=minute, hour=hour, daymonth=daymonth, month=month, dayweek=dayweek, cmd=name, comment=comment, commented=commented, identifier=identifier) else: data = __salt__['cron.set_special'](user=user, special=special, cmd=name, comment=comment, commented=commented, identifier=identifier) if data == 'present': ret['comment'] = 'Cron {0} already present'.format(name) return ret if data == 'new': ret['comment'] = 'Cron {0} added to {1}\'s crontab'.format(name, user) ret['changes'] = {user: name} return ret if data == 'updated': ret['comment'] = 'Cron {0} updated'.format(name) ret['changes'] = {user: name} return ret ret['comment'] = ('Cron {0} for user {1} failed to commit with error \n{2}' .format(name, user, data)) ret['result'] = False return ret
[ "def", "present", "(", "name", ",", "user", "=", "'root'", ",", "minute", "=", "'*'", ",", "hour", "=", "'*'", ",", "daymonth", "=", "'*'", ",", "month", "=", "'*'", ",", "dayweek", "=", "'*'", ",", "comment", "=", "None", ",", "commented", "=", "False", ",", "identifier", "=", "False", ",", "special", "=", "None", ")", ":", "name", "=", "name", ".", "strip", "(", ")", "if", "identifier", "is", "False", ":", "identifier", "=", "name", "ret", "=", "{", "'changes'", ":", "{", "}", ",", "'comment'", ":", "''", ",", "'name'", ":", "name", ",", "'result'", ":", "True", "}", "if", "__opts__", "[", "'test'", "]", ":", "status", "=", "_check_cron", "(", "user", ",", "cmd", "=", "name", ",", "minute", "=", "minute", ",", "hour", "=", "hour", ",", "daymonth", "=", "daymonth", ",", "month", "=", "month", ",", "dayweek", "=", "dayweek", ",", "comment", "=", "comment", ",", "commented", "=", "commented", ",", "identifier", "=", "identifier", ",", "special", "=", "special", ")", "ret", "[", "'result'", "]", "=", "None", "if", "status", "==", "'absent'", ":", "ret", "[", "'comment'", "]", "=", "'Cron {0} is set to be added'", ".", "format", "(", "name", ")", "elif", "status", "==", "'present'", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'Cron {0} already present'", ".", "format", "(", "name", ")", "elif", "status", "==", "'update'", ":", "ret", "[", "'comment'", "]", "=", "'Cron {0} is set to be updated'", ".", "format", "(", "name", ")", "return", "ret", "if", "special", "is", "None", ":", "data", "=", "__salt__", "[", "'cron.set_job'", "]", "(", "user", "=", "user", ",", "minute", "=", "minute", ",", "hour", "=", "hour", ",", "daymonth", "=", "daymonth", ",", "month", "=", "month", ",", "dayweek", "=", "dayweek", ",", "cmd", "=", "name", ",", "comment", "=", "comment", ",", "commented", "=", "commented", ",", "identifier", "=", "identifier", ")", "else", ":", "data", "=", "__salt__", "[", "'cron.set_special'", "]", "(", "user", "=", "user", ",", "special", "=", "special", ",", "cmd", "=", "name", ",", "comment", "=", "comment", ",", "commented", "=", "commented", ",", "identifier", "=", "identifier", ")", "if", "data", "==", "'present'", ":", "ret", "[", "'comment'", "]", "=", "'Cron {0} already present'", ".", "format", "(", "name", ")", "return", "ret", "if", "data", "==", "'new'", ":", "ret", "[", "'comment'", "]", "=", "'Cron {0} added to {1}\\'s crontab'", ".", "format", "(", "name", ",", "user", ")", "ret", "[", "'changes'", "]", "=", "{", "user", ":", "name", "}", "return", "ret", "if", "data", "==", "'updated'", ":", "ret", "[", "'comment'", "]", "=", "'Cron {0} updated'", ".", "format", "(", "name", ")", "ret", "[", "'changes'", "]", "=", "{", "user", ":", "name", "}", "return", "ret", "ret", "[", "'comment'", "]", "=", "(", "'Cron {0} for user {1} failed to commit with error \\n{2}'", ".", "format", "(", "name", ",", "user", ",", "data", ")", ")", "ret", "[", "'result'", "]", "=", "False", "return", "ret" ]
Verifies that the specified cron job is present for the specified user. It is recommended to use `identifier`. Otherwise the cron job is installed twice if you change the name. For more advanced information about what exactly can be set in the cron timing parameters, check your cron system's documentation. Most Unix-like systems' cron documentation can be found via the crontab man page: ``man 5 crontab``. name The command that should be executed by the cron job. user The name of the user whose crontab needs to be modified, defaults to the root user minute The information to be set into the minute section, this can be any string supported by your cron system's minute field. Default is ``*`` hour The information to be set in the hour section. Default is ``*`` daymonth The information to be set in the day of month section. Default is ``*`` month The information to be set in the month section. Default is ``*`` dayweek The information to be set in the day of week section. Default is ``*`` comment User comment to be added on the line preceding the cron job commented The cron job is set commented (prefixed with ``#DISABLED#``). Defaults to False. .. versionadded:: 2016.3.0 identifier Custom-defined identifier for tracking the cron line for future crontab edits. This defaults to the state name special A special keyword to specify periodicity (eg. @reboot, @hourly...). Quotes must be used, otherwise PyYAML will strip the '@' sign. .. versionadded:: 2016.3.0
[ "Verifies", "that", "the", "specified", "cron", "job", "is", "present", "for", "the", "specified", "user", ".", "It", "is", "recommended", "to", "use", "identifier", ".", "Otherwise", "the", "cron", "job", "is", "installed", "twice", "if", "you", "change", "the", "name", ".", "For", "more", "advanced", "information", "about", "what", "exactly", "can", "be", "set", "in", "the", "cron", "timing", "parameters", "check", "your", "cron", "system", "s", "documentation", ".", "Most", "Unix", "-", "like", "systems", "cron", "documentation", "can", "be", "found", "via", "the", "crontab", "man", "page", ":", "man", "5", "crontab", "." ]
python
train
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/_backends/update_service/apis/default_api.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/update_service/apis/default_api.py#L2066-L2091
def upload_job_chunk_list(self, upload_job_id, **kwargs): # noqa: E501 """List all metadata for uploaded chunks # noqa: E501 List all metadata for uploaded chunks # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.upload_job_chunk_list(upload_job_id, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str upload_job_id: Upload job (required) :param int limit: How many metadata items for uploaded chunks to retrieve :param str order: ASC or DESC :param str after: The ID of the the item after which to retrieve the next page :param str include: A comma-separated list of data fields to return. Currently supported: total_count :param str filter: URL-encoded query string parameter to filter returned data `?filter={URL-encoded query string}` ###### Filterable fields: The table lists all the fields that can be filtered on with certain filters: <table> <thead> <tr> <th>Field</th> <th>= / __eq / __neq</th> <th>__in / __nin</th> <th>__lte / __gte</th> <tr> <thead> <tbody> <tr> <td>created_at</td> <td>✓</td> <td>✓</td> <td>✓</td> </tr> <tr> <td>etag</td> <td>✓</td> <td>✓</td> <td>✓</td> </tr> <tr> <td>id</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr> <tr> <td>updated_at</td> <td>✓</td> <td>✓</td> <td>✓</td> </tr> <tr> <td>status</td> <td>✓</td> <td>✓</td> </tr> <tr> <td>hash</td> <td>✓</td> <td>✓</td> </tr> <tr> <td>length</td> <td>✓</td> <td>✓</td> <td>✓</td> </tr> </tbody> </table> &nbsp; The query string is made up of key-value pairs separated by ampersands. For example, this query: `key1=value1&key2=value2&key3=value3` would be URL-encoded as: `?filter=key1__eq%3Dvalue1%26key2__eq%3Dvalue2%26key3__eq%3Dvalue3` **Filtering by properties** `status__eq=in_progress` **Filtering on date-time fields** Date-time fields should be specified in UTC RFC3339 format, `YYYY-MM-DDThh:mm:ss.msZ`. There are three permitted variations: * UTC RFC3339 with milliseconds. Example: `2016-11-30T16:25:12.1234Z` * UTC RFC3339 without milliseconds. Example: `2016-11-30T16:25:12Z` * UTC RFC3339 shortened without milliseconds and punctuation. Example: `20161130T162512Z` Date-time filtering supports three operators: * equality by appending `__eq` to the field name * greater than or equal to by appending `__gte` to the field name * less than or equal to by appending `__lte` to the field name `{field name}[|__eq|__lte|__gte]={UTC RFC3339 date-time}` Time ranges may be specified by including both the `__gte` and `__lte` forms in the filter. For example: `created_at__gte=2016-11-30T16:25:12.1234Z&created_at__lte=2016-12-30T00:00:00Z` **Filtering on multiple fields** `status__eq=in_progress&created_at__gte=2016-11-30T16:25:12.1234Z&created_at__lte=2016-12-30T00:00:00Z` **Filtering with filter operators** String field filtering supports the following operators: * equality: `__eq` * non-equality: `__neq` * in : `__in` * not in: `__nin` For `__in` and `__nin` filters list of parameters must be comma-separated: `status__in=in_progress,success` :return: UploadChunkInfoPage If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('asynchronous'): return self.upload_job_chunk_list_with_http_info(upload_job_id, **kwargs) # noqa: E501 else: (data) = self.upload_job_chunk_list_with_http_info(upload_job_id, **kwargs) # noqa: E501 return data
[ "def", "upload_job_chunk_list", "(", "self", ",", "upload_job_id", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'asynchronous'", ")", ":", "return", "self", ".", "upload_job_chunk_list_with_http_info", "(", "upload_job_id", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "upload_job_chunk_list_with_http_info", "(", "upload_job_id", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
List all metadata for uploaded chunks # noqa: E501 List all metadata for uploaded chunks # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.upload_job_chunk_list(upload_job_id, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str upload_job_id: Upload job (required) :param int limit: How many metadata items for uploaded chunks to retrieve :param str order: ASC or DESC :param str after: The ID of the the item after which to retrieve the next page :param str include: A comma-separated list of data fields to return. Currently supported: total_count :param str filter: URL-encoded query string parameter to filter returned data `?filter={URL-encoded query string}` ###### Filterable fields: The table lists all the fields that can be filtered on with certain filters: <table> <thead> <tr> <th>Field</th> <th>= / __eq / __neq</th> <th>__in / __nin</th> <th>__lte / __gte</th> <tr> <thead> <tbody> <tr> <td>created_at</td> <td>✓</td> <td>✓</td> <td>✓</td> </tr> <tr> <td>etag</td> <td>✓</td> <td>✓</td> <td>✓</td> </tr> <tr> <td>id</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr> <tr> <td>updated_at</td> <td>✓</td> <td>✓</td> <td>✓</td> </tr> <tr> <td>status</td> <td>✓</td> <td>✓</td> </tr> <tr> <td>hash</td> <td>✓</td> <td>✓</td> </tr> <tr> <td>length</td> <td>✓</td> <td>✓</td> <td>✓</td> </tr> </tbody> </table> &nbsp; The query string is made up of key-value pairs separated by ampersands. For example, this query: `key1=value1&key2=value2&key3=value3` would be URL-encoded as: `?filter=key1__eq%3Dvalue1%26key2__eq%3Dvalue2%26key3__eq%3Dvalue3` **Filtering by properties** `status__eq=in_progress` **Filtering on date-time fields** Date-time fields should be specified in UTC RFC3339 format, `YYYY-MM-DDThh:mm:ss.msZ`. There are three permitted variations: * UTC RFC3339 with milliseconds. Example: `2016-11-30T16:25:12.1234Z` * UTC RFC3339 without milliseconds. Example: `2016-11-30T16:25:12Z` * UTC RFC3339 shortened without milliseconds and punctuation. Example: `20161130T162512Z` Date-time filtering supports three operators: * equality by appending `__eq` to the field name * greater than or equal to by appending `__gte` to the field name * less than or equal to by appending `__lte` to the field name `{field name}[|__eq|__lte|__gte]={UTC RFC3339 date-time}` Time ranges may be specified by including both the `__gte` and `__lte` forms in the filter. For example: `created_at__gte=2016-11-30T16:25:12.1234Z&created_at__lte=2016-12-30T00:00:00Z` **Filtering on multiple fields** `status__eq=in_progress&created_at__gte=2016-11-30T16:25:12.1234Z&created_at__lte=2016-12-30T00:00:00Z` **Filtering with filter operators** String field filtering supports the following operators: * equality: `__eq` * non-equality: `__neq` * in : `__in` * not in: `__nin` For `__in` and `__nin` filters list of parameters must be comma-separated: `status__in=in_progress,success` :return: UploadChunkInfoPage If the method is called asynchronously, returns the request thread.
[ "List", "all", "metadata", "for", "uploaded", "chunks", "#", "noqa", ":", "E501" ]
python
train
opencobra/cobrapy
cobra/flux_analysis/deletion.py
https://github.com/opencobra/cobrapy/blob/9d1987cdb3a395cf4125a3439c3b002ff2be2009/cobra/flux_analysis/deletion.py#L184-L225
def single_reaction_deletion(model, reaction_list=None, method="fba", solution=None, processes=None, **kwargs): """ Knock out each reaction from a given list. Parameters ---------- model : cobra.Model The metabolic model to perform deletions in. reaction_list : iterable, optional ``cobra.Reaction``s to be deleted. If not passed, all the reactions from the model are used. method: {"fba", "moma", "linear moma", "room", "linear room"}, optional Method used to predict the growth rate. solution : cobra.Solution, optional A previous solution to use as a reference for (linear) MOMA or ROOM. processes : int, optional The number of parallel processes to run. Can speed up the computations if the number of knockouts to perform is large. If not passed, will be set to the number of CPUs found. kwargs : Keyword arguments are passed on to underlying simulation functions such as ``add_room``. Returns ------- pandas.DataFrame A representation of all single reaction deletions. The columns are 'growth' and 'status', where index : frozenset([str]) The reaction identifier that was knocked out. growth : float The growth rate of the adjusted model. status : str The solution's status. """ return _multi_deletion( model, 'reaction', element_lists=_element_lists(model.reactions, reaction_list), method=method, solution=solution, processes=processes, **kwargs)
[ "def", "single_reaction_deletion", "(", "model", ",", "reaction_list", "=", "None", ",", "method", "=", "\"fba\"", ",", "solution", "=", "None", ",", "processes", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "_multi_deletion", "(", "model", ",", "'reaction'", ",", "element_lists", "=", "_element_lists", "(", "model", ".", "reactions", ",", "reaction_list", ")", ",", "method", "=", "method", ",", "solution", "=", "solution", ",", "processes", "=", "processes", ",", "*", "*", "kwargs", ")" ]
Knock out each reaction from a given list. Parameters ---------- model : cobra.Model The metabolic model to perform deletions in. reaction_list : iterable, optional ``cobra.Reaction``s to be deleted. If not passed, all the reactions from the model are used. method: {"fba", "moma", "linear moma", "room", "linear room"}, optional Method used to predict the growth rate. solution : cobra.Solution, optional A previous solution to use as a reference for (linear) MOMA or ROOM. processes : int, optional The number of parallel processes to run. Can speed up the computations if the number of knockouts to perform is large. If not passed, will be set to the number of CPUs found. kwargs : Keyword arguments are passed on to underlying simulation functions such as ``add_room``. Returns ------- pandas.DataFrame A representation of all single reaction deletions. The columns are 'growth' and 'status', where index : frozenset([str]) The reaction identifier that was knocked out. growth : float The growth rate of the adjusted model. status : str The solution's status.
[ "Knock", "out", "each", "reaction", "from", "a", "given", "list", "." ]
python
valid
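A short usage sketch for `single_reaction_deletion`; `model` is assumed to be a loaded `cobra.Model`:

from cobra.flux_analysis import single_reaction_deletion

# model is assumed, e.g. model = cobra.io.read_sbml_model('e_coli_core.xml')
results = single_reaction_deletion(model, method='fba', processes=1)
print(results.sort_values('growth').head())  # most damaging knockouts first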
mdgoldberg/sportsref
sportsref/nba/players.py
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nba/players.py#L175-L177
def stats_per100(self, kind='R', summary=False): """Returns a DataFrame of per-100-possession stats.""" return self._get_stats_table('per_poss', kind=kind, summary=summary)
[ "def", "stats_per100", "(", "self", ",", "kind", "=", "'R'", ",", "summary", "=", "False", ")", ":", "return", "self", ".", "_get_stats_table", "(", "'per_poss'", ",", "kind", "=", "kind", ",", "summary", "=", "summary", ")" ]
Returns a DataFrame of per-100-possession stats.
[ "Returns", "a", "DataFrame", "of", "per", "-", "100", "-", "possession", "stats", "." ]
python
test
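A hedged usage sketch; the `Player` constructor name and the Basketball-Reference-style id are assumptions:

from sportsref import nba

player = nba.Player('jamesle01')        # constructor name and id are assumed
per100 = player.stats_per100(kind='R')  # regular-season per-100 table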
zvoase/django-relax
relax/viewserver.py
https://github.com/zvoase/django-relax/blob/10bb37bf3a512b290816856a6877c17fa37e930f/relax/viewserver.py#L158-L190
def handle(self): """The main function called to handle a request.""" while True: try: line = self.rfile.readline() try: # All input data are lines of JSON like the following: # ["<cmd_name>" "<cmd_arg1>" "<cmd_arg2>" ...] # So I handle this by dispatching to various methods. cmd = json.loads(line) except Exception, exc: # Sometimes errors come up. Once again, I can't predict # anything, but can at least tell CouchDB about the error. self.wfile.write(repr(exc) + NEWLINE) continue else: # Automagically get the command handler. handler = getattr(self, 'handle_' + cmd[0], None) if not handler: # We are ready to not find commands. It probably won't # happen, but fortune favours the prepared. self.wfile.write( repr(CommandNotFound(cmd[0])) + NEWLINE) continue return_value = handler(*cmd[1:]) if not return_value: continue # We write the output back to CouchDB. self.wfile.write( one_lineify(json.dumps(return_value)) + NEWLINE) except Exception, exc: self.wfile.write(repr(exc) + NEWLINE) continue
[ "def", "handle", "(", "self", ")", ":", "while", "True", ":", "try", ":", "line", "=", "self", ".", "rfile", ".", "readline", "(", ")", "try", ":", "# All input data are lines of JSON like the following:", "# [\"<cmd_name>\" \"<cmd_arg1>\" \"<cmd_arg2>\" ...]", "# So I handle this by dispatching to various methods.", "cmd", "=", "json", ".", "loads", "(", "line", ")", "except", "Exception", ",", "exc", ":", "# Sometimes errors come up. Once again, I can't predict", "# anything, but can at least tell CouchDB about the error.", "self", ".", "wfile", ".", "write", "(", "repr", "(", "exc", ")", "+", "NEWLINE", ")", "continue", "else", ":", "# Automagically get the command handler.", "handler", "=", "getattr", "(", "self", ",", "'handle_'", "+", "cmd", "[", "0", "]", ",", "None", ")", "if", "not", "handler", ":", "# We are ready to not find commands. It probably won't", "# happen, but fortune favours the prepared.", "self", ".", "wfile", ".", "write", "(", "repr", "(", "CommandNotFound", "(", "cmd", "[", "0", "]", ")", ")", "+", "NEWLINE", ")", "continue", "return_value", "=", "handler", "(", "*", "cmd", "[", "1", ":", "]", ")", "if", "not", "return_value", ":", "continue", "# We write the output back to CouchDB.", "self", ".", "wfile", ".", "write", "(", "one_lineify", "(", "json", ".", "dumps", "(", "return_value", ")", ")", "+", "NEWLINE", ")", "except", "Exception", ",", "exc", ":", "self", ".", "wfile", ".", "write", "(", "repr", "(", "exc", ")", "+", "NEWLINE", ")", "continue" ]
The main function called to handle a request.
[ "The", "main", "function", "called", "to", "handle", "a", "request", "." ]
python
valid
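The handler above implements a line-delimited JSON command protocol: each request is a JSON array ["<cmd>", arg1, ...] dispatched to a handle_<cmd> method. A minimal Python 3 sketch of the same dispatch pattern over stdio (names are illustrative):

import json
import sys

class Dispatcher:
    def handle_reset(self):
        return True

    def run(self, stream_in=sys.stdin, stream_out=sys.stdout):
        for line in stream_in:
            try:
                cmd = json.loads(line)
                handler = getattr(self, 'handle_' + cmd[0], None)
                if handler is None:
                    result = ['error', 'command_not_found', cmd[0]]
                else:
                    result = handler(*cmd[1:])
            except Exception as exc:  # report the error, keep serving
                result = ['error', repr(exc)]
            stream_out.write(json.dumps(result) + '\n')
            stream_out.flush()

if __name__ == '__main__':
    Dispatcher().run()  # one JSON result line per JSON command line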
inasafe/inasafe
safe/gui/tools/wizard/step_kw47_default_inasafe_fields.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/wizard/step_kw47_default_inasafe_fields.py#L106-L193
def set_widgets(self): """Set widgets on the Extra Keywords tab.""" self.clear() existing_inasafe_field = self.parent.get_existing_keyword( 'inasafe_fields') existing_inasafe_default_values = self.parent.get_existing_keyword( 'inasafe_default_values') # Remove old container and parameter if self.parameter_container: self.kwExtraKeywordsGridLayout.removeWidget( self.parameter_container) if self.parameters: self.parameters = [] # Iterate through all inasafe fields # existing_inasafe_default_values for inasafe_field in self.inasafe_fields_for_the_layer(): # Option for Not Available option_list = [no_field] for field in self.parent.layer.fields(): # Check the field type if isinstance(inasafe_field['type'], list): if field.type() in inasafe_field['type']: field_name = field.name() option_list.append('%s' % field_name) else: if field.type() == inasafe_field['type']: field_name = field.name() option_list.append('%s' % field_name) # Create DefaultSelectParameter parameter = DefaultSelectParameter() parameter.guid = inasafe_field['key'] parameter.name = inasafe_field['name'] parameter.is_required = False parameter.description = inasafe_field['description'] parameter.help_text = inasafe_field['help_text'] parameter.element_type = str parameter.options_list = option_list parameter.value = no_field parameter.default_labels = get_inasafe_default_value_fields( self.parent.setting, inasafe_field['key'])[0] parameter.default_values = get_inasafe_default_value_fields( self.parent.setting, inasafe_field['key'])[1] parameter.minimum = inasafe_field['default_value'].get( 'min_value') parameter.maximum = inasafe_field['default_value'].get( 'max_value') # Check if there is already value in the metadata. if existing_inasafe_field: existing_value = existing_inasafe_field.get( inasafe_field['key']) if existing_value: if existing_value in parameter.options_list: parameter.value = existing_value if existing_inasafe_default_values: existing_default_value = existing_inasafe_default_values.get( inasafe_field['key']) if existing_default_value: parameter.default = existing_default_value self.parameters.append(parameter) # Create the parameter container and add to the wizard. self.parameter_container = ParameterContainer( self.parameters, extra_parameters=self.extra_parameters) self.parameter_container.setup_ui() self.kwExtraKeywordsGridLayout.addWidget(self.parameter_container) # Add Message label self.kwExtraKeywordsGridLayout.addWidget(self.message_label) # Set default value to None for parameter_widget in self.parameter_container.\ get_parameter_widgets(): parameter_widget.widget().set_default(None) # Set selected radio button to 'Do not report' parameter_widget.widget().set_selected_radio_button() # Set default value from existing keywords if existing_inasafe_default_values: for guid, default in list(existing_inasafe_default_values.items()): parameter_widget = self.parameter_container.\ get_parameter_widget_by_guid(guid) if isinstance(parameter_widget, DefaultSelectParameterWidget): parameter_widget.set_default(default) # Set selected radio button to 'Do not report' parameter_widget.set_selected_radio_button()
[ "def", "set_widgets", "(", "self", ")", ":", "self", ".", "clear", "(", ")", "existing_inasafe_field", "=", "self", ".", "parent", ".", "get_existing_keyword", "(", "'inasafe_fields'", ")", "existing_inasafe_default_values", "=", "self", ".", "parent", ".", "get_existing_keyword", "(", "'inasafe_default_values'", ")", "# Remove old container and parameter", "if", "self", ".", "parameter_container", ":", "self", ".", "kwExtraKeywordsGridLayout", ".", "removeWidget", "(", "self", ".", "parameter_container", ")", "if", "self", ".", "parameters", ":", "self", ".", "parameters", "=", "[", "]", "# Iterate through all inasafe fields", "# existing_inasafe_default_values", "for", "inasafe_field", "in", "self", ".", "inasafe_fields_for_the_layer", "(", ")", ":", "# Option for Not Available", "option_list", "=", "[", "no_field", "]", "for", "field", "in", "self", ".", "parent", ".", "layer", ".", "fields", "(", ")", ":", "# Check the field type", "if", "isinstance", "(", "inasafe_field", "[", "'type'", "]", ",", "list", ")", ":", "if", "field", ".", "type", "(", ")", "in", "inasafe_field", "[", "'type'", "]", ":", "field_name", "=", "field", ".", "name", "(", ")", "option_list", ".", "append", "(", "'%s'", "%", "field_name", ")", "else", ":", "if", "field", ".", "type", "(", ")", "==", "inasafe_field", "[", "'type'", "]", ":", "field_name", "=", "field", ".", "name", "(", ")", "option_list", ".", "append", "(", "'%s'", "%", "field_name", ")", "# Create DefaultSelectParameter", "parameter", "=", "DefaultSelectParameter", "(", ")", "parameter", ".", "guid", "=", "inasafe_field", "[", "'key'", "]", "parameter", ".", "name", "=", "inasafe_field", "[", "'name'", "]", "parameter", ".", "is_required", "=", "False", "parameter", ".", "description", "=", "inasafe_field", "[", "'description'", "]", "parameter", ".", "help_text", "=", "inasafe_field", "[", "'help_text'", "]", "parameter", ".", "element_type", "=", "str", "parameter", ".", "options_list", "=", "option_list", "parameter", ".", "value", "=", "no_field", "parameter", ".", "default_labels", "=", "get_inasafe_default_value_fields", "(", "self", ".", "parent", ".", "setting", ",", "inasafe_field", "[", "'key'", "]", ")", "[", "0", "]", "parameter", ".", "default_values", "=", "get_inasafe_default_value_fields", "(", "self", ".", "parent", ".", "setting", ",", "inasafe_field", "[", "'key'", "]", ")", "[", "1", "]", "parameter", ".", "minimum", "=", "inasafe_field", "[", "'default_value'", "]", ".", "get", "(", "'min_value'", ")", "parameter", ".", "maximum", "=", "inasafe_field", "[", "'default_value'", "]", ".", "get", "(", "'max_value'", ")", "# Check if there is already value in the metadata.", "if", "existing_inasafe_field", ":", "existing_value", "=", "existing_inasafe_field", ".", "get", "(", "inasafe_field", "[", "'key'", "]", ")", "if", "existing_value", ":", "if", "existing_value", "in", "parameter", ".", "options_list", ":", "parameter", ".", "value", "=", "existing_value", "if", "existing_inasafe_default_values", ":", "existing_default_value", "=", "existing_inasafe_default_values", ".", "get", "(", "inasafe_field", "[", "'key'", "]", ")", "if", "existing_default_value", ":", "parameter", ".", "default", "=", "existing_default_value", "self", ".", "parameters", ".", "append", "(", "parameter", ")", "# Create the parameter container and add to the wizard.", "self", ".", "parameter_container", "=", "ParameterContainer", "(", "self", ".", "parameters", ",", "extra_parameters", "=", "self", ".", "extra_parameters", ")", "self", ".", 
"parameter_container", ".", "setup_ui", "(", ")", "self", ".", "kwExtraKeywordsGridLayout", ".", "addWidget", "(", "self", ".", "parameter_container", ")", "# Add Message label", "self", ".", "kwExtraKeywordsGridLayout", ".", "addWidget", "(", "self", ".", "message_label", ")", "# Set default value to None", "for", "parameter_widget", "in", "self", ".", "parameter_container", ".", "get_parameter_widgets", "(", ")", ":", "parameter_widget", ".", "widget", "(", ")", ".", "set_default", "(", "None", ")", "# Set selected radio button to 'Do not report'", "parameter_widget", ".", "widget", "(", ")", ".", "set_selected_radio_button", "(", ")", "# Set default value from existing keywords", "if", "existing_inasafe_default_values", ":", "for", "guid", ",", "default", "in", "list", "(", "existing_inasafe_default_values", ".", "items", "(", ")", ")", ":", "parameter_widget", "=", "self", ".", "parameter_container", ".", "get_parameter_widget_by_guid", "(", "guid", ")", "if", "isinstance", "(", "parameter_widget", ",", "DefaultSelectParameterWidget", ")", ":", "parameter_widget", ".", "set_default", "(", "default", ")", "# Set selected radio button to 'Do not report'", "parameter_widget", ".", "set_selected_radio_button", "(", ")" ]
Set widgets on the Extra Keywords tab.
[ "Set", "widgets", "on", "the", "Extra", "Keywords", "tab", "." ]
python
train
Jammy2211/PyAutoLens
autolens/data/array/mask.py
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/data/array/mask.py#L231-L241
def map_2d_array_to_masked_1d_array(self, array_2d): """For a 2D array (e.g. an image, noise_map, etc.) map it to a masked 1D array of values using this mask. Parameters ---------- array_2d : ndarray | None | float The 2D array to be mapped to a masked 1D array. """ if array_2d is None or isinstance(array_2d, float): return array_2d return mapping_util.map_2d_array_to_masked_1d_array_from_array_2d_and_mask(self, array_2d)
[ "def", "map_2d_array_to_masked_1d_array", "(", "self", ",", "array_2d", ")", ":", "if", "array_2d", "is", "None", "or", "isinstance", "(", "array_2d", ",", "float", ")", ":", "return", "array_2d", "return", "mapping_util", ".", "map_2d_array_to_masked_1d_array_from_array_2d_and_mask", "(", "self", ",", "array_2d", ")" ]
For a 2D array (e.g. an image, noise_map, etc.) map it to a masked 1D array of values using this mask. Parameters ---------- array_2d : ndarray | None | float The 2D array to be mapped to a masked 1D array.
[ "For", "a", "2D", "array", "(", "e", ".", "g", ".", "an", "image", "noise_map", "etc", ".", ")", "map", "it", "to", "a", "masked", "1D", "array", "of", "valuees", "using", "this", "mask", "." ]
python
valid
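Underneath, the mapping is a boolean-mask selection; a minimal numpy sketch of the idea, assuming the True-means-masked convention used in this package:

import numpy as np

array_2d = np.arange(9.0).reshape(3, 3)
mask = np.array([[True, False, True],
                 [False, False, False],
                 [True, True, True]])  # True marks masked-out pixels
masked_1d = array_2d[~mask]            # unmasked values, flattened to 1D
print(masked_1d)                       # [1. 3. 4. 5.]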
pypa/pipenv
pipenv/vendor/click/decorators.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/click/decorators.py#L286-L307
def help_option(*param_decls, **attrs): """Adds a ``--help`` option which immediately ends the program printing out the help page. This is usually unnecessary to add as this is added by default to all commands unless suppressed. Like :func:`version_option`, this is implemented as eager option that prints in the callback and exits. All arguments are forwarded to :func:`option`. """ def decorator(f): def callback(ctx, param, value): if value and not ctx.resilient_parsing: echo(ctx.get_help(), color=ctx.color) ctx.exit() attrs.setdefault('is_flag', True) attrs.setdefault('expose_value', False) attrs.setdefault('help', 'Show this message and exit.') attrs.setdefault('is_eager', True) attrs['callback'] = callback return option(*(param_decls or ('--help',)), **attrs)(f) return decorator
[ "def", "help_option", "(", "*", "param_decls", ",", "*", "*", "attrs", ")", ":", "def", "decorator", "(", "f", ")", ":", "def", "callback", "(", "ctx", ",", "param", ",", "value", ")", ":", "if", "value", "and", "not", "ctx", ".", "resilient_parsing", ":", "echo", "(", "ctx", ".", "get_help", "(", ")", ",", "color", "=", "ctx", ".", "color", ")", "ctx", ".", "exit", "(", ")", "attrs", ".", "setdefault", "(", "'is_flag'", ",", "True", ")", "attrs", ".", "setdefault", "(", "'expose_value'", ",", "False", ")", "attrs", ".", "setdefault", "(", "'help'", ",", "'Show this message and exit.'", ")", "attrs", ".", "setdefault", "(", "'is_eager'", ",", "True", ")", "attrs", "[", "'callback'", "]", "=", "callback", "return", "option", "(", "*", "(", "param_decls", "or", "(", "'--help'", ",", ")", ")", ",", "*", "*", "attrs", ")", "(", "f", ")", "return", "decorator" ]
Adds a ``--help`` option which immediately ends the program printing out the help page. This is usually unnecessary to add as this is added by default to all commands unless suppressed. Like :func:`version_option`, this is implemented as eager option that prints in the callback and exits. All arguments are forwarded to :func:`option`.
[ "Adds", "a", "--", "help", "option", "which", "immediately", "ends", "the", "program", "printing", "out", "the", "help", "page", ".", "This", "is", "usually", "unnecessary", "to", "add", "as", "this", "is", "added", "by", "default", "to", "all", "commands", "unless", "suppressed", "." ]
python
train
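Click adds ``--help`` automatically, so `help_option` is mostly useful for renaming or aliasing it:

import click

@click.command()
@click.help_option('--help', '-h')  # also answer to -h
def cli():
    """Say hello."""
    click.echo('hello')

if __name__ == '__main__':
    cli()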
apache/airflow
airflow/models/xcom.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/xcom.py#L88-L136
def set( cls, key, value, execution_date, task_id, dag_id, session=None): """ Store an XCom value. TODO: "pickling" has been deprecated and JSON is preferred. "pickling" will be removed in Airflow 2.0. :return: None """ session.expunge_all() enable_pickling = configuration.getboolean('core', 'enable_xcom_pickling') if enable_pickling: value = pickle.dumps(value) else: try: value = json.dumps(value).encode('UTF-8') except ValueError: log = LoggingMixin().log log.error("Could not serialize the XCOM value into JSON. " "If you are using pickles instead of JSON " "for XCOM, then you need to enable pickle " "support for XCOM in your airflow config.") raise # remove any duplicate XComs session.query(cls).filter( cls.key == key, cls.execution_date == execution_date, cls.task_id == task_id, cls.dag_id == dag_id).delete() session.commit() # insert new XCom session.add(XCom( key=key, value=value, execution_date=execution_date, task_id=task_id, dag_id=dag_id)) session.commit()
[ "def", "set", "(", "cls", ",", "key", ",", "value", ",", "execution_date", ",", "task_id", ",", "dag_id", ",", "session", "=", "None", ")", ":", "session", ".", "expunge_all", "(", ")", "enable_pickling", "=", "configuration", ".", "getboolean", "(", "'core'", ",", "'enable_xcom_pickling'", ")", "if", "enable_pickling", ":", "value", "=", "pickle", ".", "dumps", "(", "value", ")", "else", ":", "try", ":", "value", "=", "json", ".", "dumps", "(", "value", ")", ".", "encode", "(", "'UTF-8'", ")", "except", "ValueError", ":", "log", "=", "LoggingMixin", "(", ")", ".", "log", "log", ".", "error", "(", "\"Could not serialize the XCOM value into JSON. \"", "\"If you are using pickles instead of JSON \"", "\"for XCOM, then you need to enable pickle \"", "\"support for XCOM in your airflow config.\"", ")", "raise", "# remove any duplicate XComs", "session", ".", "query", "(", "cls", ")", ".", "filter", "(", "cls", ".", "key", "==", "key", ",", "cls", ".", "execution_date", "==", "execution_date", ",", "cls", ".", "task_id", "==", "task_id", ",", "cls", ".", "dag_id", "==", "dag_id", ")", ".", "delete", "(", ")", "session", ".", "commit", "(", ")", "# insert new XCom", "session", ".", "add", "(", "XCom", "(", "key", "=", "key", ",", "value", "=", "value", ",", "execution_date", "=", "execution_date", ",", "task_id", "=", "task_id", ",", "dag_id", "=", "dag_id", ")", ")", "session", ".", "commit", "(", ")" ]
Store an XCom value. TODO: "pickling" has been deprecated and JSON is preferred. "pickling" will be removed in Airflow 2.0. :return: None
[ "Store", "an", "XCom", "value", ".", "TODO", ":", "pickling", "has", "been", "deprecated", "and", "JSON", "is", "preferred", ".", "pickling", "will", "be", "removed", "in", "Airflow", "2", ".", "0", "." ]
python
test
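A hedged direct-call sketch; inside a task this is normally done via `ti.xcom_push`, which wraps the same call, and `execution_date` is assumed to be the running DagRun's datetime:

from airflow.models import XCom

XCom.set(key='rows_loaded',
         value=42,
         execution_date=execution_date,  # assumed datetime of the DagRun
         task_id='load_task',
         dag_id='etl_dag')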
stephantul/somber
somber/base.py
https://github.com/stephantul/somber/blob/b7a13e646239500cc393668c01a7169c3e50b7b5/somber/base.py#L471-L494
def predict(self, X, batch_size=1, show_progressbar=False): """ Predict the BMU for each input data. Parameters ---------- X : numpy array. The input data. batch_size : int, optional, default 100 The batch size to use in prediction. This may affect prediction in stateful, i.e. sequential SOMs. show_progressbar : bool Whether to show a progressbar during prediction. Returns ------- predictions : numpy array An array containing the BMU for each input data point. """ dist = self.transform(X, batch_size, show_progressbar) res = dist.__getattribute__(self.argfunc)(1) return res
[ "def", "predict", "(", "self", ",", "X", ",", "batch_size", "=", "1", ",", "show_progressbar", "=", "False", ")", ":", "dist", "=", "self", ".", "transform", "(", "X", ",", "batch_size", ",", "show_progressbar", ")", "res", "=", "dist", ".", "__getattribute__", "(", "self", ".", "argfunc", ")", "(", "1", ")", "return", "res" ]
Predict the BMU for each input data. Parameters ---------- X : numpy array. The input data. batch_size : int, optional, default 1 The batch size to use in prediction. This may affect prediction in stateful, i.e. sequential SOMs. show_progressbar : bool Whether to show a progressbar during prediction. Returns ------- predictions : numpy array An array containing the BMU for each input data point.
[ "Predict", "the", "BMU", "for", "each", "input", "data", "." ]
python
train
pkgw/pwkit
pwkit/environments/__init__.py
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/environments/__init__.py#L113-L130
def prepend_environ_path(env, name, text, pathsep=os.pathsep): """Prepend `text` into a $PATH-like environment variable. `env` is a dictionary of environment variables and `name` is the variable name. `pathsep` is the character separating path elements, defaulting to `os.pathsep`. The variable will be created if it is not already in `env`. Returns `env`. Example:: prepend_environ_path(env, 'PATH', '/mypackage/bin') The `name` and `text` arguments should be `str` objects; that is, bytes in Python 2 and Unicode in Python 3. Literal strings will be OK unless you use the ``from __future__ import unicode_literals`` feature. """ env[name] = prepend_path(env.get(name), text, pathsep=pathsep) return env
[ "def", "prepend_environ_path", "(", "env", ",", "name", ",", "text", ",", "pathsep", "=", "os", ".", "pathsep", ")", ":", "env", "[", "name", "]", "=", "prepend_path", "(", "env", ".", "get", "(", "name", ")", ",", "text", ",", "pathsep", "=", "pathsep", ")", "return", "env" ]
Prepend `text` into a $PATH-like environment variable. `env` is a dictionary of environment variables and `name` is the variable name. `pathsep` is the character separating path elements, defaulting to `os.pathsep`. The variable will be created if it is not already in `env`. Returns `env`. Example:: prepend_environ_path(env, 'PATH', '/mypackage/bin') The `name` and `text` arguments should be `str` objects; that is, bytes in Python 2 and Unicode in Python 3. Literal strings will be OK unless you use the ``from __future__ import unicode_literals`` feature.
[ "Prepend", "text", "into", "a", "$PATH", "-", "like", "environment", "variable", ".", "env", "is", "a", "dictionary", "of", "environment", "variables", "and", "name", "is", "the", "variable", "name", ".", "pathsep", "is", "the", "character", "separating", "path", "elements", "defaulting", "to", "os", ".", "pathsep", ".", "The", "variable", "will", "be", "created", "if", "it", "is", "not", "already", "in", "env", ".", "Returns", "env", "." ]
python
train
quantopian/zipline
zipline/pipeline/loaders/earnings_estimates.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L74-L92
def validate_column_specs(events, columns): """ Verify that the columns of ``events`` can be used by a EarningsEstimatesLoader to serve the BoundColumns described by `columns`. """ required = required_estimates_fields(columns) received = set(events.columns) missing = required - received if missing: raise ValueError( "EarningsEstimatesLoader missing required columns {missing}.\n" "Got Columns: {received}\n" "Expected Columns: {required}".format( missing=sorted(missing), received=sorted(received), required=sorted(required), ) )
[ "def", "validate_column_specs", "(", "events", ",", "columns", ")", ":", "required", "=", "required_estimates_fields", "(", "columns", ")", "received", "=", "set", "(", "events", ".", "columns", ")", "missing", "=", "required", "-", "received", "if", "missing", ":", "raise", "ValueError", "(", "\"EarningsEstimatesLoader missing required columns {missing}.\\n\"", "\"Got Columns: {received}\\n\"", "\"Expected Columns: {required}\"", ".", "format", "(", "missing", "=", "sorted", "(", "missing", ")", ",", "received", "=", "sorted", "(", "received", ")", ",", "required", "=", "sorted", "(", "required", ")", ",", ")", ")" ]
Verify that the columns of ``events`` can be used by a EarningsEstimatesLoader to serve the BoundColumns described by `columns`.
[ "Verify", "that", "the", "columns", "of", "events", "can", "be", "used", "by", "a", "EarningsEstimatesLoader", "to", "serve", "the", "BoundColumns", "described", "by", "columns", "." ]
python
train
NASA-AMMOS/AIT-Core
ait/core/server/server.py
https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/server/server.py#L192-L215
def _create_handler(self, config): """ Creates a handler from its config. Params: config: handler config Returns: handler instance """ if config is None: raise ValueError('No handler config to create handler from.') if 'name' not in config: raise ValueError('Handler name is required.') handler_name = config['name'] # try to create handler module_name = handler_name.rsplit('.', 1)[0] class_name = handler_name.rsplit('.', 1)[-1] module = import_module(module_name) handler_class = getattr(module, class_name) instance = handler_class(**config) return instance
[ "def", "_create_handler", "(", "self", ",", "config", ")", ":", "if", "config", "is", "None", ":", "raise", "ValueError", "(", "'No handler config to create handler from.'", ")", "if", "'name'", "not", "in", "config", ":", "raise", "ValueError", "(", "'Handler name is required.'", ")", "handler_name", "=", "config", "[", "'name'", "]", "# try to create handler", "module_name", "=", "handler_name", ".", "rsplit", "(", "'.'", ",", "1", ")", "[", "0", "]", "class_name", "=", "handler_name", ".", "rsplit", "(", "'.'", ",", "1", ")", "[", "-", "1", "]", "module", "=", "import_module", "(", "module_name", ")", "handler_class", "=", "getattr", "(", "module", ",", "class_name", ")", "instance", "=", "handler_class", "(", "*", "*", "config", ")", "return", "instance" ]
Creates a handler from its config. Params: config: handler config Returns: handler instance
[ "Creates", "a", "handler", "from", "its", "config", "." ]
python
train
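A minimal generic sketch of the same import-by-dotted-name pattern (the config values are illustrative):

from importlib import import_module

def create_from_config(config):
    # 'name' holds a dotted path such as 'package.module.ClassName'; the
    # whole config dict is forwarded to the constructor, as above.
    module_name, class_name = config['name'].rsplit('.', 1)
    cls = getattr(import_module(module_name), class_name)
    return cls(**config)

obj = create_from_config({'name': 'collections.OrderedDict'})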
ethereum/lahja
lahja/endpoint.py
https://github.com/ethereum/lahja/blob/e3993c5892232887a11800ed3e66332febcee96b/lahja/endpoint.py#L359-L377
def broadcast(self, item: BaseEvent, config: Optional[BroadcastConfig] = None) -> None: """ Broadcast an instance of :class:`~lahja.misc.BaseEvent` on the event bus. Takes an optional second parameter of :class:`~lahja.misc.BroadcastConfig` to decide where this event should be broadcasted to. By default, events are broadcasted across all connected endpoints with their consuming call sites. """ item._origin = self.name if config is not None and config.internal: # Internal events simply bypass going through the central event bus # and are directly put into the local receiving queue instead. self._internal_queue.put_nowait((item, config)) else: # Broadcast to every connected Endpoint that is allowed to receive the event compressed_item = self._compress_event(item) for name, connector in self._connected_endpoints.items(): allowed = (config is None) or config.allowed_to_receive(name) if allowed: connector.put_nowait((compressed_item, config))
[ "def", "broadcast", "(", "self", ",", "item", ":", "BaseEvent", ",", "config", ":", "Optional", "[", "BroadcastConfig", "]", "=", "None", ")", "->", "None", ":", "item", ".", "_origin", "=", "self", ".", "name", "if", "config", "is", "not", "None", "and", "config", ".", "internal", ":", "# Internal events simply bypass going through the central event bus", "# and are directly put into the local receiving queue instead.", "self", ".", "_internal_queue", ".", "put_nowait", "(", "(", "item", ",", "config", ")", ")", "else", ":", "# Broadcast to every connected Endpoint that is allowed to receive the event", "compressed_item", "=", "self", ".", "_compress_event", "(", "item", ")", "for", "name", ",", "connector", "in", "self", ".", "_connected_endpoints", ".", "items", "(", ")", ":", "allowed", "=", "(", "config", "is", "None", ")", "or", "config", ".", "allowed_to_receive", "(", "name", ")", "if", "allowed", ":", "connector", ".", "put_nowait", "(", "(", "compressed_item", ",", "config", ")", ")" ]
Broadcast an instance of :class:`~lahja.misc.BaseEvent` on the event bus. Takes an optional second parameter of :class:`~lahja.misc.BroadcastConfig` to decide where this event should be broadcasted to. By default, events are broadcasted across all connected endpoints with their consuming call sites.
[ "Broadcast", "an", "instance", "of", ":", "class", ":", "~lahja", ".", "misc", ".", "BaseEvent", "on", "the", "event", "bus", ".", "Takes", "an", "optional", "second", "parameter", "of", ":", "class", ":", "~lahja", ".", "misc", ".", "BroadcastConfig", "to", "decide", "where", "this", "event", "should", "be", "broadcasted", "to", ".", "By", "default", "events", "are", "broadcasted", "across", "all", "connected", "endpoints", "with", "their", "consuming", "call", "sites", "." ]
python
train
billy-yoyo/RainbowSixSiege-Python-API
r6sapi/r6sapi.py
https://github.com/billy-yoyo/RainbowSixSiege-Python-API/blob/9860fdfd9a78aabd977eaa71b0a4ab4ed69e94d0/r6sapi/r6sapi.py#L1259-L1277
def get_operator(self, operator): """|coro| Checks the player's stats for this operator, only loading them if they haven't already been found Parameters ---------- operator : str the name of the operator Returns ------- :class:`Operator` the operator object found""" if operator in self.operators: return self.operators[operator] result = yield from self.load_operator(operator) return result
[ "def", "get_operator", "(", "self", ",", "operator", ")", ":", "if", "operator", "in", "self", ".", "operators", ":", "return", "self", ".", "operators", "[", "operator", "]", "result", "=", "yield", "from", "self", ".", "load_operator", "(", "operator", ")", "return", "result" ]
|coro| Checks the player's stats for this operator, only loading them if they haven't already been found Parameters ---------- operator : str the name of the operator Returns ------- :class:`Operator` the operator object found
[ "|coro|" ]
python
train
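get_operator is a lazy cache around a coroutine: return the cached object if present, otherwise await the loader once and memoize the result. A stdlib-only sketch of the same pattern (LazyCache and its fake loader are hypothetical stand-ins for the Player object and its API call):

import asyncio

class LazyCache:
    def __init__(self):
        self._loaded = {}

    async def _load(self, name):
        await asyncio.sleep(0)  # placeholder for the real network request
        return {"name": name}

    async def get(self, name):
        if name not in self._loaded:
            self._loaded[name] = await self._load(name)
        return self._loaded[name]

print(asyncio.run(LazyCache().get("ash")))  # {'name': 'ash'}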
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/flask/templating.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/flask/templating.py#L21-L33
def _default_template_ctx_processor(): """Default template context processor. Injects `request`, `session` and `g`. """ reqctx = _request_ctx_stack.top appctx = _app_ctx_stack.top rv = {} if appctx is not None: rv['g'] = appctx.g if reqctx is not None: rv['request'] = reqctx.request rv['session'] = reqctx.session return rv
[ "def", "_default_template_ctx_processor", "(", ")", ":", "reqctx", "=", "_request_ctx_stack", ".", "top", "appctx", "=", "_app_ctx_stack", ".", "top", "rv", "=", "{", "}", "if", "appctx", "is", "not", "None", ":", "rv", "[", "'g'", "]", "=", "appctx", ".", "g", "if", "reqctx", "is", "not", "None", ":", "rv", "[", "'request'", "]", "=", "reqctx", ".", "request", "rv", "[", "'session'", "]", "=", "reqctx", ".", "session", "return", "rv" ]
Default template context processor. Injects `request`, `session` and `g`.
[ "Default", "template", "context", "processor", ".", "Injects", "request", "session", "and", "g", "." ]
python
test
mozilla/DeepSpeech
bin/benchmark_nc.py
https://github.com/mozilla/DeepSpeech/blob/f64aa73e7fbe9dde40d4fcf23b42ab304747d152/bin/benchmark_nc.py#L139-L186
def all_files(models=[]): r''' Return a list of full path of files matching 'models', sorted in human numerical order (i.e., 0 1 2 ..., 10 11 12, ..., 100, ..., 1000). Files are supposed to be named identically except one variable component e.g. the list, test.weights.e5.lstm1200.ldc93s1.pb test.weights.e5.lstm1000.ldc93s1.pb test.weights.e5.lstm800.ldc93s1.pb gets sorted: test.weights.e5.lstm800.ldc93s1.pb test.weights.e5.lstm1000.ldc93s1.pb test.weights.e5.lstm1200.ldc93s1.pb ''' def nsort(a, b): fa = os.path.basename(a).split('.') fb = os.path.basename(b).split('.') elements_to_remove = [] assert len(fa) == len(fb) for i in range(0, len(fa)): if fa[i] == fb[i]: elements_to_remove.append(fa[i]) for e in elements_to_remove: fa.remove(e) fb.remove(e) assert len(fa) == len(fb) assert len(fa) == 1 fa = keep_only_digits(fa[0]) fb = keep_only_digits(fb[0]) if fa < fb: return -1 if fa == fb: return 0 if fa > fb: return 1 base = list(map(lambda x: os.path.abspath(x), maybe_inspect_zip(models))) base.sort(cmp=nsort) return base
[ "def", "all_files", "(", "models", "=", "[", "]", ")", ":", "def", "nsort", "(", "a", ",", "b", ")", ":", "fa", "=", "os", ".", "path", ".", "basename", "(", "a", ")", ".", "split", "(", "'.'", ")", "fb", "=", "os", ".", "path", ".", "basename", "(", "b", ")", ".", "split", "(", "'.'", ")", "elements_to_remove", "=", "[", "]", "assert", "len", "(", "fa", ")", "==", "len", "(", "fb", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "fa", ")", ")", ":", "if", "fa", "[", "i", "]", "==", "fb", "[", "i", "]", ":", "elements_to_remove", ".", "append", "(", "fa", "[", "i", "]", ")", "for", "e", "in", "elements_to_remove", ":", "fa", ".", "remove", "(", "e", ")", "fb", ".", "remove", "(", "e", ")", "assert", "len", "(", "fa", ")", "==", "len", "(", "fb", ")", "assert", "len", "(", "fa", ")", "==", "1", "fa", "=", "keep_only_digits", "(", "fa", "[", "0", "]", ")", "fb", "=", "keep_only_digits", "(", "fb", "[", "0", "]", ")", "if", "fa", "<", "fb", ":", "return", "-", "1", "if", "fa", "==", "fb", ":", "return", "0", "if", "fa", ">", "fb", ":", "return", "1", "base", "=", "list", "(", "map", "(", "lambda", "x", ":", "os", ".", "path", ".", "abspath", "(", "x", ")", ",", "maybe_inspect_zip", "(", "models", ")", ")", ")", "base", ".", "sort", "(", "cmp", "=", "nsort", ")", "return", "base" ]
r''' Return a list of full path of files matching 'models', sorted in human numerical order (i.e., 0 1 2 ..., 10 11 12, ..., 100, ..., 1000). Files are supposed to be named identically except one variable component e.g. the list, test.weights.e5.lstm1200.ldc93s1.pb test.weights.e5.lstm1000.ldc93s1.pb test.weights.e5.lstm800.ldc93s1.pb gets sorted: test.weights.e5.lstm800.ldc93s1.pb test.weights.e5.lstm1000.ldc93s1.pb test.weights.e5.lstm1200.ldc93s1.pb
[ "r", "Return", "a", "list", "of", "full", "path", "of", "files", "matching", "models", "sorted", "in", "human", "numerical", "order", "(", "i", ".", "e", ".", "0", "1", "2", "...", "10", "11", "12", "...", "100", "...", "1000", ")", "." ]
python
train
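all_files above sorts with a comparison function via base.sort(cmp=nsort), which only exists on Python 2. On Python 3 the same human-numeric ordering can be had with a key function; this simplified sketch keys on all digits in the name rather than isolating the single varying component, and the file names are made up:

import re

def numeric_key(name):
    digits = re.sub(r'\D', '', name)  # keep only the digits
    return int(digits) if digits else -1

names = ['test.lstm1200.pb', 'test.lstm800.pb', 'test.lstm1000.pb']
print(sorted(names, key=numeric_key))
# ['test.lstm800.pb', 'test.lstm1000.pb', 'test.lstm1200.pb']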
Equitable/trump
trump/converting/objects.py
https://github.com/Equitable/trump/blob/a2802692bc642fa32096374159eea7ceca2947b4/trump/converting/objects.py#L65-L76
def use_trump_data(self, symbols): """ Use trump data to build conversion table symbols : list of symbols: will attempt to use units to build the conversion table, strings represent symbol names. """ dfs = {sym.units : sym.df[sym.name] for sym in symbols} self.build_conversion_table(dfs)
[ "def", "use_trump_data", "(", "self", ",", "symbols", ")", ":", "dfs", "=", "{", "sym", ".", "units", ":", "sym", ".", "df", "[", "sym", ".", "name", "]", "for", "sym", "in", "symbols", "}", "self", ".", "build_conversion_table", "(", "dfs", ")" ]
Use trump data to build conversion table symbols : list of symbols: will attempt to use units to build the conversion table, strings represent symbol names.
[ "Use", "trump", "data", "to", "build", "conversion", "table", "symbols", ":", "list", "of", "symbols", ":", "will", "attempt", "to", "use", "units", "to", "build", "the", "conversion", "table", "strings", "represent", "symbol", "names", "." ]
python
train
blockstack/blockstack-core
blockstack/lib/atlas.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L2713-L2722
def canonical_peer( self, peer ): """ Get the canonical peer name """ their_host, their_port = url_to_host_port( peer ) if their_host in ['127.0.0.1', '::1']: their_host = 'localhost' return "%s:%s" % (their_host, their_port)
[ "def", "canonical_peer", "(", "self", ",", "peer", ")", ":", "their_host", ",", "their_port", "=", "url_to_host_port", "(", "peer", ")", "if", "their_host", "in", "[", "'127.0.0.1'", ",", "'::1'", "]", ":", "their_host", "=", "'localhost'", "return", "\"%s:%s\"", "%", "(", "their_host", ",", "their_port", ")" ]
Get the canonical peer name
[ "Get", "the", "canonical", "peer", "name" ]
python
train
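A standalone sketch of the normalization canonical_peer performs; the rsplit is a naive stand-in for url_to_host_port (which also handles URLs), and the port number is just an example value:

def canonical_peer(peer):
    host, port = peer.rsplit(':', 1)  # naive stand-in for url_to_host_port
    if host in ('127.0.0.1', '::1'):
        host = 'localhost'
    return '%s:%s' % (host, port)

print(canonical_peer('127.0.0.1:6264'))  # localhost:6264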
bitshares/python-bitshares
bitshares/bitshares.py
https://github.com/bitshares/python-bitshares/blob/8a3b5954a6abcaaff7c6a5c41d910e58eea3142f/bitshares/bitshares.py#L128-L163
def transfer(self, to, amount, asset, memo="", account=None, **kwargs): """ Transfer an asset to another account. :param str to: Recipient :param float amount: Amount to transfer :param str asset: Asset to transfer :param str memo: (optional) Memo, may begin with `#` for encrypted messaging :param str account: (optional) the source account for the transfer if not ``default_account`` """ from .memo import Memo if not account: if "default_account" in self.config: account = self.config["default_account"] if not account: raise ValueError("You need to provide an account") account = Account(account, blockchain_instance=self) amount = Amount(amount, asset, blockchain_instance=self) to = Account(to, blockchain_instance=self) memoObj = Memo(from_account=account, to_account=to, blockchain_instance=self) op = operations.Transfer( **{ "fee": {"amount": 0, "asset_id": "1.3.0"}, "from": account["id"], "to": to["id"], "amount": {"amount": int(amount), "asset_id": amount.asset["id"]}, "memo": memoObj.encrypt(memo), "prefix": self.prefix, } ) return self.finalizeOp(op, account, "active", **kwargs)
[ "def", "transfer", "(", "self", ",", "to", ",", "amount", ",", "asset", ",", "memo", "=", "\"\"", ",", "account", "=", "None", ",", "*", "*", "kwargs", ")", ":", "from", ".", "memo", "import", "Memo", "if", "not", "account", ":", "if", "\"default_account\"", "in", "self", ".", "config", ":", "account", "=", "self", ".", "config", "[", "\"default_account\"", "]", "if", "not", "account", ":", "raise", "ValueError", "(", "\"You need to provide an account\"", ")", "account", "=", "Account", "(", "account", ",", "blockchain_instance", "=", "self", ")", "amount", "=", "Amount", "(", "amount", ",", "asset", ",", "blockchain_instance", "=", "self", ")", "to", "=", "Account", "(", "to", ",", "blockchain_instance", "=", "self", ")", "memoObj", "=", "Memo", "(", "from_account", "=", "account", ",", "to_account", "=", "to", ",", "blockchain_instance", "=", "self", ")", "op", "=", "operations", ".", "Transfer", "(", "*", "*", "{", "\"fee\"", ":", "{", "\"amount\"", ":", "0", ",", "\"asset_id\"", ":", "\"1.3.0\"", "}", ",", "\"from\"", ":", "account", "[", "\"id\"", "]", ",", "\"to\"", ":", "to", "[", "\"id\"", "]", ",", "\"amount\"", ":", "{", "\"amount\"", ":", "int", "(", "amount", ")", ",", "\"asset_id\"", ":", "amount", ".", "asset", "[", "\"id\"", "]", "}", ",", "\"memo\"", ":", "memoObj", ".", "encrypt", "(", "memo", ")", ",", "\"prefix\"", ":", "self", ".", "prefix", ",", "}", ")", "return", "self", ".", "finalizeOp", "(", "op", ",", "account", ",", "\"active\"", ",", "*", "*", "kwargs", ")" ]
Transfer an asset to another account. :param str to: Recipient :param float amount: Amount to transfer :param str asset: Asset to transfer :param str memo: (optional) Memo, may begin with `#` for encrypted messaging :param str account: (optional) the source account for the transfer if not ``default_account``
[ "Transfer", "an", "asset", "to", "another", "account", "." ]
python
train
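A hedged usage sketch for transfer: it assumes a reachable node, an unlocked wallet, and existing accounts; the account names and passphrase are placeholders.

from bitshares import BitShares

bts = BitShares()                       # connects to the configured node
bts.wallet.unlock("wallet-passphrase")  # signing requires an unlocked wallet
bts.transfer("recipient-account", 10, "BTS", memo="thanks", account="sender-account")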
marcomusy/vtkplotter
vtkplotter/vtkio.py
https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/vtkio.py#L357-L405
def loadDolfin(filename, c="gold", alpha=0.5, wire=None, bc=None): """Reads a `Fenics/Dolfin` file format. Return an ``Actor(vtkActor)`` object.""" if not os.path.exists(filename): colors.printc("~noentry Error in loadDolfin: Cannot find", filename, c=1) return None import xml.etree.ElementTree as et if filename.endswith(".gz"): import gzip inF = gzip.open(filename, "rb") outF = open("/tmp/filename.xml", "wb") outF.write(inF.read()) outF.close() inF.close() tree = et.parse("/tmp/filename.xml") else: tree = et.parse(filename) coords, connectivity = [], [] for mesh in tree.getroot(): for elem in mesh: for e in elem.findall("vertex"): x = float(e.get("x")) y = float(e.get("y")) ez = e.get("z") if ez is None: coords.append([x, y]) else: z = float(ez) coords.append([x, y, z]) tets = elem.findall("tetrahedron") if not len(tets): tris = elem.findall("triangle") for e in tris: v0 = int(e.get("v0")) v1 = int(e.get("v1")) v2 = int(e.get("v2")) connectivity.append([v0, v1, v2]) else: for e in tets: v0 = int(e.get("v0")) v1 = int(e.get("v1")) v2 = int(e.get("v2")) v3 = int(e.get("v3")) connectivity.append([v0, v1, v2, v3]) poly = buildPolyData(coords, connectivity) return Actor(poly, c, alpha, True, bc)
[ "def", "loadDolfin", "(", "filename", ",", "c", "=", "\"gold\"", ",", "alpha", "=", "0.5", ",", "wire", "=", "None", ",", "bc", "=", "None", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "colors", ".", "printc", "(", "\"~noentry Error in loadDolfin: Cannot find\"", ",", "filename", ",", "c", "=", "1", ")", "return", "None", "import", "xml", ".", "etree", ".", "ElementTree", "as", "et", "if", "filename", ".", "endswith", "(", "\".gz\"", ")", ":", "import", "gzip", "inF", "=", "gzip", ".", "open", "(", "filename", ",", "\"rb\"", ")", "outF", "=", "open", "(", "\"/tmp/filename.xml\"", ",", "\"wb\"", ")", "outF", ".", "write", "(", "inF", ".", "read", "(", ")", ")", "outF", ".", "close", "(", ")", "inF", ".", "close", "(", ")", "tree", "=", "et", ".", "parse", "(", "\"/tmp/filename.xml\"", ")", "else", ":", "tree", "=", "et", ".", "parse", "(", "filename", ")", "coords", ",", "connectivity", "=", "[", "]", ",", "[", "]", "for", "mesh", "in", "tree", ".", "getroot", "(", ")", ":", "for", "elem", "in", "mesh", ":", "for", "e", "in", "elem", ".", "findall", "(", "\"vertex\"", ")", ":", "x", "=", "float", "(", "e", ".", "get", "(", "\"x\"", ")", ")", "y", "=", "float", "(", "e", ".", "get", "(", "\"y\"", ")", ")", "ez", "=", "e", ".", "get", "(", "\"z\"", ")", "if", "ez", "is", "None", ":", "coords", ".", "append", "(", "[", "x", ",", "y", "]", ")", "else", ":", "z", "=", "float", "(", "ez", ")", "coords", ".", "append", "(", "[", "x", ",", "y", ",", "z", "]", ")", "tets", "=", "elem", ".", "findall", "(", "\"tetrahedron\"", ")", "if", "not", "len", "(", "tets", ")", ":", "tris", "=", "elem", ".", "findall", "(", "\"triangle\"", ")", "for", "e", "in", "tris", ":", "v0", "=", "int", "(", "e", ".", "get", "(", "\"v0\"", ")", ")", "v1", "=", "int", "(", "e", ".", "get", "(", "\"v1\"", ")", ")", "v2", "=", "int", "(", "e", ".", "get", "(", "\"v2\"", ")", ")", "connectivity", ".", "append", "(", "[", "v0", ",", "v1", ",", "v2", "]", ")", "else", ":", "for", "e", "in", "tets", ":", "v0", "=", "int", "(", "e", ".", "get", "(", "\"v0\"", ")", ")", "v1", "=", "int", "(", "e", ".", "get", "(", "\"v1\"", ")", ")", "v2", "=", "int", "(", "e", ".", "get", "(", "\"v2\"", ")", ")", "v3", "=", "int", "(", "e", ".", "get", "(", "\"v3\"", ")", ")", "connectivity", ".", "append", "(", "[", "v0", ",", "v1", ",", "v2", ",", "v3", "]", ")", "poly", "=", "buildPolyData", "(", "coords", ",", "connectivity", ")", "return", "Actor", "(", "poly", ",", "c", ",", "alpha", ",", "True", ",", "bc", ")" ]
Reads a `Fenics/Dolfin` file format. Return an ``Actor(vtkActor)`` object.
[ "Reads", "a", "Fenics", "/", "Dolfin", "file", "format", ".", "Return", "an", "Actor", "(", "vtkActor", ")", "object", "." ]
python
train
AtteqCom/zsl
src/zsl/utils/xml_helper.py
https://github.com/AtteqCom/zsl/blob/ab51a96da1780ff642912396d4b85bdcb72560c1/src/zsl/utils/xml_helper.py#L81-L96
def attrib_to_dict(element, *args, **kwargs): """For an ElementTree ``element`` extract specified attributes. If an attribute does not exist, its value will be ``None``. attrib_to_dict(element, 'attr_a', 'attr_b') -> {'attr_a': 'value', 'attr_b': 'value'} Mapping between xml attributes and dictionary keys is done with kwargs. attrib_to_dict(element, my_new_name = 'xml_attribute_name', ..) """ if len(args) > 0: return {key: element.get(key) for key in args} if len(kwargs) > 0: return {new_key: element.get(old_key) for new_key, old_key in viewitems(kwargs)} return element.attrib
[ "def", "attrib_to_dict", "(", "element", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "len", "(", "args", ")", ">", "0", ":", "return", "{", "key", ":", "element", ".", "get", "(", "key", ")", "for", "key", "in", "args", "}", "if", "len", "(", "kwargs", ")", ">", "0", ":", "return", "{", "new_key", ":", "element", ".", "get", "(", "old_key", ")", "for", "new_key", ",", "old_key", "in", "viewitems", "(", "kwargs", ")", "}", "return", "element", ".", "attrib" ]
For an ElementTree ``element`` extract specified attributes. If an attribute does not exist, its value will be ``None``. attrib_to_dict(element, 'attr_a', 'attr_b') -> {'attr_a': 'value', 'attr_b': 'value'} Mapping between xml attributes and dictionary keys is done with kwargs. attrib_to_dict(element, my_new_name = 'xml_attribute_name', ..)
[ "For", "an", "ElementTree", "element", "extract", "specified", "attributes", ".", "If", "an", "attribute", "does", "not", "exists", "its", "value", "will", "be", "None", "." ]
python
train
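Usage sketch for attrib_to_dict, assuming the function above (and the viewitems helper it uses, from future.utils) is in scope:

import xml.etree.ElementTree as ET

elem = ET.fromstring('<node attr_a="1" attr_b="2"/>')
print(attrib_to_dict(elem, 'attr_a', 'missing'))  # {'attr_a': '1', 'missing': None}
print(attrib_to_dict(elem, renamed='attr_b'))     # {'renamed': '2'}
print(attrib_to_dict(elem))                       # {'attr_a': '1', 'attr_b': '2'}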
yjzhang/uncurl_python
uncurl/nb_clustering.py
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/nb_clustering.py#L22-L28
def log_ncr(a, b): """ Returns log(nCr(a,b)), given that b<a. Does not assume that a and b are integers (uses log-gamma). """ val = gammaln(a+1) - gammaln(a-b+1) - gammaln(b+1) return val
[ "def", "log_ncr", "(", "a", ",", "b", ")", ":", "val", "=", "gammaln", "(", "a", "+", "1", ")", "-", "gammaln", "(", "a", "-", "b", "+", "1", ")", "-", "gammaln", "(", "b", "+", "1", ")", "return", "val" ]
Returns log(nCr(a,b)), given that b<a. Does not assume that a and b are integers (uses log-gamma).
[ "Returns", "log", "(", "nCr", "(", "a", "b", "))", "given", "that", "b<a", ".", "Does", "not", "assume", "that", "a", "and", "b", "are", "integers", "(", "uses", "log", "-", "gamma", ")", "." ]
python
train
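log_ncr relies on the identity log n! = gammaln(n + 1), so exponentiating the result should recover the binomial coefficient. A quick numerical check (assumes scipy is installed; math.comb needs Python 3.8+):

import math
from scipy.special import gammaln

def log_ncr(a, b):
    return gammaln(a + 1) - gammaln(a - b + 1) - gammaln(b + 1)

print(math.isclose(math.exp(log_ncr(10, 3)), math.comb(10, 3)))  # True: C(10,3) = 120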
google/grr
grr/server/grr_response_server/databases/mem_clients.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mem_clients.py#L342-L352
def ReadClientCrashInfoHistory(self, client_id): """Reads the full crash history for a particular client.""" history = self.crash_history.get(client_id) if not history: return [] res = [] for ts in sorted(history, reverse=True): client_data = rdf_client.ClientCrash.FromSerializedString(history[ts]) client_data.timestamp = ts res.append(client_data) return res
[ "def", "ReadClientCrashInfoHistory", "(", "self", ",", "client_id", ")", ":", "history", "=", "self", ".", "crash_history", ".", "get", "(", "client_id", ")", "if", "not", "history", ":", "return", "[", "]", "res", "=", "[", "]", "for", "ts", "in", "sorted", "(", "history", ",", "reverse", "=", "True", ")", ":", "client_data", "=", "rdf_client", ".", "ClientCrash", ".", "FromSerializedString", "(", "history", "[", "ts", "]", ")", "client_data", ".", "timestamp", "=", "ts", "res", ".", "append", "(", "client_data", ")", "return", "res" ]
Reads the full crash history for a particular client.
[ "Reads", "the", "full", "crash", "history", "for", "a", "particular", "client", "." ]
python
train
bcb/jsonrpcclient
jsonrpcclient/clients/tornado_client.py
https://github.com/bcb/jsonrpcclient/blob/5b5abc28d1466d694c80b80c427a5dcb275382bb/jsonrpcclient/clients/tornado_client.py#L46-L66
async def send_message( # type: ignore self, request: str, response_expected: bool, **kwargs: Any ) -> Response: """ Transport the message to the server and return the response. Args: request: The JSON-RPC request string. response_expected: Whether the request expects a response. Returns: A Response object. """ headers = dict(self.DEFAULT_HEADERS) headers.update(kwargs.pop("headers", {})) response = await self.client.fetch( self.endpoint, method="POST", body=request, headers=headers, **kwargs ) return Response(response.body.decode(), raw=response)
[ "async", "def", "send_message", "(", "# type: ignore", "self", ",", "request", ":", "str", ",", "response_expected", ":", "bool", ",", "*", "*", "kwargs", ":", "Any", ")", "->", "Response", ":", "headers", "=", "dict", "(", "self", ".", "DEFAULT_HEADERS", ")", "headers", ".", "update", "(", "kwargs", ".", "pop", "(", "\"headers\"", ",", "{", "}", ")", ")", "response", "=", "await", "self", ".", "client", ".", "fetch", "(", "self", ".", "endpoint", ",", "method", "=", "\"POST\"", ",", "body", "=", "request", ",", "headers", "=", "headers", ",", "*", "*", "kwargs", ")", "return", "Response", "(", "response", ".", "body", ".", "decode", "(", ")", ",", "raw", "=", "response", ")" ]
Transport the message to the server and return the response. Args: request: The JSON-RPC request string. response_expected: Whether the request expects a response. Returns: A Response object.
[ "Transport", "the", "message", "to", "the", "server", "and", "return", "the", "response", "." ]
python
train
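A hedged usage sketch for the Tornado client; the endpoint URL and the "ping" method are placeholders for whatever the target JSON-RPC server actually exposes:

from tornado.ioloop import IOLoop
from jsonrpcclient.clients.tornado_client import TornadoClient

async def main():
    client = TornadoClient("http://localhost:5000/")  # placeholder endpoint
    response = await client.request("ping")           # placeholder method
    print(response.data.result)

IOLoop.current().run_sync(main)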
edx/edx-django-release-util
release_util/management/commands/__init__.py
https://github.com/edx/edx-django-release-util/blob/de0fde41d6a19885ab7dc309472b94fd0fccbc1d/release_util/management/commands/__init__.py#L310-L324
def apply_all(self): """ Applies all Django model migrations at once, recording the result. """ if self.__closed: raise MigrationSessionError("Can't apply applied session") if self._to_apply: raise MigrationSessionError("Can't apply_all with migrations added to session") try: self.__apply(run_all=True) except: raise finally: self.__closed = True
[ "def", "apply_all", "(", "self", ")", ":", "if", "self", ".", "__closed", ":", "raise", "MigrationSessionError", "(", "\"Can't apply applied session\"", ")", "if", "self", ".", "_to_apply", ":", "raise", "MigrationSessionError", "(", "\"Can't apply_all with migrations added to session\"", ")", "try", ":", "self", ".", "__apply", "(", "run_all", "=", "True", ")", "except", ":", "raise", "finally", ":", "self", ".", "__closed", "=", "True" ]
Applies all Django model migrations at once, recording the result.
[ "Applies", "all", "Django", "model", "migrations", "at", "once", "recording", "the", "result", "." ]
python
train
deepmipt/DeepPavlov
deeppavlov/models/preprocessors/capitalization.py
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/preprocessors/capitalization.py#L75-L108
def process_word(word: str, to_lower: bool = False, append_case: Optional[str] = None) -> Tuple[str]: """Converts word to a tuple of symbols, optionally converts it to lowercase and adds capitalization label. Args: word: input word to_lower: whether to lowercase append_case: whether to add case mark ('<FIRST_UPPER>' for first capital and '<ALL_UPPER>' for all caps) Returns: a preprocessed word """ if all(x.isupper() for x in word) and len(word) > 1: uppercase = "<ALL_UPPER>" elif word[0].isupper(): uppercase = "<FIRST_UPPER>" else: uppercase = None if to_lower: word = word.lower() if word.isdigit(): answer = ["<DIGIT>"] elif word.startswith("http://") or word.startswith("www."): answer = ["<HTTP>"] else: answer = list(word) if to_lower and uppercase is not None: if append_case == "first": answer = [uppercase] + answer elif append_case == "last": answer = answer + [uppercase] return tuple(answer)
[ "def", "process_word", "(", "word", ":", "str", ",", "to_lower", ":", "bool", "=", "False", ",", "append_case", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "Tuple", "[", "str", "]", ":", "if", "all", "(", "x", ".", "isupper", "(", ")", "for", "x", "in", "word", ")", "and", "len", "(", "word", ")", ">", "1", ":", "uppercase", "=", "\"<ALL_UPPER>\"", "elif", "word", "[", "0", "]", ".", "isupper", "(", ")", ":", "uppercase", "=", "\"<FIRST_UPPER>\"", "else", ":", "uppercase", "=", "None", "if", "to_lower", ":", "word", "=", "word", ".", "lower", "(", ")", "if", "word", ".", "isdigit", "(", ")", ":", "answer", "=", "[", "\"<DIGIT>\"", "]", "elif", "word", ".", "startswith", "(", "\"http://\"", ")", "or", "word", ".", "startswith", "(", "\"www.\"", ")", ":", "answer", "=", "[", "\"<HTTP>\"", "]", "else", ":", "answer", "=", "list", "(", "word", ")", "if", "to_lower", "and", "uppercase", "is", "not", "None", ":", "if", "append_case", "==", "\"first\"", ":", "answer", "=", "[", "uppercase", "]", "+", "answer", "elif", "append_case", "==", "\"last\"", ":", "answer", "=", "answer", "+", "[", "uppercase", "]", "return", "tuple", "(", "answer", ")" ]
Converts word to a tuple of symbols, optionally converts it to lowercase and adds capitalization label. Args: word: input word to_lower: whether to lowercase append_case: whether to add case mark ('<FIRST_UPPER>' for first capital and '<ALL_UPPER>' for all caps) Returns: a preprocessed word
[ "Converts", "word", "to", "a", "tuple", "of", "symbols", "optionally", "converts", "it", "to", "lowercase", "and", "adds", "capitalization", "label", "." ]
python
test
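Expected behaviour of process_word on a few inputs, assuming the function above is in scope:

print(process_word("Moscow", to_lower=True, append_case="first"))
# ('<FIRST_UPPER>', 'm', 'o', 's', 'c', 'o', 'w')
print(process_word("NASA", to_lower=True, append_case="last"))
# ('n', 'a', 's', 'a', '<ALL_UPPER>')
print(process_word("2019"))
# ('<DIGIT>',)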
koenedaele/skosprovider
skosprovider/providers.py
https://github.com/koenedaele/skosprovider/blob/7304a37953978ca8227febc2d3cc2b2be178f215/skosprovider/providers.py#L488-L503
def _get_find_dict(self, c, **kwargs): ''' Return a dict that can be used in the return list of the :meth:`find` method. :param c: A :class:`skosprovider.skos.Concept` or :class:`skosprovider.skos.Collection`. :rtype: dict ''' language = self._get_language(**kwargs) return { 'id': c.id, 'uri': c.uri, 'type': c.type, 'label': None if c.label() is None else c.label(language).label }
[ "def", "_get_find_dict", "(", "self", ",", "c", ",", "*", "*", "kwargs", ")", ":", "language", "=", "self", ".", "_get_language", "(", "*", "*", "kwargs", ")", "return", "{", "'id'", ":", "c", ".", "id", ",", "'uri'", ":", "c", ".", "uri", ",", "'type'", ":", "c", ".", "type", ",", "'label'", ":", "None", "if", "c", ".", "label", "(", ")", "is", "None", "else", "c", ".", "label", "(", "language", ")", ".", "label", "}" ]
Return a dict that can be used in the return list of the :meth:`find` method. :param c: A :class:`skosprovider.skos.Concept` or :class:`skosprovider.skos.Collection`. :rtype: dict
[ "Return", "a", "dict", "that", "can", "be", "used", "in", "the", "return", "list", "of", "the", ":", "meth", ":", "find", "method", "." ]
python
valid
nirum/tableprint
tableprint/printer.py
https://github.com/nirum/tableprint/blob/50ab4b96706fce8ee035a4d48cb456e3271eab3d/tableprint/printer.py#L276-L284
def dataframe(df, **kwargs): """Print table with data from the given pandas DataFrame Parameters ---------- df : DataFrame A pandas DataFrame with the table to print """ table(df.values, list(df.columns), **kwargs)
[ "def", "dataframe", "(", "df", ",", "*", "*", "kwargs", ")", ":", "table", "(", "df", ".", "values", ",", "list", "(", "df", ".", "columns", ")", ",", "*", "*", "kwargs", ")" ]
Print table with data from the given pandas DataFrame Parameters ---------- df : DataFrame A pandas DataFrame with the table to print
[ "Print", "table", "with", "data", "from", "the", "given", "pandas", "DataFrame" ]
python
train
joerick/pyinstrument
pyinstrument/vendor/decorator.py
https://github.com/joerick/pyinstrument/blob/cc4f3f6fc1b493d7cd058ecf41ad012e0030a512/pyinstrument/vendor/decorator.py#L331-L433
def dispatch_on(*dispatch_args): """ Factory of decorators turning a function into a generic function dispatching on the given arguments. """ assert dispatch_args, 'No dispatch args passed' dispatch_str = '(%s,)' % ', '.join(dispatch_args) def check(arguments, wrong=operator.ne, msg=''): """Make sure one passes the expected number of arguments""" if wrong(len(arguments), len(dispatch_args)): raise TypeError('Expected %d arguments, got %d%s' % (len(dispatch_args), len(arguments), msg)) def gen_func_dec(func): """Decorator turning a function into a generic function""" # first check the dispatch arguments argset = set(getfullargspec(func).args) if not set(dispatch_args) <= argset: raise NameError('Unknown dispatch arguments %s' % dispatch_str) typemap = {} def vancestors(*types): """ Get a list of sets of virtual ancestors for the given types """ check(types) ras = [[] for _ in range(len(dispatch_args))] for types_ in typemap: for t, type_, ra in zip(types, types_, ras): if issubclass(t, type_) and type_ not in t.mro(): append(type_, ra) return [set(ra) for ra in ras] def ancestors(*types): """ Get a list of virtual MROs, one for each type """ check(types) lists = [] for t, vas in zip(types, vancestors(*types)): n_vas = len(vas) if n_vas > 1: raise RuntimeError( 'Ambiguous dispatch for %s: %s' % (t, vas)) elif n_vas == 1: va, = vas mro = type('t', (t, va), {}).mro()[1:] else: mro = t.mro() lists.append(mro[:-1]) # discard t and object return lists def register(*types): """ Decorator to register an implementation for the given types """ check(types) def dec(f): check(getfullargspec(f).args, operator.lt, ' in ' + f.__name__) typemap[types] = f return f return dec def dispatch_info(*types): """ An utility to introspect the dispatch algorithm """ check(types) lst = [] for anc in itertools.product(*ancestors(*types)): lst.append(tuple(a.__name__ for a in anc)) return lst def _dispatch(dispatch_args, *args, **kw): types = tuple(type(arg) for arg in dispatch_args) try: # fast path f = typemap[types] except KeyError: pass else: return f(*args, **kw) combinations = itertools.product(*ancestors(*types)) next(combinations) # the first one has been already tried for types_ in combinations: f = typemap.get(types_) if f is not None: return f(*args, **kw) # else call the default implementation return func(*args, **kw) return FunctionMaker.create( func, 'return _f_(%s, %%(shortsignature)s)' % dispatch_str, dict(_f_=_dispatch), register=register, default=func, typemap=typemap, vancestors=vancestors, ancestors=ancestors, dispatch_info=dispatch_info, __wrapped__=func) gen_func_dec.__name__ = 'dispatch_on' + dispatch_str return gen_func_dec
[ "def", "dispatch_on", "(", "*", "dispatch_args", ")", ":", "assert", "dispatch_args", ",", "'No dispatch args passed'", "dispatch_str", "=", "'(%s,)'", "%", "', '", ".", "join", "(", "dispatch_args", ")", "def", "check", "(", "arguments", ",", "wrong", "=", "operator", ".", "ne", ",", "msg", "=", "''", ")", ":", "\"\"\"Make sure one passes the expected number of arguments\"\"\"", "if", "wrong", "(", "len", "(", "arguments", ")", ",", "len", "(", "dispatch_args", ")", ")", ":", "raise", "TypeError", "(", "'Expected %d arguments, got %d%s'", "%", "(", "len", "(", "dispatch_args", ")", ",", "len", "(", "arguments", ")", ",", "msg", ")", ")", "def", "gen_func_dec", "(", "func", ")", ":", "\"\"\"Decorator turning a function into a generic function\"\"\"", "# first check the dispatch arguments", "argset", "=", "set", "(", "getfullargspec", "(", "func", ")", ".", "args", ")", "if", "not", "set", "(", "dispatch_args", ")", "<=", "argset", ":", "raise", "NameError", "(", "'Unknown dispatch arguments %s'", "%", "dispatch_str", ")", "typemap", "=", "{", "}", "def", "vancestors", "(", "*", "types", ")", ":", "\"\"\"\n Get a list of sets of virtual ancestors for the given types\n \"\"\"", "check", "(", "types", ")", "ras", "=", "[", "[", "]", "for", "_", "in", "range", "(", "len", "(", "dispatch_args", ")", ")", "]", "for", "types_", "in", "typemap", ":", "for", "t", ",", "type_", ",", "ra", "in", "zip", "(", "types", ",", "types_", ",", "ras", ")", ":", "if", "issubclass", "(", "t", ",", "type_", ")", "and", "type_", "not", "in", "t", ".", "mro", "(", ")", ":", "append", "(", "type_", ",", "ra", ")", "return", "[", "set", "(", "ra", ")", "for", "ra", "in", "ras", "]", "def", "ancestors", "(", "*", "types", ")", ":", "\"\"\"\n Get a list of virtual MROs, one for each type\n \"\"\"", "check", "(", "types", ")", "lists", "=", "[", "]", "for", "t", ",", "vas", "in", "zip", "(", "types", ",", "vancestors", "(", "*", "types", ")", ")", ":", "n_vas", "=", "len", "(", "vas", ")", "if", "n_vas", ">", "1", ":", "raise", "RuntimeError", "(", "'Ambiguous dispatch for %s: %s'", "%", "(", "t", ",", "vas", ")", ")", "elif", "n_vas", "==", "1", ":", "va", ",", "=", "vas", "mro", "=", "type", "(", "'t'", ",", "(", "t", ",", "va", ")", ",", "{", "}", ")", ".", "mro", "(", ")", "[", "1", ":", "]", "else", ":", "mro", "=", "t", ".", "mro", "(", ")", "lists", ".", "append", "(", "mro", "[", ":", "-", "1", "]", ")", "# discard t and object", "return", "lists", "def", "register", "(", "*", "types", ")", ":", "\"\"\"\n Decorator to register an implementation for the given types\n \"\"\"", "check", "(", "types", ")", "def", "dec", "(", "f", ")", ":", "check", "(", "getfullargspec", "(", "f", ")", ".", "args", ",", "operator", ".", "lt", ",", "' in '", "+", "f", ".", "__name__", ")", "typemap", "[", "types", "]", "=", "f", "return", "f", "return", "dec", "def", "dispatch_info", "(", "*", "types", ")", ":", "\"\"\"\n An utility to introspect the dispatch algorithm\n \"\"\"", "check", "(", "types", ")", "lst", "=", "[", "]", "for", "anc", "in", "itertools", ".", "product", "(", "*", "ancestors", "(", "*", "types", ")", ")", ":", "lst", ".", "append", "(", "tuple", "(", "a", ".", "__name__", "for", "a", "in", "anc", ")", ")", "return", "lst", "def", "_dispatch", "(", "dispatch_args", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "types", "=", "tuple", "(", "type", "(", "arg", ")", "for", "arg", "in", "dispatch_args", ")", "try", ":", "# fast path", "f", "=", "typemap", "[", "types", "]", "except", "KeyError", ":", "pass", 
"else", ":", "return", "f", "(", "*", "args", ",", "*", "*", "kw", ")", "combinations", "=", "itertools", ".", "product", "(", "*", "ancestors", "(", "*", "types", ")", ")", "next", "(", "combinations", ")", "# the first one has been already tried", "for", "types_", "in", "combinations", ":", "f", "=", "typemap", ".", "get", "(", "types_", ")", "if", "f", "is", "not", "None", ":", "return", "f", "(", "*", "args", ",", "*", "*", "kw", ")", "# else call the default implementation", "return", "func", "(", "*", "args", ",", "*", "*", "kw", ")", "return", "FunctionMaker", ".", "create", "(", "func", ",", "'return _f_(%s, %%(shortsignature)s)'", "%", "dispatch_str", ",", "dict", "(", "_f_", "=", "_dispatch", ")", ",", "register", "=", "register", ",", "default", "=", "func", ",", "typemap", "=", "typemap", ",", "vancestors", "=", "vancestors", ",", "ancestors", "=", "ancestors", ",", "dispatch_info", "=", "dispatch_info", ",", "__wrapped__", "=", "func", ")", "gen_func_dec", ".", "__name__", "=", "'dispatch_on'", "+", "dispatch_str", "return", "gen_func_dec" ]
Factory of decorators turning a function into a generic function dispatching on the given arguments.
[ "Factory", "of", "decorators", "turning", "a", "function", "into", "a", "generic", "function", "dispatching", "on", "the", "given", "arguments", "." ]
python
train
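A sketch of the generic-function style dispatch_on enables, assuming it is imported from the decorator package (this record is pyinstrument's vendored copy); the area/Square example is illustrative:

from decorator import dispatch_on

@dispatch_on('obj')
def area(obj):
    raise NotImplementedError(type(obj))

class Square:
    side = 2

@area.register(Square)
def area_square(obj):
    return obj.side ** 2

print(area(Square()))  # 4, dispatched on the runtime type of obj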
tuomas2/automate
src/automate/extensions/rpc/rpc.py
https://github.com/tuomas2/automate/blob/d8a8cd03cd0da047e033a2d305f3f260f8c4e017/src/automate/extensions/rpc/rpc.py#L44-L50
def set_object_status(self, statusdict): """ Set statuses from a dictionary of format ``{name: status}`` """ for name, value in statusdict.items(): getattr(self.system, name).status = value return True
[ "def", "set_object_status", "(", "self", ",", "statusdict", ")", ":", "for", "name", ",", "value", "in", "statusdict", ".", "items", "(", ")", ":", "getattr", "(", "self", ".", "system", ",", "name", ")", ".", "status", "=", "value", "return", "True" ]
Set statuses from a dictionary of format ``{name: status}``
[ "Set", "statuses", "from", "a", "dictionary", "of", "format", "{", "name", ":", "status", "}" ]
python
train
eight04/pyAPNG
apng/__init__.py
https://github.com/eight04/pyAPNG/blob/b4d2927f7892a1de967b5cf57d434ed65f6a017e/apng/__init__.py#L321-L334
def append(self, png, **options): """Append one frame. :arg PNG png: Append a :class:`PNG` as a frame. :arg dict options: The options for :class:`FrameControl`. """ if not isinstance(png, PNG): raise TypeError("Expect an instance of `PNG` but got `{}`".format(png)) control = FrameControl(**options) if control.width is None: control.width = png.width if control.height is None: control.height = png.height self.frames.append((png, control))
[ "def", "append", "(", "self", ",", "png", ",", "*", "*", "options", ")", ":", "if", "not", "isinstance", "(", "png", ",", "PNG", ")", ":", "raise", "TypeError", "(", "\"Expect an instance of `PNG` but got `{}`\"", ".", "format", "(", "png", ")", ")", "control", "=", "FrameControl", "(", "*", "*", "options", ")", "if", "control", ".", "width", "is", "None", ":", "control", ".", "width", "=", "png", ".", "width", "if", "control", ".", "height", "is", "None", ":", "control", ".", "height", "=", "png", ".", "height", "self", ".", "frames", ".", "append", "(", "(", "png", ",", "control", ")", ")" ]
Append one frame. :arg PNG png: Append a :class:`PNG` as a frame. :arg dict options: The options for :class:`FrameControl`.
[ "Append", "one", "frame", ".", ":", "arg", "PNG", "png", ":", "Append", "a", ":", "class", ":", "PNG", "as", "a", "frame", ".", ":", "arg", "dict", "options", ":", "The", "options", "for", ":", "class", ":", "FrameControl", "." ]
python
train
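A hedged usage sketch for append: the frame file names are placeholders, and delay is forwarded to FrameControl through **options.

from apng import APNG, PNG

im = APNG()
im.append(PNG.open("frame1.png"), delay=100)  # placeholder frames
im.append(PNG.open("frame2.png"), delay=100)
im.save("animated.png")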
raiden-network/raiden
raiden/raiden_service.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/raiden_service.py#L1004-L1009
def sign(self, message: Message): """ Sign message inplace. """ if not isinstance(message, SignedMessage): raise ValueError('{} is not signable.'.format(repr(message))) message.sign(self.signer)
[ "def", "sign", "(", "self", ",", "message", ":", "Message", ")", ":", "if", "not", "isinstance", "(", "message", ",", "SignedMessage", ")", ":", "raise", "ValueError", "(", "'{} is not signable.'", ".", "format", "(", "repr", "(", "message", ")", ")", ")", "message", ".", "sign", "(", "self", ".", "signer", ")" ]
Sign message inplace.
[ "Sign", "message", "inplace", "." ]
python
train
ecell/ecell4
ecell4/extra/azure_batch.py
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/extra/azure_batch.py#L45-L60
def print_batch_exception(batch_exception): """Prints the contents of the specified Batch exception. :param batch_exception: """ _log.error('-------------------------------------------') _log.error('Exception encountered:') if batch_exception.error and \ batch_exception.error.message and \ batch_exception.error.message.value: _log.error(batch_exception.error.message.value) if batch_exception.error.values: _log.error('') for mesg in batch_exception.error.values: _log.error('{}:\t{}'.format(mesg.key, mesg.value)) _log.error('-------------------------------------------')
[ "def", "print_batch_exception", "(", "batch_exception", ")", ":", "_log", ".", "error", "(", "'-------------------------------------------'", ")", "_log", ".", "error", "(", "'Exception encountered:'", ")", "if", "batch_exception", ".", "error", "and", "batch_exception", ".", "error", ".", "message", "and", "batch_exception", ".", "error", ".", "message", ".", "value", ":", "_log", ".", "error", "(", "batch_exception", ".", "error", ".", "message", ".", "value", ")", "if", "batch_exception", ".", "error", ".", "values", ":", "_log", ".", "error", "(", "''", ")", "for", "mesg", "in", "batch_exception", ".", "error", ".", "values", ":", "_log", ".", "error", "(", "'{}:\\t{}'", ".", "format", "(", "mesg", ".", "key", ",", "mesg", ".", "value", ")", ")", "_log", ".", "error", "(", "'-------------------------------------------'", ")" ]
Prints the contents of the specified Batch exception. :param batch_exception:
[ "Prints", "the", "contents", "of", "the", "specified", "Batch", "exception", "." ]
python
train
Grunny/zap-cli
zapcli/commands/context.py
https://github.com/Grunny/zap-cli/blob/d58d4850ecfc5467badfac5e5bcc841d064bd419/zapcli/commands/context.py#L35-L39
def context_new(zap_helper, name): """Create a new context.""" console.info('Creating context with name: {0}'.format(name)) res = zap_helper.zap.context.new_context(contextname=name) console.info('Context "{0}" created with ID: {1}'.format(name, res))
[ "def", "context_new", "(", "zap_helper", ",", "name", ")", ":", "console", ".", "info", "(", "'Creating context with name: {0}'", ".", "format", "(", "name", ")", ")", "res", "=", "zap_helper", ".", "zap", ".", "context", ".", "new_context", "(", "contextname", "=", "name", ")", "console", ".", "info", "(", "'Context \"{0}\" created with ID: {1}'", ".", "format", "(", "name", ",", "res", ")", ")" ]
Create a new context.
[ "Create", "a", "new", "context", "." ]
python
train
rkhleics/wagtailmenus
wagtailmenus/models/pages.py
https://github.com/rkhleics/wagtailmenus/blob/a41f240bed0d362e0d4dd4ef04a230f2b1827a93/wagtailmenus/models/pages.py#L67-L80
def has_submenu_items(self, current_page, allow_repeating_parents, original_menu_tag, menu_instance=None, request=None): """ When rendering pages in a menu template a `has_children_in_menu` attribute is added to each page, letting template developers know whether or not the item has a submenu that must be rendered. By default, we return a boolean indicating whether the page has suitable child pages to include in such a menu. But, if you are overriding the `modify_submenu_items` method to programmatically add items that aren't child pages, you'll likely need to alter this method too, so the template knows there are sub items to be rendered. """ return menu_instance.page_has_children(self)
[ "def", "has_submenu_items", "(", "self", ",", "current_page", ",", "allow_repeating_parents", ",", "original_menu_tag", ",", "menu_instance", "=", "None", ",", "request", "=", "None", ")", ":", "return", "menu_instance", ".", "page_has_children", "(", "self", ")" ]
When rendering pages in a menu template a `has_children_in_menu` attribute is added to each page, letting template developers know whether or not the item has a submenu that must be rendered. By default, we return a boolean indicating whether the page has suitable child pages to include in such a menu. But, if you are overriding the `modify_submenu_items` method to programmatically add items that aren't child pages, you'll likely need to alter this method too, so the template knows there are sub items to be rendered.
[ "When", "rendering", "pages", "in", "a", "menu", "template", "a", "has_children_in_menu", "attribute", "is", "added", "to", "each", "page", "letting", "template", "developers", "know", "whether", "or", "not", "the", "item", "has", "a", "submenu", "that", "must", "be", "rendered", "." ]
python
train
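A rough sketch of the override the docstring describes, assuming wagtailmenus' MenuPage; the real hooks take several more named arguments, elided here with *args/**kwargs, and the appended item dict is illustrative:

from wagtailmenus.models import MenuPage

class SectionPage(MenuPage):
    def modify_submenu_items(self, menu_items, *args, **kwargs):
        menu_items = super().modify_submenu_items(menu_items, *args, **kwargs)
        menu_items.append({'href': '/extra/', 'text': 'Extra item'})  # not a child page
        return menu_items

    def has_submenu_items(self, *args, **kwargs):
        return True  # an item is always added above, so always render a submenu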
3ll3d00d/vibe
backend/src/analyser/common/targetcontroller.py
https://github.com/3ll3d00d/vibe/blob/124b029f13ac746723e92cb47e9cb56edd2e54b5/backend/src/analyser/common/targetcontroller.py#L64-L75
def delete(self, name): """ Deletes the named entry in the cache. :param name: the name. :return: true if it is deleted. """ if name in self._cache: del self._cache[name] self.writeCache() # TODO clean files return True return False
[ "def", "delete", "(", "self", ",", "name", ")", ":", "if", "name", "in", "self", ".", "_cache", ":", "del", "self", ".", "_cache", "[", "name", "]", "self", ".", "writeCache", "(", ")", "# TODO clean files", "return", "True", "return", "False" ]
Deletes the named entry in the cache. :param name: the name. :return: true if it is deleted.
[ "Deletes", "the", "named", "entry", "in", "the", "cache", ".", ":", "param", "name", ":", "the", "name", ".", ":", "return", ":", "true", "if", "it", "is", "deleted", "." ]
python
train
titusjan/argos
argos/config/abstractcti.py
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/config/abstractcti.py#L400-L412
def _nodeGetNonDefaultsDict(self): """ Retrieves this node's values as a dictionary to be used for persistence. A dictionary with the data value will be returned if the data is not equal to the defaultData, the node is enabled and the node is editable. Otherwise an empty dictionary is returned. Non-recursive auxiliary function for getNonDefaultsDict """ dct = {} isEditable = bool(int(self.valueColumnItemFlags) and Qt.ItemIsEditable) if (self.data != self.defaultData and self.enabled and isEditable): dct['data'] = self.data return dct
[ "def", "_nodeGetNonDefaultsDict", "(", "self", ")", ":", "dct", "=", "{", "}", "isEditable", "=", "bool", "(", "int", "(", "self", ".", "valueColumnItemFlags", ")", "and", "Qt", ".", "ItemIsEditable", ")", "if", "(", "self", ".", "data", "!=", "self", ".", "defaultData", "and", "self", ".", "enabled", "and", "isEditable", ")", ":", "dct", "[", "'data'", "]", "=", "self", ".", "data", "return", "dct" ]
Retrieves this node's values as a dictionary to be used for persistence. A dictionary with the data value will be returned if the data is not equal to the defaultData, the node is enabled and the node is editable. Otherwise an empty dictionary is returned. Non-recursive auxiliary function for getNonDefaultsDict
[ "Retrieves", "this", "nodes", "values", "as", "a", "dictionary", "to", "be", "used", "for", "persistence", ".", "A", "dictionary", "with", "the", "data", "value", "will", "be", "returned", "if", "the", "data", "is", "not", "equal", "to", "the", "defaultData", "the", "node", "is", "enabled", "and", "the", "node", "is", "editable", ".", "Otherwise", "and", "empty", "dictionary", "is", "returned", "." ]
python
train
annoviko/pyclustering
pyclustering/cluster/rock.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/rock.py#L97-L123
def process(self): """! @brief Performs cluster analysis in line with rules of ROCK algorithm. @remark Results of clustering can be obtained using corresponding get methods. @see get_clusters() """ # TODO: (Not related to specification, just idea) First iteration should be investigated. Euclidean distance should be used for clustering between two # points and rock algorithm between clusters because we consider non-categorical samples. But it is required more investigations. if (self.__ccore is True): self.__clusters = wrapper.rock(self.__pointer_data, self.__eps, self.__number_clusters, self.__threshold); else: self.__clusters = [[index] for index in range(len(self.__pointer_data))]; while (len(self.__clusters) > self.__number_clusters): indexes = self.__find_pair_clusters(self.__clusters); if (indexes != [-1, -1]): self.__clusters[indexes[0]] += self.__clusters[indexes[1]]; self.__clusters.pop(indexes[1]); # remove merged cluster. else: break;
[ "def", "process", "(", "self", ")", ":", "# TODO: (Not related to specification, just idea) First iteration should be investigated. Euclidean distance should be used for clustering between two \r", "# points and rock algorithm between clusters because we consider non-categorical samples. But it is required more investigations.\r", "if", "(", "self", ".", "__ccore", "is", "True", ")", ":", "self", ".", "__clusters", "=", "wrapper", ".", "rock", "(", "self", ".", "__pointer_data", ",", "self", ".", "__eps", ",", "self", ".", "__number_clusters", ",", "self", ".", "__threshold", ")", "else", ":", "self", ".", "__clusters", "=", "[", "[", "index", "]", "for", "index", "in", "range", "(", "len", "(", "self", ".", "__pointer_data", ")", ")", "]", "while", "(", "len", "(", "self", ".", "__clusters", ")", ">", "self", ".", "__number_clusters", ")", ":", "indexes", "=", "self", ".", "__find_pair_clusters", "(", "self", ".", "__clusters", ")", "if", "(", "indexes", "!=", "[", "-", "1", ",", "-", "1", "]", ")", ":", "self", ".", "__clusters", "[", "indexes", "[", "0", "]", "]", "+=", "self", ".", "__clusters", "[", "indexes", "[", "1", "]", "]", "self", ".", "__clusters", ".", "pop", "(", "indexes", "[", "1", "]", ")", "# remove merged cluster.\r", "else", ":", "break" ]
! @brief Performs cluster analysis in line with rules of ROCK algorithm. @remark Results of clustering can be obtained using corresponding get methods. @see get_clusters()
[ "!" ]
python
valid
the01/python-paps
examples/measure/server.py
https://github.com/the01/python-paps/blob/2dde5a71913e4c7b22901cf05c6ecedd890919c4/examples/measure/server.py#L136-L155
def create(host, port): """ Prepare server to execute :return: Modules to execute, cmd line function :rtype: list[WrapperServer], callable | None """ wrapper = WrapperServer({ 'server': None }) d = { 'listen_port': port, 'changer': wrapper } if host: d['listen_bind_ip'] = host ses = MeasureServer(d) wrapper.server = ses return [wrapper], cmd_line
[ "def", "create", "(", "host", ",", "port", ")", ":", "wrapper", "=", "WrapperServer", "(", "{", "'server'", ":", "None", "}", ")", "d", "=", "{", "'listen_port'", ":", "port", ",", "'changer'", ":", "wrapper", "}", "if", "host", ":", "d", "[", "'listen_bind_ip'", "]", "=", "host", "ses", "=", "MeasureServer", "(", "d", ")", "wrapper", ".", "server", "=", "ses", "return", "[", "wrapper", "]", ",", "cmd_line" ]
Prepare server to execute :return: Modules to execute, cmd line function :rtype: list[WrapperServer], callable | None
[ "Prepare", "server", "to", "execute" ]
python
train
senaite/senaite.core
bika/lims/browser/analyses/view.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/analyses/view.py#L1176-L1190
def _folder_item_remarks(self, analysis_brain, item): """Renders the Remarks field for the passed in analysis If the edition of the analysis is permitted, adds the field into the list of editable fields. :param analysis_brain: Brain that represents an analysis :param item: analysis' dictionary counterpart that represents a row """ if self.analysis_remarks_enabled(): item["Remarks"] = analysis_brain.getRemarks if self.is_analysis_edition_allowed(analysis_brain): item["allow_edit"].extend(["Remarks"])
[ "def", "_folder_item_remarks", "(", "self", ",", "analysis_brain", ",", "item", ")", ":", "if", "self", ".", "analysis_remarks_enabled", "(", ")", ":", "item", "[", "\"Remarks\"", "]", "=", "analysis_brain", ".", "getRemarks", "if", "self", ".", "is_analysis_edition_allowed", "(", "analysis_brain", ")", ":", "item", "[", "\"allow_edit\"", "]", ".", "extend", "(", "[", "\"Remarks\"", "]", ")" ]
Renders the Remarks field for the passed in analysis If the edition of the analysis is permitted, adds the field into the list of editable fields. :param analysis_brain: Brain that represents an analysis :param item: analysis' dictionary counterpart that represents a row
[ "Renders", "the", "Remarks", "field", "for", "the", "passed", "in", "analysis" ]
python
train
fronzbot/blinkpy
blinkpy/api.py
https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/api.py#L80-L88
def request_system_disarm(blink, network): """ Disarm system. :param blink: Blink instance. :param network: Sync module network id. """ url = "{}/network/{}/disarm".format(blink.urls.base_url, network) return http_post(blink, url)
[ "def", "request_system_disarm", "(", "blink", ",", "network", ")", ":", "url", "=", "\"{}/network/{}/disarm\"", ".", "format", "(", "blink", ".", "urls", ".", "base_url", ",", "network", ")", "return", "http_post", "(", "blink", ",", "url", ")" ]
Disarm system. :param blink: Blink instance. :param network: Sync module network id.
[ "Disarm", "system", "." ]
python
train
log2timeline/dfdatetime
dfdatetime/time_elements.py
https://github.com/log2timeline/dfdatetime/blob/141ca4ef1eff3d354b5deaac3d81cb08506f98d6/dfdatetime/time_elements.py#L528-L545
def CopyToDateTimeString(self): """Copies the time elements to a date and time string. Returns: str: date and time value formatted as: "YYYY-MM-DD hh:mm:ss" or "YYYY-MM-DD hh:mm:ss.######" or None if time elements are missing. Raises: ValueError: if the precision value is unsupported. """ if self._number_of_seconds is None or self.fraction_of_second is None: return None precision_helper = precisions.PrecisionHelperFactory.CreatePrecisionHelper( self._precision) return precision_helper.CopyToDateTimeString( self._time_elements_tuple, self.fraction_of_second)
[ "def", "CopyToDateTimeString", "(", "self", ")", ":", "if", "self", ".", "_number_of_seconds", "is", "None", "or", "self", ".", "fraction_of_second", "is", "None", ":", "return", "None", "precision_helper", "=", "precisions", ".", "PrecisionHelperFactory", ".", "CreatePrecisionHelper", "(", "self", ".", "_precision", ")", "return", "precision_helper", ".", "CopyToDateTimeString", "(", "self", ".", "_time_elements_tuple", ",", "self", ".", "fraction_of_second", ")" ]
Copies the time elements to a date and time string. Returns: str: date and time value formatted as: "YYYY-MM-DD hh:mm:ss" or "YYYY-MM-DD hh:mm:ss.######" or None if time elements are missing. Raises: ValueError: if the precision value is unsupported.
[ "Copies", "the", "time", "elements", "to", "a", "date", "and", "time", "string", "." ]
python
train
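A hedged round-trip sketch for TimeElements; it assumes the companion CopyFromDateTimeString setter that dfdatetime classes provide, and uses an arbitrary example timestamp:

from dfdatetime import time_elements

date_time = time_elements.TimeElements()
date_time.CopyFromDateTimeString('2020-01-02 03:04:05')
print(date_time.CopyToDateTimeString())  # 2020-01-02 03:04:05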
boundary/pulse-api-cli
boundary/hostgroup_get.py
https://github.com/boundary/pulse-api-cli/blob/b01ca65b442eed19faac309c9d62bbc3cb2c098f/boundary/hostgroup_get.py#L36-L44
def get_arguments(self): """ Extracts the specific arguments of this CLI """ ApiCli.get_arguments(self) if self.args.hostGroupId is not None: self.hostGroupId = self.args.hostGroupId self.path = "v1/hostgroup/{0}".format(str(self.hostGroupId))
[ "def", "get_arguments", "(", "self", ")", ":", "ApiCli", ".", "get_arguments", "(", "self", ")", "if", "self", ".", "args", ".", "hostGroupId", "is", "not", "None", ":", "self", ".", "hostGroupId", "=", "self", ".", "args", ".", "hostGroupId", "self", ".", "path", "=", "\"v1/hostgroup/{0}\"", ".", "format", "(", "str", "(", "self", ".", "hostGroupId", ")", ")" ]
Extracts the specific arguments of this CLI
[ "Extracts", "the", "specific", "arguments", "of", "this", "CLI" ]
python
test
GNS3/gns3-server
gns3server/web/documentation.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/web/documentation.py#L121-L127
def _include_query_example(self, f, method, path, api_version, server_type): """If a sample session is available we include it in documentation""" m = method["method"].lower() query_path = "{}_{}_{}.txt".format(server_type, m, self._file_path(path)) if os.path.isfile(os.path.join(self._directory, "api", "examples", query_path)): f.write("Sample session\n***************\n") f.write("\n\n.. literalinclude:: ../../../examples/{}\n\n".format(query_path))
[ "def", "_include_query_example", "(", "self", ",", "f", ",", "method", ",", "path", ",", "api_version", ",", "server_type", ")", ":", "m", "=", "method", "[", "\"method\"", "]", ".", "lower", "(", ")", "query_path", "=", "\"{}_{}_{}.txt\"", ".", "format", "(", "server_type", ",", "m", ",", "self", ".", "_file_path", "(", "path", ")", ")", "if", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "self", ".", "_directory", ",", "\"api\"", ",", "\"examples\"", ",", "query_path", ")", ")", ":", "f", ".", "write", "(", "\"Sample session\\n***************\\n\"", ")", "f", ".", "write", "(", "\"\\n\\n.. literalinclude:: ../../../examples/{}\\n\\n\"", ".", "format", "(", "query_path", ")", ")" ]
If a sample session is available we include it in documentation
[ "If", "a", "sample", "session", "is", "available", "we", "include", "it", "in", "documentation" ]
python
train
lago-project/lago
lago/log_utils.py
https://github.com/lago-project/lago/blob/5b8970f7687e063e4619066d5b8093ca997678c9/lago/log_utils.py#L436-L476
def handle_error(self): """ Handles an error log record that should be shown Returns: None """ if not self.tasks: return # All the parents inherit the failure self.mark_parent_tasks_as_failed( self.cur_task, flush_logs=True, ) # Show the start headers for all the parent tasks if they were not # shown by the depth level limit for index, task in enumerate(self.tasks.values()): if self.should_show_by_depth(index + 1): continue start_task_header = logging.LogRecord( '', logging.INFO, '', 0, '', [], None ) start_task_header.msg = ColorFormatter.colored( 'default', START_TASK_MSG, ) start_task_header.task = task.name self.pretty_emit( start_task_header, is_header=True, task_level=index + 1, ) # Show now all the cached logs for the current task for old_record in self.tasks[self.cur_task]: self.pretty_emit(old_record) self.tasks[self.cur_task].clear()
[ "def", "handle_error", "(", "self", ")", ":", "if", "not", "self", ".", "tasks", ":", "return", "# All the parents inherit the failure", "self", ".", "mark_parent_tasks_as_failed", "(", "self", ".", "cur_task", ",", "flush_logs", "=", "True", ",", ")", "# Show the start headers for all the parent tasks if they were not", "# shown by the depth level limit", "for", "index", ",", "task", "in", "enumerate", "(", "self", ".", "tasks", ".", "values", "(", ")", ")", ":", "if", "self", ".", "should_show_by_depth", "(", "index", "+", "1", ")", ":", "continue", "start_task_header", "=", "logging", ".", "LogRecord", "(", "''", ",", "logging", ".", "INFO", ",", "''", ",", "0", ",", "''", ",", "[", "]", ",", "None", ")", "start_task_header", ".", "msg", "=", "ColorFormatter", ".", "colored", "(", "'default'", ",", "START_TASK_MSG", ",", ")", "start_task_header", ".", "task", "=", "task", ".", "name", "self", ".", "pretty_emit", "(", "start_task_header", ",", "is_header", "=", "True", ",", "task_level", "=", "index", "+", "1", ",", ")", "# Show now all the cached logs for the current task", "for", "old_record", "in", "self", ".", "tasks", "[", "self", ".", "cur_task", "]", ":", "self", ".", "pretty_emit", "(", "old_record", ")", "self", ".", "tasks", "[", "self", ".", "cur_task", "]", ".", "clear", "(", ")" ]
Handles an error log record that should be shown

Returns:
    None
[ "Handles", "an", "error", "log", "record", "that", "should", "be", "shown" ]
python
train
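A toy reduction of the cache-and-flush idea in handle_error above: records for a task are buffered, then emitted together when an error forces them out. The container shapes here are stand-ins, not the real handler state.

import logging
from collections import OrderedDict, deque

tasks = OrderedDict()                      # task name -> cached records
tasks['deploy'] = deque()
tasks['deploy'].append(
    logging.LogRecord('', logging.DEBUG, '', 0, 'cached detail line', [], None)
)

def flush_task(name):
    # On error, emit everything buffered for the failing task, then clear.
    for old_record in tasks[name]:
        print(old_record.getMessage())
    tasks[name].clear()

flush_task('deploy')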
dropbox/stone
stone/frontend/parser.py
https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/frontend/parser.py#L551-L560
def p_union_patch(self, p):
    """union_patch : PATCH uniont ID NL INDENT field_list examples DEDENT"""
    p[0] = AstUnionPatch(
        path=self.path,
        lineno=p[2][1],
        lexpos=p[2][2],
        name=p[3],
        fields=p[6],
        examples=p[7],
        closed=p[2][0] == 'union_closed')
[ "def", "p_union_patch", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "AstUnionPatch", "(", "path", "=", "self", ".", "path", ",", "lineno", "=", "p", "[", "2", "]", "[", "1", "]", ",", "lexpos", "=", "p", "[", "2", "]", "[", "2", "]", ",", "name", "=", "p", "[", "3", "]", ",", "fields", "=", "p", "[", "6", "]", ",", "examples", "=", "p", "[", "7", "]", ",", "closed", "=", "p", "[", "2", "]", "[", "0", "]", "==", "'union_closed'", ")" ]
union_patch : PATCH uniont ID NL INDENT field_list examples DEDENT
[ "union_patch", ":", "PATCH", "uniont", "ID", "NL", "INDENT", "field_list", "examples", "DEDENT" ]
python
train
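PLY derives the production from the rule's docstring and exposes the matched symbols through one-based indexes of p, which is why p_union_patch above reads the uniont value at p[2] and the ID at p[3]. A standalone illustration of that mapping:

symbols = "PATCH uniont ID NL INDENT field_list examples DEDENT".split()
for index, symbol in enumerate(symbols, start=1):
    print('p[{}] -> {}'.format(index, symbol))
# Per this parser's convention, p[2] is a (keyword, lineno, lexpos) tuple,
# so p[2][0] == 'union_closed' flags a closed union.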
projectatomic/atomic-reactor
atomic_reactor/util.py
https://github.com/projectatomic/atomic-reactor/blob/fd31c01b964097210bf169960d051e5f04019a80/atomic_reactor/util.py#L1116-L1148
def get_config_and_id_from_registry(image, registry, digest, insecure=False,
                                    dockercfg_path=None, version='v2'):
    """Return image config by digest

    :param image: ImageName, the remote image to inspect
    :param registry: str, URI for registry, if URI schema is not provided,
                     https:// will be used
    :param digest: str, digest of the image manifest
    :param insecure: bool, when True registry's cert is not verified
    :param dockercfg_path: str, dirname of .dockercfg location
    :param version: str, which manifest schema versions to fetch digest

    :return: dict, versions mapped to their digest
    """
    registry_session = RegistrySession(registry, insecure=insecure, dockercfg_path=dockercfg_path)

    response = query_registry(
        registry_session, image, digest=digest, version=version)
    response.raise_for_status()
    manifest_config = response.json()
    config_digest = manifest_config['config']['digest']

    config_response = query_registry(
        registry_session, image, digest=config_digest, version=version, is_blob=True)
    config_response.raise_for_status()

    blob_config = config_response.json()

    context = '/'.join([x for x in [image.namespace, image.repo] if x])
    tag = image.tag
    logger.debug('Image %s:%s has config:\n%s', context, tag, blob_config)

    return blob_config, config_digest
[ "def", "get_config_and_id_from_registry", "(", "image", ",", "registry", ",", "digest", ",", "insecure", "=", "False", ",", "dockercfg_path", "=", "None", ",", "version", "=", "'v2'", ")", ":", "registry_session", "=", "RegistrySession", "(", "registry", ",", "insecure", "=", "insecure", ",", "dockercfg_path", "=", "dockercfg_path", ")", "response", "=", "query_registry", "(", "registry_session", ",", "image", ",", "digest", "=", "digest", ",", "version", "=", "version", ")", "response", ".", "raise_for_status", "(", ")", "manifest_config", "=", "response", ".", "json", "(", ")", "config_digest", "=", "manifest_config", "[", "'config'", "]", "[", "'digest'", "]", "config_response", "=", "query_registry", "(", "registry_session", ",", "image", ",", "digest", "=", "config_digest", ",", "version", "=", "version", ",", "is_blob", "=", "True", ")", "config_response", ".", "raise_for_status", "(", ")", "blob_config", "=", "config_response", ".", "json", "(", ")", "context", "=", "'/'", ".", "join", "(", "[", "x", "for", "x", "in", "[", "image", ".", "namespace", ",", "image", ".", "repo", "]", "if", "x", "]", ")", "tag", "=", "image", ".", "tag", "logger", ".", "debug", "(", "'Image %s:%s has config:\\n%s'", ",", "context", ",", "tag", ",", "blob_config", ")", "return", "blob_config", ",", "config_digest" ]
Return image config by digest

:param image: ImageName, the remote image to inspect
:param registry: str, URI for registry, if URI schema is not provided,
                 https:// will be used
:param digest: str, digest of the image manifest
:param insecure: bool, when True registry's cert is not verified
:param dockercfg_path: str, dirname of .dockercfg location
:param version: str, which manifest schema versions to fetch digest

:return: dict, versions mapped to their digest
[ "Return", "image", "config", "by", "digest" ]
python
train
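A hedged call sketch for get_config_and_id_from_registry above; the registry host, digest, and dockercfg directory are placeholders, and the ImageName import location is assumed from this module.

from atomic_reactor.util import ImageName, get_config_and_id_from_registry

image = ImageName.parse('registry.example.com/osbs/test-image:latest')
blob_config, config_digest = get_config_and_id_from_registry(
    image,
    'registry.example.com',
    digest='sha256:<manifest-digest>',   # placeholder, not a real digest
    insecure=False,
    dockercfg_path='/var/run/secrets/atomic-reactor/dockercfg',
)
print(config_digest)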
rackerlabs/timid
timid/steps.py
https://github.com/rackerlabs/timid/blob/b1c6aa159ab380a033740f4aa392cf0d125e0ac6/timid/steps.py#L329-L339
def init(self, ctxt, step_addr):
    """
    Initialize the item.  This calls the class constructor with the
    appropriate arguments and returns the initialized object.

    :param ctxt: The context object.
    :param step_addr: The address of the step in the test
                      configuration.
    """

    return self.cls(ctxt, self.name, self.conf, step_addr)
[ "def", "init", "(", "self", ",", "ctxt", ",", "step_addr", ")", ":", "return", "self", ".", "cls", "(", "ctxt", ",", "self", ".", "name", ",", "self", ".", "conf", ",", "step_addr", ")" ]
Initialize the item.  This calls the class constructor with the
appropriate arguments and returns the initialized object.

:param ctxt: The context object.
:param step_addr: The address of the step in the test
                  configuration.
[ "Initialize", "the", "item", ".", "This", "calls", "the", "class", "constructor", "with", "the", "appropriate", "arguments", "and", "returns", "the", "initialized", "object", "." ]
python
test
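An illustrative stand-in for the contract init above relies on: whatever class is stored in self.cls must accept (ctxt, name, conf, step_addr). EchoAction and its values are inventions for the sketch, not part of timid.

class EchoAction(object):
    # Any class assigned to self.cls must take exactly these arguments.
    def __init__(self, ctxt, name, conf, step_addr):
        self.ctxt = ctxt
        self.name = name
        self.conf = conf
        self.step_addr = step_addr

# Equivalent to item.init(ctxt, step_addr) when item.cls is EchoAction:
action = EchoAction({}, 'echo', {'message': 'hello'}, 'test.yaml:3')
print(action.name, action.conf['message'])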
carpyncho/feets
feets/libs/ls_fap.py
https://github.com/carpyncho/feets/blob/53bdfb73b53845561914fc1f756e0c2377b9b76b/feets/libs/ls_fap.py#L169-L193
def tau_davies(Z, fmax, t, y, dy, normalization='standard', dH=1, dK=3):
    """tau factor for estimating Davies bound (Baluev 2008, Table 1)"""
    N = len(t)
    NH = N - dH  # DOF for null hypothesis
    NK = N - dK  # DOF for periodic hypothesis
    Dt = _weighted_var(t, dy)
    Teff = np.sqrt(4 * np.pi * Dt)
    W = fmax * Teff
    if normalization == 'psd':
        # 'psd' normalization is same as Baluev's z
        return W * np.exp(-Z) * np.sqrt(Z)
    elif normalization == 'standard':
        # 'standard' normalization is Z = 2/NH * z_1
        return (_gamma(NH) * W * (1 - Z) ** (0.5 * (NK - 1))
                * np.sqrt(0.5 * NH * Z))
    elif normalization == 'model':
        # 'model' normalization is Z = 2/NK * z_2
        return (_gamma(NK) * W * (1 + Z) ** (-0.5 * NK)
                * np.sqrt(0.5 * NK * Z))
    elif normalization == 'log':
        # 'log' normalization is Z = 2/NK * z_3
        return (_gamma(NK) * W * np.exp(-0.5 * Z * (NK - 0.5))
                * np.sqrt(NK * np.sinh(0.5 * Z)))
    else:
        raise NotImplementedError("normalization={0}".format(normalization))
[ "def", "tau_davies", "(", "Z", ",", "fmax", ",", "t", ",", "y", ",", "dy", ",", "normalization", "=", "'standard'", ",", "dH", "=", "1", ",", "dK", "=", "3", ")", ":", "N", "=", "len", "(", "t", ")", "NH", "=", "N", "-", "dH", "# DOF for null hypothesis", "NK", "=", "N", "-", "dK", "# DOF for periodic hypothesis", "Dt", "=", "_weighted_var", "(", "t", ",", "dy", ")", "Teff", "=", "np", ".", "sqrt", "(", "4", "*", "np", ".", "pi", "*", "Dt", ")", "W", "=", "fmax", "*", "Teff", "if", "normalization", "==", "'psd'", ":", "# 'psd' normalization is same as Baluev's z", "return", "W", "*", "np", ".", "exp", "(", "-", "Z", ")", "*", "np", ".", "sqrt", "(", "Z", ")", "elif", "normalization", "==", "'standard'", ":", "# 'standard' normalization is Z = 2/NH * z_1", "return", "(", "_gamma", "(", "NH", ")", "*", "W", "*", "(", "1", "-", "Z", ")", "**", "(", "0.5", "*", "(", "NK", "-", "1", ")", ")", "*", "np", ".", "sqrt", "(", "0.5", "*", "NH", "*", "Z", ")", ")", "elif", "normalization", "==", "'model'", ":", "# 'model' normalization is Z = 2/NK * z_2", "return", "(", "_gamma", "(", "NK", ")", "*", "W", "*", "(", "1", "+", "Z", ")", "**", "(", "-", "0.5", "*", "NK", ")", "*", "np", ".", "sqrt", "(", "0.5", "*", "NK", "*", "Z", ")", ")", "elif", "normalization", "==", "'log'", ":", "# 'log' normalization is Z = 2/NK * z_3", "return", "(", "_gamma", "(", "NK", ")", "*", "W", "*", "np", ".", "exp", "(", "-", "0.5", "*", "Z", "*", "(", "NK", "-", "0.5", ")", ")", "*", "np", ".", "sqrt", "(", "NK", "*", "np", ".", "sinh", "(", "0.5", "*", "Z", ")", ")", ")", "else", ":", "raise", "NotImplementedError", "(", "\"normalization={0}\"", ".", "format", "(", "normalization", ")", ")" ]
tau factor for estimating Davies bound (Baluev 2008, Table 1)
[ "tau", "factor", "for", "estimating", "Davies", "bound", "(", "Baluev", "2008", "Table", "1", ")" ]
python
train
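A hedged numeric check of the 'psd' branch of tau_davies above, which needs no _gamma helper; _weighted_var is assumed to be the dy-weighted variance of t, consistent with how Teff is formed.

import numpy as np

rng = np.random.RandomState(0)
t = 100 * rng.rand(200)              # observation times
dy = 0.1 * np.ones_like(t)           # homoscedastic errors
w = dy ** -2.0
Dt = np.dot(w, (t - np.dot(w, t) / w.sum()) ** 2) / w.sum()
Teff = np.sqrt(4 * np.pi * Dt)       # effective baseline
W = 10.0 * Teff                      # fmax = 10
Z = 25.0                             # periodogram peak height (psd units)
print(W * np.exp(-Z) * np.sqrt(Z))   # tau: expected count of peaks >= Z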
unt-libraries/pyuntl
pyuntl/untl_structure.py
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untl_structure.py#L216-L295
def create_form_data(self, **kwargs):
    """Create groupings of form elements."""
    # Get the specified keyword arguments.
    children = kwargs.get('children', [])
    sort_order = kwargs.get('sort_order', None)
    solr_response = kwargs.get('solr_response', None)
    superuser = kwargs.get('superuser', False)
    # Get the vocabularies to pull the qualifiers from.
    vocabularies = self.get_vocabularies()
    # Loop through all UNTL elements in the Python object.
    for element in children:
        # Add children that are missing from the form.
        element.children = add_missing_children(
            element.contained_children,
            element.children,
        )
        # Add the form attribute to the element.
        element.add_form(
            vocabularies=vocabularies,
            qualifier=element.qualifier,
            content=element.content,
            superuser=superuser,
        )
        # Element can contain children.
        if element.form.has_children:
            # If the parent has a qualifier,
            # create a representative form element for the parent.
            if getattr(element.form, 'qualifier_name', False):
                add_parent = PARENT_FORM[element.form.qualifier_name](
                    content=element.qualifier,
                )
                # Add the parent to the list of child elements.
                element.children.append(add_parent)
            # Sort the elements by the index of child sort.
            element.children.sort(
                key=lambda obj: element.form.child_sort.index(obj.tag)
            )
            # Loop through the element's children (if it has any).
            for child in element.children:
                # Add the form attribute to the element.
                child.add_form(
                    vocabularies=vocabularies,
                    qualifier=child.qualifier,
                    content=child.content,
                    parent_tag=element.tag,
                    superuser=superuser,
                )
    element_group_dict = {}
    # Group related objects together.
    for element in children:
        # Make meta-hidden its own group.
        if element.form.name == 'meta' and element.qualifier == 'hidden':
            element_group_dict['hidden'] = [element]
        # Element is not meta-hidden.
        else:
            # Make sure the dictionary key exists.
            if element.form.name not in element_group_dict:
                element_group_dict[element.form.name] = []
            element_group_dict[element.form.name].append(element)
    # If the hidden meta element doesn't exist, add it to its own group.
    if 'hidden' not in element_group_dict:
        hidden_element = PYUNTL_DISPATCH['meta'](
            qualifier='hidden', content='False')
        hidden_element.add_form(
            vocabularies=vocabularies,
            qualifier=hidden_element.qualifier,
            content=hidden_element.content,
            superuser=superuser,
        )
        element_group_dict['hidden'] = [hidden_element]
    # Create a list of group object elements.
    element_list = self.create_form_groupings(
        vocabularies,
        solr_response,
        element_group_dict,
        sort_order,
    )
    # Return the list of UNTL elements with form data added.
    return element_list
[ "def", "create_form_data", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# Get the specified keyword arguments.", "children", "=", "kwargs", ".", "get", "(", "'children'", ",", "[", "]", ")", "sort_order", "=", "kwargs", ".", "get", "(", "'sort_order'", ",", "None", ")", "solr_response", "=", "kwargs", ".", "get", "(", "'solr_response'", ",", "None", ")", "superuser", "=", "kwargs", ".", "get", "(", "'superuser'", ",", "False", ")", "# Get the vocabularies to pull the qualifiers from.", "vocabularies", "=", "self", ".", "get_vocabularies", "(", ")", "# Loop through all UNTL elements in the Python object.", "for", "element", "in", "children", ":", "# Add children that are missing from the form.", "element", ".", "children", "=", "add_missing_children", "(", "element", ".", "contained_children", ",", "element", ".", "children", ",", ")", "# Add the form attribute to the element.", "element", ".", "add_form", "(", "vocabularies", "=", "vocabularies", ",", "qualifier", "=", "element", ".", "qualifier", ",", "content", "=", "element", ".", "content", ",", "superuser", "=", "superuser", ",", ")", "# Element can contain children.", "if", "element", ".", "form", ".", "has_children", ":", "# If the parent has a qualifier,", "# create a representative form element for the parent.", "if", "getattr", "(", "element", ".", "form", ",", "'qualifier_name'", ",", "False", ")", ":", "add_parent", "=", "PARENT_FORM", "[", "element", ".", "form", ".", "qualifier_name", "]", "(", "content", "=", "element", ".", "qualifier", ",", ")", "# Add the parent to the list of child elements.", "element", ".", "children", ".", "append", "(", "add_parent", ")", "# Sort the elements by the index of child sort.", "element", ".", "children", ".", "sort", "(", "key", "=", "lambda", "obj", ":", "element", ".", "form", ".", "child_sort", ".", "index", "(", "obj", ".", "tag", ")", ")", "# Loop through the element's children (if it has any).", "for", "child", "in", "element", ".", "children", ":", "# Add the form attribute to the element.", "child", ".", "add_form", "(", "vocabularies", "=", "vocabularies", ",", "qualifier", "=", "child", ".", "qualifier", ",", "content", "=", "child", ".", "content", ",", "parent_tag", "=", "element", ".", "tag", ",", "superuser", "=", "superuser", ",", ")", "element_group_dict", "=", "{", "}", "# Group related objects together.", "for", "element", "in", "children", ":", "# Make meta-hidden its own group.", "if", "element", ".", "form", ".", "name", "==", "'meta'", "and", "element", ".", "qualifier", "==", "'hidden'", ":", "element_group_dict", "[", "'hidden'", "]", "=", "[", "element", "]", "# Element is not meta-hidden.", "else", ":", "# Make sure the dictionary key exists.", "if", "element", ".", "form", ".", "name", "not", "in", "element_group_dict", ":", "element_group_dict", "[", "element", ".", "form", ".", "name", "]", "=", "[", "]", "element_group_dict", "[", "element", ".", "form", ".", "name", "]", ".", "append", "(", "element", ")", "# If the hidden meta element doesn't exist, add it to its own group.", "if", "'hidden'", "not", "in", "element_group_dict", ":", "hidden_element", "=", "PYUNTL_DISPATCH", "[", "'meta'", "]", "(", "qualifier", "=", "'hidden'", ",", "content", "=", "'False'", ")", "hidden_element", ".", "add_form", "(", "vocabularies", "=", "vocabularies", ",", "qualifier", "=", "hidden_element", ".", "qualifier", ",", "content", "=", "hidden_element", ".", "content", ",", "superuser", "=", "superuser", ",", ")", "element_group_dict", "[", "'hidden'", "]", "=", "[", 
"hidden_element", "]", "# Create a list of group object elements.", "element_list", "=", "self", ".", "create_form_groupings", "(", "vocabularies", ",", "solr_response", ",", "element_group_dict", ",", "sort_order", ",", ")", "# Return the list of UNTL elements with form data added.", "return", "element_list" ]
Create groupings of form elements.
[ "Create", "groupings", "of", "form", "elements", "." ]
python
train
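A self-contained toy of the grouping pass inside create_form_data above: elements are bucketed by form name, and the meta-hidden element gets its own 'hidden' group. The El class is a stand-in, not pyuntl's element type.

class El(object):
    def __init__(self, form_name, qualifier=None):
        self.form_name = form_name
        self.qualifier = qualifier

elements = [El('title'), El('meta', 'hidden'), El('subject'), El('title')]
groups = {}
for el in elements:
    hidden = el.form_name == 'meta' and el.qualifier == 'hidden'
    key = 'hidden' if hidden else el.form_name
    groups.setdefault(key, []).append(el)
print(sorted((k, len(v)) for k, v in groups.items()))
# [('hidden', 1), ('subject', 1), ('title', 2)]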
hvac/hvac
hvac/api/system_backend/key.py
https://github.com/hvac/hvac/blob/cce5b86889193f622c2a72a4a1b7e1c9c8aff1ce/hvac/api/system_backend/key.py#L156-L213
def start_rekey(self, secret_shares=5, secret_threshold=3, pgp_keys=None,
                backup=False, require_verification=False, recovery_key=False):
    """Initializes a new rekey attempt.

    Only a single recovery key rekey attempt can take place at a time, and
    changing the parameters of a rekey requires canceling and starting a new
    rekey, which will also provide a new nonce.

    Supported methods:
        PUT: /sys/rekey/init. Produces: 204 (empty body)
        PUT: /sys/rekey-recovery-key/init. Produces: 204 (empty body)

    :param secret_shares: Specifies the number of shares to split the master
        key into.
    :type secret_shares: int
    :param secret_threshold: Specifies the number of shares required to
        reconstruct the master key. This must be less than or equal to
        secret_shares.
    :type secret_threshold: int
    :param pgp_keys: Specifies an array of PGP public keys used to encrypt
        the output unseal keys. Ordering is preserved. The keys must be
        base64-encoded from their original binary representation. The size of
        this array must be the same as secret_shares.
    :type pgp_keys: list
    :param backup: Specifies if using PGP-encrypted keys, whether Vault
        should also store a plaintext backup of the PGP-encrypted keys at
        core/unseal-keys-backup in the physical storage backend. These can
        then be retrieved and removed via the sys/rekey/backup endpoint.
    :type backup: bool
    :param require_verification: This turns on verification functionality.
        When verification is turned on, after successful authorization with
        the current unseal keys, the new unseal keys are returned but the
        master key is not actually rotated. The new keys must be provided to
        authorize the actual rotation of the master key. This ensures that
        the new keys have been successfully saved and protects against a risk
        of the keys being lost after rotation but before they can be
        persisted. This can be used with or without pgp_keys, and when used
        with it, it allows ensuring that the returned keys can be
        successfully decrypted before committing to the new shares, which the
        backup functionality does not provide.
    :type require_verification: bool
    :param recovery_key: If true, send requests to "rekey-recovery-key"
        instead of "rekey" api path.
    :type recovery_key: bool
    :return: The JSON dict of the response.
    :rtype: dict | requests.Response
    """
    params = {
        'secret_shares': secret_shares,
        'secret_threshold': secret_threshold,
        'require_verification': require_verification,
    }

    if pgp_keys:
        if len(pgp_keys) != secret_shares:
            raise ParamValidationError('length of pgp_keys argument must equal secret shares value')
        params['pgp_keys'] = pgp_keys
        params['backup'] = backup

    api_path = '/v1/sys/rekey/init'
    if recovery_key:
        api_path = '/v1/sys/rekey-recovery-key/init'
    response = self._adapter.put(
        url=api_path,
        json=params,
    )
    return response.json()
[ "def", "start_rekey", "(", "self", ",", "secret_shares", "=", "5", ",", "secret_threshold", "=", "3", ",", "pgp_keys", "=", "None", ",", "backup", "=", "False", ",", "require_verification", "=", "False", ",", "recovery_key", "=", "False", ")", ":", "params", "=", "{", "'secret_shares'", ":", "secret_shares", ",", "'secret_threshold'", ":", "secret_threshold", ",", "'require_verification'", ":", "require_verification", ",", "}", "if", "pgp_keys", ":", "if", "len", "(", "pgp_keys", ")", "!=", "secret_shares", ":", "raise", "ParamValidationError", "(", "'length of pgp_keys argument must equal secret shares value'", ")", "params", "[", "'pgp_keys'", "]", "=", "pgp_keys", "params", "[", "'backup'", "]", "=", "backup", "api_path", "=", "'/v1/sys/rekey/init'", "if", "recovery_key", ":", "api_path", "=", "'/v1/sys/rekey-recovery-key/init'", "response", "=", "self", ".", "_adapter", ".", "put", "(", "url", "=", "api_path", ",", "json", "=", "params", ",", ")", "return", "response", ".", "json", "(", ")" ]
Initializes a new rekey attempt.

Only a single recovery key rekey attempt can take place at a time, and
changing the parameters of a rekey requires canceling and starting a new
rekey, which will also provide a new nonce.

Supported methods:
    PUT: /sys/rekey/init. Produces: 204 (empty body)
    PUT: /sys/rekey-recovery-key/init. Produces: 204 (empty body)

:param secret_shares: Specifies the number of shares to split the master
    key into.
:type secret_shares: int
:param secret_threshold: Specifies the number of shares required to
    reconstruct the master key. This must be less than or equal to
    secret_shares.
:type secret_threshold: int
:param pgp_keys: Specifies an array of PGP public keys used to encrypt the
    output unseal keys. Ordering is preserved. The keys must be
    base64-encoded from their original binary representation. The size of
    this array must be the same as secret_shares.
:type pgp_keys: list
:param backup: Specifies if using PGP-encrypted keys, whether Vault should
    also store a plaintext backup of the PGP-encrypted keys at
    core/unseal-keys-backup in the physical storage backend. These can then
    be retrieved and removed via the sys/rekey/backup endpoint.
:type backup: bool
:param require_verification: This turns on verification functionality.
    When verification is turned on, after successful authorization with the
    current unseal keys, the new unseal keys are returned but the master key
    is not actually rotated. The new keys must be provided to authorize the
    actual rotation of the master key. This ensures that the new keys have
    been successfully saved and protects against a risk of the keys being
    lost after rotation but before they can be persisted. This can be used
    with or without pgp_keys, and when used with it, it allows ensuring that
    the returned keys can be successfully decrypted before committing to the
    new shares, which the backup functionality does not provide.
:type require_verification: bool
:param recovery_key: If true, send requests to "rekey-recovery-key"
    instead of "rekey" api path.
:type recovery_key: bool
:return: The JSON dict of the response.
:rtype: dict | requests.Response
[ "Initializes", "a", "new", "rekey", "attempt", "." ]
python
train
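A hedged usage sketch for start_rekey above through the hvac client; the Vault address and token are placeholders, and reading 'nonce' from the response follows Vault's documented rekey API.

import hvac

client = hvac.Client(url='https://vault.example.com:8200', token='s.placeholder')
rekey_response = client.sys.start_rekey(
    secret_shares=5,
    secret_threshold=3,
    require_verification=True,
)
print(rekey_response.get('nonce'))   # needed for subsequent rekey updates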
chriskuehl/identify
identify/identify.py
https://github.com/chriskuehl/identify/blob/27ff23a3a5a08fb46e7eac79c393c1d678b4217a/identify/identify.py#L165-L173
def parse_shebang_from_file(path):
    """Parse the shebang given a file path."""
    if not os.path.lexists(path):
        raise ValueError('{} does not exist.'.format(path))
    if not os.access(path, os.X_OK):
        return ()

    with open(path, 'rb') as f:
        return parse_shebang(f)
[ "def", "parse_shebang_from_file", "(", "path", ")", ":", "if", "not", "os", ".", "path", ".", "lexists", "(", "path", ")", ":", "raise", "ValueError", "(", "'{} does not exist.'", ".", "format", "(", "path", ")", ")", "if", "not", "os", ".", "access", "(", "path", ",", "os", ".", "X_OK", ")", ":", "return", "(", ")", "with", "open", "(", "path", ",", "'rb'", ")", "as", "f", ":", "return", "parse_shebang", "(", "f", ")" ]
Parse the shebang given a file path.
[ "Parse", "the", "shebang", "given", "a", "file", "path", "." ]
python
train
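A quick usage sketch for parse_shebang_from_file above; the file-backed path is a placeholder, so the underlying parser is also exercised on an in-memory binary file to show the expected result.

import io

from identify import identify

# File-backed call (path is a placeholder for a real executable script):
# print(identify.parse_shebang_from_file('/usr/local/bin/some-script'))

# The parser this wraps accepts any binary file object:
print(identify.parse_shebang(io.BytesIO(b'#!/usr/bin/env python3\n')))
# -> ('python3',)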