repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
pywbem/pywbem
pywbem_mock/_wbemconnection_mock.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem_mock/_wbemconnection_mock.py#L2963-L3005
def _fake_associators(self, namespace, **params):
    """
    Mock WBEM server responder for
    :meth:`~pywbem.WBEMConnection.Associators`.
    """
    self._validate_namespace(namespace)

    def _cls_name(key):
        # Filter parameters arrive as CIMClassName objects or None.
        value = params[key]
        return None if value is None else value.classname

    result_class = _cls_name('ResultClass')
    assoc_class = _cls_name('AssocClass')
    role = params['Role']
    result_role = params['ResultRole']
    obj_name = params['ObjectName']
    prop_list = params['PropertyList']
    inc_class_origin = params['IncludeClassOrigin']
    inc_qualifiers = params['IncludeQualifiers']

    if isinstance(obj_name, CIMClassName):
        # Class-level request: respond with (CIMClassName, CIMClass) tuples.
        assoc_classnames = self._get_associated_classnames(
            obj_name.classname, namespace, assoc_class, result_class,
            result_role, role)
        return self._return_assoc_class_tuples(
            assoc_classnames, namespace, inc_qualifiers, inc_class_origin,
            prop_list)

    # Instance-level request: resolve each associated path to an instance.
    assert isinstance(obj_name, CIMInstanceName)
    assoc_paths = self._get_associated_instancenames(
        obj_name, namespace, assoc_class, result_class, result_role, role)
    instances = [
        self._get_instance(path, namespace, None, prop_list,
                           inc_class_origin, inc_qualifiers)
        for path in assoc_paths
    ]
    return self._return_assoc_tuple(instances)
[ "def", "_fake_associators", "(", "self", ",", "namespace", ",", "*", "*", "params", ")", ":", "self", ".", "_validate_namespace", "(", "namespace", ")", "rc", "=", "None", "if", "params", "[", "'ResultClass'", "]", "is", "None", "else", "params", "[", "'...
Implements a mock WBEM server responder for :meth:`~pywbem.WBEMConnection.Associators`
[ "Implements", "a", "mock", "WBEM", "server", "responder", "for", ":", "meth", ":", "~pywbem", ".", "WBEMConnection", ".", "Associators" ]
python
train
saltstack/salt
salt/engines/slack.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/engines/slack.py#L708-L774
def run_commands_from_slack_async(self, message_generator, fire_all, tag, control, interval=1):
    '''
    Pull any pending messages from the message_generator, sending each
    one to either the event bus, the command_async or both, depending on
    the values of fire_all and command.

    Runs forever: alternates between draining Slack messages and
    collecting results of previously submitted salt jobs.

    :param message_generator: yields message dicts; an empty dict is a
        dud, a dict with {'done': True} ends the current drain pass
    :param fire_all: if truthy, every message is also fired on the event bus
    :param tag: event-bus tag prefix for fired messages
    :param control: if truthy, messages carrying a 'cmdline' are submitted
        as async salt jobs
    :param interval: seconds to sleep between drain passes
    '''
    outstanding = {}  # set of job_id that we need to check for

    while True:
        log.trace('Sleeping for interval of %s', interval)
        time.sleep(interval)
        # Drain the slack messages, up to 10 messages at a clip
        count = 0
        for msg in message_generator:
            # The message_generator yields dicts. Leave this loop
            # on a dict that looks like {'done': True} or when we've done it
            # 10 times without taking a break.
            log.trace('Got a message from the generator: %s', msg.keys())
            if count > 10:
                log.warning('Breaking in getting messages because count is exceeded')
                break
            if not msg:
                count += 1
                log.warning('Skipping an empty message.')
                continue  # This one is a dud, get the next message
            if msg.get('done'):
                log.trace('msg is done')
                break
            if fire_all:
                log.debug('Firing message to the bus with tag: %s', tag)
                log.debug('%s %s', tag, msg)
                self.fire('{0}/{1}'.format(tag, msg['message_data'].get('type')), msg)
            if control and (len(msg) > 1) and msg.get('cmdline'):
                channel = self.sc.server.channels.find(msg['channel'])
                jid = self.run_command_async(msg)
                log.debug('Submitted a job and got jid: %s', jid)
                # record so we can return messages to the caller
                outstanding[jid] = msg
                channel.send_message("@{}'s job is submitted as salt jid {}".format(msg['user_name'], jid))
            count += 1

        # Check all outstanding jobs for completion and report results back
        # to the originating Slack channel.
        start_time = time.time()
        # dict of job_ids:results are returned
        job_status = self.get_jobs_from_runner(outstanding.keys())
        log.trace('Getting %s jobs status took %s seconds',
                  len(job_status), time.time() - start_time)
        for jid in job_status:
            result = job_status[jid]['data']
            function = job_status[jid]['function']
            if result:
                log.debug('ret to send back is %s', result)
                # formatting function?
                this_job = outstanding[jid]
                channel = self.sc.server.channels.find(this_job['channel'])
                return_text = self.format_return_text(result, function)
                return_prefix = "@{}'s job `{}` (id: {}) (target: {}) returned".format(
                    this_job['user_name'], this_job['cmdline'], jid, this_job['target'])
                channel.send_message(return_prefix)
                # Upload the full result as a timestamped YAML file so long
                # output doesn't flood the channel.
                ts = time.time()
                st = datetime.datetime.fromtimestamp(ts).strftime('%Y%m%d%H%M%S%f')
                filename = 'salt-results-{0}.yaml'.format(st)
                r = self.sc.api_call(
                    'files.upload', channels=channel.id, filename=filename,
                    content=return_text)
                # Handle unicode return
                log.debug('Got back %s via the slack client', r)
                resp = salt.utils.yaml.safe_load(salt.utils.json.dumps(r))
                if 'ok' in resp and resp['ok'] is False:
                    this_job['channel'].send_message('Error: {0}'.format(resp['error']))
                # NOTE(review): jobs whose result is falsy are never removed
                # from `outstanding` and will be re-polled forever — confirm
                # whether that is intended.
                del outstanding[jid]
[ "def", "run_commands_from_slack_async", "(", "self", ",", "message_generator", ",", "fire_all", ",", "tag", ",", "control", ",", "interval", "=", "1", ")", ":", "outstanding", "=", "{", "}", "# set of job_id that we need to check for", "while", "True", ":", "log",...
Pull any pending messages from the message_generator, sending each one to either the event bus, the command_async or both, depending on the values of fire_all and command
[ "Pull", "any", "pending", "messages", "from", "the", "message_generator", "sending", "each", "one", "to", "either", "the", "event", "bus", "the", "command_async", "or", "both", "depending", "on", "the", "values", "of", "fire_all", "and", "command" ]
python
train
arne-cl/discoursegraphs
src/discoursegraphs/readwrite/salt/elements.py
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/salt/elements.py#L74-L85
def from_etree(cls, etree_element):
    """
    Create a `SaltElement` from an `etree._Element` that represents an
    element in a SaltXMI file.
    """
    labels = []
    for label_elem in get_subelements(etree_element, 'labels'):
        labels.append(SaltLabel.from_etree(label_elem))
    return cls(
        name=get_element_name(etree_element),
        element_id=get_graph_element_id(etree_element),
        xsi_type=get_xsi_type(etree_element),
        labels=labels,
        xml=etree_element)
[ "def", "from_etree", "(", "cls", ",", "etree_element", ")", ":", "label_elements", "=", "get_subelements", "(", "etree_element", ",", "'labels'", ")", "labels", "=", "[", "SaltLabel", ".", "from_etree", "(", "elem", ")", "for", "elem", "in", "label_elements", ...
creates a `SaltElement` from an `etree._Element` representing an element in a SaltXMI file.
[ "creates", "a", "SaltElement", "from", "an", "etree", ".", "_Element", "representing", "an", "element", "in", "a", "SaltXMI", "file", "." ]
python
train
readbeyond/aeneas
aeneas/syncmap/smfbase.py
https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/syncmap/smfbase.py#L53-L80
def _add_fragment(cls, syncmap, identifier, lines, begin, end, language=None):
    """
    Add a new fragment to ``syncmap``.

    :param syncmap: the syncmap to append to
    :type  syncmap: :class:`~aeneas.syncmap.SyncMap`
    :param identifier: the identifier
    :type  identifier: string
    :param lines: the lines of the text
    :type  lines: list of string
    :param begin: the begin time
    :type  begin: :class:`~aeneas.exacttiming.TimeValue`
    :param end: the end time
    :type  end: :class:`~aeneas.exacttiming.TimeValue`
    :param language: the language
    :type  language: string
    """
    # Build the text payload first, then wrap it with its timing interval.
    text = TextFragment(
        identifier=identifier,
        lines=lines,
        language=language)
    fragment = SyncMapFragment(text_fragment=text, begin=begin, end=end)
    syncmap.add_fragment(fragment)
[ "def", "_add_fragment", "(", "cls", ",", "syncmap", ",", "identifier", ",", "lines", ",", "begin", ",", "end", ",", "language", "=", "None", ")", ":", "syncmap", ".", "add_fragment", "(", "SyncMapFragment", "(", "text_fragment", "=", "TextFragment", "(", "...
Add a new fragment to ``syncmap``. :param syncmap: the syncmap to append to :type syncmap: :class:`~aeneas.syncmap.SyncMap` :param identifier: the identifier :type identifier: string :param lines: the lines of the text :type lines: list of string :param begin: the begin time :type begin: :class:`~aeneas.exacttiming.TimeValue` :param end: the end time :type end: :class:`~aeneas.exacttiming.TimeValue` :param language: the language :type language: string
[ "Add", "a", "new", "fragment", "to", "syncmap", "." ]
python
train
MillionIntegrals/vel
vel/sources/nlp/imdb.py
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/sources/nlp/imdb.py#L48-L73
def create(model_config, batch_size, vectors=None):
    """ Create an IMDB dataset """
    data_path = model_config.data_dir('imdb')

    # Text is spacy-tokenized and lowercased; labels are the targets.
    field_text = data.Field(lower=True, tokenize='spacy', batch_first=True)
    field_label = data.LabelField(is_target=True)

    source_train, source_test = IMDBCached.splits(
        root=data_path, text_field=field_text, label_field=field_label
    )

    # Vocabularies are built from the training split only.
    field_text.build_vocab(source_train, max_size=25_000, vectors=vectors)
    field_label.build_vocab(source_train)

    iter_train, iter_test = data.BucketIterator.splits(
        (source_train, source_test),
        batch_size=batch_size,
        device=model_config.torch_device(),
        shuffle=True
    )

    return TextData(
        source_train, source_test, iter_train, iter_test,
        field_text, field_label
    )
[ "def", "create", "(", "model_config", ",", "batch_size", ",", "vectors", "=", "None", ")", ":", "path", "=", "model_config", ".", "data_dir", "(", "'imdb'", ")", "text_field", "=", "data", ".", "Field", "(", "lower", "=", "True", ",", "tokenize", "=", ...
Create an IMDB dataset
[ "Create", "an", "IMDB", "dataset" ]
python
train
hozn/stravalib
stravalib/client.py
https://github.com/hozn/stravalib/blob/5500ebc39e0bf4706bb1ca4c27b25e56becaaa5f/stravalib/client.py#L470-L490
def get_club_activities(self, club_id, limit=None):
    """
    Gets the activities associated with specified club.

    http://strava.github.io/api/v3/clubs/#get-activities

    :param club_id: The numeric ID for the club.
    :type club_id: int

    :param limit: Maximum number of activities to return. (default unlimited)
    :type limit: int

    :return: An iterator of :class:`stravalib.model.Activity` objects.
    :rtype: :class:`BatchedResultsIterator`
    """
    # Bind the club id into the fetcher; the iterator handles paging.
    fetcher = functools.partial(self.protocol.get,
                                '/clubs/{id}/activities',
                                id=club_id)
    return BatchedResultsIterator(
        entity=model.Activity,
        bind_client=self,
        result_fetcher=fetcher,
        limit=limit)
[ "def", "get_club_activities", "(", "self", ",", "club_id", ",", "limit", "=", "None", ")", ":", "result_fetcher", "=", "functools", ".", "partial", "(", "self", ".", "protocol", ".", "get", ",", "'/clubs/{id}/activities'", ",", "id", "=", "club_id", ")", "...
Gets the activities associated with specified club. http://strava.github.io/api/v3/clubs/#get-activities :param club_id: The numeric ID for the club. :type club_id: int :param limit: Maximum number of activities to return. (default unlimited) :type limit: int :return: An iterator of :class:`stravalib.model.Activity` objects. :rtype: :class:`BatchedResultsIterator`
[ "Gets", "the", "activities", "associated", "with", "specified", "club", "." ]
python
train
booktype/python-ooxml
ooxml/serialize.py
https://github.com/booktype/python-ooxml/blob/b56990a5bee2e1bc46839cec5161ff3726dc4d87/ooxml/serialize.py#L1005-L1029
def get_header(self, elem, style, node):
    """Returns HTML tag representing specific header for this element.

    :Returns:
      String representation of HTML tag.
    """
    # Elements explicitly flagged as possible headers are always top-level.
    if getattr(elem, 'possible_header', False):
        return 'h1'
    # No style information at all: deepest header.
    if not style:
        return 'h6'

    size = style
    if hasattr(style, 'style_id'):
        size = _get_font_size(self.doc, style)
    try:
        headers_style = self.doc.possible_headers_style
        if size in headers_style:
            return 'h{}'.format(headers_style.index(size) + 1)
        return 'h{}'.format(self.doc.possible_headers.index(size) + 1)
    except ValueError:
        # Font size not registered as a header size.
        return 'h6'
[ "def", "get_header", "(", "self", ",", "elem", ",", "style", ",", "node", ")", ":", "font_size", "=", "style", "if", "hasattr", "(", "elem", ",", "'possible_header'", ")", ":", "if", "elem", ".", "possible_header", ":", "return", "'h1'", "if", "not", "...
Returns HTML tag representing specific header for this element. :Returns: String representation of HTML tag.
[ "Returns", "HTML", "tag", "representing", "specific", "header", "for", "this", "element", "." ]
python
train
contentful/contentful-management.py
contentful_management/api_key.py
https://github.com/contentful/contentful-management.py/blob/707dd30883b98a10c7ff0f7f5bdb8edbdc1d8df0/contentful_management/api_key.py#L34-L52
def create_attributes(klass, attributes, previous_object=None):
    """
    Attributes for resource creation.

    Values missing from ``attributes`` fall back to the corresponding
    values of ``previous_object`` (or empty defaults when updating from
    scratch).
    """
    has_previous = previous_object is not None

    fallback_name = previous_object.name if has_previous else ''
    fallback_description = previous_object.description if has_previous else ''
    # Will default to master if empty.
    fallback_environments = (
        [env.to_json() for env in previous_object.environments]
        if has_previous else []
    )

    return {
        'name': attributes.get('name', fallback_name),
        'description': attributes.get('description', fallback_description),
        'environments': attributes.get('environments', fallback_environments),
    }
[ "def", "create_attributes", "(", "klass", ",", "attributes", ",", "previous_object", "=", "None", ")", ":", "return", "{", "'name'", ":", "attributes", ".", "get", "(", "'name'", ",", "previous_object", ".", "name", "if", "previous_object", "is", "not", "Non...
Attributes for resource creation.
[ "Attributes", "for", "resource", "creation", "." ]
python
train
hkff/FodtlMon
fodtlmon/tools/color.py
https://github.com/hkff/FodtlMon/blob/0c9015a1a1f0a4a64d52945c86b45441d5871c56/fodtlmon/tools/color.py#L504-L535
def enable(auto_colors=False, reset_atexit=False):
    """Enables color text with print() or sys.stdout.write() (stderr too).

    Keyword arguments:
    auto_colors -- automatically selects dark or light colors based on the
        current terminal's background color. Only works with {autored} and
        related tags.
    reset_atexit -- resets original colors upon Python exit (in case you
        forget to reset it yourself with a closing tag).
    """
    # Colorized console streams are only relevant on Windows.
    if os.name != 'nt':
        return False

    # Replace the standard streams with color-aware wrappers (idempotent).
    if not isinstance(sys.stderr, _WindowsStream):
        sys.stderr.flush()
        sys.stderr = _WindowsStream(stderr=True)
    if not isinstance(sys.stdout, _WindowsStream):
        sys.stdout.flush()
        sys.stdout = _WindowsStream(stderr=False)
    if not (isinstance(sys.stderr, _WindowsStream) or isinstance(sys.stdout, _WindowsStream)):
        return False

    # Automatically select which colors to display based on the terminal
    # background color code, when requested.
    background = getattr(sys.stdout, 'default_bg',
                         getattr(sys.stderr, 'default_bg', None))
    if auto_colors and background is not None:
        if background in (112, 96, 240, 176, 224, 208, 160):
            set_light_background()
        else:
            set_dark_background()

    # Restore the original colors when the interpreter exits, if requested.
    if reset_atexit:
        atexit.register(lambda: Windows.disable())

    return True
[ "def", "enable", "(", "auto_colors", "=", "False", ",", "reset_atexit", "=", "False", ")", ":", "if", "os", ".", "name", "!=", "'nt'", ":", "return", "False", "# Overwrite stream references.", "if", "not", "isinstance", "(", "sys", ".", "stderr", ",", "_Wi...
Enables color text with print() or sys.stdout.write() (stderr too). Keyword arguments: auto_colors -- automatically selects dark or light colors based on current terminal's background color. Only works with {autored} and related tags. reset_atexit -- resets original colors upon Python exit (in case you forget to reset it yourself with a closing tag).
[ "Enables", "color", "text", "with", "print", "()", "or", "sys", ".", "stdout", ".", "write", "()", "(", "stderr", "too", ")", "." ]
python
train
tensorlayer/tensorlayer
tensorlayer/layers/normalization.py
https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/normalization.py#L92-L99
def _bias_add(x, b, data_format): """Alternative implementation of tf.nn.bias_add which is compatiable with tensorRT.""" if data_format == 'NHWC': return tf.add(x, b) elif data_format == 'NCHW': return tf.add(x, _to_channel_first_bias(b)) else: raise ValueError('invalid data_format: %s' % data_format)
[ "def", "_bias_add", "(", "x", ",", "b", ",", "data_format", ")", ":", "if", "data_format", "==", "'NHWC'", ":", "return", "tf", ".", "add", "(", "x", ",", "b", ")", "elif", "data_format", "==", "'NCHW'", ":", "return", "tf", ".", "add", "(", "x", ...
Alternative implementation of tf.nn.bias_add which is compatiable with tensorRT.
[ "Alternative", "implementation", "of", "tf", ".", "nn", ".", "bias_add", "which", "is", "compatiable", "with", "tensorRT", "." ]
python
valid
rosenbrockc/fortpy
fortpy/elements.py
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/elements.py#L1676-L1680
def warn(self, collection):
    """Checks the module for documentation and best-practice warnings."""
    # Let the base element contribute its own warnings first.
    super(CodeElement, self).warn(collection)
    if "implicit none" not in self.modifiers:
        collection.append("WARNING: implicit none not set in {}".format(self.name))
[ "def", "warn", "(", "self", ",", "collection", ")", ":", "super", "(", "CodeElement", ",", "self", ")", ".", "warn", "(", "collection", ")", "if", "not", "\"implicit none\"", "in", "self", ".", "modifiers", ":", "collection", ".", "append", "(", "\"WARNI...
Checks the module for documentation and best-practice warnings.
[ "Checks", "the", "module", "for", "documentation", "and", "best", "-", "practice", "warnings", "." ]
python
train
signalfx/signalfx-python
signalfx/ingest.py
https://github.com/signalfx/signalfx-python/blob/650eb9a2b301bcc795e4e3a8c031574ade69849d/signalfx/ingest.py#L331-L334
def _assign_value(self, pbuf_dp, value):
    """Assigns a value to the protobuf obj.

    Thin wrapper around ``_assign_value_by_type``; ``_bool=False``
    presumably means boolean values are handled by a separate code
    path — confirm against the callers.
    """
    self._assign_value_by_type(pbuf_dp, value, _bool=False,
                               error_prefix='Invalid value')
[ "def", "_assign_value", "(", "self", ",", "pbuf_dp", ",", "value", ")", ":", "self", ".", "_assign_value_by_type", "(", "pbuf_dp", ",", "value", ",", "_bool", "=", "False", ",", "error_prefix", "=", "'Invalid value'", ")" ]
Assigns a value to the protobuf obj
[ "Assigns", "a", "value", "to", "the", "protobuf", "obj" ]
python
train
zhmcclient/python-zhmcclient
zhmcclient/_metrics.py
https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient/_metrics.py#L272-L294
def _setup_metric_group_definitions(self):
    """
    Return the dict of MetricGroupDefinition objects for this metrics
    context, by processing its 'metric-group-infos' property.
    """
    # Dictionary of MetricGroupDefinition objects, by metric group name.
    group_defs = dict()
    for group_info in self.properties['metric-group-infos']:
        group_name = group_info['group-name']
        group_def = MetricGroupDefinition(
            name=group_name,
            resource_class=_resource_class_from_group(group_name),
            metric_definitions=dict())
        # The metric index is its position within 'metric-infos'.
        for index, metric_info in enumerate(group_info['metric-infos']):
            metric_name = metric_info['metric-name']
            group_def.metric_definitions[metric_name] = MetricDefinition(
                index=index,
                name=metric_name,
                type=_metric_type(metric_info['metric-type']),
                unit=_metric_unit_from_name(metric_name))
        group_defs[group_name] = group_def
    return group_defs
[ "def", "_setup_metric_group_definitions", "(", "self", ")", ":", "# Dictionary of MetricGroupDefinition objects, by metric group name", "metric_group_definitions", "=", "dict", "(", ")", "for", "mg_info", "in", "self", ".", "properties", "[", "'metric-group-infos'", "]", ":...
Return the dict of MetricGroupDefinition objects for this metrics context, by processing its 'metric-group-infos' property.
[ "Return", "the", "dict", "of", "MetricGroupDefinition", "objects", "for", "this", "metrics", "context", "by", "processing", "its", "metric", "-", "group", "-", "infos", "property", "." ]
python
train
pyQode/pyqode.core
pyqode/core/widgets/output_window.py
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/widgets/output_window.py#L293-L311
def mouseMoveEvent(self, event):
    """
    Handle mouse over file link.

    Switches to a pointing-hand cursor and records the regex match when
    the mouse hovers a file link in the current text block.
    """
    cursor = self.cursorForPosition(event.pos())
    hovered_block = cursor.block()
    # Assume no link under the mouse until a match proves otherwise.
    self._link_match = None
    self.viewport().setCursor(QtCore.Qt.IBeamCursor)
    for match in self.link_regex.finditer(hovered_block.text()):
        if not match:
            continue
        start, end = match.span()
        if start <= cursor.positionInBlock() <= end:
            self._link_match = match
            self.viewport().setCursor(QtCore.Qt.PointingHandCursor)
            break
    self._last_hovered_block = hovered_block
    super(OutputWindow, self).mouseMoveEvent(event)
[ "def", "mouseMoveEvent", "(", "self", ",", "event", ")", ":", "c", "=", "self", ".", "cursorForPosition", "(", "event", ".", "pos", "(", ")", ")", "block", "=", "c", ".", "block", "(", ")", "self", ".", "_link_match", "=", "None", "self", ".", "vie...
Handle mouse over file link.
[ "Handle", "mouse", "over", "file", "link", "." ]
python
train
bear/parsedatetime
parsedatetime/__init__.py
https://github.com/bear/parsedatetime/blob/830775dc5e36395622b41f12317f5e10c303d3a2/parsedatetime/__init__.py#L1125-L1153
def _evalDayStr(self, datetimeString, sourceTime):
    """
    Evaluate text passed by L{_partialParseDaystr()}
    """
    day_word = datetimeString.strip()
    sourceTime = self._evalDT(datetimeString, sourceTime)

    # Given string is a natural language date string like today, tomorrow..
    (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime

    # Unknown day words contribute no offset.
    offset = self.ptc.dayOffsets.get(day_word, 0)

    if self.ptc.StartTimeFromSourceTime:
        start_hour, start_minute, start_second = hr, mn, sec
    else:
        # Default start-of-day when not carrying the source time over.
        start_hour, start_minute, start_second = 9, 0, 0

    self.currentContext.updateAccuracy(pdtContext.ACU_DAY)
    base = datetime.datetime(yr, mth, dy,
                             start_hour, start_minute, start_second)
    target = base + datetime.timedelta(days=offset)
    return target.timetuple()
[ "def", "_evalDayStr", "(", "self", ",", "datetimeString", ",", "sourceTime", ")", ":", "s", "=", "datetimeString", ".", "strip", "(", ")", "sourceTime", "=", "self", ".", "_evalDT", "(", "datetimeString", ",", "sourceTime", ")", "# Given string is a natural lang...
Evaluate text passed by L{_partialParseDaystr()}
[ "Evaluate", "text", "passed", "by", "L", "{", "_partialParseDaystr", "()", "}" ]
python
train
StackStorm/pybind
pybind/slxos/v17r_1_01a/qos_mpls/map_/dscp_exp/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17r_1_01a/qos_mpls/map_/dscp_exp/__init__.py#L131-L152
def _set_dscp(self, v, load=False):
  """
  Setter method for dscp, mapped from YANG variable /qos_mpls/map/dscp_exp/dscp (list)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_dscp is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_dscp() directly.
  """
  # NOTE: auto-generated by pyangbind from the brocade-qos-mpls YANG model;
  # do not hand-edit the YANGDynClass construction below.
  if hasattr(v, "_utype"):
    # Unwrap a typed value into its native representation first.
    v = v._utype(v)
  try:
    # Validate/coerce the value against the generated list type; raises
    # TypeError/ValueError on any mismatch.
    t = YANGDynClass(v,base=YANGListType("dscp_in_values",dscp.dscp, yang_name="dscp", rest_name="dscp", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dscp-in-values', extensions={u'tailf-common': {u'info': u'Map Dscp value to Exp value', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'callpoint': u'QosMplsDscpExpCallpoint', u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'cli-full-no': None}}), is_container='list', yang_name="dscp", rest_name="dscp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map Dscp value to Exp value', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'callpoint': u'QosMplsDscpExpCallpoint', u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mpls', defining_module='brocade-qos-mpls', yang_type='list', is_config=True)
  except (TypeError, ValueError):
    # Re-raise with a structured error payload describing the expected type.
    raise ValueError({
        'error-string': """dscp must be of a type compatible with list""",
        'defined-type': "list",
        'generated-type': """YANGDynClass(base=YANGListType("dscp_in_values",dscp.dscp, yang_name="dscp", rest_name="dscp", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dscp-in-values', extensions={u'tailf-common': {u'info': u'Map Dscp value to Exp value', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'callpoint': u'QosMplsDscpExpCallpoint', u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'cli-full-no': None}}), is_container='list', yang_name="dscp", rest_name="dscp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map Dscp value to Exp value', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'callpoint': u'QosMplsDscpExpCallpoint', u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mpls', defining_module='brocade-qos-mpls', yang_type='list', is_config=True)""",
      })
  self.__dscp = t
  # Notify the containing object of the change, if it supports it.
  if hasattr(self, '_set'):
    self._set()
[ "def", "_set_dscp", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", ...
Setter method for dscp, mapped from YANG variable /qos_mpls/map/dscp_exp/dscp (list) If this variable is read-only (config: false) in the source YANG file, then _set_dscp is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_dscp() directly.
[ "Setter", "method", "for", "dscp", "mapped", "from", "YANG", "variable", "/", "qos_mpls", "/", "map", "/", "dscp_exp", "/", "dscp", "(", "list", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", ...
python
train
dmwm/DBS
Client/src/python/dbs/apis/dbsClient.py
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Client/src/python/dbs/apis/dbsClient.py#L1432-L1454
def listRuns(self, **kwargs):
    """
    API to list all run dictionary, for example: [{'run_num': [160578, 160498, 160447, 160379]}].
    At least one parameter is mandatory.

    :param logical_file_name: List all runs in the file
    :type logical_file_name: str
    :param block_name: List all runs in the block
    :type block_name: str
    :param dataset: List all runs in that dataset
    :type dataset: str
    :param run_num: List all runs
    :type run_num: int, string or list
    """
    valid = ['run_num', 'logical_file_name', 'block_name', 'dataset']
    # 'multiple' means at least one of the valid parameters is required.
    checkInputParameter(method="listRuns",
                        parameters=kwargs.keys(),
                        validParameters=valid,
                        requiredParameters={'multiple': valid})
    return self.__callServer("runs", params=kwargs)
[ "def", "listRuns", "(", "self", ",", "*", "*", "kwargs", ")", ":", "validParameters", "=", "[", "'run_num'", ",", "'logical_file_name'", ",", "'block_name'", ",", "'dataset'", "]", "requiredParameters", "=", "{", "'multiple'", ":", "validParameters", "}", "che...
API to list all run dictionary, for example: [{'run_num': [160578, 160498, 160447, 160379]}]. At least one parameter is mandatory. :param logical_file_name: List all runs in the file :type logical_file_name: str :param block_name: List all runs in the block :type block_name: str :param dataset: List all runs in that dataset :type dataset: str :param run_num: List all runs :type run_num: int, string or list
[ "API", "to", "list", "all", "run", "dictionary", "for", "example", ":", "[", "{", "run_num", ":", "[", "160578", "160498", "160447", "160379", "]", "}", "]", ".", "At", "least", "one", "parameter", "is", "mandatory", "." ]
python
train
andrewjsledge/django-hash-filter
django_hash_filter/templatetags/hash_filter.py
https://github.com/andrewjsledge/django-hash-filter/blob/ea90b2903938e0733d3abfafed308a8d041d9fe7/django_hash_filter/templatetags/hash_filter.py#L13-L27
def hash(value, arg):
    """
    Returns a hex-digest of the passed in value for the hash algorithm given.

    :param value: the text to hash (encoded as UTF-8 on Python 3)
    :param arg: name of the hash algorithm (e.g. "md5"); case-insensitive
    :raises TemplateSyntaxError: if the algorithm is not available
    :raises ValueError: if the algorithm cannot produce a hex digest
    """
    arg = str(arg).lower()
    if sys.version_info >= (3, 0):
        # hashlib on Python 3 operates on bytes, not str.
        value = value.encode("utf-8")
    if arg not in get_available_hashes():
        # BUG FIX: the original format string ended in a bare '%', which made
        # this line raise "ValueError: unsupported format character" instead
        # of the intended TemplateSyntaxError.
        raise TemplateSyntaxError(
            "The %s hash algorithm does not exist. Supported algorithms are: %s"
            % (arg, get_available_hashes()))
    try:
        hasher = getattr(hashlib, arg)
        hashed = hasher(value).hexdigest()
    except Exception:
        raise ValueError(
            "The %s hash algorithm cannot produce a hex digest. "
            "Ensure that OpenSSL is properly installed." % arg)
    return hashed
[ "def", "hash", "(", "value", ",", "arg", ")", ":", "arg", "=", "str", "(", "arg", ")", ".", "lower", "(", ")", "if", "sys", ".", "version_info", ">=", "(", "3", ",", "0", ")", ":", "value", "=", "value", ".", "encode", "(", "\"utf-8\"", ")", ...
Returns a hex-digest of the passed in value for the hash algorithm given.
[ "Returns", "a", "hex", "-", "digest", "of", "the", "passed", "in", "value", "for", "the", "hash", "algorithm", "given", "." ]
python
train
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/pymongo/cursor_manager.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/cursor_manager.py#L50-L65
def close(self, cursor_id, address):
    """Kill a cursor.

    Raises TypeError if cursor_id is not an instance of (int, long).

    :Parameters:
      - `cursor_id`: cursor id to close
      - `address`: the cursor's server's (host, port) pair

    .. versionchanged:: 3.0
       Now requires an `address` argument.
    """
    # Validate before touching the client connection.
    if isinstance(cursor_id, integer_types):
        self.__client().kill_cursors([cursor_id], address)
    else:
        raise TypeError("cursor_id must be an integer")
[ "def", "close", "(", "self", ",", "cursor_id", ",", "address", ")", ":", "if", "not", "isinstance", "(", "cursor_id", ",", "integer_types", ")", ":", "raise", "TypeError", "(", "\"cursor_id must be an integer\"", ")", "self", ".", "__client", "(", ")", ".", ...
Kill a cursor. Raises TypeError if cursor_id is not an instance of (int, long). :Parameters: - `cursor_id`: cursor id to close - `address`: the cursor's server's (host, port) pair .. versionchanged:: 3.0 Now requires an `address` argument.
[ "Kill", "a", "cursor", "." ]
python
train
robotools/fontParts
Lib/fontParts/base/glyph.py
https://github.com/robotools/fontParts/blob/d2ff106fe95f9d566161d936a645157626568712/Lib/fontParts/base/glyph.py#L382-L390
def _set_leftMargin(self, value): """ value will be an int or float. Subclasses may override this method. """ diff = value - self.leftMargin self.moveBy((diff, 0)) self.width += diff
[ "def", "_set_leftMargin", "(", "self", ",", "value", ")", ":", "diff", "=", "value", "-", "self", ".", "leftMargin", "self", ".", "moveBy", "(", "(", "diff", ",", "0", ")", ")", "self", ".", "width", "+=", "diff" ]
value will be an int or float. Subclasses may override this method.
[ "value", "will", "be", "an", "int", "or", "float", "." ]
python
train
lago-project/lago
lago/log_utils.py
https://github.com/lago-project/lago/blob/5b8970f7687e063e4619066d5b8093ca997678c9/lago/log_utils.py#L327-L341
def should_show_by_level(self, record_level, base_level=None): """ Args: record_level (int): log level of the record to check base_level (int or None): log level to check against, will use the object's :class:`dump_level` if None is passed Returns: bool: True if the given log record should be shown according to the log level """ if base_level is None: base_level = self.dump_level return record_level >= base_level
[ "def", "should_show_by_level", "(", "self", ",", "record_level", ",", "base_level", "=", "None", ")", ":", "if", "base_level", "is", "None", ":", "base_level", "=", "self", ".", "dump_level", "return", "record_level", ">=", "base_level" ]
Args: record_level (int): log level of the record to check base_level (int or None): log level to check against, will use the object's :class:`dump_level` if None is passed Returns: bool: True if the given log record should be shown according to the log level
[ "Args", ":", "record_level", "(", "int", ")", ":", "log", "level", "of", "the", "record", "to", "check", "base_level", "(", "int", "or", "None", ")", ":", "log", "level", "to", "check", "against", "will", "use", "the", "object", "s", ":", "class", ":...
python
train
rcsb/mmtf-python
mmtf/api/mmtf_writer.py
https://github.com/rcsb/mmtf-python/blob/899bb877ca1b32a9396803d38c5bf38a2520754e/mmtf/api/mmtf_writer.py#L345-L352
def set_entity_info(self, chain_indices, sequence, description, entity_type): """Set the entity level information for the structure. :param chain_indices: the indices of the chains for this entity :param sequence: the one letter code sequence for this entity :param description: the description for this entity :param entity_type: the entity type (polymer,non-polymer,water) """ self.entity_list.append(make_entity_dict(chain_indices,sequence,description,entity_type))
[ "def", "set_entity_info", "(", "self", ",", "chain_indices", ",", "sequence", ",", "description", ",", "entity_type", ")", ":", "self", ".", "entity_list", ".", "append", "(", "make_entity_dict", "(", "chain_indices", ",", "sequence", ",", "description", ",", ...
Set the entity level information for the structure. :param chain_indices: the indices of the chains for this entity :param sequence: the one letter code sequence for this entity :param description: the description for this entity :param entity_type: the entity type (polymer,non-polymer,water)
[ "Set", "the", "entity", "level", "information", "for", "the", "structure", ".", ":", "param", "chain_indices", ":", "the", "indices", "of", "the", "chains", "for", "this", "entity", ":", "param", "sequence", ":", "the", "one", "letter", "code", "sequence", ...
python
train
googledatalab/pydatalab
solutionbox/image_classification/mltoolbox/image/classification/_trainer.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_trainer.py#L232-L244
def log(self, session): """Logs training progress.""" logging.info('Train [%s/%d], step %d (%.3f sec) %.1f ' 'global steps/s, %.1f local steps/s', self.task.type, self.task.index, self.global_step, (self.now - self.start_time), (self.global_step - self.last_global_step) / (self.now - self.last_global_time), (self.local_step - self.last_local_step) / (self.now - self.last_local_time)) self.last_log = self.now self.last_global_step, self.last_global_time = self.global_step, self.now self.last_local_step, self.last_local_time = self.local_step, self.now
[ "def", "log", "(", "self", ",", "session", ")", ":", "logging", ".", "info", "(", "'Train [%s/%d], step %d (%.3f sec) %.1f '", "'global steps/s, %.1f local steps/s'", ",", "self", ".", "task", ".", "type", ",", "self", ".", "task", ".", "index", ",", "self", "...
Logs training progress.
[ "Logs", "training", "progress", "." ]
python
train
spacetelescope/acstools
acstools/satdet.py
https://github.com/spacetelescope/acstools/blob/bbf8dd080cefcbf88529ec87c420f9e1b8002554/acstools/satdet.py#L934-L952
def _satdet_worker(work_queue, done_queue, sigma=2.0, low_thresh=0.1, h_thresh=0.5, small_edge=60, line_len=200, line_gap=75, percentile=(4.5, 93.0), buf=200): """Multiprocessing worker.""" for fil, chip in iter(work_queue.get, 'STOP'): try: result = _detsat_one( fil, chip, sigma=sigma, low_thresh=low_thresh, h_thresh=h_thresh, small_edge=small_edge, line_len=line_len, line_gap=line_gap, percentile=percentile, buf=buf, plot=False, verbose=False) except Exception as e: retcode = False result = '{0}: {1}'.format(type(e), str(e)) else: retcode = True done_queue.put((retcode, fil, chip, result)) return True
[ "def", "_satdet_worker", "(", "work_queue", ",", "done_queue", ",", "sigma", "=", "2.0", ",", "low_thresh", "=", "0.1", ",", "h_thresh", "=", "0.5", ",", "small_edge", "=", "60", ",", "line_len", "=", "200", ",", "line_gap", "=", "75", ",", "percentile",...
Multiprocessing worker.
[ "Multiprocessing", "worker", "." ]
python
train
ricequant/rqalpha
rqalpha/model/portfolio.py
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/model/portfolio.py#L205-L209
def market_value(self): """ [float] 市值 """ return sum(account.market_value for account in six.itervalues(self._accounts))
[ "def", "market_value", "(", "self", ")", ":", "return", "sum", "(", "account", ".", "market_value", "for", "account", "in", "six", ".", "itervalues", "(", "self", ".", "_accounts", ")", ")" ]
[float] 市值
[ "[", "float", "]", "市值" ]
python
train
davidchua/pymessenger
pymessenger/bot.py
https://github.com/davidchua/pymessenger/blob/c3aedb65b7a50e0ec82c0df39a566fceec734c85/pymessenger/bot.py#L249-L258
def send_file(self, recipient_id, file_path, notification_type=NotificationType.regular): """Send file to the specified recipient. https://developers.facebook.com/docs/messenger-platform/send-api-reference/file-attachment Input: recipient_id: recipient id to send to file_path: path to file to be sent Output: Response from API as <dict> """ return self.send_attachment(recipient_id, "file", file_path, notification_type)
[ "def", "send_file", "(", "self", ",", "recipient_id", ",", "file_path", ",", "notification_type", "=", "NotificationType", ".", "regular", ")", ":", "return", "self", ".", "send_attachment", "(", "recipient_id", ",", "\"file\"", ",", "file_path", ",", "notificat...
Send file to the specified recipient. https://developers.facebook.com/docs/messenger-platform/send-api-reference/file-attachment Input: recipient_id: recipient id to send to file_path: path to file to be sent Output: Response from API as <dict>
[ "Send", "file", "to", "the", "specified", "recipient", ".", "https", ":", "//", "developers", ".", "facebook", ".", "com", "/", "docs", "/", "messenger", "-", "platform", "/", "send", "-", "api", "-", "reference", "/", "file", "-", "attachment", "Input",...
python
train
gwastro/pycbc
pycbc/psd/estimate.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/psd/estimate.py#L187-L259
def inverse_spectrum_truncation(psd, max_filter_len, low_frequency_cutoff=None, trunc_method=None): """Modify a PSD such that the impulse response associated with its inverse square root is no longer than `max_filter_len` time samples. In practice this corresponds to a coarse graining or smoothing of the PSD. Parameters ---------- psd : FrequencySeries PSD whose inverse spectrum is to be truncated. max_filter_len : int Maximum length of the time-domain filter in samples. low_frequency_cutoff : {None, int} Frequencies below `low_frequency_cutoff` are zeroed in the output. trunc_method : {None, 'hann'} Function used for truncating the time-domain filter. None produces a hard truncation at `max_filter_len`. Returns ------- psd : FrequencySeries PSD whose inverse spectrum has been truncated. Raises ------ ValueError For invalid types or values of `max_filter_len` and `low_frequency_cutoff`. Notes ----- See arXiv:gr-qc/0509116 for details. """ # sanity checks if type(max_filter_len) is not int or max_filter_len <= 0: raise ValueError('max_filter_len must be a positive integer') if low_frequency_cutoff is not None and low_frequency_cutoff < 0 \ or low_frequency_cutoff > psd.sample_frequencies[-1]: raise ValueError('low_frequency_cutoff must be within the bandwidth of the PSD') N = (len(psd)-1)*2 inv_asd = FrequencySeries((1. 
/ psd)**0.5, delta_f=psd.delta_f, \ dtype=complex_same_precision_as(psd)) inv_asd[0] = 0 inv_asd[N//2] = 0 q = TimeSeries(numpy.zeros(N), delta_t=(N / psd.delta_f), \ dtype=real_same_precision_as(psd)) if low_frequency_cutoff: kmin = int(low_frequency_cutoff / psd.delta_f) inv_asd[0:kmin] = 0 ifft(inv_asd, q) trunc_start = max_filter_len // 2 trunc_end = N - max_filter_len // 2 if trunc_end < trunc_start: raise ValueError('Invalid value in inverse_spectrum_truncation') if trunc_method == 'hann': trunc_window = Array(numpy.hanning(max_filter_len), dtype=q.dtype) q[0:trunc_start] *= trunc_window[max_filter_len//2:max_filter_len] q[trunc_end:N] *= trunc_window[0:max_filter_len//2] if trunc_start < trunc_end: q[trunc_start:trunc_end] = 0 psd_trunc = FrequencySeries(numpy.zeros(len(psd)), delta_f=psd.delta_f, \ dtype=complex_same_precision_as(psd)) fft(q, psd_trunc) psd_trunc *= psd_trunc.conj() psd_out = 1. / abs(psd_trunc) return psd_out
[ "def", "inverse_spectrum_truncation", "(", "psd", ",", "max_filter_len", ",", "low_frequency_cutoff", "=", "None", ",", "trunc_method", "=", "None", ")", ":", "# sanity checks", "if", "type", "(", "max_filter_len", ")", "is", "not", "int", "or", "max_filter_len", ...
Modify a PSD such that the impulse response associated with its inverse square root is no longer than `max_filter_len` time samples. In practice this corresponds to a coarse graining or smoothing of the PSD. Parameters ---------- psd : FrequencySeries PSD whose inverse spectrum is to be truncated. max_filter_len : int Maximum length of the time-domain filter in samples. low_frequency_cutoff : {None, int} Frequencies below `low_frequency_cutoff` are zeroed in the output. trunc_method : {None, 'hann'} Function used for truncating the time-domain filter. None produces a hard truncation at `max_filter_len`. Returns ------- psd : FrequencySeries PSD whose inverse spectrum has been truncated. Raises ------ ValueError For invalid types or values of `max_filter_len` and `low_frequency_cutoff`. Notes ----- See arXiv:gr-qc/0509116 for details.
[ "Modify", "a", "PSD", "such", "that", "the", "impulse", "response", "associated", "with", "its", "inverse", "square", "root", "is", "no", "longer", "than", "max_filter_len", "time", "samples", ".", "In", "practice", "this", "corresponds", "to", "a", "coarse", ...
python
train
Duke-GCB/DukeDSClient
ddsc/cmdparser.py
https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/cmdparser.py#L61-L73
def add_project_name_or_id_arg(arg_parser, required=True, help_text_suffix="manage"): """ Adds project name or project id argument. These two are mutually exclusive. :param arg_parser: :param required: :param help_text: :return: """ project_name_or_id = arg_parser.add_mutually_exclusive_group(required=required) name_help_text = "Name of the project to {}.".format(help_text_suffix) add_project_name_arg(project_name_or_id, required=False, help_text=name_help_text) id_help_text = "ID of the project to {}.".format(help_text_suffix) add_project_id_arg(project_name_or_id, required=False, help_text=id_help_text)
[ "def", "add_project_name_or_id_arg", "(", "arg_parser", ",", "required", "=", "True", ",", "help_text_suffix", "=", "\"manage\"", ")", ":", "project_name_or_id", "=", "arg_parser", ".", "add_mutually_exclusive_group", "(", "required", "=", "required", ")", "name_help_...
Adds project name or project id argument. These two are mutually exclusive. :param arg_parser: :param required: :param help_text: :return:
[ "Adds", "project", "name", "or", "project", "id", "argument", ".", "These", "two", "are", "mutually", "exclusive", ".", ":", "param", "arg_parser", ":", ":", "param", "required", ":", ":", "param", "help_text", ":", ":", "return", ":" ]
python
train
CiscoTestAutomation/yang
ncdiff/src/yang/ncdiff/model.py
https://github.com/CiscoTestAutomation/yang/blob/c70ec5ac5a91f276c4060009203770ece92e76b4/ncdiff/src/yang/ncdiff/model.py#L1003-L1028
def node_equal(node1, node2): '''node_equal High-level api: Evaluate whether two nodes are equal. Parameters ---------- node1 : `Element` A node in a model tree. node2 : `Element` A node in another model tree. Returns ------- bool True if node1 and node2 are equal. ''' if ModelDiff.node_less(node1, node2) and \ ModelDiff.node_less(node2, node1): return True else: return False
[ "def", "node_equal", "(", "node1", ",", "node2", ")", ":", "if", "ModelDiff", ".", "node_less", "(", "node1", ",", "node2", ")", "and", "ModelDiff", ".", "node_less", "(", "node2", ",", "node1", ")", ":", "return", "True", "else", ":", "return", "False...
node_equal High-level api: Evaluate whether two nodes are equal. Parameters ---------- node1 : `Element` A node in a model tree. node2 : `Element` A node in another model tree. Returns ------- bool True if node1 and node2 are equal.
[ "node_equal" ]
python
train
ucfopen/canvasapi
canvasapi/canvas.py
https://github.com/ucfopen/canvasapi/blob/319064b5fc97ba54250af683eb98723ef3f76cf8/canvasapi/canvas.py#L928-L950
def list_group_participants(self, appointment_group, **kwargs): """ List student group participants in this appointment group. .. warning:: .. deprecated:: 0.10.0 Use :func:`canvasapi. canvas.Canvas.get_group_participants` instead. :calls: `GET /api/v1/appointment_groups/:id/groups \ <https://canvas.instructure.com/doc/api/appointment_groups.html#method.appointment_groups.groups>`_ :param appointment_group: The object or ID of the appointment group. :type appointment_group: :class:`canvasapi.appointment_group.AppointmentGroup` or int :rtype: :class:`canvasapi.paginated_list.PaginatedList` of :class:`canvasapi.group.Group` """ warnings.warn( "`list_group_participants` is being deprecated and will be removed " "in a future version. Use `get_group_participants` instead", DeprecationWarning ) return self.get_group_participants(appointment_group, **kwargs)
[ "def", "list_group_participants", "(", "self", ",", "appointment_group", ",", "*", "*", "kwargs", ")", ":", "warnings", ".", "warn", "(", "\"`list_group_participants` is being deprecated and will be removed \"", "\"in a future version. Use `get_group_participants` instead\"", ","...
List student group participants in this appointment group. .. warning:: .. deprecated:: 0.10.0 Use :func:`canvasapi. canvas.Canvas.get_group_participants` instead. :calls: `GET /api/v1/appointment_groups/:id/groups \ <https://canvas.instructure.com/doc/api/appointment_groups.html#method.appointment_groups.groups>`_ :param appointment_group: The object or ID of the appointment group. :type appointment_group: :class:`canvasapi.appointment_group.AppointmentGroup` or int :rtype: :class:`canvasapi.paginated_list.PaginatedList` of :class:`canvasapi.group.Group`
[ "List", "student", "group", "participants", "in", "this", "appointment", "group", "." ]
python
train
saltstack/salt
salt/transport/zeromq.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/transport/zeromq.py#L675-L705
def post_fork(self, payload_handler, io_loop): ''' After forking we need to create all of the local sockets to listen to the router :param func payload_handler: A function to called to handle incoming payloads as they are picked up off the wire :param IOLoop io_loop: An instance of a Tornado IOLoop, to handle event scheduling ''' self.payload_handler = payload_handler self.io_loop = io_loop self.context = zmq.Context(1) self._socket = self.context.socket(zmq.REP) self._start_zmq_monitor() if self.opts.get('ipc_mode', '') == 'tcp': self.w_uri = 'tcp://127.0.0.1:{0}'.format( self.opts.get('tcp_master_workers', 4515) ) else: self.w_uri = 'ipc://{0}'.format( os.path.join(self.opts['sock_dir'], 'workers.ipc') ) log.info('Worker binding to socket %s', self.w_uri) self._socket.connect(self.w_uri) salt.transport.mixins.auth.AESReqServerMixin.post_fork(self, payload_handler, io_loop) self.stream = zmq.eventloop.zmqstream.ZMQStream(self._socket, io_loop=self.io_loop) self.stream.on_recv_stream(self.handle_message)
[ "def", "post_fork", "(", "self", ",", "payload_handler", ",", "io_loop", ")", ":", "self", ".", "payload_handler", "=", "payload_handler", "self", ".", "io_loop", "=", "io_loop", "self", ".", "context", "=", "zmq", ".", "Context", "(", "1", ")", "self", ...
After forking we need to create all of the local sockets to listen to the router :param func payload_handler: A function to called to handle incoming payloads as they are picked up off the wire :param IOLoop io_loop: An instance of a Tornado IOLoop, to handle event scheduling
[ "After", "forking", "we", "need", "to", "create", "all", "of", "the", "local", "sockets", "to", "listen", "to", "the", "router" ]
python
train
tensorflow/cleverhans
cleverhans/model_zoo/deep_k_nearest_neighbors/dknn.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/model_zoo/deep_k_nearest_neighbors/dknn.py#L88-L132
def init_lsh(self): """ Initializes locality-sensitive hashing with FALCONN to find nearest neighbors in training data. """ self.query_objects = { } # contains the object that can be queried to find nearest neighbors at each layer. # mean of training data representation per layer (that needs to be substracted before LSH). self.centers = {} for layer in self.layers: assert self.nb_tables >= self.neighbors # Normalize all the lenghts, since we care about the cosine similarity. self.train_activations_lsh[layer] /= np.linalg.norm( self.train_activations_lsh[layer], axis=1).reshape(-1, 1) # Center the dataset and the queries: this improves the performance of LSH quite a bit. center = np.mean(self.train_activations_lsh[layer], axis=0) self.train_activations_lsh[layer] -= center self.centers[layer] = center # LSH parameters params_cp = falconn.LSHConstructionParameters() params_cp.dimension = len(self.train_activations_lsh[layer][1]) params_cp.lsh_family = falconn.LSHFamily.CrossPolytope params_cp.distance_function = falconn.DistanceFunction.EuclideanSquared params_cp.l = self.nb_tables params_cp.num_rotations = 2 # for dense set it to 1; for sparse data set it to 2 params_cp.seed = 5721840 # we want to use all the available threads to set up params_cp.num_setup_threads = 0 params_cp.storage_hash_table = falconn.StorageHashTable.BitPackedFlatHashTable # we build 18-bit hashes so that each table has # 2^18 bins; this is a good choice since 2^18 is of the same # order of magnitude as the number of data points falconn.compute_number_of_hash_functions(self.number_bits, params_cp) print('Constructing the LSH table') table = falconn.LSHIndex(params_cp) table.setup(self.train_activations_lsh[layer]) # Parse test feature vectors and find k nearest neighbors query_object = table.construct_query_object() query_object.set_num_probes(self.nb_tables) self.query_objects[layer] = query_object
[ "def", "init_lsh", "(", "self", ")", ":", "self", ".", "query_objects", "=", "{", "}", "# contains the object that can be queried to find nearest neighbors at each layer.", "# mean of training data representation per layer (that needs to be substracted before LSH).", "self", ".", "ce...
Initializes locality-sensitive hashing with FALCONN to find nearest neighbors in training data.
[ "Initializes", "locality", "-", "sensitive", "hashing", "with", "FALCONN", "to", "find", "nearest", "neighbors", "in", "training", "data", "." ]
python
train
quodlibet/mutagen
mutagen/_senf/_stdlib.py
https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/_senf/_stdlib.py#L91-L132
def expanduser(path): """ Args: path (pathlike): A path to expand Returns: `fsnative` Like :func:`python:os.path.expanduser` but supports unicode home directories under Windows + Python 2 and always returns a `fsnative`. """ path = path2fsn(path) if path == "~": return _get_userdir() elif path.startswith("~" + sep) or ( altsep is not None and path.startswith("~" + altsep)): userdir = _get_userdir() if userdir is None: return path return userdir + path[1:] elif path.startswith("~"): sep_index = path.find(sep) if altsep is not None: alt_index = path.find(altsep) if alt_index != -1 and alt_index < sep_index: sep_index = alt_index if sep_index == -1: user = path[1:] rest = "" else: user = path[1:sep_index] rest = path[sep_index:] userdir = _get_userdir(user) if userdir is not None: return userdir + rest else: return path else: return path
[ "def", "expanduser", "(", "path", ")", ":", "path", "=", "path2fsn", "(", "path", ")", "if", "path", "==", "\"~\"", ":", "return", "_get_userdir", "(", ")", "elif", "path", ".", "startswith", "(", "\"~\"", "+", "sep", ")", "or", "(", "altsep", "is", ...
Args: path (pathlike): A path to expand Returns: `fsnative` Like :func:`python:os.path.expanduser` but supports unicode home directories under Windows + Python 2 and always returns a `fsnative`.
[ "Args", ":", "path", "(", "pathlike", ")", ":", "A", "path", "to", "expand", "Returns", ":", "fsnative" ]
python
train
arista-eosplus/pyeapi
pyeapi/api/stp.py
https://github.com/arista-eosplus/pyeapi/blob/96a74faef1fe3bd79c4e900aed29c9956a0587d6/pyeapi/api/stp.py#L181-L211
def get(self, name): """Returns the specified interfaces STP configuration resource The STP interface resource contains the following * name (str): The interface name * portfast (bool): The spanning-tree portfast admin state * bpduguard (bool): The spanning-tree bpduguard admin state * portfast_type (str): The spanning-tree portfast <type> value. Valid values include "edge", "network", "normal" Args: name (string): The interface identifier to retrieve the config for. Note: Spanning-tree interfaces are only supported on Ethernet and Port-Channel interfaces Returns: dict: A resource dict object that represents the interface configuration. None: If the specified interace is not a STP port """ if not isvalidinterface(name): return None config = self.get_block(r'^interface\s%s$' % name) resp = dict() resp.update(self._parse_bpduguard(config)) resp.update(self._parse_portfast(config)) resp.update(self._parse_portfast_type(config)) return resp
[ "def", "get", "(", "self", ",", "name", ")", ":", "if", "not", "isvalidinterface", "(", "name", ")", ":", "return", "None", "config", "=", "self", ".", "get_block", "(", "r'^interface\\s%s$'", "%", "name", ")", "resp", "=", "dict", "(", ")", "resp", ...
Returns the specified interfaces STP configuration resource The STP interface resource contains the following * name (str): The interface name * portfast (bool): The spanning-tree portfast admin state * bpduguard (bool): The spanning-tree bpduguard admin state * portfast_type (str): The spanning-tree portfast <type> value. Valid values include "edge", "network", "normal" Args: name (string): The interface identifier to retrieve the config for. Note: Spanning-tree interfaces are only supported on Ethernet and Port-Channel interfaces Returns: dict: A resource dict object that represents the interface configuration. None: If the specified interace is not a STP port
[ "Returns", "the", "specified", "interfaces", "STP", "configuration", "resource" ]
python
train
trailofbits/manticore
manticore/native/cpu/x86.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/native/cpu/x86.py#L550-L554
def _set_flags(self, reg, res): """ Set individual flags from a EFLAGS/RFLAGS value """ #assert sizeof (res) == 32 if reg == 'EFLAGS' else 64 for flag, offset in self._flags.items(): self.write(flag, Operators.EXTRACT(res, offset, 1))
[ "def", "_set_flags", "(", "self", ",", "reg", ",", "res", ")", ":", "#assert sizeof (res) == 32 if reg == 'EFLAGS' else 64", "for", "flag", ",", "offset", "in", "self", ".", "_flags", ".", "items", "(", ")", ":", "self", ".", "write", "(", "flag", ",", "Op...
Set individual flags from a EFLAGS/RFLAGS value
[ "Set", "individual", "flags", "from", "a", "EFLAGS", "/", "RFLAGS", "value" ]
python
valid
nickmckay/LiPD-utilities
Python/lipd/lpd_noaa.py
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/lpd_noaa.py#L1568-L1591
def __write_data_col_vals(self, ll, pc): """ Loop over value arrays and write index by index, to correspond to the rows of a txt file :param list ll: List of lists, column data :return: """ # all columns should have the same amount of values. grab that number try: _items_in_cols = len(ll[0]["values"]) for idx in range(0, _items_in_cols): # amount of columns _count = len(ll) self.noaa_txt += "# " for col in ll: self.noaa_txt += "{}\t".format(str(col["values"][idx])) _count -= 1 if (idx < _items_in_cols): self.noaa_txt += '\n' except IndexError: logger_lpd_noaa("_write_data_col_vals: IndexError: couldn't get length of columns") return
[ "def", "__write_data_col_vals", "(", "self", ",", "ll", ",", "pc", ")", ":", "# all columns should have the same amount of values. grab that number", "try", ":", "_items_in_cols", "=", "len", "(", "ll", "[", "0", "]", "[", "\"values\"", "]", ")", "for", "idx", "...
Loop over value arrays and write index by index, to correspond to the rows of a txt file :param list ll: List of lists, column data :return:
[ "Loop", "over", "value", "arrays", "and", "write", "index", "by", "index", "to", "correspond", "to", "the", "rows", "of", "a", "txt", "file", ":", "param", "list", "ll", ":", "List", "of", "lists", "column", "data", ":", "return", ":" ]
python
train
ecell/ecell4
ecell4/util/viz.py
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/viz.py#L28-L58
def plot_number_observer(*args, **kwargs): """ Generate a plot from NumberObservers and show it. See plot_number_observer_with_matplotlib and _with_nya for details. Parameters ---------- obs : NumberObserver (e.g. FixedIntervalNumberObserver) interactive : bool, default False Choose a visualizer. If False, show the plot with matplotlib. If True (only available on IPython Notebook), show it with nyaplot. Examples -------- >>> plot_number_observer(obs1) >>> plot_number_observer(obs1, interactive=True) """ interactive = kwargs.pop('interactive', False) if interactive: plot_number_observer_with_nya(*args, **kwargs) # elif __on_ipython_notebook(): # kwargs['to_png'] = True # plot_number_observer_with_nya(*args, **kwargs) else: if kwargs.pop('to_png', None) is not None: #XXX: Remove an option available only on nyaplot for the consistency import warnings warnings.warn( "An option 'to_png' is not available with matplotlib. Just ignored.") plot_number_observer_with_matplotlib(*args, **kwargs)
[ "def", "plot_number_observer", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "interactive", "=", "kwargs", ".", "pop", "(", "'interactive'", ",", "False", ")", "if", "interactive", ":", "plot_number_observer_with_nya", "(", "*", "args", ",", "*", "*...
Generate a plot from NumberObservers and show it. See plot_number_observer_with_matplotlib and _with_nya for details. Parameters ---------- obs : NumberObserver (e.g. FixedIntervalNumberObserver) interactive : bool, default False Choose a visualizer. If False, show the plot with matplotlib. If True (only available on IPython Notebook), show it with nyaplot. Examples -------- >>> plot_number_observer(obs1) >>> plot_number_observer(obs1, interactive=True)
[ "Generate", "a", "plot", "from", "NumberObservers", "and", "show", "it", ".", "See", "plot_number_observer_with_matplotlib", "and", "_with_nya", "for", "details", "." ]
python
train
tensorflow/tensorboard
tensorboard/backend/application.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/application.py#L163-L193
def TensorBoardWSGIApp(logdir, plugins, multiplexer, reload_interval, path_prefix='', reload_task='auto'): """Constructs the TensorBoard application. Args: logdir: the logdir spec that describes where data will be loaded. may be a directory, or comma,separated list of directories, or colons can be used to provide named directories plugins: A list of base_plugin.TBPlugin subclass instances. multiplexer: The EventMultiplexer with TensorBoard data to serve reload_interval: How often (in seconds) to reload the Multiplexer. Zero means reload just once at startup; negative means never load. path_prefix: A prefix of the path when app isn't served from root. reload_task: Indicates the type of background task to reload with. Returns: A WSGI application that implements the TensorBoard backend. Raises: ValueError: If something is wrong with the plugin configuration. :type plugins: list[base_plugin.TBPlugin] :rtype: TensorBoardWSGI """ path_to_run = parse_event_files_spec(logdir) if reload_interval >= 0: # We either reload the multiplexer once when TensorBoard starts up, or we # continuously reload the multiplexer. start_reloading_multiplexer(multiplexer, path_to_run, reload_interval, reload_task) return TensorBoardWSGI(plugins, path_prefix)
[ "def", "TensorBoardWSGIApp", "(", "logdir", ",", "plugins", ",", "multiplexer", ",", "reload_interval", ",", "path_prefix", "=", "''", ",", "reload_task", "=", "'auto'", ")", ":", "path_to_run", "=", "parse_event_files_spec", "(", "logdir", ")", "if", "reload_in...
Constructs the TensorBoard application. Args: logdir: the logdir spec that describes where data will be loaded. may be a directory, or comma,separated list of directories, or colons can be used to provide named directories plugins: A list of base_plugin.TBPlugin subclass instances. multiplexer: The EventMultiplexer with TensorBoard data to serve reload_interval: How often (in seconds) to reload the Multiplexer. Zero means reload just once at startup; negative means never load. path_prefix: A prefix of the path when app isn't served from root. reload_task: Indicates the type of background task to reload with. Returns: A WSGI application that implements the TensorBoard backend. Raises: ValueError: If something is wrong with the plugin configuration. :type plugins: list[base_plugin.TBPlugin] :rtype: TensorBoardWSGI
[ "Constructs", "the", "TensorBoard", "application", "." ]
python
train
Clinical-Genomics/scout
scout/utils/link.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/utils/link.py#L18-L57
def genes_by_alias(hgnc_genes): """Return a dictionary with hgnc symbols as keys Value of the dictionaries are information about the hgnc ids for a symbol. If the symbol is primary for a gene then 'true_id' will exist. A list of hgnc ids that the symbol points to is in ids. Args: hgnc_genes(dict): a dictionary with hgnc_id as key and gene info as value Returns: alias_genes(dict): { 'hgnc_symbol':{ 'true_id': int, 'ids': list(int) } } """ alias_genes = {} for hgnc_id in hgnc_genes: gene = hgnc_genes[hgnc_id] # This is the primary symbol: hgnc_symbol = gene['hgnc_symbol'] for alias in gene['previous_symbols']: true_id = None if alias == hgnc_symbol: true_id = hgnc_id if alias in alias_genes: alias_genes[alias.upper()]['ids'].add(hgnc_id) if true_id: alias_genes[alias.upper()]['true_id'] = hgnc_id else: alias_genes[alias.upper()] = { 'true': true_id, 'ids': set([hgnc_id]) } return alias_genes
[ "def", "genes_by_alias", "(", "hgnc_genes", ")", ":", "alias_genes", "=", "{", "}", "for", "hgnc_id", "in", "hgnc_genes", ":", "gene", "=", "hgnc_genes", "[", "hgnc_id", "]", "# This is the primary symbol:", "hgnc_symbol", "=", "gene", "[", "'hgnc_symbol'", "]",...
Return a dictionary with hgnc symbols as keys Value of the dictionaries are information about the hgnc ids for a symbol. If the symbol is primary for a gene then 'true_id' will exist. A list of hgnc ids that the symbol points to is in ids. Args: hgnc_genes(dict): a dictionary with hgnc_id as key and gene info as value Returns: alias_genes(dict): { 'hgnc_symbol':{ 'true_id': int, 'ids': list(int) } }
[ "Return", "a", "dictionary", "with", "hgnc", "symbols", "as", "keys" ]
python
test
jfilter/text-classification-keras
texcla/corpus.py
https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/corpus.py#L8-L22
def read_folder(directory): """read text files in directory and returns them as array Args: directory: where the text files are Returns: Array of text """ res = [] for filename in os.listdir(directory): with io.open(os.path.join(directory, filename), encoding="utf-8") as f: content = f.read() res.append(content) return res
[ "def", "read_folder", "(", "directory", ")", ":", "res", "=", "[", "]", "for", "filename", "in", "os", ".", "listdir", "(", "directory", ")", ":", "with", "io", ".", "open", "(", "os", ".", "path", ".", "join", "(", "directory", ",", "filename", ")...
read text files in directory and returns them as array Args: directory: where the text files are Returns: Array of text
[ "read", "text", "files", "in", "directory", "and", "returns", "them", "as", "array" ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/parallel/client/magics.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/client/magics.py#L56-L84
def exec_args(f): """decorator for adding block/targets args for execution applied to %pxconfig and %%px """ args = [ magic_arguments.argument('-b', '--block', action="store_const", const=True, dest='block', help="use blocking (sync) execution", ), magic_arguments.argument('-a', '--noblock', action="store_const", const=False, dest='block', help="use non-blocking (async) execution", ), magic_arguments.argument('-t', '--targets', type=str, help="specify the targets on which to execute", ), magic_arguments.argument('--verbose', action="store_const", const=True, dest="set_verbose", help="print a message at each execution", ), magic_arguments.argument('--no-verbose', action="store_const", const=False, dest="set_verbose", help="don't print any messages", ), ] for a in args: f = a(f) return f
[ "def", "exec_args", "(", "f", ")", ":", "args", "=", "[", "magic_arguments", ".", "argument", "(", "'-b'", ",", "'--block'", ",", "action", "=", "\"store_const\"", ",", "const", "=", "True", ",", "dest", "=", "'block'", ",", "help", "=", "\"use blocking ...
decorator for adding block/targets args for execution applied to %pxconfig and %%px
[ "decorator", "for", "adding", "block", "/", "targets", "args", "for", "execution", "applied", "to", "%pxconfig", "and", "%%px" ]
python
test
scott-griffiths/bitstring
bitstring.py
https://github.com/scott-griffiths/bitstring/blob/ab40ae7f0b43fe223a39b63cbc0529b09f3ef653/bitstring.py#L3806-L3812
def _setbitpos(self, pos): """Move to absolute postion bit in bitstream.""" if pos < 0: raise ValueError("Bit position cannot be negative.") if pos > self.len: raise ValueError("Cannot seek past the end of the data.") self._pos = pos
[ "def", "_setbitpos", "(", "self", ",", "pos", ")", ":", "if", "pos", "<", "0", ":", "raise", "ValueError", "(", "\"Bit position cannot be negative.\"", ")", "if", "pos", ">", "self", ".", "len", ":", "raise", "ValueError", "(", "\"Cannot seek past the end of t...
Move to absolute postion bit in bitstream.
[ "Move", "to", "absolute", "postion", "bit", "in", "bitstream", "." ]
python
train
line/line-bot-sdk-python
linebot/api.py
https://github.com/line/line-bot-sdk-python/blob/1b38bfc2497ff3e3c75be4b50e0f1b7425a07ce0/linebot/api.py#L98-L126
def push_message(self, to, messages, timeout=None): """Call push message API. https://devdocs.line.me/en/#push-message Send messages to users, groups, and rooms at any time. :param str to: ID of the receiver :param messages: Messages. Max: 5 :type messages: T <= :py:class:`linebot.models.send_messages.SendMessage` | list[T <= :py:class:`linebot.models.send_messages.SendMessage`] :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a (connect timeout, read timeout) float tuple. Default is self.http_client.timeout :type timeout: float | tuple(float, float) """ if not isinstance(messages, (list, tuple)): messages = [messages] data = { 'to': to, 'messages': [message.as_json_dict() for message in messages] } self._post( '/v2/bot/message/push', data=json.dumps(data), timeout=timeout )
[ "def", "push_message", "(", "self", ",", "to", ",", "messages", ",", "timeout", "=", "None", ")", ":", "if", "not", "isinstance", "(", "messages", ",", "(", "list", ",", "tuple", ")", ")", ":", "messages", "=", "[", "messages", "]", "data", "=", "{...
Call push message API. https://devdocs.line.me/en/#push-message Send messages to users, groups, and rooms at any time. :param str to: ID of the receiver :param messages: Messages. Max: 5 :type messages: T <= :py:class:`linebot.models.send_messages.SendMessage` | list[T <= :py:class:`linebot.models.send_messages.SendMessage`] :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a (connect timeout, read timeout) float tuple. Default is self.http_client.timeout :type timeout: float | tuple(float, float)
[ "Call", "push", "message", "API", "." ]
python
train
pypa/pipenv
pipenv/vendor/distlib/locators.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/locators.py#L551-L576
def links(self): """ Return the URLs of all the links on a page together with information about their "rel" attribute, for determining which ones to treat as downloads and which ones to queue for further scraping. """ def clean(url): "Tidy up an URL." scheme, netloc, path, params, query, frag = urlparse(url) return urlunparse((scheme, netloc, quote(path), params, query, frag)) result = set() for match in self._href.finditer(self.data): d = match.groupdict('') rel = (d['rel1'] or d['rel2'] or d['rel3'] or d['rel4'] or d['rel5'] or d['rel6']) url = d['url1'] or d['url2'] or d['url3'] url = urljoin(self.base_url, url) url = unescape(url) url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url) result.add((url, rel)) # We sort the result, hoping to bring the most recent versions # to the front result = sorted(result, key=lambda t: t[0], reverse=True) return result
[ "def", "links", "(", "self", ")", ":", "def", "clean", "(", "url", ")", ":", "\"Tidy up an URL.\"", "scheme", ",", "netloc", ",", "path", ",", "params", ",", "query", ",", "frag", "=", "urlparse", "(", "url", ")", "return", "urlunparse", "(", "(", "s...
Return the URLs of all the links on a page together with information about their "rel" attribute, for determining which ones to treat as downloads and which ones to queue for further scraping.
[ "Return", "the", "URLs", "of", "all", "the", "links", "on", "a", "page", "together", "with", "information", "about", "their", "rel", "attribute", "for", "determining", "which", "ones", "to", "treat", "as", "downloads", "and", "which", "ones", "to", "queue", ...
python
train
kubernetes-client/python
kubernetes/client/apis/storage_v1_api.py
https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/storage_v1_api.py#L1739-L1762
def replace_storage_class(self, name, body, **kwargs): """ replace the specified StorageClass This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_storage_class(name, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the StorageClass (required) :param V1StorageClass body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. :return: V1StorageClass If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_storage_class_with_http_info(name, body, **kwargs) else: (data) = self.replace_storage_class_with_http_info(name, body, **kwargs) return data
[ "def", "replace_storage_class", "(", "self", ",", "name", ",", "body", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "repla...
replace the specified StorageClass This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_storage_class(name, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the StorageClass (required) :param V1StorageClass body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. :return: V1StorageClass If the method is called asynchronously, returns the request thread.
[ "replace", "the", "specified", "StorageClass", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "pass", "async_req", "=", "True", ">>>", "thread", "=", "api...
python
train
delfick/nose-of-yeti
noseOfYeti/tokeniser/tracker.py
https://github.com/delfick/nose-of-yeti/blob/0b545ff350cebd59b40b601333c13033ce40d6dc/noseOfYeti/tokeniser/tracker.py#L387-L400
def add_tokens_for_group(self, with_pass=False): """Add the tokens for the group signature""" kls = self.groups.super_kls name = self.groups.kls_name # Reset indentation to beginning and add signature self.reset_indentation('') self.result.extend(self.tokens.make_describe(kls, name)) # Add pass if necessary if with_pass: self.add_tokens_for_pass() self.groups.finish_signature()
[ "def", "add_tokens_for_group", "(", "self", ",", "with_pass", "=", "False", ")", ":", "kls", "=", "self", ".", "groups", ".", "super_kls", "name", "=", "self", ".", "groups", ".", "kls_name", "# Reset indentation to beginning and add signature", "self", ".", "re...
Add the tokens for the group signature
[ "Add", "the", "tokens", "for", "the", "group", "signature" ]
python
train
Clinical-Genomics/scout
scout/adapter/mongo/hgnc.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/adapter/mongo/hgnc.py#L209-L216
def drop_genes(self, build=None): """Delete the genes collection""" if build: LOG.info("Dropping the hgnc_gene collection, build %s", build) self.hgnc_collection.delete_many({'build': build}) else: LOG.info("Dropping the hgnc_gene collection") self.hgnc_collection.drop()
[ "def", "drop_genes", "(", "self", ",", "build", "=", "None", ")", ":", "if", "build", ":", "LOG", ".", "info", "(", "\"Dropping the hgnc_gene collection, build %s\"", ",", "build", ")", "self", ".", "hgnc_collection", ".", "delete_many", "(", "{", "'build'", ...
Delete the genes collection
[ "Delete", "the", "genes", "collection" ]
python
test
saltstack/salt
salt/queues/pgjsonb_queue.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/queues/pgjsonb_queue.py#L183-L208
def insert(queue, items): ''' Add an item or items to a queue ''' handle_queue_creation(queue) with _conn(commit=True) as cur: if isinstance(items, dict): items = salt.utils.json.dumps(items) cmd = str('''INSERT INTO {0}(data) VALUES('{1}')''').format(queue, items) # future lint: disable=blacklisted-function log.debug('SQL Query: %s', cmd) try: cur.execute(cmd) except psycopg2.IntegrityError as esc: return ('Item already exists in this queue. ' 'postgres error: {0}'.format(esc)) if isinstance(items, list): items = [(salt.utils.json.dumps(el),) for el in items] cmd = str("INSERT INTO {0}(data) VALUES (%s)").format(queue) # future lint: disable=blacklisted-function log.debug('SQL Query: %s', cmd) try: cur.executemany(cmd, items) except psycopg2.IntegrityError as esc: return ('One or more items already exists in this queue. ' 'postgres error: {0}'.format(esc)) return True
[ "def", "insert", "(", "queue", ",", "items", ")", ":", "handle_queue_creation", "(", "queue", ")", "with", "_conn", "(", "commit", "=", "True", ")", "as", "cur", ":", "if", "isinstance", "(", "items", ",", "dict", ")", ":", "items", "=", "salt", ".",...
Add an item or items to a queue
[ "Add", "an", "item", "or", "items", "to", "a", "queue" ]
python
train
greyli/flask-avatars
flask_avatars/identicon.py
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/identicon.py#L205-L221
def generate(self, text): """Generate and save avatars, return a list of file name: [filename_s, filename_m, filename_l]. :param text: The text used to generate image. """ sizes = current_app.config['AVATARS_SIZE_TUPLE'] path = current_app.config['AVATARS_SAVE_PATH'] suffix = {sizes[0]: 's', sizes[1]: 'm', sizes[2]: 'l'} for size in sizes: image_byte_array = self.get_image( string=str(text), width=int(size), height=int(size), pad=int(size * 0.1)) self.save(image_byte_array, save_location=os.path.join(path, '%s_%s.png' % (text, suffix[size]))) return [text + '_s.png', text + '_m.png', text + '_l.png']
[ "def", "generate", "(", "self", ",", "text", ")", ":", "sizes", "=", "current_app", ".", "config", "[", "'AVATARS_SIZE_TUPLE'", "]", "path", "=", "current_app", ".", "config", "[", "'AVATARS_SAVE_PATH'", "]", "suffix", "=", "{", "sizes", "[", "0", "]", "...
Generate and save avatars, return a list of file name: [filename_s, filename_m, filename_l]. :param text: The text used to generate image.
[ "Generate", "and", "save", "avatars", "return", "a", "list", "of", "file", "name", ":", "[", "filename_s", "filename_m", "filename_l", "]", "." ]
python
train
belbio/bel
bel/lang/bel_specification.py
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_specification.py#L592-L661
def create_ebnf_parser(files): """Create EBNF files and EBNF-based parsers""" flag = False for belspec_fn in files: # Get EBNF Jinja template from Github if enabled if config["bel"]["lang"]["specification_github_repo"]: tmpl_fn = get_ebnf_template() # Check if EBNF file is more recent than belspec_fn ebnf_fn = belspec_fn.replace(".yaml", ".ebnf") if not os.path.exists(ebnf_fn) or os.path.getmtime(belspec_fn) > os.path.getmtime(ebnf_fn): with open(belspec_fn, "r") as f: belspec = yaml.load(f, Loader=yaml.SafeLoader) tmpl_dir = os.path.dirname(tmpl_fn) tmpl_basename = os.path.basename(tmpl_fn) bel_major_version = belspec["version"].split(".")[0] env = jinja2.Environment( loader=jinja2.FileSystemLoader(tmpl_dir) ) # create environment for template template = env.get_template(tmpl_basename) # get the template # replace template placeholders with appropriate variables relations_list = [ (relation, belspec["relations"]["info"][relation]["abbreviation"]) for relation in belspec["relations"]["info"] ] relations_list = sorted(list(itertools.chain(*relations_list)), key=len, reverse=True) functions_list = [ (function, belspec["functions"]["info"][function]["abbreviation"]) for function in belspec["functions"]["info"] if belspec["functions"]["info"][function]["type"] == "primary" ] functions_list = sorted(list(itertools.chain(*functions_list)), key=len, reverse=True) modifiers_list = [ (function, belspec["functions"]["info"][function]["abbreviation"]) for function in belspec["functions"]["info"] if belspec["functions"]["info"][function]["type"] == "modifier" ] modifiers_list = sorted(list(itertools.chain(*modifiers_list)), key=len, reverse=True) created_time = datetime.datetime.now().strftime("%B %d, %Y - %I:%M:%S%p") ebnf = template.render( functions=functions_list, m_functions=modifiers_list, relations=relations_list, bel_version=belspec["version"], bel_major_version=bel_major_version, created_time=created_time, ) with open(ebnf_fn, "w") as f: f.write(ebnf) parser_fn = 
ebnf_fn.replace(".ebnf", "_parser.py") parser = tatsu.to_python_sourcecode(ebnf, filename=parser_fn) flag = True with open(parser_fn, "wt") as f: f.write(parser) if flag: # In case we created new parser modules importlib.invalidate_caches()
[ "def", "create_ebnf_parser", "(", "files", ")", ":", "flag", "=", "False", "for", "belspec_fn", "in", "files", ":", "# Get EBNF Jinja template from Github if enabled", "if", "config", "[", "\"bel\"", "]", "[", "\"lang\"", "]", "[", "\"specification_github_repo\"", "...
Create EBNF files and EBNF-based parsers
[ "Create", "EBNF", "files", "and", "EBNF", "-", "based", "parsers" ]
python
train
andreasjansson/head-in-the-clouds
headintheclouds/dependencies/PyDbLite/MySQL.py
https://github.com/andreasjansson/head-in-the-clouds/blob/32c1d00d01036834dc94368e7f38b0afd3f7a82f/headintheclouds/dependencies/PyDbLite/MySQL.py#L165-L171
def _table_exists(self): """Database-specific method to see if the table exists""" self.cursor.execute("SHOW TABLES") for table in self.cursor.fetchall(): if table[0].lower() == self.name.lower(): return True return False
[ "def", "_table_exists", "(", "self", ")", ":", "self", ".", "cursor", ".", "execute", "(", "\"SHOW TABLES\"", ")", "for", "table", "in", "self", ".", "cursor", ".", "fetchall", "(", ")", ":", "if", "table", "[", "0", "]", ".", "lower", "(", ")", "=...
Database-specific method to see if the table exists
[ "Database", "-", "specific", "method", "to", "see", "if", "the", "table", "exists" ]
python
train
ArchiveTeam/wpull
wpull/warc/recorder.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/warc/recorder.py#L184-L211
def _setup_log(self): '''Set up the logging file.''' logger = logging.getLogger() formatter = logging.Formatter( '%(asctime)s - %(name)s - %(levelname)s - %(message)s') self._log_temp_file = NamedTemporaryFile( prefix='tmp-wpull-warc-', dir=self._params.temp_dir, suffix='.log.gz', delete=False, ) self._log_temp_file.close() # For Windows self._log_handler = handler = logging.StreamHandler( io.TextIOWrapper( gzip.GzipFile( filename=self._log_temp_file.name, mode='wb' ), encoding='utf-8' ) ) logger.setLevel(logging.DEBUG) logger.debug('Wpull needs the root logger level set to DEBUG.') handler.setFormatter(formatter) logger.addHandler(handler) handler.setLevel(logging.INFO)
[ "def", "_setup_log", "(", "self", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", ")", "formatter", "=", "logging", ".", "Formatter", "(", "'%(asctime)s - %(name)s - %(levelname)s - %(message)s'", ")", "self", ".", "_log_temp_file", "=", "NamedTemporaryFi...
Set up the logging file.
[ "Set", "up", "the", "logging", "file", "." ]
python
train
docker/docker-py
docker/models/images.py
https://github.com/docker/docker-py/blob/613d6aad83acc9931ff2ecfd6a6c7bd8061dc125/docker/models/images.py#L63-L103
def save(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE, named=False): """ Get a tarball of an image. Similar to the ``docker save`` command. Args: chunk_size (int): The generator will return up to that much data per iteration, but may return less. If ``None``, data will be streamed as it is received. Default: 2 MB named (str or bool): If ``False`` (default), the tarball will not retain repository and tag information for this image. If set to ``True``, the first tag in the :py:attr:`~tags` list will be used to identify the image. Alternatively, any element of the :py:attr:`~tags` list can be used as an argument to use that specific tag as the saved identifier. Returns: (generator): A stream of raw archive data. Raises: :py:class:`docker.errors.APIError` If the server returns an error. Example: >>> image = cli.get_image("busybox:latest") >>> f = open('/tmp/busybox-latest.tar', 'wb') >>> for chunk in image: >>> f.write(chunk) >>> f.close() """ img = self.id if named: img = self.tags[0] if self.tags else img if isinstance(named, six.string_types): if named not in self.tags: raise InvalidArgument( "{} is not a valid tag for this image".format(named) ) img = named return self.client.api.get_image(img, chunk_size)
[ "def", "save", "(", "self", ",", "chunk_size", "=", "DEFAULT_DATA_CHUNK_SIZE", ",", "named", "=", "False", ")", ":", "img", "=", "self", ".", "id", "if", "named", ":", "img", "=", "self", ".", "tags", "[", "0", "]", "if", "self", ".", "tags", "else...
Get a tarball of an image. Similar to the ``docker save`` command. Args: chunk_size (int): The generator will return up to that much data per iteration, but may return less. If ``None``, data will be streamed as it is received. Default: 2 MB named (str or bool): If ``False`` (default), the tarball will not retain repository and tag information for this image. If set to ``True``, the first tag in the :py:attr:`~tags` list will be used to identify the image. Alternatively, any element of the :py:attr:`~tags` list can be used as an argument to use that specific tag as the saved identifier. Returns: (generator): A stream of raw archive data. Raises: :py:class:`docker.errors.APIError` If the server returns an error. Example: >>> image = cli.get_image("busybox:latest") >>> f = open('/tmp/busybox-latest.tar', 'wb') >>> for chunk in image: >>> f.write(chunk) >>> f.close()
[ "Get", "a", "tarball", "of", "an", "image", ".", "Similar", "to", "the", "docker", "save", "command", "." ]
python
train
artefactual-labs/mets-reader-writer
metsrw/validate.py
https://github.com/artefactual-labs/mets-reader-writer/blob/d95939cabdfdc25cb1bf67df0c84bd0d6e6a73ff/metsrw/validate.py#L29-L36
def get_schematron(sct_path): """Return an lxml ``isoschematron.Schematron()`` instance using the schematron file at ``sct_path``. """ sct_path = _get_file_path(sct_path) parser = etree.XMLParser(remove_blank_text=True) sct_doc = etree.parse(sct_path, parser=parser) return isoschematron.Schematron(sct_doc, store_report=True)
[ "def", "get_schematron", "(", "sct_path", ")", ":", "sct_path", "=", "_get_file_path", "(", "sct_path", ")", "parser", "=", "etree", ".", "XMLParser", "(", "remove_blank_text", "=", "True", ")", "sct_doc", "=", "etree", ".", "parse", "(", "sct_path", ",", ...
Return an lxml ``isoschematron.Schematron()`` instance using the schematron file at ``sct_path``.
[ "Return", "an", "lxml", "isoschematron", ".", "Schematron", "()", "instance", "using", "the", "schematron", "file", "at", "sct_path", "." ]
python
train
limix/glimix-core
glimix_core/lmm/_kron2sum_scan.py
https://github.com/limix/glimix-core/blob/cddd0994591d100499cc41c1f480ddd575e7a980/glimix_core/lmm/_kron2sum_scan.py#L60-L77
def null_lml(self): """ Log of the marginal likelihood for the null hypothesis. It is implemented as :: 2·log(p(Y)) = -n·p·log(2𝜋s) - log|K| - n·p, for which s and 𝚩 are optimal. Returns ------- lml : float Log of the marginal likelihood. """ np = self._nsamples * self._ntraits scale = self.null_scale return self._static_lml() / 2 - np * safe_log(scale) / 2 - np / 2
[ "def", "null_lml", "(", "self", ")", ":", "np", "=", "self", ".", "_nsamples", "*", "self", ".", "_ntraits", "scale", "=", "self", ".", "null_scale", "return", "self", ".", "_static_lml", "(", ")", "/", "2", "-", "np", "*", "safe_log", "(", "scale", ...
Log of the marginal likelihood for the null hypothesis. It is implemented as :: 2·log(p(Y)) = -n·p·log(2𝜋s) - log|K| - n·p, for which s and 𝚩 are optimal. Returns ------- lml : float Log of the marginal likelihood.
[ "Log", "of", "the", "marginal", "likelihood", "for", "the", "null", "hypothesis", "." ]
python
valid
HttpRunner/har2case
har2case/core.py
https://github.com/HttpRunner/har2case/blob/369e576b24b3521832c35344b104828e30742170/har2case/core.py#L98-L130
def __make_request_headers(self, teststep_dict, entry_json): """ parse HAR entry request headers, and make teststep headers. header in IGNORE_REQUEST_HEADERS will be ignored. Args: entry_json (dict): { "request": { "headers": [ {"name": "Host", "value": "httprunner.top"}, {"name": "Content-Type", "value": "application/json"}, {"name": "User-Agent", "value": "iOS/10.3"} ], }, "response": {} } Returns: { "request": { headers: {"Content-Type": "application/json"} } """ teststep_headers = {} for header in entry_json["request"].get("headers", []): if header["name"].lower() in IGNORE_REQUEST_HEADERS: continue teststep_headers[header["name"]] = header["value"] if teststep_headers: teststep_dict["request"]["headers"] = teststep_headers
[ "def", "__make_request_headers", "(", "self", ",", "teststep_dict", ",", "entry_json", ")", ":", "teststep_headers", "=", "{", "}", "for", "header", "in", "entry_json", "[", "\"request\"", "]", ".", "get", "(", "\"headers\"", ",", "[", "]", ")", ":", "if",...
parse HAR entry request headers, and make teststep headers. header in IGNORE_REQUEST_HEADERS will be ignored. Args: entry_json (dict): { "request": { "headers": [ {"name": "Host", "value": "httprunner.top"}, {"name": "Content-Type", "value": "application/json"}, {"name": "User-Agent", "value": "iOS/10.3"} ], }, "response": {} } Returns: { "request": { headers: {"Content-Type": "application/json"} }
[ "parse", "HAR", "entry", "request", "headers", "and", "make", "teststep", "headers", ".", "header", "in", "IGNORE_REQUEST_HEADERS", "will", "be", "ignored", "." ]
python
train
gem/oq-engine
openquake/calculators/extract.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/calculators/extract.py#L293-L302
def extract_asset_tags(dstore, tagname): """ Extract an array of asset tags for the given tagname. Use it as /extract/asset_tags or /extract/asset_tags/taxonomy """ tagcol = dstore['assetcol/tagcol'] if tagname: yield tagname, barray(tagcol.gen_tags(tagname)) for tagname in tagcol.tagnames: yield tagname, barray(tagcol.gen_tags(tagname))
[ "def", "extract_asset_tags", "(", "dstore", ",", "tagname", ")", ":", "tagcol", "=", "dstore", "[", "'assetcol/tagcol'", "]", "if", "tagname", ":", "yield", "tagname", ",", "barray", "(", "tagcol", ".", "gen_tags", "(", "tagname", ")", ")", "for", "tagname...
Extract an array of asset tags for the given tagname. Use it as /extract/asset_tags or /extract/asset_tags/taxonomy
[ "Extract", "an", "array", "of", "asset", "tags", "for", "the", "given", "tagname", ".", "Use", "it", "as", "/", "extract", "/", "asset_tags", "or", "/", "extract", "/", "asset_tags", "/", "taxonomy" ]
python
train
jgillick/LendingClub
lendingclub/session.py
https://github.com/jgillick/LendingClub/blob/4495f99fd869810f39c00e02b0f4112c6b210384/lendingclub/session.py#L298-L302
def get(self, path, query=None, redirects=True): """ GET request wrapper for :func:`request()` """ return self.request('GET', path, query, None, redirects)
[ "def", "get", "(", "self", ",", "path", ",", "query", "=", "None", ",", "redirects", "=", "True", ")", ":", "return", "self", ".", "request", "(", "'GET'", ",", "path", ",", "query", ",", "None", ",", "redirects", ")" ]
GET request wrapper for :func:`request()`
[ "GET", "request", "wrapper", "for", ":", "func", ":", "request", "()" ]
python
train
jeffrimko/Qprompt
lib/qprompt.py
https://github.com/jeffrimko/Qprompt/blob/1887c53656dfecac49e0650e0f912328801cbb83/lib/qprompt.py#L558-L563
def clear(): """Clears the console.""" if sys.platform.startswith("win"): call("cls", shell=True) else: call("clear", shell=True)
[ "def", "clear", "(", ")", ":", "if", "sys", ".", "platform", ".", "startswith", "(", "\"win\"", ")", ":", "call", "(", "\"cls\"", ",", "shell", "=", "True", ")", "else", ":", "call", "(", "\"clear\"", ",", "shell", "=", "True", ")" ]
Clears the console.
[ "Clears", "the", "console", "." ]
python
train
richardchien/nonebot
nonebot/command/argfilter/converters.py
https://github.com/richardchien/nonebot/blob/13ed9e4e87d9824b61592520aabda6d2737c8848/nonebot/command/argfilter/converters.py#L4-L26
def _simple_chinese_to_bool(text: str) -> Optional[bool]: """ Convert a chinese text to boolean. Examples: 是的 -> True 好的呀 -> True 不要 -> False 不用了 -> False 你好呀 -> None """ text = text.strip().lower().replace(' ', '') \ .rstrip(',.!?~,。!?~了的呢吧呀啊呗啦') if text in {'要', '用', '是', '好', '对', '嗯', '行', 'ok', 'okay', 'yeah', 'yep', '当真', '当然', '必须', '可以', '肯定', '没错', '确定', '确认'}: return True if text in {'不', '不要', '不用', '不是', '否', '不好', '不对', '不行', '别', 'no', 'nono', 'nonono', 'nope', '不ok', '不可以', '不能', '不可以'}: return False return None
[ "def", "_simple_chinese_to_bool", "(", "text", ":", "str", ")", "->", "Optional", "[", "bool", "]", ":", "text", "=", "text", ".", "strip", "(", ")", ".", "lower", "(", ")", ".", "replace", "(", "' '", ",", "''", ")", ".", "rstrip", "(", "',.!?~,。!...
Convert a chinese text to boolean. Examples: 是的 -> True 好的呀 -> True 不要 -> False 不用了 -> False 你好呀 -> None
[ "Convert", "a", "chinese", "text", "to", "boolean", "." ]
python
train
gtaylor/python-colormath
colormath/color_conversions.py
https://github.com/gtaylor/python-colormath/blob/1d168613718d2d7d31ec4230524e987ef66823c7/colormath/color_conversions.py#L231-L246
def Lab_to_LCHab(cobj, *args, **kwargs): """ Convert from CIE Lab to LCH(ab). """ lch_l = cobj.lab_l lch_c = math.sqrt( math.pow(float(cobj.lab_a), 2) + math.pow(float(cobj.lab_b), 2)) lch_h = math.atan2(float(cobj.lab_b), float(cobj.lab_a)) if lch_h > 0: lch_h = (lch_h / math.pi) * 180 else: lch_h = 360 - (math.fabs(lch_h) / math.pi) * 180 return LCHabColor( lch_l, lch_c, lch_h, observer=cobj.observer, illuminant=cobj.illuminant)
[ "def", "Lab_to_LCHab", "(", "cobj", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "lch_l", "=", "cobj", ".", "lab_l", "lch_c", "=", "math", ".", "sqrt", "(", "math", ".", "pow", "(", "float", "(", "cobj", ".", "lab_a", ")", ",", "2", ")"...
Convert from CIE Lab to LCH(ab).
[ "Convert", "from", "CIE", "Lab", "to", "LCH", "(", "ab", ")", "." ]
python
train
angr/angr
angr/state_plugins/unicorn_engine.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/state_plugins/unicorn_engine.py#L1215-L1316
def get_regs(self): ''' loading registers from unicorn ''' # first, get the ignore list (in case of symbolic registers) if options.UNICORN_SYM_REGS_SUPPORT in self.state.options: highest_reg_offset, reg_size = max(self.state.arch.registers.values()) symbolic_list = (ctypes.c_uint64*(highest_reg_offset + reg_size))() num_regs = _UC_NATIVE.get_symbolic_registers(self._uc_state, symbolic_list) # we take the approach of saving off the symbolic regs and then writing them back saved_registers = [ ] cur_group = None last = None for i in sorted(symbolic_list[:num_regs]): if cur_group is None: cur_group = i elif i != last + 1 or cur_group//self.state.arch.bytes != i//self.state.arch.bytes: saved_registers.append(( cur_group, self.state.registers.load(cur_group, last-cur_group+1) )) cur_group = i last = i if cur_group is not None: saved_registers.append(( cur_group, self.state.registers.load(cur_group, last-cur_group+1) )) # now we sync registers out of unicorn for r, c in self._uc_regs.items(): if r in self.reg_blacklist: continue v = self.uc.reg_read(c) # l.debug('getting $%s = %#x', r, v) setattr(self.state.regs, r, v) # some architecture-specific register fixups if self.state.arch.name in ('X86', 'AMD64'): if self.jumpkind.startswith('Ijk_Sys'): self.state.registers.store('ip_at_syscall', self.state.regs.ip - 2) # update the eflags self.state.regs.eflags = self.state.solver.BVV(self.uc.reg_read(self._uc_const.UC_X86_REG_EFLAGS), self.state.arch.bits) # sync the fp clerical data status = self.uc.reg_read(unicorn.x86_const.UC_X86_REG_FPSW) c3210 = status & 0x4700 top = (status & 0x3800) >> 11 control = self.uc.reg_read(unicorn.x86_const.UC_X86_REG_FPCW) rm = (control & 0x0C00) >> 10 self.state.regs.fpround = rm self.state.regs.fc3210 = c3210 self.state.regs.ftop = top # sync the stx registers # we gotta round the 80-bit extended precision values to 64-bit doubles! 
uc_offset = unicorn.x86_const.UC_X86_REG_FP0 vex_offset = self.state.arch.registers['fpu_regs'][0] vex_tag_offset = self.state.arch.registers['fpu_tags'][0] + 7 tag_word = self.uc.reg_read(unicorn.x86_const.UC_X86_REG_FPTAG) for _ in range(8): if tag_word & 3 == 3: self.state.registers.store(vex_tag_offset, 0, size=1) else: self.state.registers.store(vex_tag_offset, 1, size=1) mantissa, exponent = self.uc.reg_read(uc_offset) sign = bool(exponent & 0x8000) exponent = (exponent & 0x7FFF) if exponent not in (0, 0x7FFF): # normal value exponent = exponent - 16383 + 1023 if exponent <= 0: # underflow to zero exponent = 0 mantissa = 0 elif exponent >= 0x7FF: # overflow to infinity exponent = 0x7FF mantissa = 0 elif exponent == 0: # zero or subnormal value mantissa = 0 elif exponent == 0x7FFF: # nan or infinity exponent = 0x7FF if mantissa != 0: mantissa = 0xFFFF val = 0x8000000000000000 if sign else 0 val |= exponent << 52 val |= (mantissa >> 11) & 0xFFFFFFFFFFFFF # the mantissa calculation is to convert from the 64-bit mantissa to 52-bit # additionally, extended precision keeps around an high bit that we don't care about # so 11-shift, not 12 self.state.registers.store(vex_offset, val, size=8) uc_offset += 1 vex_offset += 8 tag_word >>= 2 vex_tag_offset -= 1 # now, we restore the symbolic registers if options.UNICORN_SYM_REGS_SUPPORT in self.state.options: for o,r in saved_registers: self.state.registers.store(o, r)
[ "def", "get_regs", "(", "self", ")", ":", "# first, get the ignore list (in case of symbolic registers)", "if", "options", ".", "UNICORN_SYM_REGS_SUPPORT", "in", "self", ".", "state", ".", "options", ":", "highest_reg_offset", ",", "reg_size", "=", "max", "(", "self",...
loading registers from unicorn
[ "loading", "registers", "from", "unicorn" ]
python
train
6809/dragonlib
dragonlib/core/basic_parser.py
https://github.com/6809/dragonlib/blob/faa4011e76c5857db96efdb4199e2fd49711e999/dragonlib/core/basic_parser.py#L118-L136
def parse(self, ascii_listing): """ parse the given ASCII BASIC listing. Return a ParsedBASIC() instance. """ self.parsed_lines = ParsedBASIC() for match in self.regex_line_no.finditer(ascii_listing): log.info("_" * 79) log.info("parse line >>>%r<<<", match.group()) line_no = int(match.group("no")) line_content = match.group("content") self.line_data = [] self._parse_code(line_content) log.info("*** line %s result: %r", line_no, self.line_data) self.parsed_lines[line_no] = self.line_data return self.parsed_lines
[ "def", "parse", "(", "self", ",", "ascii_listing", ")", ":", "self", ".", "parsed_lines", "=", "ParsedBASIC", "(", ")", "for", "match", "in", "self", ".", "regex_line_no", ".", "finditer", "(", "ascii_listing", ")", ":", "log", ".", "info", "(", "\"_\"",...
parse the given ASCII BASIC listing. Return a ParsedBASIC() instance.
[ "parse", "the", "given", "ASCII", "BASIC", "listing", ".", "Return", "a", "ParsedBASIC", "()", "instance", "." ]
python
train
shichao-an/twitter-photos
twphotos/increment.py
https://github.com/shichao-an/twitter-photos/blob/32de6e8805edcbb431d08af861e9d2f0ab221106/twphotos/increment.py#L19-L31
def read_since_ids(users): """ Read max ids of the last downloads :param users: A list of users Return a dictionary mapping users to ids """ since_ids = {} for user in users: if config.has_option(SECTIONS['INCREMENTS'], user): since_ids[user] = config.getint(SECTIONS['INCREMENTS'], user) + 1 return since_ids
[ "def", "read_since_ids", "(", "users", ")", ":", "since_ids", "=", "{", "}", "for", "user", "in", "users", ":", "if", "config", ".", "has_option", "(", "SECTIONS", "[", "'INCREMENTS'", "]", ",", "user", ")", ":", "since_ids", "[", "user", "]", "=", "...
Read max ids of the last downloads :param users: A list of users Return a dictionary mapping users to ids
[ "Read", "max", "ids", "of", "the", "last", "downloads" ]
python
train
pyhys/minimalmodbus
minimalmodbus.py
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/minimalmodbus.py#L1850-L1887
def _fromTwosComplement(x, bits=16): """Calculate the inverse(?) of a two's complement of an integer. Args: * x (int): input integer. * bits (int): number of bits, must be > 0. Returns: An int, that represents the inverse(?) of two's complement of the input. Example for bits=8: === ======= x returns === ======= 0 0 1 1 127 127 128 -128 129 -127 255 -1 === ======= """ _checkInt(bits, minvalue=0, description='number of bits') _checkInt(x, description='input') upperlimit = 2 ** (bits) - 1 lowerlimit = 0 if x > upperlimit or x < lowerlimit: raise ValueError('The input value is out of range. Given value is {0}, but allowed range is {1} to {2} when using {3} bits.' \ .format(x, lowerlimit, upperlimit, bits)) # Calculate inverse(?) of two'2 complement limit = 2 ** (bits - 1) - 1 if x <= limit: return x return x - 2 ** bits
[ "def", "_fromTwosComplement", "(", "x", ",", "bits", "=", "16", ")", ":", "_checkInt", "(", "bits", ",", "minvalue", "=", "0", ",", "description", "=", "'number of bits'", ")", "_checkInt", "(", "x", ",", "description", "=", "'input'", ")", "upperlimit", ...
Calculate the inverse(?) of a two's complement of an integer. Args: * x (int): input integer. * bits (int): number of bits, must be > 0. Returns: An int, that represents the inverse(?) of two's complement of the input. Example for bits=8: === ======= x returns === ======= 0 0 1 1 127 127 128 -128 129 -127 255 -1 === =======
[ "Calculate", "the", "inverse", "(", "?", ")", "of", "a", "two", "s", "complement", "of", "an", "integer", "." ]
python
train
tchellomello/raincloudy
raincloudy/controller.py
https://github.com/tchellomello/raincloudy/blob/1847fa913e5ba79645d51bf23637860d68c67dbf/raincloudy/controller.py#L103-L116
def _refresh_html_home(self): """ Function to refresh the self._parent.html['home'] object which provides the status if zones are scheduled to start automatically (program_toggle). """ req = self._parent.client.get(HOME_ENDPOINT) if req.status_code == 403: self._parent.login() self.update() elif req.status_code == 200: self._parent.html['home'] = generate_soup_html(req.text) else: req.raise_for_status()
[ "def", "_refresh_html_home", "(", "self", ")", ":", "req", "=", "self", ".", "_parent", ".", "client", ".", "get", "(", "HOME_ENDPOINT", ")", "if", "req", ".", "status_code", "==", "403", ":", "self", ".", "_parent", ".", "login", "(", ")", "self", "...
Function to refresh the self._parent.html['home'] object which provides the status if zones are scheduled to start automatically (program_toggle).
[ "Function", "to", "refresh", "the", "self", ".", "_parent", ".", "html", "[", "home", "]", "object", "which", "provides", "the", "status", "if", "zones", "are", "scheduled", "to", "start", "automatically", "(", "program_toggle", ")", "." ]
python
train
tijme/not-your-average-web-crawler
nyawc/Queue.py
https://github.com/tijme/not-your-average-web-crawler/blob/d77c14e1616c541bb3980f649a7e6f8ed02761fb/nyawc/Queue.py#L150-L166
def get_first(self, status): """Get the first item in the queue that has the given status. Args: status (str): return the first item with this status. Returns: :class:`nyawc.QueueItem`: The first queue item with the given status. """ items = self.get_all(status) if items: return list(items.items())[0][1] return None
[ "def", "get_first", "(", "self", ",", "status", ")", ":", "items", "=", "self", ".", "get_all", "(", "status", ")", "if", "items", ":", "return", "list", "(", "items", ".", "items", "(", ")", ")", "[", "0", "]", "[", "1", "]", "return", "None" ]
Get the first item in the queue that has the given status. Args: status (str): return the first item with this status. Returns: :class:`nyawc.QueueItem`: The first queue item with the given status.
[ "Get", "the", "first", "item", "in", "the", "queue", "that", "has", "the", "given", "status", "." ]
python
train
cslarsen/elv
elv/elv.py
https://github.com/cslarsen/elv/blob/4bacf2093a0dcbe6a2b4d79be0fe339bb2b99097/elv/elv.py#L455-L463
def parse_stream(stream, format=u"Jæren Sparebank"): """Parses bank CSV stream (like a file handle or StringIO) and returns Transactions instance. Returns: A ``Transactions`` object. """ Class = formats[format.lower()] return Class.csv_to_transactions(stream)
[ "def", "parse_stream", "(", "stream", ",", "format", "=", "u\"Jæren Sparebank\")", ":", "", "Class", "=", "formats", "[", "format", ".", "lower", "(", ")", "]", "return", "Class", ".", "csv_to_transactions", "(", "stream", ")" ]
Parses bank CSV stream (like a file handle or StringIO) and returns Transactions instance. Returns: A ``Transactions`` object.
[ "Parses", "bank", "CSV", "stream", "(", "like", "a", "file", "handle", "or", "StringIO", ")", "and", "returns", "Transactions", "instance", "." ]
python
train
ppo/django-guitar
guitar/middlewares.py
https://github.com/ppo/django-guitar/blob/857282219c0c4ff5907c3ad04ef012281d245348/guitar/middlewares.py#L24-L38
def _get_route_info(self, request): """Return information about the current URL.""" resolve_match = resolve(request.path) app_name = resolve_match.app_name # The application namespace for the URL pattern that matches the URL. namespace = resolve_match.namespace # The instance namespace for the URL pattern that matches the URL. url_name = resolve_match.url_name # The name of the URL pattern that matches the URL. view_name = resolve_match.view_name # Name of the view that matches the URL, incl. namespace if there's one. return { "app_name": app_name or None, "namespace": namespace or None, "url_name": url_name or None, "view_name": view_name or None, }
[ "def", "_get_route_info", "(", "self", ",", "request", ")", ":", "resolve_match", "=", "resolve", "(", "request", ".", "path", ")", "app_name", "=", "resolve_match", ".", "app_name", "# The application namespace for the URL pattern that matches the URL.", "namespace", "...
Return information about the current URL.
[ "Return", "information", "about", "the", "current", "URL", "." ]
python
train
pybel/pybel
src/pybel/utils.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/utils.py#L247-L253
def canonicalize_edge(edge_data: EdgeData) -> Tuple[str, Optional[Tuple], Optional[Tuple]]: """Canonicalize the edge to a tuple based on the relation, subject modifications, and object modifications.""" return ( edge_data[RELATION], _canonicalize_edge_modifications(edge_data.get(SUBJECT)), _canonicalize_edge_modifications(edge_data.get(OBJECT)), )
[ "def", "canonicalize_edge", "(", "edge_data", ":", "EdgeData", ")", "->", "Tuple", "[", "str", ",", "Optional", "[", "Tuple", "]", ",", "Optional", "[", "Tuple", "]", "]", ":", "return", "(", "edge_data", "[", "RELATION", "]", ",", "_canonicalize_edge_modi...
Canonicalize the edge to a tuple based on the relation, subject modifications, and object modifications.
[ "Canonicalize", "the", "edge", "to", "a", "tuple", "based", "on", "the", "relation", "subject", "modifications", "and", "object", "modifications", "." ]
python
train
redhat-cip/python-dciclient
dciclient/v1/shell_commands/remoteci.py
https://github.com/redhat-cip/python-dciclient/blob/a4aa5899062802bbe4c30a075d8447f8d222d214/dciclient/v1/shell_commands/remoteci.py#L308-L324
def list_user(context, id, sort, limit, where, verbose): """list_user(context, id, sort, limit, where, verbose) List users attached to a remoteci. >>> dcictl remoteci-list-user [OPTIONS] :param string id: ID of the remoteci to list the user from [required] :param string sort: Field to apply sort :param integer limit: Max number of rows to return :param string where: An optional filter criteria :param boolean verbose: Display verbose output """ result = remoteci.list_users(context, id=id, sort=sort, limit=limit, where=where) utils.format_output(result, context.format, verbose=verbose)
[ "def", "list_user", "(", "context", ",", "id", ",", "sort", ",", "limit", ",", "where", ",", "verbose", ")", ":", "result", "=", "remoteci", ".", "list_users", "(", "context", ",", "id", "=", "id", ",", "sort", "=", "sort", ",", "limit", "=", "limi...
list_user(context, id, sort, limit, where, verbose) List users attached to a remoteci. >>> dcictl remoteci-list-user [OPTIONS] :param string id: ID of the remoteci to list the user from [required] :param string sort: Field to apply sort :param integer limit: Max number of rows to return :param string where: An optional filter criteria :param boolean verbose: Display verbose output
[ "list_user", "(", "context", "id", "sort", "limit", "where", "verbose", ")" ]
python
train
juju/charm-helpers
charmhelpers/contrib/ansible/__init__.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/ansible/__init__.py#L219-L229
def execute(self, args): """Execute the hook followed by the playbook using the hook as tag.""" hook_name = os.path.basename(args[0]) extra_vars = None if hook_name in self._actions: extra_vars = self._actions[hook_name](args[1:]) else: super(AnsibleHooks, self).execute(args) charmhelpers.contrib.ansible.apply_playbook( self.playbook_path, tags=[hook_name], extra_vars=extra_vars)
[ "def", "execute", "(", "self", ",", "args", ")", ":", "hook_name", "=", "os", ".", "path", ".", "basename", "(", "args", "[", "0", "]", ")", "extra_vars", "=", "None", "if", "hook_name", "in", "self", ".", "_actions", ":", "extra_vars", "=", "self", ...
Execute the hook followed by the playbook using the hook as tag.
[ "Execute", "the", "hook", "followed", "by", "the", "playbook", "using", "the", "hook", "as", "tag", "." ]
python
train
dcwatson/bbcode
bbcode.py
https://github.com/dcwatson/bbcode/blob/eb6f7ff140a78ddb1641102d7382479c4d7c1c78/bbcode.py#L578-L586
def format(self, data, **context): """ Formats the input text using any installed renderers. Any context keyword arguments given here will be passed along to the render functions as a context dictionary. """ tokens = self.tokenize(data) full_context = self.default_context.copy() full_context.update(context) return self._format_tokens(tokens, None, **full_context).replace('\r', self.newline)
[ "def", "format", "(", "self", ",", "data", ",", "*", "*", "context", ")", ":", "tokens", "=", "self", ".", "tokenize", "(", "data", ")", "full_context", "=", "self", ".", "default_context", ".", "copy", "(", ")", "full_context", ".", "update", "(", "...
Formats the input text using any installed renderers. Any context keyword arguments given here will be passed along to the render functions as a context dictionary.
[ "Formats", "the", "input", "text", "using", "any", "installed", "renderers", ".", "Any", "context", "keyword", "arguments", "given", "here", "will", "be", "passed", "along", "to", "the", "render", "functions", "as", "a", "context", "dictionary", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xsplitbutton.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xsplitbutton.py#L208-L218
def count(self): """ Returns the number of actions associated with this button. :return <int> """ actions = self._actionGroup.actions() if len(actions) == 1 and actions[0].objectName() == 'place_holder': return 0 return len(actions)
[ "def", "count", "(", "self", ")", ":", "actions", "=", "self", ".", "_actionGroup", ".", "actions", "(", ")", "if", "len", "(", "actions", ")", "==", "1", "and", "actions", "[", "0", "]", ".", "objectName", "(", ")", "==", "'place_holder'", ":", "r...
Returns the number of actions associated with this button. :return <int>
[ "Returns", "the", "number", "of", "actions", "associated", "with", "this", "button", ".", ":", "return", "<int", ">" ]
python
train
CZ-NIC/python-rt
rt.py
https://github.com/CZ-NIC/python-rt/blob/e7a9f555e136708aec3317f857045145a2271e16/rt.py#L408-L419
def last_updated(self, since, queue=None): """ Obtains tickets changed after given date. :param since: Date as string in form '2011-02-24' :keyword queue: Queue where to search :returns: List of tickets with LastUpdated parameter later than *since* ordered in decreasing order by LastUpdated. Each tickets is dictionary, the same as in :py:meth:`~Rt.get_ticket`. """ return self.search(Queue=queue, order='-LastUpdated', LastUpdatedBy__notexact=self.default_login, LastUpdated__gt=since)
[ "def", "last_updated", "(", "self", ",", "since", ",", "queue", "=", "None", ")", ":", "return", "self", ".", "search", "(", "Queue", "=", "queue", ",", "order", "=", "'-LastUpdated'", ",", "LastUpdatedBy__notexact", "=", "self", ".", "default_login", ",",...
Obtains tickets changed after given date. :param since: Date as string in form '2011-02-24' :keyword queue: Queue where to search :returns: List of tickets with LastUpdated parameter later than *since* ordered in decreasing order by LastUpdated. Each tickets is dictionary, the same as in :py:meth:`~Rt.get_ticket`.
[ "Obtains", "tickets", "changed", "after", "given", "date", "." ]
python
train
trailofbits/manticore
manticore/native/models.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/native/models.py#L35-L58
def _find_zero(cpu, constrs, ptr): """ Helper for finding the closest NULL or, effectively NULL byte from a starting address. :param Cpu cpu: :param ConstraintSet constrs: Constraints for current `State` :param int ptr: Address to start searching for a zero from :return: Offset from `ptr` to first byte that is 0 or an `Expression` that must be zero """ offset = 0 while True: byt = cpu.read_int(ptr + offset, 8) if issymbolic(byt): if not solver.can_be_true(constrs, byt != 0): break else: if byt == 0: break offset += 1 return offset
[ "def", "_find_zero", "(", "cpu", ",", "constrs", ",", "ptr", ")", ":", "offset", "=", "0", "while", "True", ":", "byt", "=", "cpu", ".", "read_int", "(", "ptr", "+", "offset", ",", "8", ")", "if", "issymbolic", "(", "byt", ")", ":", "if", "not", ...
Helper for finding the closest NULL or, effectively NULL byte from a starting address. :param Cpu cpu: :param ConstraintSet constrs: Constraints for current `State` :param int ptr: Address to start searching for a zero from :return: Offset from `ptr` to first byte that is 0 or an `Expression` that must be zero
[ "Helper", "for", "finding", "the", "closest", "NULL", "or", "effectively", "NULL", "byte", "from", "a", "starting", "address", "." ]
python
valid
chrisspen/burlap
burlap/common.py
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/common.py#L981-L988
def local_renderer(self): """ Retrieves the cached local renderer. """ if not self._local_renderer: r = self.create_local_renderer() self._local_renderer = r return self._local_renderer
[ "def", "local_renderer", "(", "self", ")", ":", "if", "not", "self", ".", "_local_renderer", ":", "r", "=", "self", ".", "create_local_renderer", "(", ")", "self", ".", "_local_renderer", "=", "r", "return", "self", ".", "_local_renderer" ]
Retrieves the cached local renderer.
[ "Retrieves", "the", "cached", "local", "renderer", "." ]
python
valid
nugget/python-insteonplm
insteonplm/states/onOff.py
https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/onOff.py#L916-L919
def _set_led_value(self, group, val): """Set the LED value and confirm with a status check.""" new_bitmask = set_bit(self._value, group, bool(val)) self._set_led_bitmask(new_bitmask)
[ "def", "_set_led_value", "(", "self", ",", "group", ",", "val", ")", ":", "new_bitmask", "=", "set_bit", "(", "self", ".", "_value", ",", "group", ",", "bool", "(", "val", ")", ")", "self", ".", "_set_led_bitmask", "(", "new_bitmask", ")" ]
Set the LED value and confirm with a status check.
[ "Set", "the", "LED", "value", "and", "confirm", "with", "a", "status", "check", "." ]
python
train
thombashi/SimpleSQLite
simplesqlite/core.py
https://github.com/thombashi/SimpleSQLite/blob/b16f212132b9b98773e68bf7395abc2f60f56fe5/simplesqlite/core.py#L643-L674
def fetch_value(self, select, table_name, where=None, extra=None): """ Fetch a value from the table. Return |None| if no value matches the conditions, or the table not found in the database. :param str select: Attribute for SELECT query :param str table_name: Table name of executing the query. :param where: |arg_select_where| :type where: |arg_where_type| :return: Result of execution of the query. :raises simplesqlite.NullDatabaseConnectionError: |raises_check_connection| :raises simplesqlite.OperationalError: |raises_operational_error| """ try: self.verify_table_existence(table_name) except TableNotFoundError as e: logger.debug(e) return None result = self.execute_query( Select(select, table_name, where, extra), logging.getLogger().findCaller() ) if result is None: return None fetch = result.fetchone() if fetch is None: return None return fetch[0]
[ "def", "fetch_value", "(", "self", ",", "select", ",", "table_name", ",", "where", "=", "None", ",", "extra", "=", "None", ")", ":", "try", ":", "self", ".", "verify_table_existence", "(", "table_name", ")", "except", "TableNotFoundError", "as", "e", ":", ...
Fetch a value from the table. Return |None| if no value matches the conditions, or the table not found in the database. :param str select: Attribute for SELECT query :param str table_name: Table name of executing the query. :param where: |arg_select_where| :type where: |arg_where_type| :return: Result of execution of the query. :raises simplesqlite.NullDatabaseConnectionError: |raises_check_connection| :raises simplesqlite.OperationalError: |raises_operational_error|
[ "Fetch", "a", "value", "from", "the", "table", ".", "Return", "|None|", "if", "no", "value", "matches", "the", "conditions", "or", "the", "table", "not", "found", "in", "the", "database", "." ]
python
train
saltstack/salt
salt/modules/parallels.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/parallels.py#L151-L205
def list_vms(name=None, info=False, all=False, args=None, runas=None, template=False): ''' List information about the VMs :param str name: Name/ID of VM to list .. versionchanged:: 2016.11.0 No longer implies ``info=True`` :param str info: List extra information :param bool all: List all non-template VMs :param tuple args: Additional arguments given to ``prctl list`` :param str runas: The user that the prlctl command will be run as :param bool template: List the available virtual machine templates. The real virtual machines will not be included in the output .. versionadded:: 2016.11.0 Example: .. code-block:: bash salt '*' parallels.list_vms runas=macdev salt '*' parallels.list_vms name=macvm info=True runas=macdev salt '*' parallels.list_vms info=True runas=macdev salt '*' parallels.list_vms ' -o uuid,status' all=True runas=macdev ''' # Construct argument list if args is None: args = [] else: args = _normalize_args(args) if name: args.extend([name]) if info: args.append('--info') if all: args.append('--all') if template: args.append('--template') # Execute command and return output return prlctl('list', args, runas=runas)
[ "def", "list_vms", "(", "name", "=", "None", ",", "info", "=", "False", ",", "all", "=", "False", ",", "args", "=", "None", ",", "runas", "=", "None", ",", "template", "=", "False", ")", ":", "# Construct argument list", "if", "args", "is", "None", "...
List information about the VMs :param str name: Name/ID of VM to list .. versionchanged:: 2016.11.0 No longer implies ``info=True`` :param str info: List extra information :param bool all: List all non-template VMs :param tuple args: Additional arguments given to ``prctl list`` :param str runas: The user that the prlctl command will be run as :param bool template: List the available virtual machine templates. The real virtual machines will not be included in the output .. versionadded:: 2016.11.0 Example: .. code-block:: bash salt '*' parallels.list_vms runas=macdev salt '*' parallels.list_vms name=macvm info=True runas=macdev salt '*' parallels.list_vms info=True runas=macdev salt '*' parallels.list_vms ' -o uuid,status' all=True runas=macdev
[ "List", "information", "about", "the", "VMs" ]
python
train
Erotemic/ubelt
ubelt/util_stream.py
https://github.com/Erotemic/ubelt/blob/db802f3ad8abba025db74b54f86e6892b8927325/ubelt/util_stream.py#L130-L136
def log_part(self): """ Log what has been captured so far """ self.cap_stdout.seek(self._pos) text = self.cap_stdout.read() self._pos = self.cap_stdout.tell() self.parts.append(text) self.text = text
[ "def", "log_part", "(", "self", ")", ":", "self", ".", "cap_stdout", ".", "seek", "(", "self", ".", "_pos", ")", "text", "=", "self", ".", "cap_stdout", ".", "read", "(", ")", "self", ".", "_pos", "=", "self", ".", "cap_stdout", ".", "tell", "(", ...
Log what has been captured so far
[ "Log", "what", "has", "been", "captured", "so", "far" ]
python
valid
greenbender/pynntp
nntp/nntp.py
https://github.com/greenbender/pynntp/blob/991a76331cdf5d8f9dbf5b18f6e29adc80749a2f/nntp/nntp.py#L570-L591
def date(self): """DATE command. Coordinated Universal time from the perspective of the usenet server. It can be used to provide information that might be useful when using the NEWNEWS command. See <http://tools.ietf.org/html/rfc3977#section-7.1> Returns: The UTC time according to the server as a datetime object. Raises: NNTPDataError: If the timestamp can't be parsed. """ code, message = self.command("DATE") if code != 111: raise NNTPReplyError(code, message) ts = date.datetimeobj(message, fmt="%Y%m%d%H%M%S") return ts
[ "def", "date", "(", "self", ")", ":", "code", ",", "message", "=", "self", ".", "command", "(", "\"DATE\"", ")", "if", "code", "!=", "111", ":", "raise", "NNTPReplyError", "(", "code", ",", "message", ")", "ts", "=", "date", ".", "datetimeobj", "(", ...
DATE command. Coordinated Universal time from the perspective of the usenet server. It can be used to provide information that might be useful when using the NEWNEWS command. See <http://tools.ietf.org/html/rfc3977#section-7.1> Returns: The UTC time according to the server as a datetime object. Raises: NNTPDataError: If the timestamp can't be parsed.
[ "DATE", "command", "." ]
python
test
streamlink/streamlink
src/streamlink/stream/hls.py
https://github.com/streamlink/streamlink/blob/c8ed1daff14ac03195870238b9b900c1109dd5c1/src/streamlink/stream/hls.py#L358-L486
def parse_variant_playlist(cls, session_, url, name_key="name", name_prefix="", check_streams=False, force_restart=False, name_fmt=None, start_offset=0, duration=None, **request_params): """Attempts to parse a variant playlist and return its streams. :param url: The URL of the variant playlist. :param name_key: Prefer to use this key as stream name, valid keys are: name, pixels, bitrate. :param name_prefix: Add this prefix to the stream names. :param check_streams: Only allow streams that are accessible. :param force_restart: Start at the first segment even for a live stream :param name_fmt: A format string for the name, allowed format keys are name, pixels, bitrate. """ locale = session_.localization # Backwards compatibility with "namekey" and "nameprefix" params. name_key = request_params.pop("namekey", name_key) name_prefix = request_params.pop("nameprefix", name_prefix) audio_select = session_.options.get("hls-audio-select") or [] res = session_.http.get(url, exception=IOError, **request_params) try: parser = hls_playlist.load(res.text, base_uri=res.url) except ValueError as err: raise IOError("Failed to parse playlist: {0}".format(err)) streams = {} for playlist in filter(lambda p: not p.is_iframe, parser.playlists): names = dict(name=None, pixels=None, bitrate=None) audio_streams = [] fallback_audio = [] default_audio = [] preferred_audio = [] for media in playlist.media: if media.type == "VIDEO" and media.name: names["name"] = media.name elif media.type == "AUDIO": audio_streams.append(media) for media in audio_streams: # Media without a uri is not relevant as external audio if not media.uri: continue if not fallback_audio and media.default: fallback_audio = [media] # if the media is "audoselect" and it better matches the users preferences, use that # instead of default if not default_audio and (media.autoselect and locale.equivalent(language=media.language)): default_audio = [media] # select the first audio stream that matches the users explict language 
selection if (('*' in audio_select or media.language in audio_select or media.name in audio_select) or ((not preferred_audio or media.default) and locale.explicit and locale.equivalent( language=media.language))): preferred_audio.append(media) # final fallback on the first audio stream listed fallback_audio = fallback_audio or (len(audio_streams) and audio_streams[0].uri and [audio_streams[0]]) if playlist.stream_info.resolution: width, height = playlist.stream_info.resolution names["pixels"] = "{0}p".format(height) if playlist.stream_info.bandwidth: bw = playlist.stream_info.bandwidth if bw >= 1000: names["bitrate"] = "{0}k".format(int(bw / 1000.0)) else: names["bitrate"] = "{0}k".format(bw / 1000.0) if name_fmt: stream_name = name_fmt.format(**names) else: stream_name = (names.get(name_key) or names.get("name") or names.get("pixels") or names.get("bitrate")) if not stream_name: continue if stream_name in streams: # rename duplicate streams stream_name = "{0}_alt".format(stream_name) num_alts = len(list(filter(lambda n: n.startswith(stream_name), streams.keys()))) # We shouldn't need more than 2 alt streams if num_alts >= 2: continue elif num_alts > 0: stream_name = "{0}{1}".format(stream_name, num_alts + 1) if check_streams: try: session_.http.get(playlist.uri, **request_params) except KeyboardInterrupt: raise except Exception: continue external_audio = preferred_audio or default_audio or fallback_audio if external_audio and FFMPEGMuxer.is_usable(session_): external_audio_msg = ", ".join([ "(language={0}, name={1})".format(x.language, (x.name or "N/A")) for x in external_audio ]) log.debug("Using external audio tracks for stream {0} {1}", name_prefix + stream_name, external_audio_msg) stream = MuxedHLSStream(session_, video=playlist.uri, audio=[x.uri for x in external_audio if x.uri], force_restart=force_restart, start_offset=start_offset, duration=duration, **request_params) else: stream = cls(session_, playlist.uri, force_restart=force_restart, 
start_offset=start_offset, duration=duration, **request_params) streams[name_prefix + stream_name] = stream return streams
[ "def", "parse_variant_playlist", "(", "cls", ",", "session_", ",", "url", ",", "name_key", "=", "\"name\"", ",", "name_prefix", "=", "\"\"", ",", "check_streams", "=", "False", ",", "force_restart", "=", "False", ",", "name_fmt", "=", "None", ",", "start_off...
Attempts to parse a variant playlist and return its streams. :param url: The URL of the variant playlist. :param name_key: Prefer to use this key as stream name, valid keys are: name, pixels, bitrate. :param name_prefix: Add this prefix to the stream names. :param check_streams: Only allow streams that are accessible. :param force_restart: Start at the first segment even for a live stream :param name_fmt: A format string for the name, allowed format keys are name, pixels, bitrate.
[ "Attempts", "to", "parse", "a", "variant", "playlist", "and", "return", "its", "streams", "." ]
python
test
apache/spark
python/pyspark/sql/column.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/column.py#L538-L570
def alias(self, *alias, **kwargs): """ Returns this column aliased with a new name or names (in the case of expressions that return more than one column, such as explode). :param alias: strings of desired column names (collects all positional arguments passed) :param metadata: a dict of information to be stored in ``metadata`` attribute of the corresponding :class: `StructField` (optional, keyword only argument) .. versionchanged:: 2.2 Added optional ``metadata`` argument. >>> df.select(df.age.alias("age2")).collect() [Row(age2=2), Row(age2=5)] >>> df.select(df.age.alias("age3", metadata={'max': 99})).schema['age3'].metadata['max'] 99 """ metadata = kwargs.pop('metadata', None) assert not kwargs, 'Unexpected kwargs where passed: %s' % kwargs sc = SparkContext._active_spark_context if len(alias) == 1: if metadata: jmeta = sc._jvm.org.apache.spark.sql.types.Metadata.fromJson( json.dumps(metadata)) return Column(getattr(self._jc, "as")(alias[0], jmeta)) else: return Column(getattr(self._jc, "as")(alias[0])) else: if metadata: raise ValueError('metadata can only be provided for a single column') return Column(getattr(self._jc, "as")(_to_seq(sc, list(alias))))
[ "def", "alias", "(", "self", ",", "*", "alias", ",", "*", "*", "kwargs", ")", ":", "metadata", "=", "kwargs", ".", "pop", "(", "'metadata'", ",", "None", ")", "assert", "not", "kwargs", ",", "'Unexpected kwargs where passed: %s'", "%", "kwargs", "sc", "=...
Returns this column aliased with a new name or names (in the case of expressions that return more than one column, such as explode). :param alias: strings of desired column names (collects all positional arguments passed) :param metadata: a dict of information to be stored in ``metadata`` attribute of the corresponding :class: `StructField` (optional, keyword only argument) .. versionchanged:: 2.2 Added optional ``metadata`` argument. >>> df.select(df.age.alias("age2")).collect() [Row(age2=2), Row(age2=5)] >>> df.select(df.age.alias("age3", metadata={'max': 99})).schema['age3'].metadata['max'] 99
[ "Returns", "this", "column", "aliased", "with", "a", "new", "name", "or", "names", "(", "in", "the", "case", "of", "expressions", "that", "return", "more", "than", "one", "column", "such", "as", "explode", ")", "." ]
python
train
memphis-iis/GLUDB
gludb/backends/dynamodb.py
https://github.com/memphis-iis/GLUDB/blob/25692528ff6fe8184a3570f61f31f1a90088a388/gludb/backends/dynamodb.py#L139-L151
def find_one(self, cls, id): """Required functionality.""" try: db_result = self.get_class_table(cls).lookup(id) except ItemNotFound: # according to docs, this shouldn't be required, but it IS db_result = None if not db_result: return None obj = cls.from_data(db_result['value']) return obj
[ "def", "find_one", "(", "self", ",", "cls", ",", "id", ")", ":", "try", ":", "db_result", "=", "self", ".", "get_class_table", "(", "cls", ")", ".", "lookup", "(", "id", ")", "except", "ItemNotFound", ":", "# according to docs, this shouldn't be required, but ...
Required functionality.
[ "Required", "functionality", "." ]
python
train
tonyo/pyope
pyope/ope.py
https://github.com/tonyo/pyope/blob/1e9f9f15cd4b989d1bf3c607270bf6a8ae808b1e/pyope/ope.py#L100-L106
def encrypt(self, plaintext): """Encrypt the given plaintext value""" if not isinstance(plaintext, int): raise ValueError('Plaintext must be an integer value') if not self.in_range.contains(plaintext): raise OutOfRangeError('Plaintext is not within the input range') return self.encrypt_recursive(plaintext, self.in_range, self.out_range)
[ "def", "encrypt", "(", "self", ",", "plaintext", ")", ":", "if", "not", "isinstance", "(", "plaintext", ",", "int", ")", ":", "raise", "ValueError", "(", "'Plaintext must be an integer value'", ")", "if", "not", "self", ".", "in_range", ".", "contains", "(",...
Encrypt the given plaintext value
[ "Encrypt", "the", "given", "plaintext", "value" ]
python
train
SatelliteQE/nailgun
nailgun/entities.py
https://github.com/SatelliteQE/nailgun/blob/c36d8c20862e87bf6975bd48ac1ca40a9e634eaa/nailgun/entities.py#L2948-L2963
def path(self, which=None): """Extend ``nailgun.entity_mixins.Entity.path``. The format of the returned path depends on the value of ``which``: smart_class_parameters /api/environments/:environment_id/smart_class_parameters Otherwise, call ``super``. """ if which in ('smart_class_parameters',): return '{0}/{1}'.format( super(Environment, self).path(which='self'), which ) return super(Environment, self).path(which)
[ "def", "path", "(", "self", ",", "which", "=", "None", ")", ":", "if", "which", "in", "(", "'smart_class_parameters'", ",", ")", ":", "return", "'{0}/{1}'", ".", "format", "(", "super", "(", "Environment", ",", "self", ")", ".", "path", "(", "which", ...
Extend ``nailgun.entity_mixins.Entity.path``. The format of the returned path depends on the value of ``which``: smart_class_parameters /api/environments/:environment_id/smart_class_parameters Otherwise, call ``super``.
[ "Extend", "nailgun", ".", "entity_mixins", ".", "Entity", ".", "path", ".", "The", "format", "of", "the", "returned", "path", "depends", "on", "the", "value", "of", "which", ":" ]
python
train
cole/aiosmtplib
src/aiosmtplib/email.py
https://github.com/cole/aiosmtplib/blob/0cd00e5059005371cbdfca995feff9183a16a51f/src/aiosmtplib/email.py#L16-L22
def parse_address(address: str) -> str: """ Parse an email address, falling back to the raw string given. """ display_name, parsed_address = email.utils.parseaddr(address) return parsed_address or address
[ "def", "parse_address", "(", "address", ":", "str", ")", "->", "str", ":", "display_name", ",", "parsed_address", "=", "email", ".", "utils", ".", "parseaddr", "(", "address", ")", "return", "parsed_address", "or", "address" ]
Parse an email address, falling back to the raw string given.
[ "Parse", "an", "email", "address", "falling", "back", "to", "the", "raw", "string", "given", "." ]
python
train
satellogic/telluric
telluric/georaster.py
https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L1081-L1098
def crop(self, vector, resolution=None, masked=None, bands=None, resampling=Resampling.cubic): """ crops raster outside vector (convex hull) :param vector: GeoVector, GeoFeature, FeatureCollection :param resolution: output resolution, None for full resolution :param resampling: reprojection resampling method, default `cubic` :return: GeoRaster """ bounds, window = self._vector_to_raster_bounds(vector.envelope, boundless=self._image is None) if resolution: xsize, ysize = self._resolution_to_output_shape(bounds, resolution) else: xsize, ysize = (None, None) return self.pixel_crop(bounds, xsize, ysize, window=window, masked=masked, bands=bands, resampling=resampling)
[ "def", "crop", "(", "self", ",", "vector", ",", "resolution", "=", "None", ",", "masked", "=", "None", ",", "bands", "=", "None", ",", "resampling", "=", "Resampling", ".", "cubic", ")", ":", "bounds", ",", "window", "=", "self", ".", "_vector_to_raste...
crops raster outside vector (convex hull) :param vector: GeoVector, GeoFeature, FeatureCollection :param resolution: output resolution, None for full resolution :param resampling: reprojection resampling method, default `cubic` :return: GeoRaster
[ "crops", "raster", "outside", "vector", "(", "convex", "hull", ")", ":", "param", "vector", ":", "GeoVector", "GeoFeature", "FeatureCollection", ":", "param", "resolution", ":", "output", "resolution", "None", "for", "full", "resolution", ":", "param", "resampli...
python
train
uw-it-aca/uw-restclients
restclients/trumba/calendar.py
https://github.com/uw-it-aca/uw-restclients/blob/e12dcd32bf5296b6ebdf71798031594afb7852cb/restclients/trumba/calendar.py#L148-L163
def get_tac_permissions(calendar_id): """ Return a list of sorted Permission objects representing the user permissions of a given Tacoma calendar. :return: a list of trumba.Permission objects corresponding to the given campus calendar. None if error, [] if not exists raise DataFailureException or a corresponding TrumbaException if the request failed or an error code has been returned. """ return _process_get_perm_resp( get_permissions_url, post_tac_resource(get_permissions_url, _create_get_perm_body(calendar_id)), TrumbaCalendar.TAC_CAMPUS_CODE, calendar_id)
[ "def", "get_tac_permissions", "(", "calendar_id", ")", ":", "return", "_process_get_perm_resp", "(", "get_permissions_url", ",", "post_tac_resource", "(", "get_permissions_url", ",", "_create_get_perm_body", "(", "calendar_id", ")", ")", ",", "TrumbaCalendar", ".", "TAC...
Return a list of sorted Permission objects representing the user permissions of a given Tacoma calendar. :return: a list of trumba.Permission objects corresponding to the given campus calendar. None if error, [] if not exists raise DataFailureException or a corresponding TrumbaException if the request failed or an error code has been returned.
[ "Return", "a", "list", "of", "sorted", "Permission", "objects", "representing", "the", "user", "permissions", "of", "a", "given", "Tacoma", "calendar", ".", ":", "return", ":", "a", "list", "of", "trumba", ".", "Permission", "objects", "corresponding", "to", ...
python
train
arista-eosplus/pyeapi
pyeapi/api/system.py
https://github.com/arista-eosplus/pyeapi/blob/96a74faef1fe3bd79c4e900aed29c9956a0587d6/pyeapi/api/system.py#L97-L116
def _parse_banners(self): """Parses the global config and returns the value for both motd and login banners. Returns: dict: The configure value for modtd and login banners. If the banner is not set it will return a value of None for that key. The returned dict object is intendd to be merged into the resource dict """ motd_value = login_value = None matches = re.findall('^banner\s+(login|motd)\s?$\n(.*?)$\nEOF$\n', self.config, re.DOTALL | re.M) for match in matches: if match[0].strip() == "motd": motd_value = match[1] elif match[0].strip() == "login": login_value = match[1] return dict(banner_motd=motd_value, banner_login=login_value)
[ "def", "_parse_banners", "(", "self", ")", ":", "motd_value", "=", "login_value", "=", "None", "matches", "=", "re", ".", "findall", "(", "'^banner\\s+(login|motd)\\s?$\\n(.*?)$\\nEOF$\\n'", ",", "self", ".", "config", ",", "re", ".", "DOTALL", "|", "re", ".",...
Parses the global config and returns the value for both motd and login banners. Returns: dict: The configure value for modtd and login banners. If the banner is not set it will return a value of None for that key. The returned dict object is intendd to be merged into the resource dict
[ "Parses", "the", "global", "config", "and", "returns", "the", "value", "for", "both", "motd", "and", "login", "banners", "." ]
python
train
klahnakoski/pyLibrary
mo_math/__init__.py
https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/mo_math/__init__.py#L225-L234
def ceiling(value, mod=1): """ RETURN SMALLEST INTEGER GREATER THAN value """ if value == None: return None mod = int(mod) v = int(math_floor(value + mod)) return v - (v % mod)
[ "def", "ceiling", "(", "value", ",", "mod", "=", "1", ")", ":", "if", "value", "==", "None", ":", "return", "None", "mod", "=", "int", "(", "mod", ")", "v", "=", "int", "(", "math_floor", "(", "value", "+", "mod", ")", ")", "return", "v", "-", ...
RETURN SMALLEST INTEGER GREATER THAN value
[ "RETURN", "SMALLEST", "INTEGER", "GREATER", "THAN", "value" ]
python
train
Nekroze/librarian
librarian/card.py
https://github.com/Nekroze/librarian/blob/5d3da2980d91a637f80ad7164fbf204a2dd2bd58/librarian/card.py#L54-L61
def add_ability(self, phase, ability): """Add the given ability to this Card under the given phase. Returns the length of the abilities for the given phase after the addition. """ if phase not in self.abilities: self.abilities[phase] = [] self.abilities[phase].append(ability) return len(self.abilities[phase])
[ "def", "add_ability", "(", "self", ",", "phase", ",", "ability", ")", ":", "if", "phase", "not", "in", "self", ".", "abilities", ":", "self", ".", "abilities", "[", "phase", "]", "=", "[", "]", "self", ".", "abilities", "[", "phase", "]", ".", "app...
Add the given ability to this Card under the given phase. Returns the length of the abilities for the given phase after the addition.
[ "Add", "the", "given", "ability", "to", "this", "Card", "under", "the", "given", "phase", ".", "Returns", "the", "length", "of", "the", "abilities", "for", "the", "given", "phase", "after", "the", "addition", "." ]
python
train
DataDog/integrations-core
datadog_checks_base/datadog_checks/base/checks/kube_leader/record.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/datadog_checks_base/datadog_checks/base/checks/kube_leader/record.py#L76-L83
def seconds_until_renew(self): """ Returns the number of seconds between the current time and the set renew time. It can be negative if the leader election is running late. """ delta = self.renew_time - datetime.now(self.renew_time.tzinfo) return delta.total_seconds()
[ "def", "seconds_until_renew", "(", "self", ")", ":", "delta", "=", "self", ".", "renew_time", "-", "datetime", ".", "now", "(", "self", ".", "renew_time", ".", "tzinfo", ")", "return", "delta", ".", "total_seconds", "(", ")" ]
Returns the number of seconds between the current time and the set renew time. It can be negative if the leader election is running late.
[ "Returns", "the", "number", "of", "seconds", "between", "the", "current", "time", "and", "the", "set", "renew", "time", ".", "It", "can", "be", "negative", "if", "the", "leader", "election", "is", "running", "late", "." ]
python
train
aio-libs/aiomcache
aiomcache/client.py
https://github.com/aio-libs/aiomcache/blob/75d44b201aea91bc2856b10940922d5ebfbfcd7b/aiomcache/client.py#L165-L174
def multi_get(self, conn, *keys): """Takes a list of keys and returns a list of values. :param keys: ``list`` keys for the item being fetched. :return: ``list`` of values for the specified keys. :raises:``ValidationException``, ``ClientException``, and socket errors """ values, _ = yield from self._multi_get(conn, *keys) return tuple(values.get(key) for key in keys)
[ "def", "multi_get", "(", "self", ",", "conn", ",", "*", "keys", ")", ":", "values", ",", "_", "=", "yield", "from", "self", ".", "_multi_get", "(", "conn", ",", "*", "keys", ")", "return", "tuple", "(", "values", ".", "get", "(", "key", ")", "for...
Takes a list of keys and returns a list of values. :param keys: ``list`` keys for the item being fetched. :return: ``list`` of values for the specified keys. :raises:``ValidationException``, ``ClientException``, and socket errors
[ "Takes", "a", "list", "of", "keys", "and", "returns", "a", "list", "of", "values", "." ]
python
train
HearthSim/dj-paypal
djpaypal/models/webhooks.py
https://github.com/HearthSim/dj-paypal/blob/867368f6068c2539e22d486eb7a6d2ecfb9485e0/djpaypal/models/webhooks.py#L164-L201
def from_request(cls, request, webhook_id=PAYPAL_WEBHOOK_ID): """ Create, validate and process a WebhookEventTrigger given a Django request object. The webhook_id parameter expects the ID of the Webhook that was triggered (defaults to settings.PAYPAL_WEBHOOK_ID). This is required for Webhook verification. The process is three-fold: 1. Create a WebhookEventTrigger object from a Django request. 2. Verify the WebhookEventTrigger as a Paypal webhook using the SDK. 3. If valid, process it into a WebhookEvent object (and child resource). """ headers = fix_django_headers(request.META) assert headers try: body = request.body.decode(request.encoding or "utf-8") except Exception: body = "(error decoding body)" ip = request.META["REMOTE_ADDR"] obj = cls.objects.create(headers=headers, body=body, remote_ip=ip) try: obj.valid = obj.verify(PAYPAL_WEBHOOK_ID) if obj.valid: # Process the item (do not save it, it'll get saved below) obj.process(save=False) except Exception as e: max_length = WebhookEventTrigger._meta.get_field("exception").max_length obj.exception = str(e)[:max_length] obj.traceback = format_exc() finally: obj.save() return obj
[ "def", "from_request", "(", "cls", ",", "request", ",", "webhook_id", "=", "PAYPAL_WEBHOOK_ID", ")", ":", "headers", "=", "fix_django_headers", "(", "request", ".", "META", ")", "assert", "headers", "try", ":", "body", "=", "request", ".", "body", ".", "de...
Create, validate and process a WebhookEventTrigger given a Django request object. The webhook_id parameter expects the ID of the Webhook that was triggered (defaults to settings.PAYPAL_WEBHOOK_ID). This is required for Webhook verification. The process is three-fold: 1. Create a WebhookEventTrigger object from a Django request. 2. Verify the WebhookEventTrigger as a Paypal webhook using the SDK. 3. If valid, process it into a WebhookEvent object (and child resource).
[ "Create", "validate", "and", "process", "a", "WebhookEventTrigger", "given", "a", "Django", "request", "object", "." ]
python
valid
konnected-io/konnected-py
konnected/__init__.py
https://github.com/konnected-io/konnected-py/blob/0a3f2d0cfe23deb222ed92e43dee96f27b0f664c/konnected/__init__.py#L20-L27
def get_device(self, pin=None): """ Query the status of a specific pin (or all configured pins if pin is ommitted) """ url = self.base_url + '/device' try: r = requests.get(url, params={'pin': pin}, timeout=10) return r.json() except RequestException as err: raise Client.ClientError(err)
[ "def", "get_device", "(", "self", ",", "pin", "=", "None", ")", ":", "url", "=", "self", ".", "base_url", "+", "'/device'", "try", ":", "r", "=", "requests", ".", "get", "(", "url", ",", "params", "=", "{", "'pin'", ":", "pin", "}", ",", "timeout...
Query the status of a specific pin (or all configured pins if pin is ommitted)
[ "Query", "the", "status", "of", "a", "specific", "pin", "(", "or", "all", "configured", "pins", "if", "pin", "is", "ommitted", ")" ]
python
train
inasafe/inasafe
safe/gui/tools/extent_selector_dialog.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/extent_selector_dialog.py#L353-L363
def on_hazard_exposure_bookmark_toggled(self, enabled): """Update the UI when the user toggles the bookmarks radiobutton. :param enabled: The status of the radiobutton. :type enabled: bool """ if enabled: self.bookmarks_index_changed() else: self.ok_button.setEnabled(True) self._populate_coordinates()
[ "def", "on_hazard_exposure_bookmark_toggled", "(", "self", ",", "enabled", ")", ":", "if", "enabled", ":", "self", ".", "bookmarks_index_changed", "(", ")", "else", ":", "self", ".", "ok_button", ".", "setEnabled", "(", "True", ")", "self", ".", "_populate_coo...
Update the UI when the user toggles the bookmarks radiobutton. :param enabled: The status of the radiobutton. :type enabled: bool
[ "Update", "the", "UI", "when", "the", "user", "toggles", "the", "bookmarks", "radiobutton", "." ]
python
train
LuminosoInsight/luminoso-api-client-python
luminoso_api/v5_client.py
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v5_client.py#L125-L153
def connect_with_username_and_password(cls, url=None, username=None, password=None): """ Returns an object that makes requests to the API, authenticated with a short-lived token retrieved from username and password. If username or password is not supplied, the method will prompt for a username and/or password to be entered interactively. See the connect method for more details about the `url` argument. PLEASE NOTE: This method is being provided as a temporary measure. We strongly encourage users of the Luminoso API to use a long-lived token instead, as explained in the V5_README file. """ from .v4_client import LuminosoClient as v4LC if username is None: username = input('Username: ') v4client = v4LC.connect(url=url, username=username, password=password) if url is None: url = '/' if url.startswith('http'): root_url = get_root_url(url) else: url = URL_BASE + '/' + url.lstrip('/') root_url = URL_BASE return cls(v4client.session, root_url)
[ "def", "connect_with_username_and_password", "(", "cls", ",", "url", "=", "None", ",", "username", "=", "None", ",", "password", "=", "None", ")", ":", "from", ".", "v4_client", "import", "LuminosoClient", "as", "v4LC", "if", "username", "is", "None", ":", ...
Returns an object that makes requests to the API, authenticated with a short-lived token retrieved from username and password. If username or password is not supplied, the method will prompt for a username and/or password to be entered interactively. See the connect method for more details about the `url` argument. PLEASE NOTE: This method is being provided as a temporary measure. We strongly encourage users of the Luminoso API to use a long-lived token instead, as explained in the V5_README file.
[ "Returns", "an", "object", "that", "makes", "requests", "to", "the", "API", "authenticated", "with", "a", "short", "-", "lived", "token", "retrieved", "from", "username", "and", "password", ".", "If", "username", "or", "password", "is", "not", "supplied", "t...
python
test
adafruit/Adafruit_Python_LED_Backpack
Adafruit_LED_Backpack/HT16K33.py
https://github.com/adafruit/Adafruit_Python_LED_Backpack/blob/7356b4dd8b4bb162d60987878c2cb752fdd017d5/Adafruit_LED_Backpack/HT16K33.py#L50-L57
def begin(self): """Initialize driver with LEDs enabled and all turned off.""" # Turn on the oscillator. self._device.writeList(HT16K33_SYSTEM_SETUP | HT16K33_OSCILLATOR, []) # Turn display on with no blinking. self.set_blink(HT16K33_BLINK_OFF) # Set display to full brightness. self.set_brightness(15)
[ "def", "begin", "(", "self", ")", ":", "# Turn on the oscillator.", "self", ".", "_device", ".", "writeList", "(", "HT16K33_SYSTEM_SETUP", "|", "HT16K33_OSCILLATOR", ",", "[", "]", ")", "# Turn display on with no blinking.", "self", ".", "set_blink", "(", "HT16K33_B...
Initialize driver with LEDs enabled and all turned off.
[ "Initialize", "driver", "with", "LEDs", "enabled", "and", "all", "turned", "off", "." ]
python
train