repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
callowayproject/Transmogrify
transmogrify/images2gif.py
https://github.com/callowayproject/Transmogrify/blob/f1f891b8b923b3a1ede5eac7f60531c1c472379e/transmogrify/images2gif.py#L154-L164
def get_header_anim(self, im):
    """Return the GIF89a animation header for *im*.

    Replacement for PIL's ``getheader()[0]``: the magic string, the
    logical screen size, and the packed flags byte ``\\x87`` plus
    background/aspect bytes.
    """
    pieces = [
        "GIF89a",
        int_to_bin(im.size[0]),
        int_to_bin(im.size[1]),
        "\x87\x00\x00",
    ]
    return "".join(pieces)
[ "def", "get_header_anim", "(", "self", ",", "im", ")", ":", "bb", "=", "\"GIF89a\"", "bb", "+=", "int_to_bin", "(", "im", ".", "size", "[", "0", "]", ")", "bb", "+=", "int_to_bin", "(", "im", ".", "size", "[", "1", "]", ")", "bb", "+=", "\"\\x87\...
get_header_anim(im) Get animation header. To replace PILs getheader()[0]
[ "get_header_anim", "(", "im", ")" ]
python
train
saimn/sigal
sigal/writer.py
https://github.com/saimn/sigal/blob/912ca39991355d358dc85fd55c7aeabdd7acc386/sigal/writer.py#L98-L111
def generate_context(self, album):
    """Build the template context dict for *album*."""
    from . import __url__ as sigal_link
    self.logger.info("Output album : %r", album)
    # Theme URL is relative to the album's destination directory.
    theme_rel = os.path.relpath(self.theme_path, album.dst_path)
    context = {
        'album': album,
        'index_title': self.index_title,
        'settings': self.settings,
        'sigal_link': sigal_link,
        'theme': {
            'name': os.path.basename(self.theme),
            'url': url_from_path(theme_rel),
        },
    }
    return context
[ "def", "generate_context", "(", "self", ",", "album", ")", ":", "from", ".", "import", "__url__", "as", "sigal_link", "self", ".", "logger", ".", "info", "(", "\"Output album : %r\"", ",", "album", ")", "return", "{", "'album'", ":", "album", ",", "'index_...
Generate the context dict for the given path.
[ "Generate", "the", "context", "dict", "for", "the", "given", "path", "." ]
python
valid
inveniosoftware-contrib/invenio-workflows
invenio_workflows/engine.py
https://github.com/inveniosoftware-contrib/invenio-workflows/blob/9c09fd29509a3db975ac2aba337e6760d8cfd3c2/invenio_workflows/engine.py#L222-L230
def has_completed(self):
    """Return True if workflow is fully completed.

    Counts the top-level (parent-less) objects of this workflow that are
    in COMPLETED status and compares against the total number of objects
    attached to this engine.
    """
    # NOTE: ``== None`` (not ``is None``) is required here — SQLAlchemy
    # overloads ``==`` to build the SQL ``IS NULL`` expression; hence the
    # ``noqa`` marker.
    objects_in_db = WorkflowObjectModel.query.filter(
        WorkflowObjectModel.id_workflow == self.uuid,
        WorkflowObjectModel.id_parent == None,  # noqa
    ).filter(WorkflowObjectModel.status.in_([
        workflow_object_class.known_statuses.COMPLETED
    ])).count()
    # Fully complete only when every in-memory object is accounted for.
    return objects_in_db == len(list(self.objects))
[ "def", "has_completed", "(", "self", ")", ":", "objects_in_db", "=", "WorkflowObjectModel", ".", "query", ".", "filter", "(", "WorkflowObjectModel", ".", "id_workflow", "==", "self", ".", "uuid", ",", "WorkflowObjectModel", ".", "id_parent", "==", "None", ",", ...
Return True if workflow is fully completed.
[ "Return", "True", "if", "workflow", "is", "fully", "completed", "." ]
python
train
planetlabs/planet-client-python
planet/scripts/v1.py
https://github.com/planetlabs/planet-client-python/blob/1c62ce7d416819951dddee0c22068fef6d40b027/planet/scripts/v1.py#L97-L101
def create_search(pretty, **kw):
    '''Create a saved search'''
    search_request = search_req_from_opts(**kw)
    client = clientv1()
    response = call_and_wrap(client.create_search, search_request)
    echo_json_response(response, pretty)
[ "def", "create_search", "(", "pretty", ",", "*", "*", "kw", ")", ":", "req", "=", "search_req_from_opts", "(", "*", "*", "kw", ")", "cl", "=", "clientv1", "(", ")", "echo_json_response", "(", "call_and_wrap", "(", "cl", ".", "create_search", ",", "req", ...
Create a saved search
[ "Create", "a", "saved", "search" ]
python
train
raiden-network/raiden
raiden/utils/echo_node.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/utils/echo_node.py#L87-L132
def poll_all_received_events(self):
    """ This will be triggered once for each `echo_node_alarm_callback`.
    It polls all channels for `EventPaymentReceivedSuccess` events,
    adds all new events to the `self.received_transfers` queue and
    respawns `self.echo_node_worker`, if it died. """
    locked = False
    try:
        # Bound the whole poll to 10s so a stuck API call cannot block
        # subsequent alarm callbacks forever.
        with Timeout(10):
            # Non-blocking acquire: if another callback is already
            # polling, skip this round instead of queueing up.
            locked = self.lock.acquire(blocking=False)
            if not locked:
                return
            else:
                received_transfers = self.api.get_raiden_events_payment_history(
                    token_address=self.token_address,
                    offset=self.last_poll_offset,
                )
                # received transfer is a tuple of (block_number, event)
                received_transfers = [
                    event
                    for event in received_transfers
                    if type(event) == EventPaymentReceivedSuccess
                ]
                for event in received_transfers:
                    # Deep-copy so consumers of the queue cannot mutate
                    # the event objects held elsewhere.
                    transfer = copy.deepcopy(event)
                    self.received_transfers.put(transfer)
                # set last_poll_block after events are enqueued (timeout safe):
                # if the Timeout fires before this point, the offset is not
                # advanced and the events are re-fetched next round.
                if received_transfers:
                    self.last_poll_offset += len(received_transfers)
                # Respawn the worker greenlet if it has died.
                if not self.echo_worker_greenlet.started:
                    log.debug(
                        'restarting echo_worker_greenlet',
                        dead=self.echo_worker_greenlet.dead,
                        successful=self.echo_worker_greenlet.successful(),
                        exception=self.echo_worker_greenlet.exception,
                    )
                    self.echo_worker_greenlet = gevent.spawn(self.echo_worker)
    except Timeout:
        log.info('timeout while polling for events')
    finally:
        # Only release if we actually acquired the lock above.
        if locked:
            self.lock.release()
[ "def", "poll_all_received_events", "(", "self", ")", ":", "locked", "=", "False", "try", ":", "with", "Timeout", "(", "10", ")", ":", "locked", "=", "self", ".", "lock", ".", "acquire", "(", "blocking", "=", "False", ")", "if", "not", "locked", ":", ...
This will be triggered once for each `echo_node_alarm_callback`. It polls all channels for `EventPaymentReceivedSuccess` events, adds all new events to the `self.received_transfers` queue and respawns `self.echo_node_worker`, if it died.
[ "This", "will", "be", "triggered", "once", "for", "each", "echo_node_alarm_callback", ".", "It", "polls", "all", "channels", "for", "EventPaymentReceivedSuccess", "events", "adds", "all", "new", "events", "to", "the", "self", ".", "received_transfers", "queue", "a...
python
train
wishtack/pysynthetic
synthetic/synthetic_constructor_factory.py
https://github.com/wishtack/pysynthetic/blob/f37a4a2f1e0313b8c544f60d37c93726bc806ec6/synthetic/synthetic_constructor_factory.py#L85-L112
def _positionalArgumentKeyValueList(self, originalConstructorExpectedArgList, syntheticMemberList, argTuple): """Transforms args tuple to a dictionary mapping argument names to values using original constructor positional args specification, then it adds synthesized members at the end if they are not already present. :type syntheticMemberList: list(SyntheticMember) :type argTuple: tuple """ # First, the list of expected arguments is set to original constructor's arg spec. expectedArgList = copy.copy(originalConstructorExpectedArgList) # ... then we append members that are not already present. for syntheticMember in syntheticMemberList: memberName = syntheticMember.memberName() if memberName not in expectedArgList: expectedArgList.append(memberName) # Makes a list of tuples (argumentName, argumentValue) with each element of each list (expectedArgList, argTuple) # until the shortest list's end is reached. positionalArgumentKeyValueList = list(zip(expectedArgList, argTuple)) # Add remanining arguments (those that are not expected by the original constructor). for argumentValue in argTuple[len(positionalArgumentKeyValueList):]: positionalArgumentKeyValueList.append((None, argumentValue)) return positionalArgumentKeyValueList
[ "def", "_positionalArgumentKeyValueList", "(", "self", ",", "originalConstructorExpectedArgList", ",", "syntheticMemberList", ",", "argTuple", ")", ":", "# First, the list of expected arguments is set to original constructor's arg spec. ", "expectedArgList", "=", "copy", ".", "copy...
Transforms args tuple to a dictionary mapping argument names to values using original constructor positional args specification, then it adds synthesized members at the end if they are not already present. :type syntheticMemberList: list(SyntheticMember) :type argTuple: tuple
[ "Transforms", "args", "tuple", "to", "a", "dictionary", "mapping", "argument", "names", "to", "values", "using", "original", "constructor", "positional", "args", "specification", "then", "it", "adds", "synthesized", "members", "at", "the", "end", "if", "they", "...
python
train
bxlab/bx-python
lib/bx/align/score.py
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx/align/score.py#L105-L196
def build_scoring_scheme( s, gap_open, gap_extend, gap1="-", gap2=None, **kwargs ):
    """
    Initialize scoring scheme from a blastz style text blob: the first line
    specifies the bases for each row/col, subsequent lines contain the
    corresponding scores.  Slaw extensions allow for unusual and/or
    asymmetric alphabets.  Symbols can be two digit hex, and each row
    begins with its symbol.  Note that a row corresponds to a symbol in
    text1 and a column to a symbol in text2.

    :param s: matrix text blob (blastz or slaw format)
    :param gap_open: gap open penalty (int or float)
    :param gap_extend: gap extension penalty (int or float)
    :param gap1: gap character for text1
    :param gap2: gap character for text2 (defaults to gap1's behavior downstream)
    :raises ValueError: if the matrix text is malformed
    :return: a populated ScoringScheme
    """
    # perform initial parse to determine alphabets and locate scores.
    # BUGFIX: the original code did ``raise bad_matrix`` with a plain
    # string, which has been a TypeError since Python 2.6 (string
    # exceptions were removed) — wrap the message in ValueError instead.
    bad_matrix = "invalid scoring matrix"
    s = s.rstrip( "\n" )
    lines = s.split( "\n" )
    rows = []
    symbols2 = lines.pop(0).split()
    symbols1 = None
    rows_have_syms = False
    a_la_blastz = True
    for i, line in enumerate( lines ):
        row_scores = line.split()
        if len( row_scores ) == len( symbols2 ):        # blastz-style row
            if symbols1 is None:
                if len( lines ) != len( symbols2 ):
                    raise ValueError( bad_matrix )
                symbols1 = symbols2
            elif rows_have_syms:
                raise ValueError( bad_matrix )
        elif len( row_scores ) == len( symbols2 ) + 1:  # row starts with symbol
            if symbols1 is None:
                symbols1 = []
                rows_have_syms = True
                a_la_blastz = False
            elif not rows_have_syms:
                raise ValueError( bad_matrix )
            symbols1.append( row_scores.pop(0) )
        else:
            raise ValueError( bad_matrix )
        rows.append( row_scores )
    # convert alphabets from strings (e.g. two-digit hex) to characters
    try:
        alphabet1 = [sym_to_char( sym ) for sym in symbols1]
        alphabet2 = [sym_to_char( sym ) for sym in symbols2]
    except ValueError:
        raise ValueError( bad_matrix )
    if (alphabet1 != symbols1) or (alphabet2 != symbols2):
        a_la_blastz = False
    if a_la_blastz:
        alphabet1 = [ch.upper() for ch in alphabet1]
        alphabet2 = [ch.upper() for ch in alphabet2]
    # decide if rows and/or columns should reflect case
    if a_la_blastz:
        foldcase1 = foldcase2 = True
    else:
        foldcase1 = "".join( alphabet1 ) == "ACGT"
        foldcase2 = "".join( alphabet2 ) == "ACGT"
    # create appropriately sized matrix; widen to 256 if any symbol is
    # outside 7-bit ASCII
    text1_range = text2_range = 128
    if ord( max( alphabet1 ) ) >= 128:
        text1_range = 256
    if ord( max( alphabet2 ) ) >= 128:
        text2_range = 256
    # use a float typecode if any score or penalty is a float
    typecode = int32
    for i, row_scores in enumerate( rows ):
        for j, score in enumerate( map( int_or_float, row_scores ) ):
            if type( score ) == float:
                typecode = float32
    if type( gap_open ) == float:
        typecode = float32
    if type( gap_extend ) == float:
        typecode = float32
    ss = ScoringScheme( gap_open, gap_extend,
                        alphabet1=alphabet1, alphabet2=alphabet2,
                        gap1=gap1, gap2=gap2,
                        text1_range=text1_range, text2_range=text2_range,
                        typecode=typecode, **kwargs )
    # fill matrix, mirroring scores across case when folding is enabled
    for i, row_scores in enumerate( rows ):
        for j, score in enumerate( map( int_or_float, row_scores ) ):
            ss.set_score( ord( alphabet1[i] ), ord( alphabet2[j] ), score )
            if foldcase1 and foldcase2:
                ss.set_score( ord( alphabet1[i].lower() ), ord( alphabet2[j].upper() ), score )
                ss.set_score( ord( alphabet1[i].upper() ), ord( alphabet2[j].lower() ), score )
                ss.set_score( ord( alphabet1[i].lower() ), ord( alphabet2[j].lower() ), score )
            elif foldcase1:
                ss.set_score( ord( alphabet1[i].lower() ), ord( alphabet2[j] ), score )
            elif foldcase2:
                ss.set_score( ord( alphabet1[i] ), ord( alphabet2[j].lower() ), score )
    return ss
[ "def", "build_scoring_scheme", "(", "s", ",", "gap_open", ",", "gap_extend", ",", "gap1", "=", "\"-\"", ",", "gap2", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# perform initial parse to determine alphabets and locate scores", "bad_matrix", "=", "\"invalid sco...
Initialize scoring scheme from a blastz style text blob, first line specifies the bases for each row/col, subsequent lines contain the corresponding scores. Slaw extensions allow for unusual and/or asymmetric alphabets. Symbols can be two digit hex, and each row begins with symbol. Note that a row corresponds to a symbol in text1 and a column to a symbol in text2. examples: blastz slaw A C G T 01 02 A C G T 91 -114 -31 -123 01 200 -200 -50 100 -50 100 -114 100 -125 -31 02 -200 200 100 -50 100 -50 -31 -125 100 -114 -123 -31 -114 91
[ "Initialize", "scoring", "scheme", "from", "a", "blastz", "style", "text", "blob", "first", "line", "specifies", "the", "bases", "for", "each", "row", "/", "col", "subsequent", "lines", "contain", "the", "corresponding", "scores", ".", "Slaw", "extensions", "a...
python
train
titilambert/pyhydroquebec
pyhydroquebec/client.py
https://github.com/titilambert/pyhydroquebec/blob/4ea1374a63944413889c147d91961eda0605d4fd/pyhydroquebec/client.py#L395-L468
def fetch_data(self): """Get the latest data from HydroQuebec.""" # Get http session yield from self._get_httpsession() # Get login page login_url = yield from self._get_login_page() # Post login page yield from self._post_login_page(login_url) # Get p_p_id and contracts p_p_id, contracts = yield from self._get_p_p_id_and_contract() # If we don't have any contrats that means we have only # onecontract. Let's get it if contracts == {}: contracts = yield from self._get_lonely_contract() # Get balance balances = yield from self._get_balances() balances_len = len(balances) balance_id = 0 # For all contracts for contract, contract_url in contracts.items(): if contract_url: yield from self._load_contract_page(contract_url) # Get Hourly data try: yesterday = datetime.datetime.now(HQ_TIMEZONE) - datetime.timedelta(days=1) day_date = yesterday.strftime("%Y-%m-%d") hourly_data = yield from self._get_hourly_data(day_date, p_p_id) hourly_data = hourly_data['processed_hourly_data'] except Exception: # pylint: disable=W0703 # We don't have hourly data for some reason hourly_data = {} # Get Annual data try: annual_data = yield from self._get_annual_data(p_p_id) except PyHydroQuebecAnnualError: # We don't have annual data, which is possible if your # contract is younger than 1 year annual_data = {} # Get Monthly data monthly_data = yield from self._get_monthly_data(p_p_id) monthly_data = monthly_data[0] # Get daily data start_date = monthly_data.get('dateDebutPeriode') end_date = monthly_data.get('dateFinPeriode') try: daily_data = yield from self._get_daily_data(p_p_id, start_date, end_date) except Exception: # pylint: disable=W0703 daily_data = [] # We have to test daily_data because it's empty # At the end/starts of a period if daily_data: daily_data = daily_data[0]['courant'] # format data contract_data = {"balance": balances[balance_id]} for key1, key2 in MONTHLY_MAP: contract_data[key1] = monthly_data[key2] for key1, key2 in ANNUAL_MAP: contract_data[key1] = 
annual_data.get(key2, "") # We have to test daily_data because it's empty # At the end/starts of a period if daily_data: for key1, key2 in DAILY_MAP: contract_data[key1] = daily_data[key2] # Hourly if hourly_data: contract_data['yesterday_hourly_consumption'] = hourly_data # Add contract self._data[contract] = contract_data balance_count = balance_id + 1 if balance_count < balances_len: balance_id += 1
[ "def", "fetch_data", "(", "self", ")", ":", "# Get http session", "yield", "from", "self", ".", "_get_httpsession", "(", ")", "# Get login page", "login_url", "=", "yield", "from", "self", ".", "_get_login_page", "(", ")", "# Post login page", "yield", "from", "...
Get the latest data from HydroQuebec.
[ "Get", "the", "latest", "data", "from", "HydroQuebec", "." ]
python
train
saltstack/salt
salt/states/cimc.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/cimc.py#L103-L165
def logging_levels(name, remote=None, local=None):
    '''
    Ensures that the logging levels are set on the device. The logging levels
    must match the following options: emergency, alert, critical, error, warning,
    notice, informational, debug.

    .. versionadded:: 2019.2.0

    name: The name of the module function to execute.

    remote(str): The logging level for SYSLOG logs.

    local(str): The logging level for the local device.

    SLS Example:

    .. code-block:: yaml

        logging_levels:
          cimc.logging_levels:
            - remote: informational
            - local: notice
    '''
    ret = _default_ret(name)
    syslog_conf = __salt__['cimc.get_syslog_settings']()
    try:
        current = syslog_conf['outConfigs']['commSyslog'][0]
        # A change is needed when a requested level differs from the
        # device's current setting.
        needs_update = bool(
            (remote and current['remoteSeverity'] != remote) or
            (local and current['localSeverity'] != local)
        )
        if not needs_update:
            ret['comment'] = "Logging level already configured. No changes required."
        else:
            update = __salt__['cimc.set_logging_levels'](remote, local)
            if update['outConfig']['commSyslog'][0]['status'] != 'modified':
                ret['result'] = False
                ret['comment'] = "Error setting logging levels."
                return ret
            ret['changes']['before'] = syslog_conf
            ret['changes']['after'] = __salt__['cimc.get_syslog_settings']()
            ret['comment'] = "Logging level settings modified."
    except Exception as err:
        ret['result'] = False
        ret['comment'] = "Error occurred setting logging level settings."
        log.error(err)
        return ret
    ret['result'] = True
    return ret
[ "def", "logging_levels", "(", "name", ",", "remote", "=", "None", ",", "local", "=", "None", ")", ":", "ret", "=", "_default_ret", "(", "name", ")", "syslog_conf", "=", "__salt__", "[", "'cimc.get_syslog_settings'", "]", "(", ")", "req_change", "=", "False...
Ensures that the logging levels are set on the device. The logging levels must match the following options: emergency, alert, critical, error, warning, notice, informational, debug. .. versionadded:: 2019.2.0 name: The name of the module function to execute. remote(str): The logging level for SYSLOG logs. local(str): The logging level for the local device. SLS Example: .. code-block:: yaml logging_levels: cimc.logging_levels: - remote: informational - local: notice
[ "Ensures", "that", "the", "logging", "levels", "are", "set", "on", "the", "device", ".", "The", "logging", "levels", "must", "match", "the", "following", "options", ":", "emergency", "alert", "critical", "error", "warning", "notice", "informational", "debug", ...
python
train
ajk8/hatchery
hatchery/project.py
https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/project.py#L106-L113
def set_version(package_name, version_str):
    """Set the version in _version.py to version_str."""
    old_version = get_version(package_name)
    path = helpers.package_file_path('_version.py', package_name)
    # Rewrite every occurrence of the old version string with the new one.
    updated_content = helpers.get_file_content(path).replace(old_version, version_str)
    with open(path, 'w') as fh:
        fh.write(updated_content)
[ "def", "set_version", "(", "package_name", ",", "version_str", ")", ":", "current_version", "=", "get_version", "(", "package_name", ")", "version_file_path", "=", "helpers", ".", "package_file_path", "(", "'_version.py'", ",", "package_name", ")", "version_file_conte...
Set the version in _version.py to version_str
[ "Set", "the", "version", "in", "_version", ".", "py", "to", "version_str" ]
python
train
mvcisback/py-aiger-bv
aigerbv/common.py
https://github.com/mvcisback/py-aiger-bv/blob/855819844c429c35cdd8dc0b134bcd11f7b2fda3/aigerbv/common.py#L430-L464
def kmodels(wordlen: int, k: int, input=None, output=None):
    """Return a circuit over a wordlen bitvector for which exactly k
    valuations evaluate to True.  Uses the encoding from [1]; note this
    is equivalent to (~x < k).

    - TODO: Add automated simplification so that the circuits are equiv.

    [1]: Chakraborty, Supratik, et al. "From Weighted to Unweighted Model
    Counting." IJCAI. 2015.
    """
    assert 0 <= k < 2**wordlen

    if input is None:
        input = _fresh()
    if output is None:
        output = _fresh()

    names = named_indexes(wordlen, input)
    bits = encode_int(wordlen, k, signed=False)

    seen_one = False
    acc = aiger.atom(False)
    for name, bit in zip(names, bits):
        atom = aiger.atom(name)
        seen_one = seen_one or bit
        # Leading zeros of k contribute nothing; skip until the first 1.
        if not seen_one:
            continue
        acc = (acc | atom) if bit else (acc & atom)

    return aigbv.AIGBV(
        aig=acc.aig,
        input_map=frozenset([(input, tuple(names))]),
        output_map=frozenset([(output, (acc.output,))]),
    )
[ "def", "kmodels", "(", "wordlen", ":", "int", ",", "k", ":", "int", ",", "input", "=", "None", ",", "output", "=", "None", ")", ":", "assert", "0", "<=", "k", "<", "2", "**", "wordlen", "if", "output", "is", "None", ":", "output", "=", "_fresh", ...
Return a circuit taking a wordlen bitvector where only k valuations return True. Uses encoding from [1]. Note that this is equivalent to (~x < k). - TODO: Add automated simplification so that the circuits are equiv. [1]: Chakraborty, Supratik, et al. "From Weighted to Unweighted Model Counting." IJCAI. 2015.
[ "Return", "a", "circuit", "taking", "a", "wordlen", "bitvector", "where", "only", "k", "valuations", "return", "True", ".", "Uses", "encoding", "from", "[", "1", "]", "." ]
python
train
FujiMakoto/AgentML
agentml/__init__.py
https://github.com/FujiMakoto/AgentML/blob/c8cb64b460d876666bf29ea2c682189874c7c403/agentml/__init__.py#L295-L321
def parse_substitutions(self, messages):
    """
    Parse substitutions in a supplied message
    :param messages: A tuple messages being parsed (normalized, case preserved, raw)
    :type  messages: tuple of (str, str, str)

    :return: Substituted messages (normalized, case preserved, raw)
    :rtype : tuple of (str, str, str)
    """
    # Nothing configured: hand the messages back untouched.
    if not self._substitutions:
        self._log.info('No substitutions to process')
        return messages

    self._log.info('Processing message substitutions')

    def apply_sub(pair, text):
        # pair is (compiled pattern, replacement string)
        pattern, replacement = pair
        return pattern.sub(replacement, text)

    normalized, preserve_case, raw = messages

    # Each entry carries a pattern/replacement pair per message variant.
    for norm_sub, case_sub, raw_sub in self._substitutions:
        normalized = apply_sub(norm_sub, normalized)
        preserve_case = apply_sub(case_sub, preserve_case)
        raw = apply_sub(raw_sub, raw)

    return normalized, preserve_case, raw
[ "def", "parse_substitutions", "(", "self", ",", "messages", ")", ":", "# If no substitutions have been defined, just normalize the message", "if", "not", "self", ".", "_substitutions", ":", "self", ".", "_log", ".", "info", "(", "'No substitutions to process'", ")", "re...
Parse substitutions in a supplied message :param messages: A tuple messages being parsed (normalized, case preserved, raw) :type messages: tuple of (str, str, str) :return: Substituted messages (normalized, case preserved, raw) :rtype : tuple of (str, str, str)
[ "Parse", "substitutions", "in", "a", "supplied", "message", ":", "param", "messages", ":", "A", "tuple", "messages", "being", "parsed", "(", "normalized", "case", "preserved", "raw", ")", ":", "type", "messages", ":", "tuple", "of", "(", "str", "str", "str...
python
train
hubo1016/vlcp
vlcp/event/event.py
https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/event.py#L166-L202
def two_way_difference(self, b, extra_add = (), extra_remove = ()):
    """
    Return (self - b, b - self)

    Computes the symmetric difference between this Diff_ and *b* as two
    tuples, with *extra_add*/*extra_remove* folded in as additional
    elements on each side.  Fast paths avoid building sets when one of
    the sides is empty or when the two objects share a base.
    """
    # Identical object: both differences are trivially empty.
    if self is b:
        return ((), ())
    # Unwrap a DiffRef_: its pending additions become removals relative
    # to us, and we compare against its origin.
    if isinstance(b, DiffRef_):
        extra_remove = extra_remove + b.add
        b = b.origin
    # Equal extras cancel out (only compared when both are the same
    # tuple value, typically both empty).
    if extra_add == extra_remove:
        extra_add = extra_remove = ()
    if isinstance(b, Diff_):
        if self.base is b.base:
            # Shared base: only the add/remove deltas differ.
            first = self.add + b.remove
            second = self.remove + b.add
        elif self.base is b:
            # b is our base: our own deltas are the whole difference.
            first = self.add
            second = self.remove
        elif b.base is self:
            # We are b's base: b's deltas, inverted.
            first = b.remove
            second = b.add
        else:
            # Unrelated bases: fall back to comparing full contents.
            first = self
            second = b
    else:
        first = self
        second = b
    # One side empty: the other side is the entire difference.
    if not first and not extra_add:
        return ((), tuple(second) + tuple(extra_remove))
    elif not second and not extra_remove:
        return (tuple(first) + tuple(extra_add), ())
    else:
        # General case: set difference in both directions.
        first = set(first)
        first.update(extra_add)
        second = set(second)
        second.update(extra_remove)
        return tuple(first.difference(second)), tuple(second.difference(first))
[ "def", "two_way_difference", "(", "self", ",", "b", ",", "extra_add", "=", "(", ")", ",", "extra_remove", "=", "(", ")", ")", ":", "if", "self", "is", "b", ":", "return", "(", "(", ")", ",", "(", ")", ")", "if", "isinstance", "(", "b", ",", "Di...
Return (self - b, b - self)
[ "Return", "(", "self", "-", "b", "b", "-", "self", ")" ]
python
train
brbsix/subsystem
subsystem/subsystem.py
https://github.com/brbsix/subsystem/blob/57705bc20d71ceaed9e22e21246265d717e98eb8/subsystem/subsystem.py#L183-L189
def failure(path, downloader):
    """Display warning message via stderr or GUI."""
    filename = os.path.basename(path)
    # Interactive terminal -> plain console message; otherwise a GUI
    # notification.
    if not sys.stdin.isatty():
        notify_failure(filename, downloader)
    else:
        print('INFO [{}]: Failed to download {!r}'.format(downloader, filename))
[ "def", "failure", "(", "path", ",", "downloader", ")", ":", "base", "=", "os", ".", "path", ".", "basename", "(", "path", ")", "if", "sys", ".", "stdin", ".", "isatty", "(", ")", ":", "print", "(", "'INFO [{}]: Failed to download {!r}'", ".", "format", ...
Display warning message via stderr or GUI.
[ "Display", "warning", "message", "via", "stderr", "or", "GUI", "." ]
python
train
TomasTomecek/sen
sen/tui/ui.py
https://github.com/TomasTomecek/sen/blob/239b4868125814e8bf5527708119fc08b35f6cc0/sen/tui/ui.py#L93-L104
def _set_main_widget(self, widget, redraw): """ add provided widget to widget list and display it :param widget: :return: """ self.set_body(widget) self.reload_footer() if redraw: logger.debug("redraw main widget") self.refresh()
[ "def", "_set_main_widget", "(", "self", ",", "widget", ",", "redraw", ")", ":", "self", ".", "set_body", "(", "widget", ")", "self", ".", "reload_footer", "(", ")", "if", "redraw", ":", "logger", ".", "debug", "(", "\"redraw main widget\"", ")", "self", ...
add provided widget to widget list and display it :param widget: :return:
[ "add", "provided", "widget", "to", "widget", "list", "and", "display", "it" ]
python
train
garenchan/policy
policy/enforcer.py
https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/policy/enforcer.py#L39-L46
def from_dict(cls, rules_dict: dict, default_rule=None, raise_error=False):
    """Allow loading of rule data from a dictionary."""
    parsed = {}
    # Parse each rule expression stored in the dictionary.
    for rule_name, rule_text in rules_dict.items():
        parsed[rule_name] = _parser.parse_rule(rule_text, raise_error)
    return cls(parsed, default_rule)
[ "def", "from_dict", "(", "cls", ",", "rules_dict", ":", "dict", ",", "default_rule", "=", "None", ",", "raise_error", "=", "False", ")", ":", "# Parse the rules stored in the dictionary", "rules", "=", "{", "k", ":", "_parser", ".", "parse_rule", "(", "v", "...
Allow loading of rule data from a dictionary.
[ "Allow", "loading", "of", "rule", "data", "from", "a", "dictionary", "." ]
python
train
phaethon/kamene
kamene/contrib/gsm_um.py
https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/contrib/gsm_um.py#L2689-L2695
def requestPdpContextActivationReject():
    """REQUEST PDP CONTEXT ACTIVATION REJECT Section 9.5.5"""
    # Protocol discriminator / message type (0x45 = 01000101) / SM cause IE.
    protocol = TpPd(pd=0x8)
    msg_type = MessageType(mesType=0x45)
    cause = SmCause()
    return protocol / msg_type / cause
[ "def", "requestPdpContextActivationReject", "(", ")", ":", "a", "=", "TpPd", "(", "pd", "=", "0x8", ")", "b", "=", "MessageType", "(", "mesType", "=", "0x45", ")", "# 01000101", "c", "=", "SmCause", "(", ")", "packet", "=", "a", "/", "b", "/", "c", ...
REQUEST PDP CONTEXT ACTIVATION REJECT Section 9.5.5
[ "REQUEST", "PDP", "CONTEXT", "ACTIVATION", "REJECT", "Section", "9", ".", "5", ".", "5" ]
python
train
lrq3000/pyFileFixity
pyFileFixity/header_ecc.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/header_ecc.py#L92-L124
def entry_fields(entry, field_delim="\xFF"):
    '''From a raw ecc entry (a string), extract the metadata fields
    (filename, filesize, ecc for both), and the rest being blocks of hash
    and ecc per blocks of the original file's header.

    :param entry: raw ecc entry string
    :param field_delim: delimiter character separating the metadata fields
    :return: dict with keys relfilepath, relfilepath_ecc, filesize,
             filesize_ecc and ecc_field
    '''
    # Strip leading delimiters: if there was a slight adjustment error
    # (e.g. the last ecc block of the previous file ended on the
    # delimiter), the entry would start with field_delim and throw off
    # field detection.  We only lose a bit of resiliency on the previous
    # file's last block.
    entry = entry.lstrip(field_delim)

    # Find metadata fields delimiters positions.  We deliberately stop
    # after the fourth delimiter instead of splitting the whole string:
    # the delimiter byte may legitimately occur inside the ecc stream.
    first = entry.find(field_delim)
    second = entry.find(field_delim, first + len(field_delim))
    third = entry.find(field_delim, second + len(field_delim))
    fourth = entry.find(field_delim, third + len(field_delim))

    # Extract the content of the metadata fields.
    relfilepath = entry[:first]
    filesize = entry[first + len(field_delim):second]
    relfilepath_ecc = entry[second + len(field_delim):third]
    filesize_ecc = entry[third + len(field_delim):fourth]
    # Ecc stream field (aka ecc blocks)
    ecc_field = entry[fourth + len(field_delim):]

    # Try to convert to an int, an error may happen.
    # BUGFIX: was the Python-2-only ``except Exception, e`` syntax;
    # ``except Exception as e`` works on Python 2.6+ and Python 3.
    try:
        filesize = int(filesize)
    except Exception as e:
        print("Exception when trying to detect the filesize in ecc field (it may be corrupted), skipping: ")
        print(e)
        # We deliberately keep the raw value instead of forcing 0, so
        # that intra-ecc correction can still attempt a fix.

    return {"relfilepath": relfilepath, "relfilepath_ecc": relfilepath_ecc, "filesize": filesize, "filesize_ecc": filesize_ecc, "ecc_field": ecc_field}
[ "def", "entry_fields", "(", "entry", ",", "field_delim", "=", "\"\\xFF\"", ")", ":", "entry", "=", "entry", ".", "lstrip", "(", "field_delim", ")", "# if there was some slight adjustment error (example: the last ecc block of the last file was the field_delim, then we will start w...
From a raw ecc entry (a string), extract the metadata fields (filename, filesize, ecc for both), and the rest being blocks of hash and ecc per blocks of the original file's header
[ "From", "a", "raw", "ecc", "entry", "(", "a", "string", ")", "extract", "the", "metadata", "fields", "(", "filename", "filesize", "ecc", "for", "both", ")", "and", "the", "rest", "being", "blocks", "of", "hash", "and", "ecc", "per", "blocks", "of", "th...
python
train
aaren/notedown
notedown/notedown.py
https://github.com/aaren/notedown/blob/1e920c7e4ecbe47420c12eed3d5bcae735121222/notedown/notedown.py#L536-L551
def data2uri(data, data_type):
    """Convert base64 data into a data uri with the given data_type."""
    MIME_MAP = {
        'image/jpeg': 'jpeg',
        'image/png': 'png',
        'text/plain': 'text',
        'text/html': 'html',
        'text/latex': 'latex',
        'application/javascript': 'html',
        'image/svg+xml': 'svg',
    }
    # Invert the map; on duplicate short names the later entry wins,
    # matching the original dict-comprehension behavior.
    inverse_map = {}
    for mime, short in MIME_MAP.items():
        inverse_map[short] = mime
    mime_type = inverse_map[data_type]
    # Strip newlines from the base64 payload before embedding it.
    payload = data[mime_type].replace('\n', '')
    return r"data:{mime};base64,{data}".format(mime=mime_type, data=payload)
[ "def", "data2uri", "(", "data", ",", "data_type", ")", ":", "MIME_MAP", "=", "{", "'image/jpeg'", ":", "'jpeg'", ",", "'image/png'", ":", "'png'", ",", "'text/plain'", ":", "'text'", ",", "'text/html'", ":", "'html'", ",", "'text/latex'", ":", "'latex'", "...
Convert base64 data into a data uri with the given data_type.
[ "Convert", "base64", "data", "into", "a", "data", "uri", "with", "the", "given", "data_type", "." ]
python
train
nephila/djangocms-blog
djangocms_blog/cms_menus.py
https://github.com/nephila/djangocms-blog/blob/3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d/djangocms_blog/cms_menus.py#L29-L122
def get_nodes(self, request): """ Generates the nodelist :param request: :return: list of nodes """ nodes = [] language = get_language_from_request(request, check_path=True) current_site = get_current_site(request) page_site = self.instance.node.site if self.instance and page_site != current_site: return [] categories_menu = False posts_menu = False config = False if self.instance: if not self._config.get(self.instance.application_namespace, False): self._config[self.instance.application_namespace] = BlogConfig.objects.get( namespace=self.instance.application_namespace ) config = self._config[self.instance.application_namespace] if not getattr(request, 'toolbar', False) or not request.toolbar.edit_mode_active: if self.instance == self.instance.get_draft_object(): return [] else: if self.instance == self.instance.get_public_object(): return [] if config and config.menu_structure in (MENU_TYPE_COMPLETE, MENU_TYPE_CATEGORIES): categories_menu = True if config and config.menu_structure in (MENU_TYPE_COMPLETE, MENU_TYPE_POSTS): posts_menu = True if config and config.menu_structure in (MENU_TYPE_NONE, ): return nodes used_categories = [] if posts_menu: posts = Post.objects if hasattr(self, 'instance') and self.instance: posts = posts.namespace(self.instance.application_namespace).on_site() posts = posts.active_translations(language).distinct().\ select_related('app_config').prefetch_related('translations', 'categories') for post in posts: post_id = None parent = None used_categories.extend(post.categories.values_list('pk', flat=True)) if categories_menu: category = post.categories.first() if category: parent = '{0}-{1}'.format(category.__class__.__name__, category.pk) post_id = '{0}-{1}'.format(post.__class__.__name__, post.pk), else: post_id = '{0}-{1}'.format(post.__class__.__name__, post.pk), if post_id: node = NavigationNode( post.get_title(), post.get_absolute_url(language), post_id, parent ) nodes.append(node) if categories_menu: categories = BlogCategory.objects 
if config: categories = categories.namespace(self.instance.application_namespace) if config and not config.menu_empty_categories: categories = categories.active_translations(language).filter( pk__in=used_categories ).distinct() else: categories = categories.active_translations(language).distinct() categories = categories.order_by('parent__id', 'translations__name').\ select_related('app_config').prefetch_related('translations') added_categories = [] for category in categories: if category.pk not in added_categories: node = NavigationNode( category.name, category.get_absolute_url(), '{0}-{1}'.format(category.__class__.__name__, category.pk), ( '{0}-{1}'.format( category.__class__.__name__, category.parent.id ) if category.parent else None ) ) nodes.append(node) added_categories.append(category.pk) return nodes
[ "def", "get_nodes", "(", "self", ",", "request", ")", ":", "nodes", "=", "[", "]", "language", "=", "get_language_from_request", "(", "request", ",", "check_path", "=", "True", ")", "current_site", "=", "get_current_site", "(", "request", ")", "page_site", "...
Generates the nodelist :param request: :return: list of nodes
[ "Generates", "the", "nodelist" ]
python
train
trailofbits/manticore
manticore/native/cpu/x86.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/native/cpu/x86.py#L1135-L1177
def AAS(cpu): """ ASCII Adjust AL after subtraction. Adjusts the result of the subtraction of two unpacked BCD values to create a unpacked BCD result. The AL register is the implied source and destination operand for this instruction. The AAS instruction is only useful when it follows a SUB instruction that subtracts (binary subtraction) one unpacked BCD value from another and stores a byte result in the AL register. The AAA instruction then adjusts the contents of the AL register to contain the correct 1-digit unpacked BCD result. If the subtraction produced a decimal carry, the AH register is decremented by 1, and the CF and AF flags are set. If no decimal carry occurred, the CF and AF flags are cleared, and the AH register is unchanged. In either case, the AL register is left with its top nibble set to 0. The AF and CF flags are set to 1 if there is a decimal borrow; otherwise, they are cleared to 0. This instruction executes as described in compatibility mode and legacy mode. It is not valid in 64-bit mode.:: IF ((AL AND 0FH) > 9) Operators.OR(AF = 1) THEN AX = AX - 6; AH = AH - 1; AF = 1; CF = 1; ELSE CF = 0; AF = 0; FI; AL = AL AND 0FH; :param cpu: current CPU. """ if (cpu.AL & 0x0F > 9) or cpu.AF == 1: cpu.AX = cpu.AX - 6 cpu.AH = cpu.AH - 1 cpu.AF = True cpu.CF = True else: cpu.AF = False cpu.CF = False cpu.AL = cpu.AL & 0x0f
[ "def", "AAS", "(", "cpu", ")", ":", "if", "(", "cpu", ".", "AL", "&", "0x0F", ">", "9", ")", "or", "cpu", ".", "AF", "==", "1", ":", "cpu", ".", "AX", "=", "cpu", ".", "AX", "-", "6", "cpu", ".", "AH", "=", "cpu", ".", "AH", "-", "1", ...
ASCII Adjust AL after subtraction. Adjusts the result of the subtraction of two unpacked BCD values to create a unpacked BCD result. The AL register is the implied source and destination operand for this instruction. The AAS instruction is only useful when it follows a SUB instruction that subtracts (binary subtraction) one unpacked BCD value from another and stores a byte result in the AL register. The AAA instruction then adjusts the contents of the AL register to contain the correct 1-digit unpacked BCD result. If the subtraction produced a decimal carry, the AH register is decremented by 1, and the CF and AF flags are set. If no decimal carry occurred, the CF and AF flags are cleared, and the AH register is unchanged. In either case, the AL register is left with its top nibble set to 0. The AF and CF flags are set to 1 if there is a decimal borrow; otherwise, they are cleared to 0. This instruction executes as described in compatibility mode and legacy mode. It is not valid in 64-bit mode.:: IF ((AL AND 0FH) > 9) Operators.OR(AF = 1) THEN AX = AX - 6; AH = AH - 1; AF = 1; CF = 1; ELSE CF = 0; AF = 0; FI; AL = AL AND 0FH; :param cpu: current CPU.
[ "ASCII", "Adjust", "AL", "after", "subtraction", "." ]
python
valid
tanghaibao/jcvi
jcvi/utils/brewer2mpl.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/brewer2mpl.py#L300-L336
def _load_maps_by_type(map_type): """ Load all maps of a given type into a dictionary. Color maps are loaded as BrewerMap objects. Dictionary is keyed by map name and then integer numbers of defined colors. There is an additional 'max' key that points to the color map with the largest number of defined colors. Parameters ---------- map_type : {'Sequential', 'Diverging', 'Qualitative'} Returns ------- maps : dict of BrewerMap """ seq_maps = COLOR_MAPS[map_type] loaded_maps = {} for map_name in seq_maps: loaded_maps[map_name] = {} for num in seq_maps[map_name]: inum = int(num) colors = seq_maps[map_name][num]['Colors'] bmap = BrewerMap(map_name, map_type, colors) loaded_maps[map_name][inum] = bmap max_num = int(max(seq_maps[map_name].keys(), key=int)) loaded_maps[map_name]['max'] = loaded_maps[map_name][max_num] return loaded_maps
[ "def", "_load_maps_by_type", "(", "map_type", ")", ":", "seq_maps", "=", "COLOR_MAPS", "[", "map_type", "]", "loaded_maps", "=", "{", "}", "for", "map_name", "in", "seq_maps", ":", "loaded_maps", "[", "map_name", "]", "=", "{", "}", "for", "num", "in", "...
Load all maps of a given type into a dictionary. Color maps are loaded as BrewerMap objects. Dictionary is keyed by map name and then integer numbers of defined colors. There is an additional 'max' key that points to the color map with the largest number of defined colors. Parameters ---------- map_type : {'Sequential', 'Diverging', 'Qualitative'} Returns ------- maps : dict of BrewerMap
[ "Load", "all", "maps", "of", "a", "given", "type", "into", "a", "dictionary", "." ]
python
train
peshay/tpm
tpm.py
https://github.com/peshay/tpm/blob/8e64a4d8b89d54bdd2c92d965463a7508aa3d0bc/tpm.py#L301-L306
def change_parent_of_project(self, ID, NewParrentID): """Change parent of project.""" # http://teampasswordmanager.com/docs/api-projects/#change_parent log.info('Change parrent for project %s to %s' % (ID, NewParrentID)) data = {'parent_id': NewParrentID} self.put('projects/%s/change_parent.json' % ID, data)
[ "def", "change_parent_of_project", "(", "self", ",", "ID", ",", "NewParrentID", ")", ":", "# http://teampasswordmanager.com/docs/api-projects/#change_parent", "log", ".", "info", "(", "'Change parrent for project %s to %s'", "%", "(", "ID", ",", "NewParrentID", ")", ")", ...
Change parent of project.
[ "Change", "parent", "of", "project", "." ]
python
train
libtcod/python-tcod
tcod/random.py
https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tcod/random.py#L76-L86
def uniform(self, low: float, high: float) -> float: """Return a random floating number in the range: low <= n <= high. Args: low (float): The lower bound of the random range. high (float): The upper bound of the random range. Returns: float: A random float. """ return float(lib.TCOD_random_get_double(self.random_c, low, high))
[ "def", "uniform", "(", "self", ",", "low", ":", "float", ",", "high", ":", "float", ")", "->", "float", ":", "return", "float", "(", "lib", ".", "TCOD_random_get_double", "(", "self", ".", "random_c", ",", "low", ",", "high", ")", ")" ]
Return a random floating number in the range: low <= n <= high. Args: low (float): The lower bound of the random range. high (float): The upper bound of the random range. Returns: float: A random float.
[ "Return", "a", "random", "floating", "number", "in", "the", "range", ":", "low", "<", "=", "n", "<", "=", "high", "." ]
python
train
google/textfsm
textfsm/parser.py
https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/parser.py#L628-L657
def _AppendRecord(self): """Adds current record to result if well formed.""" # If no Values then don't output. if not self.values: return cur_record = [] for value in self.values: try: value.OnSaveRecord() except SkipRecord: self._ClearRecord() return except SkipValue: continue # Build current record into a list. cur_record.append(value.value) # If no Values in template or whole record is empty then don't output. if len(cur_record) == (cur_record.count(None) + cur_record.count([])): return # Replace any 'None' entries with null string ''. while None in cur_record: cur_record[cur_record.index(None)] = '' self._result.append(cur_record) self._ClearRecord()
[ "def", "_AppendRecord", "(", "self", ")", ":", "# If no Values then don't output.", "if", "not", "self", ".", "values", ":", "return", "cur_record", "=", "[", "]", "for", "value", "in", "self", ".", "values", ":", "try", ":", "value", ".", "OnSaveRecord", ...
Adds current record to result if well formed.
[ "Adds", "current", "record", "to", "result", "if", "well", "formed", "." ]
python
train
gregoil/ipdbugger
ipdbugger/__init__.py
https://github.com/gregoil/ipdbugger/blob/9575734ec26f6be86ae263496d50eb60bb988b21/ipdbugger/__init__.py#L85-L105
def start_debugging(): """Start a debugging session after catching an exception. This prints the traceback and start ipdb session in the frame of the error. """ exc_type, exc_value, exc_tb = sys.exc_info() # If the exception has been annotated to be re-raised, raise the exception if hasattr(exc_value, '_ipdbugger_let_raise'): raise_(*sys.exc_info()) print() for line in traceback.format_exception(exc_type, exc_value, exc_tb): print(colored(line, 'red'), end=' ') # Get the frame with the error. test_frame = sys._getframe(-1).f_back from ipdb.__main__ import wrap_sys_excepthook wrap_sys_excepthook() IPDBugger(exc_info=sys.exc_info()).set_trace(test_frame)
[ "def", "start_debugging", "(", ")", ":", "exc_type", ",", "exc_value", ",", "exc_tb", "=", "sys", ".", "exc_info", "(", ")", "# If the exception has been annotated to be re-raised, raise the exception", "if", "hasattr", "(", "exc_value", ",", "'_ipdbugger_let_raise'", "...
Start a debugging session after catching an exception. This prints the traceback and start ipdb session in the frame of the error.
[ "Start", "a", "debugging", "session", "after", "catching", "an", "exception", "." ]
python
train
9wfox/tornadoweb
tornadoweb/utility.py
https://github.com/9wfox/tornadoweb/blob/2286b66fbe10e4d9f212b979664c15fa17adf378/tornadoweb/utility.py#L131-L138
def args_length(min_len, max_len, *args): """ 检查参数长度 """ not_null(*args) if not all(map(lambda v: min_len <= len(v) <= max_len, args)): raise ValueError("Argument length must be between {0} and {1}!".format(min_len, max_len))
[ "def", "args_length", "(", "min_len", ",", "max_len", ",", "*", "args", ")", ":", "not_null", "(", "*", "args", ")", "if", "not", "all", "(", "map", "(", "lambda", "v", ":", "min_len", "<=", "len", "(", "v", ")", "<=", "max_len", ",", "args", ")"...
检查参数长度
[ "检查参数长度" ]
python
train
gagneurlab/concise
concise/legacy/concise.py
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/concise.py#L642-L651
def _predict_in_session(self, sess, other_var, X_feat, X_seq, variable="y_pred"): """ Predict y (or any other variable) from inside the tf session. Variable has to be in other_var """ # other_var["tf_X_seq"]: X_seq, tf_y: y, feed_dict = {other_var["tf_X_feat"]: X_feat, other_var["tf_X_seq"]: X_seq} y_pred = sess.run(other_var[variable], feed_dict=feed_dict) return y_pred
[ "def", "_predict_in_session", "(", "self", ",", "sess", ",", "other_var", ",", "X_feat", ",", "X_seq", ",", "variable", "=", "\"y_pred\"", ")", ":", "# other_var[\"tf_X_seq\"]: X_seq, tf_y: y,", "feed_dict", "=", "{", "other_var", "[", "\"tf_X_feat\"", "]", ":", ...
Predict y (or any other variable) from inside the tf session. Variable has to be in other_var
[ "Predict", "y", "(", "or", "any", "other", "variable", ")", "from", "inside", "the", "tf", "session", ".", "Variable", "has", "to", "be", "in", "other_var" ]
python
train
tjcsl/cslbot
cslbot/commands/gcc.py
https://github.com/tjcsl/cslbot/blob/aebe07be47141f61d7c180706bddfb707f19b2b5/cslbot/commands/gcc.py#L26-L59
def cmd(send, msg, args): """Compiles stuff. Syntax: {command} <code> """ if args['type'] == 'privmsg': send('GCC is a group exercise!') return if 'include' in msg: send("We're not a terribly inclusive community around here.") return if 'import' in msg: send("I'll have you know that standards compliance is important.") return tmpfile = tempfile.NamedTemporaryFile() for line in msg.splitlines(): line = line + '\n' tmpfile.write(line.encode()) tmpfile.flush() process = subprocess.run(['gcc', '-o', '/dev/null', '-xc', tmpfile.name], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, timeout=5, universal_newlines=True) tmpfile.close() # Take the last 3 lines to prevent Excess Flood on long error messages output = process.stdout.splitlines()[:3] for line in output: send(line, target=args['nick']) if process.returncode == 0: send(gen_slogan("gcc victory")) else: send(gen_slogan("gcc failed"))
[ "def", "cmd", "(", "send", ",", "msg", ",", "args", ")", ":", "if", "args", "[", "'type'", "]", "==", "'privmsg'", ":", "send", "(", "'GCC is a group exercise!'", ")", "return", "if", "'include'", "in", "msg", ":", "send", "(", "\"We're not a terribly incl...
Compiles stuff. Syntax: {command} <code>
[ "Compiles", "stuff", "." ]
python
train
genialis/resolwe-runtime-utils
resolwe_runtime_utils.py
https://github.com/genialis/resolwe-runtime-utils/blob/5657d7cf981972a5259b9b475eae220479401001/resolwe_runtime_utils.py#L73-L98
def save_file(key, file_path, *refs): """Convert the given parameters to a special JSON object. JSON object is of the form: { key: {"file": file_path}}, or { key: {"file": file_path, "refs": [refs[0], refs[1], ... ]}} """ if not os.path.isfile(file_path): return error("Output '{}' set to a missing file: '{}'.".format(key, file_path)) result = {key: {"file": file_path}} if refs: missing_refs = [ ref for ref in refs if not (os.path.isfile(ref) or os.path.isdir(ref)) ] if len(missing_refs) > 0: return error( "Output '{}' set to missing references: '{}'.".format( key, ', '.join(missing_refs) ) ) result[key]['refs'] = refs return json.dumps(result)
[ "def", "save_file", "(", "key", ",", "file_path", ",", "*", "refs", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "file_path", ")", ":", "return", "error", "(", "\"Output '{}' set to a missing file: '{}'.\"", ".", "format", "(", "key", ",",...
Convert the given parameters to a special JSON object. JSON object is of the form: { key: {"file": file_path}}, or { key: {"file": file_path, "refs": [refs[0], refs[1], ... ]}}
[ "Convert", "the", "given", "parameters", "to", "a", "special", "JSON", "object", "." ]
python
train
akfullfo/taskforce
taskforce/watch_modules.py
https://github.com/akfullfo/taskforce/blob/bc6dd744bd33546447d085dbd18a350532220193/taskforce/watch_modules.py#L67-L102
def _build(self, name, **params): """ Rebuild operations by removing open modules that no longer need to be watched, and adding new modules if they are not currently being watched. This is done by comparing self.modules to watch_files.paths_open """ log = self._getparam('log', self._discard, **params) # Find all the modules that no longer need watching # rebuild = False wparams = params.copy() wparams['commit'] = False for path in list(self._watch.paths_open): if path in self.modules: continue try: self._watch.remove(path, **wparams) rebuild = True except Exception as e: log.warning("Remove of watched module %r failed -- %s", path, e) log.debug("Removed watch for path %r", path) # Find all the modules that are new and should be watched # for path in list(self.modules): if path not in self._watch.paths_open: try: self._watch.add(path, **wparams) rebuild = True except Exception as e: log.error("watch failed on module %r -- %s", path, e) continue if rebuild: self._watch.commit(**params)
[ "def", "_build", "(", "self", ",", "name", ",", "*", "*", "params", ")", ":", "log", "=", "self", ".", "_getparam", "(", "'log'", ",", "self", ".", "_discard", ",", "*", "*", "params", ")", "# Find all the modules that no longer need watching", "#", "rebu...
Rebuild operations by removing open modules that no longer need to be watched, and adding new modules if they are not currently being watched. This is done by comparing self.modules to watch_files.paths_open
[ "Rebuild", "operations", "by", "removing", "open", "modules", "that", "no", "longer", "need", "to", "be", "watched", "and", "adding", "new", "modules", "if", "they", "are", "not", "currently", "being", "watched", "." ]
python
train
waqasbhatti/astrobase
astrobase/plotbase.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/plotbase.py#L97-L443
def plot_magseries(times, mags, magsarefluxes=False, errs=None, out=None, sigclip=30.0, normto='globalmedian', normmingap=4.0, timebin=None, yrange=None, segmentmingap=100.0, plotdpi=100): '''This plots a magnitude/flux time-series. Parameters ---------- times,mags : np.array The mag/flux time-series to plot as a function of time. magsarefluxes : bool Indicates if the input `mags` array is actually an array of flux measurements instead of magnitude measurements. If this is set to True, then the plot y-axis will be set as appropriate for mag or fluxes. In addition: - if `normto` is 'zero', then the median flux is divided from each observation's flux value to yield normalized fluxes with 1.0 as the global median. - if `normto` is 'globalmedian', then the global median flux value across the entire time series is multiplied with each measurement. - if `norm` is set to a `float`, then this number is multiplied with the flux value for each measurement. errs : np.array or None If this is provided, contains the measurement errors associated with each measurement of flux/mag in time-series. Providing this kwarg will add errbars to the output plot. out : str or StringIO/BytesIO object or None Sets the output type and target: - If `out` is a string, will save the plot to the specified file name. - If `out` is a StringIO/BytesIO object, will save the plot to that file handle. This can be useful to carry out additional operations on the output binary stream, or convert it to base64 text for embedding in HTML pages. - If `out` is None, will save the plot to a file called 'magseries-plot.png' in the current working directory. sigclip : float or int or sequence of two floats/ints or None If a single float or int, a symmetric sigma-clip will be performed using the number provided as the sigma-multiplier to cut out from the input time-series. If a list of two ints/floats is provided, the function will perform an 'asymmetric' sigma-clip. 
The first element in this list is the sigma value to use for fainter flux/mag values; the second element in this list is the sigma value to use for brighter flux/mag values. For example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma dimmings and greater than 3-sigma brightenings. Here the meaning of "dimming" and "brightening" is set by *physics* (not the magnitude system), which is why the `magsarefluxes` kwarg must be correctly set. If `sigclip` is None, no sigma-clipping will be performed, and the time-series (with non-finite elems removed) will be passed through to the output. normto : {'globalmedian', 'zero'} or a float Sets the normalization target:: 'globalmedian' -> norms each mag to the global median of the LC column 'zero' -> norms each mag to zero a float -> norms each mag to this specified float value. normmingap : float This defines how much the difference between consecutive measurements is allowed to be to consider them as parts of different timegroups. By default it is set to 4.0 days. timebin : float or None The bin size to use to group together measurements closer than this amount in time. This is in seconds. If this is None, no time-binning will be performed. yrange : list of two floats or None This is used to provide a custom y-axis range to the plot. If None, will automatically determine y-axis range. segmentmingap : float or None This controls the minimum length of time (in days) required to consider a timegroup in the light curve as a separate segment. This is useful when the light curve consists of measurements taken over several seasons, so there's lots of dead space in the plot that can be cut out to zoom in on the interesting stuff. If `segmentmingap` is not None, the magseries plot will be cut in this way and the x-axis will show these breaks. plotdpi : int Sets the resolution in DPI for PNG plots (default = 100). 
Returns ------- str or BytesIO/StringIO object Returns based on the input: - If `out` is a str or None, the path to the generated plot file is returned. - If `out` is a StringIO/BytesIO object, will return the StringIO/BytesIO object to which the plot was written. ''' # sigclip the magnitude timeseries stimes, smags, serrs = sigclip_magseries(times, mags, errs, magsarefluxes=magsarefluxes, sigclip=sigclip) # now we proceed to binning if timebin and errs is not None: binned = time_bin_magseries_with_errs(stimes, smags, serrs, binsize=timebin) btimes, bmags, berrs = (binned['binnedtimes'], binned['binnedmags'], binned['binnederrs']) elif timebin and errs is None: binned = time_bin_magseries(stimes, smags, binsize=timebin) btimes, bmags, berrs = binned['binnedtimes'], binned['binnedmags'], None else: btimes, bmags, berrs = stimes, smags, serrs # check if we need to normalize if normto is not False: btimes, bmags = normalize_magseries(btimes, bmags, normto=normto, magsarefluxes=magsarefluxes, mingap=normmingap) btimeorigin = btimes.min() btimes = btimes - btimeorigin ################################## ## FINALLY PLOT THE LIGHT CURVE ## ################################## # if we're going to plot with segment gaps highlighted, then find the gaps if segmentmingap is not None: ntimegroups, timegroups = find_lc_timegroups(btimes, mingap=segmentmingap) # get the yrange for all the plots if it's given if yrange and isinstance(yrange,(list,tuple)) and len(yrange) == 2: ymin, ymax = yrange # if it's not given, figure it out else: # the plot y limits are just 0.05 mags on each side if mags are used if not magsarefluxes: ymin, ymax = (bmags.min() - 0.05, bmags.max() + 0.05) # if we're dealing with fluxes, limits are 2% of the flux range per side else: ycov = bmags.max() - bmags.min() ymin = bmags.min() - 0.02*ycov ymax = bmags.max() + 0.02*ycov # if we're supposed to make the plot segment-aware (i.e. 
gaps longer than # segmentmingap will be cut out) if segmentmingap and ntimegroups > 1: LOGINFO('%s time groups found' % ntimegroups) # our figure is now a multiple axis plot # the aspect ratio is a bit wider fig, axes = plt.subplots(1,ntimegroups,sharey=True) fig.set_size_inches(10,4.8) axes = np.ravel(axes) # now go through each axis and make the plots for each timegroup for timegroup, ax, axind in zip(timegroups, axes, range(len(axes))): tgtimes = btimes[timegroup] tgmags = bmags[timegroup] if berrs: tgerrs = berrs[timegroup] else: tgerrs = None LOGINFO('axes: %s, timegroup %s: JD %.3f to %.3f' % ( axind, axind+1, btimeorigin + tgtimes.min(), btimeorigin + tgtimes.max()) ) ax.errorbar(tgtimes, tgmags, fmt='go', yerr=tgerrs, markersize=2.0, markeredgewidth=0.0, ecolor='grey', capsize=0) # don't use offsets on any xaxis ax.get_xaxis().get_major_formatter().set_useOffset(False) # fix the ticks to use no yoffsets and remove right spines for first # axes instance if axind == 0: ax.get_yaxis().get_major_formatter().set_useOffset(False) ax.spines['right'].set_visible(False) ax.yaxis.tick_left() # remove the right and left spines for the other axes instances elif 0 < axind < (len(axes)-1): ax.spines['right'].set_visible(False) ax.spines['left'].set_visible(False) ax.tick_params(right='off', labelright='off', left='off',labelleft='off') # make the left spines invisible for the last axes instance elif axind == (len(axes)-1): ax.spines['left'].set_visible(False) ax.spines['right'].set_visible(True) ax.yaxis.tick_right() # set the yaxis limits if not magsarefluxes: ax.set_ylim(ymax, ymin) else: ax.set_ylim(ymin, ymax) # now figure out the xaxis ticklabels and ranges tgrange = tgtimes.max() - tgtimes.min() if tgrange < 10.0: ticklocations = [tgrange/2.0] ax.set_xlim(npmin(tgtimes) - 0.5, npmax(tgtimes) + 0.5) elif 10.0 < tgrange < 30.0: ticklocations = np.linspace(tgtimes.min()+5.0, tgtimes.max()-5.0, num=2) ax.set_xlim(npmin(tgtimes) - 2.0, npmax(tgtimes) + 2.0) elif 30.0 < 
tgrange < 100.0: ticklocations = np.linspace(tgtimes.min()+10.0, tgtimes.max()-10.0, num=3) ax.set_xlim(npmin(tgtimes) - 2.5, npmax(tgtimes) + 2.5) else: ticklocations = np.linspace(tgtimes.min()+20.0, tgtimes.max()-20.0, num=3) ax.set_xlim(npmin(tgtimes) - 3.0, npmax(tgtimes) + 3.0) ax.xaxis.set_ticks([int(x) for x in ticklocations]) # done with plotting all the sub axes # make the distance between sub plots smaller plt.subplots_adjust(wspace=0.07) # make the overall x and y labels fig.text(0.5, 0.00, 'JD - %.3f (not showing gaps > %.2f d)' % (btimeorigin, segmentmingap), ha='center') if not magsarefluxes: fig.text(0.02, 0.5, 'magnitude', va='center', rotation='vertical') else: fig.text(0.02, 0.5, 'flux', va='center', rotation='vertical') # make normal figure otherwise else: fig = plt.figure() fig.set_size_inches(7.5,4.8) plt.errorbar(btimes, bmags, fmt='go', yerr=berrs, markersize=2.0, markeredgewidth=0.0, ecolor='grey', capsize=0) # make a grid plt.grid(color='#a9a9a9', alpha=0.9, zorder=0, linewidth=1.0, linestyle=':') # fix the ticks to use no offsets plt.gca().get_yaxis().get_major_formatter().set_useOffset(False) plt.gca().get_xaxis().get_major_formatter().set_useOffset(False) plt.xlabel('JD - %.3f' % btimeorigin) # set the yaxis limits and labels if not magsarefluxes: plt.ylim(ymax, ymin) plt.ylabel('magnitude') else: plt.ylim(ymin, ymax) plt.ylabel('flux') # check if the output filename is actually an instance of StringIO if sys.version_info[:2] < (3,0): is_Strio = isinstance(out, cStringIO.InputType) else: is_Strio = isinstance(out, Strio) # write the plot out to a file if requested if out and not is_Strio: if out.endswith('.png'): plt.savefig(out,bbox_inches='tight',dpi=plotdpi) else: plt.savefig(out,bbox_inches='tight') plt.close() return os.path.abspath(out) elif out and is_Strio: plt.savefig(out, bbox_inches='tight', dpi=plotdpi, format='png') return out elif not out and dispok: plt.show() plt.close() return else: LOGWARNING('no output file specified 
and no $DISPLAY set, ' 'saving to magseries-plot.png in current directory') outfile = 'magseries-plot.png' plt.savefig(outfile,bbox_inches='tight',dpi=plotdpi) plt.close() return os.path.abspath(outfile)
[ "def", "plot_magseries", "(", "times", ",", "mags", ",", "magsarefluxes", "=", "False", ",", "errs", "=", "None", ",", "out", "=", "None", ",", "sigclip", "=", "30.0", ",", "normto", "=", "'globalmedian'", ",", "normmingap", "=", "4.0", ",", "timebin", ...
This plots a magnitude/flux time-series. Parameters ---------- times,mags : np.array The mag/flux time-series to plot as a function of time. magsarefluxes : bool Indicates if the input `mags` array is actually an array of flux measurements instead of magnitude measurements. If this is set to True, then the plot y-axis will be set as appropriate for mag or fluxes. In addition: - if `normto` is 'zero', then the median flux is divided from each observation's flux value to yield normalized fluxes with 1.0 as the global median. - if `normto` is 'globalmedian', then the global median flux value across the entire time series is multiplied with each measurement. - if `norm` is set to a `float`, then this number is multiplied with the flux value for each measurement. errs : np.array or None If this is provided, contains the measurement errors associated with each measurement of flux/mag in time-series. Providing this kwarg will add errbars to the output plot. out : str or StringIO/BytesIO object or None Sets the output type and target: - If `out` is a string, will save the plot to the specified file name. - If `out` is a StringIO/BytesIO object, will save the plot to that file handle. This can be useful to carry out additional operations on the output binary stream, or convert it to base64 text for embedding in HTML pages. - If `out` is None, will save the plot to a file called 'magseries-plot.png' in the current working directory. sigclip : float or int or sequence of two floats/ints or None If a single float or int, a symmetric sigma-clip will be performed using the number provided as the sigma-multiplier to cut out from the input time-series. If a list of two ints/floats is provided, the function will perform an 'asymmetric' sigma-clip. The first element in this list is the sigma value to use for fainter flux/mag values; the second element in this list is the sigma value to use for brighter flux/mag values. 
For example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma dimmings and greater than 3-sigma brightenings. Here the meaning of "dimming" and "brightening" is set by *physics* (not the magnitude system), which is why the `magsarefluxes` kwarg must be correctly set. If `sigclip` is None, no sigma-clipping will be performed, and the time-series (with non-finite elems removed) will be passed through to the output. normto : {'globalmedian', 'zero'} or a float Sets the normalization target:: 'globalmedian' -> norms each mag to the global median of the LC column 'zero' -> norms each mag to zero a float -> norms each mag to this specified float value. normmingap : float This defines how much the difference between consecutive measurements is allowed to be to consider them as parts of different timegroups. By default it is set to 4.0 days. timebin : float or None The bin size to use to group together measurements closer than this amount in time. This is in seconds. If this is None, no time-binning will be performed. yrange : list of two floats or None This is used to provide a custom y-axis range to the plot. If None, will automatically determine y-axis range. segmentmingap : float or None This controls the minimum length of time (in days) required to consider a timegroup in the light curve as a separate segment. This is useful when the light curve consists of measurements taken over several seasons, so there's lots of dead space in the plot that can be cut out to zoom in on the interesting stuff. If `segmentmingap` is not None, the magseries plot will be cut in this way and the x-axis will show these breaks. plotdpi : int Sets the resolution in DPI for PNG plots (default = 100). Returns ------- str or BytesIO/StringIO object Returns based on the input: - If `out` is a str or None, the path to the generated plot file is returned. - If `out` is a StringIO/BytesIO object, will return the StringIO/BytesIO object to which the plot was written.
[ "This", "plots", "a", "magnitude", "/", "flux", "time", "-", "series", "." ]
python
valid
tensorflow/datasets
tensorflow_datasets/core/utils/py_utils.py
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/py_utils.py#L55-L60
def temporary_assignment(obj, attr, value): """Temporarily assign obj.attr to value.""" original = getattr(obj, attr, None) setattr(obj, attr, value) yield setattr(obj, attr, original)
[ "def", "temporary_assignment", "(", "obj", ",", "attr", ",", "value", ")", ":", "original", "=", "getattr", "(", "obj", ",", "attr", ",", "None", ")", "setattr", "(", "obj", ",", "attr", ",", "value", ")", "yield", "setattr", "(", "obj", ",", "attr",...
Temporarily assign obj.attr to value.
[ "Temporarily", "assign", "obj", ".", "attr", "to", "value", "." ]
python
train
spyder-ide/spyder-kernels
spyder_kernels/utils/nsview.py
https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/nsview.py#L533-L544
def get_human_readable_type(item): """Return human-readable type string of an item""" if isinstance(item, (ndarray, MaskedArray)): return item.dtype.name elif isinstance(item, Image): return "Image" else: text = get_type_string(item) if text is None: text = to_text_string('unknown') else: return text[text.find('.')+1:]
[ "def", "get_human_readable_type", "(", "item", ")", ":", "if", "isinstance", "(", "item", ",", "(", "ndarray", ",", "MaskedArray", ")", ")", ":", "return", "item", ".", "dtype", ".", "name", "elif", "isinstance", "(", "item", ",", "Image", ")", ":", "r...
Return human-readable type string of an item
[ "Return", "human", "-", "readable", "type", "string", "of", "an", "item" ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/parallel/controller/sqlitedb.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/controller/sqlitedb.py#L361-L365
def drop_matching_records(self, check): """Remove a record from the DB.""" expr,args = self._render_expression(check) query = "DELETE FROM %s WHERE %s"%(self.table, expr) self._db.execute(query,args)
[ "def", "drop_matching_records", "(", "self", ",", "check", ")", ":", "expr", ",", "args", "=", "self", ".", "_render_expression", "(", "check", ")", "query", "=", "\"DELETE FROM %s WHERE %s\"", "%", "(", "self", ".", "table", ",", "expr", ")", "self", ".",...
Remove a record from the DB.
[ "Remove", "a", "record", "from", "the", "DB", "." ]
python
test
mattloper/opendr
opendr/geometry.py
https://github.com/mattloper/opendr/blob/bc16a6a51771d6e062d088ba5cede66649b7c7ec/opendr/geometry.py#L411-L429
def Bx(self): """Compute a stack of skew-symmetric matrices which can be multiplied by 'a' to get the cross product. See: http://en.wikipedia.org/wiki/Cross_product#Conversion_to_matrix_multiplication """ # 0 self.b3 -self.b2 # -self.b3 0 self.b1 # self.b2 -self.b1 0 m = np.zeros((len(self.b1), 3, 3)) m[:, 0, 1] = +self.b3 m[:, 0, 2] = -self.b2 m[:, 1, 0] = -self.b3 m[:, 1, 2] = +self.b1 m[:, 2, 0] = +self.b2 m[:, 2, 1] = -self.b1 return m
[ "def", "Bx", "(", "self", ")", ":", "# 0 self.b3 -self.b2", "# -self.b3 0 self.b1", "# self.b2 -self.b1 0", "m", "=", "np", ".", "zeros", "(", "(", "len", "(", "self", ".", "b1", ")", ",", "3", ",", "3", ")", ")", "m", "[", ":", "...
Compute a stack of skew-symmetric matrices which can be multiplied by 'a' to get the cross product. See: http://en.wikipedia.org/wiki/Cross_product#Conversion_to_matrix_multiplication
[ "Compute", "a", "stack", "of", "skew", "-", "symmetric", "matrices", "which", "can", "be", "multiplied", "by", "a", "to", "get", "the", "cross", "product", ".", "See", ":" ]
python
train
desbma/sacad
sacad/cover.py
https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/cover.py#L320-L325
def setSizeMetadata(self, size): """ Set size image metadata to what has been reliably identified. """ assert((self.needMetadataUpdate(CoverImageMetadata.SIZE)) or (self.size == size)) self.size = size self.check_metadata &= ~CoverImageMetadata.SIZE
[ "def", "setSizeMetadata", "(", "self", ",", "size", ")", ":", "assert", "(", "(", "self", ".", "needMetadataUpdate", "(", "CoverImageMetadata", ".", "SIZE", ")", ")", "or", "(", "self", ".", "size", "==", "size", ")", ")", "self", ".", "size", "=", "...
Set size image metadata to what has been reliably identified.
[ "Set", "size", "image", "metadata", "to", "what", "has", "been", "reliably", "identified", "." ]
python
train
bcbio/bcbio-nextgen
bcbio/server/background.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/server/background.py#L66-L85
def stat( self, *args ): '''Check process completion and consume pending I/O data''' self.pipe.poll() if not self.pipe.returncode is None: '''cleanup handlers and timeouts''' if not self.expiration is None: self.ioloop.remove_timeout(self.expiration) for fd, dest in self.streams: self.ioloop.remove_handler(fd) '''schedulle callback (first try to read all pending data)''' self.ioloop.add_callback(self.on_finish) for fd, dest in self.streams: while True: try: data = os.read(fd, 4096) if len(data) == 0: break print(data.rstrip()) except: break
[ "def", "stat", "(", "self", ",", "*", "args", ")", ":", "self", ".", "pipe", ".", "poll", "(", ")", "if", "not", "self", ".", "pipe", ".", "returncode", "is", "None", ":", "'''cleanup handlers and timeouts'''", "if", "not", "self", ".", "expiration", "...
Check process completion and consume pending I/O data
[ "Check", "process", "completion", "and", "consume", "pending", "I", "/", "O", "data" ]
python
train
heikomuller/sco-datastore
scodata/image.py
https://github.com/heikomuller/sco-datastore/blob/7180a6b51150667e47629da566aedaa742e39342/scodata/image.py#L879-L906
def get_image_files(directory, files): """Recursively iterate through directory tree and list all files that have a valid image file suffix Parameters ---------- directory : directory Path to directory on disk files : List(string) List of file names Returns ------- List(string) List of files that have a valid image suffix """ # For each file in the directory test if it is a valid image file or a # sub-directory. for f in os.listdir(directory): abs_file = os.path.join(directory, f) if os.path.isdir(abs_file): # Recursively iterate through sub-directories get_image_files(abs_file, files) else: # Add to file collection if has valid suffix if '.' in f and '.' + f.rsplit('.', 1)[1] in VALID_IMGFILE_SUFFIXES: files.append(abs_file) return files
[ "def", "get_image_files", "(", "directory", ",", "files", ")", ":", "# For each file in the directory test if it is a valid image file or a", "# sub-directory.", "for", "f", "in", "os", ".", "listdir", "(", "directory", ")", ":", "abs_file", "=", "os", ".", "path", ...
Recursively iterate through directory tree and list all files that have a valid image file suffix Parameters ---------- directory : directory Path to directory on disk files : List(string) List of file names Returns ------- List(string) List of files that have a valid image suffix
[ "Recursively", "iterate", "through", "directory", "tree", "and", "list", "all", "files", "that", "have", "a", "valid", "image", "file", "suffix" ]
python
train
s0lst1c3/grey_harvest
grey_harvest.py
https://github.com/s0lst1c3/grey_harvest/blob/811e5787ce7e613bc489b8e5e475eaa8790f4d66/grey_harvest.py#L176-L190
def _extract_ajax_endpoints(self): ''' make a GET request to freeproxylists.com/elite.html ''' url = '/'.join([DOC_ROOT, ELITE_PAGE]) response = requests.get(url) ''' extract the raw HTML doc from the response ''' raw_html = response.text ''' convert raw html into BeautifulSoup object ''' soup = BeautifulSoup(raw_html, 'lxml') for url in soup.select('table tr td table tr td a'): if 'elite #' in url.text: yield '%s/load_elite_d%s' % (DOC_ROOT, url['href'].lstrip('elite/'))
[ "def", "_extract_ajax_endpoints", "(", "self", ")", ":", "url", "=", "'/'", ".", "join", "(", "[", "DOC_ROOT", ",", "ELITE_PAGE", "]", ")", "response", "=", "requests", ".", "get", "(", "url", ")", "''' extract the raw HTML doc from the response '''", "raw_html"...
make a GET request to freeproxylists.com/elite.html
[ "make", "a", "GET", "request", "to", "freeproxylists", ".", "com", "/", "elite", ".", "html" ]
python
train
letuananh/chirptext
chirptext/leutile.py
https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/leutile.py#L629-L637
def read_file(self, file_path): ''' Read a configuration file and return configuration data ''' getLogger().info("Loading app config from {} file: {}".format(self.__mode, file_path)) if self.__mode == AppConfig.JSON: return json.loads(FileHelper.read(file_path), object_pairs_hook=OrderedDict) elif self.__mode == AppConfig.INI: config = configparser.ConfigParser(allow_no_value=True) config.read(file_path) return config
[ "def", "read_file", "(", "self", ",", "file_path", ")", ":", "getLogger", "(", ")", ".", "info", "(", "\"Loading app config from {} file: {}\"", ".", "format", "(", "self", ".", "__mode", ",", "file_path", ")", ")", "if", "self", ".", "__mode", "==", "AppC...
Read a configuration file and return configuration data
[ "Read", "a", "configuration", "file", "and", "return", "configuration", "data" ]
python
train
pinterest/thrift-tools
examples/methods_per_port.py
https://github.com/pinterest/thrift-tools/blob/64e74aec89e2491c781fc62d1c45944dc15aba28/examples/methods_per_port.py#L37-L56
def listening_ports(): """ Reads listening ports from /proc/net/tcp """ ports = [] if not os.path.exists(PROC_TCP): return ports with open(PROC_TCP) as fh: for line in fh: if '00000000:0000' not in line: continue parts = line.lstrip(' ').split(' ') if parts[2] != '00000000:0000': continue local_port = parts[1].split(':')[1] local_port = int('0x' + local_port, base=16) ports.append(local_port) return ports
[ "def", "listening_ports", "(", ")", ":", "ports", "=", "[", "]", "if", "not", "os", ".", "path", ".", "exists", "(", "PROC_TCP", ")", ":", "return", "ports", "with", "open", "(", "PROC_TCP", ")", "as", "fh", ":", "for", "line", "in", "fh", ":", "...
Reads listening ports from /proc/net/tcp
[ "Reads", "listening", "ports", "from", "/", "proc", "/", "net", "/", "tcp" ]
python
valid
shaunduncan/helga-facts
helga_facts.py
https://github.com/shaunduncan/helga-facts/blob/956b1d93abccdaaf318d7cac4451edc7e73bf5e9/helga_facts.py#L89-L95
def replace_fact(term, fact, author=''): """ Replaces an existing fact by removing it, then adding the new definition """ forget_fact(term) add_fact(term, fact, author) return random.choice(ACKS)
[ "def", "replace_fact", "(", "term", ",", "fact", ",", "author", "=", "''", ")", ":", "forget_fact", "(", "term", ")", "add_fact", "(", "term", ",", "fact", ",", "author", ")", "return", "random", ".", "choice", "(", "ACKS", ")" ]
Replaces an existing fact by removing it, then adding the new definition
[ "Replaces", "an", "existing", "fact", "by", "removing", "it", "then", "adding", "the", "new", "definition" ]
python
train
quantumlib/Cirq
cirq/circuits/_block_diagram_drawer.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/circuits/_block_diagram_drawer.py#L53-L107
def draw_curve(self, grid_characters: BoxDrawCharacterSet, *, top: bool = False, left: bool = False, right: bool = False, bottom: bool = False, crossing_char: Optional[str] = None): """Draws lines in the box using the given character set. Supports merging the new lines with the lines from a previous call to draw_curve, including when they have different character sets (assuming there exist characters merging the two). Args: grid_characters: The character set to draw the curve with. top: Draw topward leg? left: Draw leftward leg? right: Draw rightward leg? bottom: Draw downward leg? crossing_char: Overrides the all-legs-present character. Useful for ascii diagrams, where the + doesn't always look the clearest. """ if not any([top, left, right, bottom]): return # Remember which legs are new, old, or missing. sign_top = +1 if top else -1 if self.top else 0 sign_bottom = +1 if bottom else -1 if self.bottom else 0 sign_left = +1 if left else -1 if self.left else 0 sign_right = +1 if right else -1 if self.right else 0 # Add new segments. if top: self.top = grid_characters.top_bottom if bottom: self.bottom = grid_characters.top_bottom if left: self.left = grid_characters.left_right if right: self.right = grid_characters.left_right # Fill center. if not all([crossing_char, self.top, self.bottom, self.left, self.right]): crossing_char = box_draw_character( self._prev_curve_grid_chars, grid_characters, top=sign_top, bottom=sign_bottom, left=sign_left, right=sign_right) self.center = crossing_char or '' self._prev_curve_grid_chars = grid_characters
[ "def", "draw_curve", "(", "self", ",", "grid_characters", ":", "BoxDrawCharacterSet", ",", "*", ",", "top", ":", "bool", "=", "False", ",", "left", ":", "bool", "=", "False", ",", "right", ":", "bool", "=", "False", ",", "bottom", ":", "bool", "=", "...
Draws lines in the box using the given character set. Supports merging the new lines with the lines from a previous call to draw_curve, including when they have different character sets (assuming there exist characters merging the two). Args: grid_characters: The character set to draw the curve with. top: Draw topward leg? left: Draw leftward leg? right: Draw rightward leg? bottom: Draw downward leg? crossing_char: Overrides the all-legs-present character. Useful for ascii diagrams, where the + doesn't always look the clearest.
[ "Draws", "lines", "in", "the", "box", "using", "the", "given", "character", "set", "." ]
python
train
senaite/senaite.jsonapi
src/senaite/jsonapi/request.py
https://github.com/senaite/senaite.jsonapi/blob/871959f4b1c9edbb477e9456325527ca78e13ec6/src/senaite/jsonapi/request.py#L67-L77
def is_true(key, default=False): """ Check if the value is in TRUE_VALUES """ value = get(key, default) if isinstance(value, list): value = value[0] if isinstance(value, bool): return value if value is default: return default return value.lower() in TRUE_VALUES
[ "def", "is_true", "(", "key", ",", "default", "=", "False", ")", ":", "value", "=", "get", "(", "key", ",", "default", ")", "if", "isinstance", "(", "value", ",", "list", ")", ":", "value", "=", "value", "[", "0", "]", "if", "isinstance", "(", "v...
Check if the value is in TRUE_VALUES
[ "Check", "if", "the", "value", "is", "in", "TRUE_VALUES" ]
python
train
gwastro/pycbc
pycbc/workflow/segment.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/workflow/segment.py#L1287-L1297
def parse_cat_ini_opt(cat_str): """ Parse a cat str from the ini file into a list of sets """ if cat_str == "": return [] cat_groups = cat_str.split(',') cat_sets = [] for group in cat_groups: group = group.strip() cat_sets += [set(c for c in group)] return cat_sets
[ "def", "parse_cat_ini_opt", "(", "cat_str", ")", ":", "if", "cat_str", "==", "\"\"", ":", "return", "[", "]", "cat_groups", "=", "cat_str", ".", "split", "(", "','", ")", "cat_sets", "=", "[", "]", "for", "group", "in", "cat_groups", ":", "group", "=",...
Parse a cat str from the ini file into a list of sets
[ "Parse", "a", "cat", "str", "from", "the", "ini", "file", "into", "a", "list", "of", "sets" ]
python
train
d11wtq/dockerpty
dockerpty/io.py
https://github.com/d11wtq/dockerpty/blob/f8d17d893c6758b7cc25825e99f6b02202632a97/dockerpty/io.py#L127-L140
def write(self, data): """ Write `data` to the Stream. Not all data may be written right away. Use select to find when the stream is writeable, and call do_write() to flush the internal buffer. """ if not data: return None self.buffer += data self.do_write() return len(data)
[ "def", "write", "(", "self", ",", "data", ")", ":", "if", "not", "data", ":", "return", "None", "self", ".", "buffer", "+=", "data", "self", ".", "do_write", "(", ")", "return", "len", "(", "data", ")" ]
Write `data` to the Stream. Not all data may be written right away. Use select to find when the stream is writeable, and call do_write() to flush the internal buffer.
[ "Write", "data", "to", "the", "Stream", ".", "Not", "all", "data", "may", "be", "written", "right", "away", ".", "Use", "select", "to", "find", "when", "the", "stream", "is", "writeable", "and", "call", "do_write", "()", "to", "flush", "the", "internal",...
python
train
sirfoga/pyhal
hal/internet/parser.py
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/internet/parser.py#L64-L76
def parse(self): """Parses data in table :return: List of list of values in table """ data = [] # add name of section for row in self.soup.find_all("tr"): # cycle through all rows parsed = self._parse_row(row) if parsed: data.append(parsed) return data
[ "def", "parse", "(", "self", ")", ":", "data", "=", "[", "]", "# add name of section", "for", "row", "in", "self", ".", "soup", ".", "find_all", "(", "\"tr\"", ")", ":", "# cycle through all rows", "parsed", "=", "self", ".", "_parse_row", "(", "row", ")...
Parses data in table :return: List of list of values in table
[ "Parses", "data", "in", "table" ]
python
train
angr/angr
angr/analyses/vfg.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/vfg.py#L1789-L1813
def _widening_points(self, function_address): """ Return the ordered widening points for a specific function. :param int function_address: Address of the querying function. :return: A list of sorted merge points (addresses). :rtype: list """ # we are entering a new function. now it's time to figure out how to optimally traverse the control flow # graph by generating the sorted merge points try: new_function = self.kb.functions[function_address] except KeyError: # the function does not exist return [ ] if function_address not in self._function_widening_points: if not new_function.normalized: new_function.normalize() widening_points = CFGUtils.find_widening_points(function_address, new_function.endpoints, new_function.graph) self._function_widening_points[function_address] = widening_points return self._function_widening_points[function_address]
[ "def", "_widening_points", "(", "self", ",", "function_address", ")", ":", "# we are entering a new function. now it's time to figure out how to optimally traverse the control flow", "# graph by generating the sorted merge points", "try", ":", "new_function", "=", "self", ".", "kb", ...
Return the ordered widening points for a specific function. :param int function_address: Address of the querying function. :return: A list of sorted merge points (addresses). :rtype: list
[ "Return", "the", "ordered", "widening", "points", "for", "a", "specific", "function", "." ]
python
train
neithere/argh
argh/assembling.py
https://github.com/neithere/argh/blob/dcd3253f2994400a6a58a700c118c53765bc50a4/argh/assembling.py#L321-L459
def add_commands(parser, functions, namespace=None, namespace_kwargs=None, func_kwargs=None, # deprecated args: title=None, description=None, help=None): """ Adds given functions as commands to given parser. :param parser: an :class:`argparse.ArgumentParser` instance. :param functions: a list of functions. A subparser is created for each of them. If the function is decorated with :func:`~argh.decorators.arg`, the arguments are passed to :class:`argparse.ArgumentParser.add_argument`. See also :func:`~argh.dispatching.dispatch` for requirements concerning function signatures. The command name is inferred from the function name. Note that the underscores in the name are replaced with hyphens, i.e. function name "foo_bar" becomes command name "foo-bar". :param namespace: an optional string representing the group of commands. For example, if a command named "hello" is added without the namespace, it will be available as "prog.py hello"; if the namespace if specified as "greet", then the command will be accessible as "prog.py greet hello". The namespace itself is not callable, so "prog.py greet" will fail and only display a help message. :param func_kwargs: a `dict` of keyword arguments to be passed to each nested ArgumentParser instance created per command (i.e. per function). Members of this dictionary have the highest priority, so a function's docstring is overridden by a `help` in `func_kwargs` (if present). :param namespace_kwargs: a `dict` of keyword arguments to be passed to the nested ArgumentParser instance under given `namespace`. Deprecated params that should be moved into `namespace_kwargs`: :param title: passed to :meth:`argparse.ArgumentParser.add_subparsers` as `title`. .. deprecated:: 0.26.0 Please use `namespace_kwargs` instead. :param description: passed to :meth:`argparse.ArgumentParser.add_subparsers` as `description`. .. deprecated:: 0.26.0 Please use `namespace_kwargs` instead. 
:param help: passed to :meth:`argparse.ArgumentParser.add_subparsers` as `help`. .. deprecated:: 0.26.0 Please use `namespace_kwargs` instead. .. note:: This function modifies the parser object. Generally side effects are bad practice but we don't seem to have any choice as ArgumentParser is pretty opaque. You may prefer :class:`~argh.helpers.ArghParser.add_commands` for a bit more predictable API. .. note:: An attempt to add commands to a parser which already has a default function (e.g. added with :func:`~argh.assembling.set_default_command`) results in `AssemblingError`. """ # FIXME "namespace" is a correct name but it clashes with the "namespace" # that represents arguments (argparse.Namespace and our ArghNamespace). # We should rename the argument here. if DEST_FUNCTION in parser._defaults: _require_support_for_default_command_with_subparsers() namespace_kwargs = namespace_kwargs or {} # FIXME remove this by 1.0 # if title: warnings.warn('argument `title` is deprecated in add_commands(),' ' use `parser_kwargs` instead', DeprecationWarning) namespace_kwargs['description'] = title if help: warnings.warn('argument `help` is deprecated in add_commands(),' ' use `parser_kwargs` instead', DeprecationWarning) namespace_kwargs['help'] = help if description: warnings.warn('argument `description` is deprecated in add_commands(),' ' use `parser_kwargs` instead', DeprecationWarning) namespace_kwargs['description'] = description # # / subparsers_action = get_subparsers(parser, create=True) if namespace: # Make a nested parser and init a deeper _SubParsersAction under it. # Create a named group of commands. It will be listed along with # root-level commands in ``app.py --help``; in that context its `title` # can be used as a short description on the right side of its name. # Normally `title` is shown above the list of commands # in ``app.py my-namespace --help``. 
subsubparser_kw = { 'help': namespace_kwargs.get('title'), } subsubparser = subparsers_action.add_parser(namespace, **subsubparser_kw) subparsers_action = subsubparser.add_subparsers(**namespace_kwargs) else: assert not namespace_kwargs, ('`parser_kwargs` only makes sense ' 'with `namespace`.') for func in functions: cmd_name, func_parser_kwargs = _extract_command_meta_from_func(func) # override any computed kwargs by manually supplied ones if func_kwargs: func_parser_kwargs.update(func_kwargs) # create and set up the parser for this command command_parser = subparsers_action.add_parser(cmd_name, **func_parser_kwargs) set_default_command(command_parser, func)
[ "def", "add_commands", "(", "parser", ",", "functions", ",", "namespace", "=", "None", ",", "namespace_kwargs", "=", "None", ",", "func_kwargs", "=", "None", ",", "# deprecated args:", "title", "=", "None", ",", "description", "=", "None", ",", "help", "=", ...
Adds given functions as commands to given parser. :param parser: an :class:`argparse.ArgumentParser` instance. :param functions: a list of functions. A subparser is created for each of them. If the function is decorated with :func:`~argh.decorators.arg`, the arguments are passed to :class:`argparse.ArgumentParser.add_argument`. See also :func:`~argh.dispatching.dispatch` for requirements concerning function signatures. The command name is inferred from the function name. Note that the underscores in the name are replaced with hyphens, i.e. function name "foo_bar" becomes command name "foo-bar". :param namespace: an optional string representing the group of commands. For example, if a command named "hello" is added without the namespace, it will be available as "prog.py hello"; if the namespace if specified as "greet", then the command will be accessible as "prog.py greet hello". The namespace itself is not callable, so "prog.py greet" will fail and only display a help message. :param func_kwargs: a `dict` of keyword arguments to be passed to each nested ArgumentParser instance created per command (i.e. per function). Members of this dictionary have the highest priority, so a function's docstring is overridden by a `help` in `func_kwargs` (if present). :param namespace_kwargs: a `dict` of keyword arguments to be passed to the nested ArgumentParser instance under given `namespace`. Deprecated params that should be moved into `namespace_kwargs`: :param title: passed to :meth:`argparse.ArgumentParser.add_subparsers` as `title`. .. deprecated:: 0.26.0 Please use `namespace_kwargs` instead. :param description: passed to :meth:`argparse.ArgumentParser.add_subparsers` as `description`. .. deprecated:: 0.26.0 Please use `namespace_kwargs` instead. :param help: passed to :meth:`argparse.ArgumentParser.add_subparsers` as `help`. .. deprecated:: 0.26.0 Please use `namespace_kwargs` instead. .. note:: This function modifies the parser object. 
Generally side effects are bad practice but we don't seem to have any choice as ArgumentParser is pretty opaque. You may prefer :class:`~argh.helpers.ArghParser.add_commands` for a bit more predictable API. .. note:: An attempt to add commands to a parser which already has a default function (e.g. added with :func:`~argh.assembling.set_default_command`) results in `AssemblingError`.
[ "Adds", "given", "functions", "as", "commands", "to", "given", "parser", "." ]
python
test
watson-developer-cloud/python-sdk
ibm_watson/visual_recognition_v3.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/visual_recognition_v3.py#L94-L186
def classify(self, images_file=None, images_filename=None, images_file_content_type=None, url=None, threshold=None, owners=None, classifier_ids=None, accept_language=None, **kwargs): """ Classify images. Classify images with built-in or custom classifiers. :param file images_file: An image file (.gif, .jpg, .png, .tif) or .zip file with images. Maximum image size is 10 MB. Include no more than 20 images and limit the .zip file to 100 MB. Encode the image and .zip file names in UTF-8 if they contain non-ASCII characters. The service assumes UTF-8 encoding if it encounters non-ASCII characters. You can also include an image with the **url** parameter. :param str images_filename: The filename for images_file. :param str images_file_content_type: The content type of images_file. :param str url: The URL of an image (.gif, .jpg, .png, .tif) to analyze. The minimum recommended pixel density is 32X32 pixels, but the service tends to perform better with images that are at least 224 x 224 pixels. The maximum image size is 10 MB. You can also include images with the **images_file** parameter. :param float threshold: The minimum score a class must have to be displayed in the response. Set the threshold to `0.0` to return all identified classes. :param list[str] owners: The categories of classifiers to apply. The **classifier_ids** parameter overrides **owners**, so make sure that **classifier_ids** is empty. - Use `IBM` to classify against the `default` general classifier. You get the same result if both **classifier_ids** and **owners** parameters are empty. - Use `me` to classify against all your custom classifiers. However, for better performance use **classifier_ids** to specify the specific custom classifiers to apply. - Use both `IBM` and `me` to analyze the image against both classifier categories. :param list[str] classifier_ids: Which classifiers to apply. Overrides the **owners** parameter. You can specify both custom and built-in classifier IDs. 
The built-in `default` classifier is used if both **classifier_ids** and **owners** parameters are empty. The following built-in classifier IDs require no training: - `default`: Returns classes from thousands of general tags. - `food`: Enhances specificity and accuracy for images of food items. - `explicit`: Evaluates whether the image might be pornographic. :param str accept_language: The desired language of parts of the response. See the response for details. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """ headers = {'Accept-Language': accept_language} if 'headers' in kwargs: headers.update(kwargs.get('headers')) sdk_headers = get_sdk_headers('watson_vision_combined', 'V3', 'classify') headers.update(sdk_headers) params = {'version': self.version} form_data = {} if images_file: if not images_filename and hasattr(images_file, 'name'): images_filename = basename(images_file.name) if not images_filename: raise ValueError('images_filename must be provided') form_data['images_file'] = (images_filename, images_file, images_file_content_type or 'application/octet-stream') if url: form_data['url'] = (None, url, 'text/plain') if threshold: form_data['threshold'] = (None, threshold, 'application/json') if owners: owners = self._convert_list(owners) form_data['owners'] = (None, owners, 'application/json') if classifier_ids: classifier_ids = self._convert_list(classifier_ids) form_data['classifier_ids'] = (None, classifier_ids, 'application/json') url = '/v3/classify' response = self.request( method='POST', url=url, headers=headers, params=params, files=form_data, accept_json=True) return response
[ "def", "classify", "(", "self", ",", "images_file", "=", "None", ",", "images_filename", "=", "None", ",", "images_file_content_type", "=", "None", ",", "url", "=", "None", ",", "threshold", "=", "None", ",", "owners", "=", "None", ",", "classifier_ids", "...
Classify images. Classify images with built-in or custom classifiers. :param file images_file: An image file (.gif, .jpg, .png, .tif) or .zip file with images. Maximum image size is 10 MB. Include no more than 20 images and limit the .zip file to 100 MB. Encode the image and .zip file names in UTF-8 if they contain non-ASCII characters. The service assumes UTF-8 encoding if it encounters non-ASCII characters. You can also include an image with the **url** parameter. :param str images_filename: The filename for images_file. :param str images_file_content_type: The content type of images_file. :param str url: The URL of an image (.gif, .jpg, .png, .tif) to analyze. The minimum recommended pixel density is 32X32 pixels, but the service tends to perform better with images that are at least 224 x 224 pixels. The maximum image size is 10 MB. You can also include images with the **images_file** parameter. :param float threshold: The minimum score a class must have to be displayed in the response. Set the threshold to `0.0` to return all identified classes. :param list[str] owners: The categories of classifiers to apply. The **classifier_ids** parameter overrides **owners**, so make sure that **classifier_ids** is empty. - Use `IBM` to classify against the `default` general classifier. You get the same result if both **classifier_ids** and **owners** parameters are empty. - Use `me` to classify against all your custom classifiers. However, for better performance use **classifier_ids** to specify the specific custom classifiers to apply. - Use both `IBM` and `me` to analyze the image against both classifier categories. :param list[str] classifier_ids: Which classifiers to apply. Overrides the **owners** parameter. You can specify both custom and built-in classifier IDs. The built-in `default` classifier is used if both **classifier_ids** and **owners** parameters are empty. 
The following built-in classifier IDs require no training: - `default`: Returns classes from thousands of general tags. - `food`: Enhances specificity and accuracy for images of food items. - `explicit`: Evaluates whether the image might be pornographic. :param str accept_language: The desired language of parts of the response. See the response for details. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse
[ "Classify", "images", "." ]
python
train
bcbio/bcbio-nextgen
scripts/utils/upload_to_synapse.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/upload_to_synapse.py#L38-L48
def _accumulate_remotes(synapse_parent_id, syn): """Retrieve references to all remote directories and files. """ remotes = {} s_base_folder = syn.get(synapse_parent_id) for (s_dirpath, s_dirpath_id), _, s_filenames in synapseutils.walk(syn, synapse_parent_id): remotes[s_dirpath] = s_dirpath_id if s_filenames: for s_filename, s_filename_id in s_filenames: remotes[os.path.join(s_dirpath, s_filename)] = s_filename_id return s_base_folder, remotes
[ "def", "_accumulate_remotes", "(", "synapse_parent_id", ",", "syn", ")", ":", "remotes", "=", "{", "}", "s_base_folder", "=", "syn", ".", "get", "(", "synapse_parent_id", ")", "for", "(", "s_dirpath", ",", "s_dirpath_id", ")", ",", "_", ",", "s_filenames", ...
Retrieve references to all remote directories and files.
[ "Retrieve", "references", "to", "all", "remote", "directories", "and", "files", "." ]
python
train
aspiers/git-deps
git_deps/detector.py
https://github.com/aspiers/git-deps/blob/a00380b8bf1451d8c3405dace8d5927428506eb0/git_deps/detector.py#L134-L147
def find_dependencies_with_parent(self, dependent, parent): """Find all dependencies of the given revision caused by the given parent commit. This will be called multiple times for merge commits which have multiple parents. """ self.logger.info(" Finding dependencies of %s via parent %s" % (dependent.hex[:8], parent.hex[:8])) diff = self.repo.diff(parent, dependent, context_lines=self.options.context_lines) for patch in diff: path = patch.delta.old_file.path self.logger.info(" Examining hunks in %s" % path) for hunk in patch.hunks: self.blame_diff_hunk(dependent, parent, path, hunk)
[ "def", "find_dependencies_with_parent", "(", "self", ",", "dependent", ",", "parent", ")", ":", "self", ".", "logger", ".", "info", "(", "\" Finding dependencies of %s via parent %s\"", "%", "(", "dependent", ".", "hex", "[", ":", "8", "]", ",", "parent", "...
Find all dependencies of the given revision caused by the given parent commit. This will be called multiple times for merge commits which have multiple parents.
[ "Find", "all", "dependencies", "of", "the", "given", "revision", "caused", "by", "the", "given", "parent", "commit", ".", "This", "will", "be", "called", "multiple", "times", "for", "merge", "commits", "which", "have", "multiple", "parents", "." ]
python
train
opendatateam/udata
udata/harvest/commands.py
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/commands.py#L55-L59
def delete(identifier): '''Delete a harvest source''' log.info('Deleting source "%s"', identifier) actions.delete_source(identifier) log.info('Deleted source "%s"', identifier)
[ "def", "delete", "(", "identifier", ")", ":", "log", ".", "info", "(", "'Deleting source \"%s\"'", ",", "identifier", ")", "actions", ".", "delete_source", "(", "identifier", ")", "log", ".", "info", "(", "'Deleted source \"%s\"'", ",", "identifier", ")" ]
Delete a harvest source
[ "Delete", "a", "harvest", "source" ]
python
train
SmokinCaterpillar/pypet
pypet/utils/decorators.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/utils/decorators.py#L75-L102
def copydoc(fromfunc, sep="\n"): """Decorator: Copy the docstring of `fromfunc` If the doc contains a line with the keyword `ABSTRACT`, like `ABSTRACT: Needs to be defined in subclass`, this line and the line after are removed. """ def _decorator(func): sourcedoc = fromfunc.__doc__ # Remove the ABSTRACT line: split_doc = sourcedoc.split('\n') split_doc_no_abstract = [line for line in split_doc if not 'ABSTRACT' in line] # If the length is different we have found an ABSTRACT line # Finally we want to remove the final blank line, otherwise # we would have three blank lines at the end if len(split_doc) != len(split_doc_no_abstract): sourcedoc = '\n'.join(split_doc_no_abstract[:-1]) if func.__doc__ is None: func.__doc__ = sourcedoc else: func.__doc__ = sep.join([sourcedoc, func.__doc__]) return func return _decorator
[ "def", "copydoc", "(", "fromfunc", ",", "sep", "=", "\"\\n\"", ")", ":", "def", "_decorator", "(", "func", ")", ":", "sourcedoc", "=", "fromfunc", ".", "__doc__", "# Remove the ABSTRACT line:", "split_doc", "=", "sourcedoc", ".", "split", "(", "'\\n'", ")", ...
Decorator: Copy the docstring of `fromfunc` If the doc contains a line with the keyword `ABSTRACT`, like `ABSTRACT: Needs to be defined in subclass`, this line and the line after are removed.
[ "Decorator", ":", "Copy", "the", "docstring", "of", "fromfunc" ]
python
test
jeffh/rpi_courses
rpi_courses/scheduler.py
https://github.com/jeffh/rpi_courses/blob/c97176f73f866f112c785910ebf3ff8a790e8e9a/rpi_courses/scheduler.py#L82-L95
def exclude_times(self, *tuples): """Adds multiple excluded times by tuple of (start, end, days) or by TimeRange instance. ``start`` and ``end`` are in military integer times (e.g. - 1200 1430). ``days`` is a collection of integers or strings of fully-spelt, lowercased days of the week. """ for item in tuples: if isinstance(item, TimeRange): self._excluded_times.append(item) else: self.exclude_time(*item) return self
[ "def", "exclude_times", "(", "self", ",", "*", "tuples", ")", ":", "for", "item", "in", "tuples", ":", "if", "isinstance", "(", "item", ",", "TimeRange", ")", ":", "self", ".", "_excluded_times", ".", "append", "(", "item", ")", "else", ":", "self", ...
Adds multiple excluded times by tuple of (start, end, days) or by TimeRange instance. ``start`` and ``end`` are in military integer times (e.g. - 1200 1430). ``days`` is a collection of integers or strings of fully-spelt, lowercased days of the week.
[ "Adds", "multiple", "excluded", "times", "by", "tuple", "of", "(", "start", "end", "days", ")", "or", "by", "TimeRange", "instance", "." ]
python
train
Xion/taipan
taipan/collections/lists.py
https://github.com/Xion/taipan/blob/f333f0287c8bd0915182c7d5308e5f05ef0cca78/taipan/collections/lists.py#L102-L122
def index(*args, **kwargs): """Search a list for an exact element, or element satisfying a predicate. Usage:: index(element, list_) index(of=element, in_=list_) index(where=predicate, in_=list_) :param element, of: Element to search for (by equality comparison) :param where: Predicate defining an element to search for. This should be a callable taking a single argument and returning a boolean result. :param list_, in_: List to search in :return: Index of first matching element, or -1 if none was found .. versionadded:: 0.0.3 """ _, idx = _index(*args, start=0, step=1, **kwargs) return idx
[ "def", "index", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "_", ",", "idx", "=", "_index", "(", "*", "args", ",", "start", "=", "0", ",", "step", "=", "1", ",", "*", "*", "kwargs", ")", "return", "idx" ]
Search a list for an exact element, or element satisfying a predicate. Usage:: index(element, list_) index(of=element, in_=list_) index(where=predicate, in_=list_) :param element, of: Element to search for (by equality comparison) :param where: Predicate defining an element to search for. This should be a callable taking a single argument and returning a boolean result. :param list_, in_: List to search in :return: Index of first matching element, or -1 if none was found .. versionadded:: 0.0.3
[ "Search", "a", "list", "for", "an", "exact", "element", "or", "element", "satisfying", "a", "predicate", "." ]
python
train
gitenberg-dev/gitberg
gitenberg/util/tenprintcover.py
https://github.com/gitenberg-dev/gitberg/blob/3f6db8b5a22ccdd2110d3199223c30db4e558b5c/gitenberg/util/tenprintcover.py#L110-L117
def rect(self, x, y, width, height, color): """ See the Processing function rect(): https://processing.org/reference/rect_.html """ self.context.set_source_rgb(*color) self.context.rectangle(self.tx(x), self.ty(y), self.tx(width), self.ty(height)) self.context.fill()
[ "def", "rect", "(", "self", ",", "x", ",", "y", ",", "width", ",", "height", ",", "color", ")", ":", "self", ".", "context", ".", "set_source_rgb", "(", "*", "color", ")", "self", ".", "context", ".", "rectangle", "(", "self", ".", "tx", "(", "x"...
See the Processing function rect(): https://processing.org/reference/rect_.html
[ "See", "the", "Processing", "function", "rect", "()", ":", "https", ":", "//", "processing", ".", "org", "/", "reference", "/", "rect_", ".", "html" ]
python
train
kervi/kervi-devices
kervi/devices/sensors/BMP085.py
https://github.com/kervi/kervi-devices/blob/c6aaddc6da1d0bce0ea2b0c6eb8393ba10aefa56/kervi/devices/sensors/BMP085.py#L130-L141
def read_temperature(self): """Gets the compensated temperature in degrees celsius.""" UT = self.read_raw_temp() # Datasheet value for debugging: #UT = 27898 # Calculations below are taken straight from section 3.5 of the datasheet. X1 = ((UT - self.cal_AC6) * self.cal_AC5) >> 15 X2 = (self.cal_MC << 11) // (X1 + self.cal_MD) B5 = X1 + X2 temp = ((B5 + 8) >> 4) / 10.0 self.logger.debug('Calibrated temperature {0} C', temp) return temp
[ "def", "read_temperature", "(", "self", ")", ":", "UT", "=", "self", ".", "read_raw_temp", "(", ")", "# Datasheet value for debugging:", "#UT = 27898", "# Calculations below are taken straight from section 3.5 of the datasheet.", "X1", "=", "(", "(", "UT", "-", "self", ...
Gets the compensated temperature in degrees celsius.
[ "Gets", "the", "compensated", "temperature", "in", "degrees", "celsius", "." ]
python
train
pypyr/pypyr-cli
pypyr/log/logger.py
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/log/logger.py#L22-L39
def set_root_logger(root_log_level, log_path=None): """Set the root logger 'pypyr'. Do this before you do anything else. Run once and only once at initialization. """ handlers = [] console_handler = logging.StreamHandler() handlers.append(console_handler) if log_path: file_handler = logging.FileHandler(log_path) handlers.append(file_handler) set_logging_config(root_log_level, handlers=handlers) root_logger = logging.getLogger("pypyr") root_logger.debug( f"Root logger {root_logger.name} configured with level " f"{root_log_level}")
[ "def", "set_root_logger", "(", "root_log_level", ",", "log_path", "=", "None", ")", ":", "handlers", "=", "[", "]", "console_handler", "=", "logging", ".", "StreamHandler", "(", ")", "handlers", ".", "append", "(", "console_handler", ")", "if", "log_path", "...
Set the root logger 'pypyr'. Do this before you do anything else. Run once and only once at initialization.
[ "Set", "the", "root", "logger", "pypyr", ".", "Do", "this", "before", "you", "do", "anything", "else", "." ]
python
train
apache/incubator-mxnet
example/gluon/lipnet/BeamSearch.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/lipnet/BeamSearch.py#L76-L81
def addBeam(beamState, labeling): """ add beam if it does not yet exist """ if labeling not in beamState.entries: beamState.entries[labeling] = BeamEntry()
[ "def", "addBeam", "(", "beamState", ",", "labeling", ")", ":", "if", "labeling", "not", "in", "beamState", ".", "entries", ":", "beamState", ".", "entries", "[", "labeling", "]", "=", "BeamEntry", "(", ")" ]
add beam if it does not yet exist
[ "add", "beam", "if", "it", "does", "not", "yet", "exist" ]
python
train
google/dotty
efilter/dispatch.py
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/dispatch.py#L51-L68
def call_audit(func): """Print a detailed audit of all calls to this function.""" def audited_func(*args, **kwargs): import traceback stack = traceback.extract_stack() r = func(*args, **kwargs) func_name = func.__name__ print("@depth %d, trace %s -> %s(*%r, **%r) => %r" % ( len(stack), " -> ".join("%s:%d:%s" % x[0:3] for x in stack[-5:-2]), func_name, args, kwargs, r)) return r return audited_func
[ "def", "call_audit", "(", "func", ")", ":", "def", "audited_func", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "import", "traceback", "stack", "=", "traceback", ".", "extract_stack", "(", ")", "r", "=", "func", "(", "*", "args", ",", "*", ...
Print a detailed audit of all calls to this function.
[ "Print", "a", "detailed", "audit", "of", "all", "calls", "to", "this", "function", "." ]
python
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/table.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/table.py#L263-L279
def from_api_repr(cls, resource): """Factory: construct a table reference given its API representation Args: resource (Dict[str, object]): Table reference representation returned from the API Returns: google.cloud.bigquery.table.TableReference: Table reference parsed from ``resource``. """ from google.cloud.bigquery.dataset import DatasetReference project = resource["projectId"] dataset_id = resource["datasetId"] table_id = resource["tableId"] return cls(DatasetReference(project, dataset_id), table_id)
[ "def", "from_api_repr", "(", "cls", ",", "resource", ")", ":", "from", "google", ".", "cloud", ".", "bigquery", ".", "dataset", "import", "DatasetReference", "project", "=", "resource", "[", "\"projectId\"", "]", "dataset_id", "=", "resource", "[", "\"datasetI...
Factory: construct a table reference given its API representation Args: resource (Dict[str, object]): Table reference representation returned from the API Returns: google.cloud.bigquery.table.TableReference: Table reference parsed from ``resource``.
[ "Factory", ":", "construct", "a", "table", "reference", "given", "its", "API", "representation" ]
python
train
nvbn/thefuck
thefuck/utils.py
https://github.com/nvbn/thefuck/blob/40ab4eb62db57627bff10cf029d29c94704086a2/thefuck/utils.py#L73-L87
def default_settings(params): """Adds default values to settings if it not presented. Usage: @default_settings({'apt': '/usr/bin/apt'}) def match(command): print(settings.apt) """ def _default_settings(fn, command): for k, w in params.items(): settings.setdefault(k, w) return fn(command) return decorator(_default_settings)
[ "def", "default_settings", "(", "params", ")", ":", "def", "_default_settings", "(", "fn", ",", "command", ")", ":", "for", "k", ",", "w", "in", "params", ".", "items", "(", ")", ":", "settings", ".", "setdefault", "(", "k", ",", "w", ")", "return", ...
Adds default values to settings if it not presented. Usage: @default_settings({'apt': '/usr/bin/apt'}) def match(command): print(settings.apt)
[ "Adds", "default", "values", "to", "settings", "if", "it", "not", "presented", "." ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/distribute-0.6.31-py2.7.egg/pkg_resources.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/distribute-0.6.31-py2.7.egg/pkg_resources.py#L2056-L2085
def parse(cls, src, dist=None): """Parse a single entry point from string `src` Entry point syntax follows the form:: name = some.module:some.attr [extra1,extra2] The entry name and module name are required, but the ``:attrs`` and ``[extras]`` parts are optional """ try: attrs = extras = () name,value = src.split('=',1) if '[' in value: value,extras = value.split('[',1) req = Requirement.parse("x["+extras) if req.specs: raise ValueError extras = req.extras if ':' in value: value,attrs = value.split(':',1) if not MODULE(attrs.rstrip()): raise ValueError attrs = attrs.rstrip().split('.') except ValueError: raise ValueError( "EntryPoint must be in 'name=module:attrs [extras]' format", src ) else: return cls(name.strip(), value.strip(), attrs, extras, dist)
[ "def", "parse", "(", "cls", ",", "src", ",", "dist", "=", "None", ")", ":", "try", ":", "attrs", "=", "extras", "=", "(", ")", "name", ",", "value", "=", "src", ".", "split", "(", "'='", ",", "1", ")", "if", "'['", "in", "value", ":", "value"...
Parse a single entry point from string `src` Entry point syntax follows the form:: name = some.module:some.attr [extra1,extra2] The entry name and module name are required, but the ``:attrs`` and ``[extras]`` parts are optional
[ "Parse", "a", "single", "entry", "point", "from", "string", "src" ]
python
test
juju/charm-helpers
charmhelpers/core/hookenv.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/core/hookenv.py#L1018-L1035
def status_get(): """Retrieve the previously set juju workload state and message If the status-get command is not found then assume this is juju < 1.23 and return 'unknown', "" """ cmd = ['status-get', "--format=json", "--include-data"] try: raw_status = subprocess.check_output(cmd) except OSError as e: if e.errno == errno.ENOENT: return ('unknown', "") else: raise else: status = json.loads(raw_status.decode("UTF-8")) return (status["status"], status["message"])
[ "def", "status_get", "(", ")", ":", "cmd", "=", "[", "'status-get'", ",", "\"--format=json\"", ",", "\"--include-data\"", "]", "try", ":", "raw_status", "=", "subprocess", ".", "check_output", "(", "cmd", ")", "except", "OSError", "as", "e", ":", "if", "e"...
Retrieve the previously set juju workload state and message If the status-get command is not found then assume this is juju < 1.23 and return 'unknown', ""
[ "Retrieve", "the", "previously", "set", "juju", "workload", "state", "and", "message" ]
python
train
mdgoldberg/sportsref
sportsref/nba/pbp.py
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nba/pbp.py#L415-L434
def clean_multigame_features(df): """TODO: Docstring for clean_multigame_features. :df: TODO :returns: TODO """ df = pd.DataFrame(df) if df.index.value_counts().max() > 1: df.reset_index(drop=True, inplace=True) df = clean_features(df) # if it's many games in one DataFrame, make poss_id and play_id unique for col in ('play_id', 'poss_id'): diffs = df[col].diff().fillna(0) if (diffs < 0).any(): new_col = np.cumsum(diffs.astype(bool)) df.eval('{} = @new_col'.format(col), inplace=True) return df
[ "def", "clean_multigame_features", "(", "df", ")", ":", "df", "=", "pd", ".", "DataFrame", "(", "df", ")", "if", "df", ".", "index", ".", "value_counts", "(", ")", ".", "max", "(", ")", ">", "1", ":", "df", ".", "reset_index", "(", "drop", "=", "...
TODO: Docstring for clean_multigame_features. :df: TODO :returns: TODO
[ "TODO", ":", "Docstring", "for", "clean_multigame_features", "." ]
python
test
OpenKMIP/PyKMIP
kmip/pie/client.py
https://github.com/OpenKMIP/PyKMIP/blob/b51c5b044bd05f8c85a1d65d13a583a4d8fc1b0e/kmip/pie/client.py#L777-L844
def get(self, uid=None, key_wrapping_specification=None): """ Get a managed object from a KMIP appliance. Args: uid (string): The unique ID of the managed object to retrieve. key_wrapping_specification (dict): A dictionary containing various settings to be used when wrapping the key during retrieval. See Note below. Optional, defaults to None. Returns: ManagedObject: The retrieved managed object object. Raises: ClientConnectionNotOpen: if the client connection is unusable KmipOperationFailure: if the operation result is a failure TypeError: if the input argument is invalid Notes: The derivation_parameters argument is a dictionary that can contain the following key/value pairs: Key | Value --------------------------------|--------------------------------- 'wrapping_method' | A WrappingMethod enumeration | that specifies how the object | should be wrapped. 'encryption_key_information' | A dictionary containing the ID | of the wrapping key and | associated cryptographic | parameters. 'mac_signature_key_information' | A dictionary containing the ID | of the wrapping key and | associated cryptographic | parameters. 'attribute_names' | A list of strings representing | the names of attributes that | should be included with the | wrapped object. 'encoding_option' | An EncodingOption enumeration | that specifies the encoding of | the object before it is wrapped. """ # Check input if uid is not None: if not isinstance(uid, six.string_types): raise TypeError("uid must be a string") if key_wrapping_specification is not None: if not isinstance(key_wrapping_specification, dict): raise TypeError( "Key wrapping specification must be a dictionary." 
) spec = self._build_key_wrapping_specification( key_wrapping_specification ) # Get the managed object and handle the results result = self.proxy.get(uid, key_wrapping_specification=spec) status = result.result_status.value if status == enums.ResultStatus.SUCCESS: managed_object = self.object_factory.convert(result.secret) return managed_object else: reason = result.result_reason.value message = result.result_message.value raise exceptions.KmipOperationFailure(status, reason, message)
[ "def", "get", "(", "self", ",", "uid", "=", "None", ",", "key_wrapping_specification", "=", "None", ")", ":", "# Check input", "if", "uid", "is", "not", "None", ":", "if", "not", "isinstance", "(", "uid", ",", "six", ".", "string_types", ")", ":", "rai...
Get a managed object from a KMIP appliance. Args: uid (string): The unique ID of the managed object to retrieve. key_wrapping_specification (dict): A dictionary containing various settings to be used when wrapping the key during retrieval. See Note below. Optional, defaults to None. Returns: ManagedObject: The retrieved managed object object. Raises: ClientConnectionNotOpen: if the client connection is unusable KmipOperationFailure: if the operation result is a failure TypeError: if the input argument is invalid Notes: The derivation_parameters argument is a dictionary that can contain the following key/value pairs: Key | Value --------------------------------|--------------------------------- 'wrapping_method' | A WrappingMethod enumeration | that specifies how the object | should be wrapped. 'encryption_key_information' | A dictionary containing the ID | of the wrapping key and | associated cryptographic | parameters. 'mac_signature_key_information' | A dictionary containing the ID | of the wrapping key and | associated cryptographic | parameters. 'attribute_names' | A list of strings representing | the names of attributes that | should be included with the | wrapped object. 'encoding_option' | An EncodingOption enumeration | that specifies the encoding of | the object before it is wrapped.
[ "Get", "a", "managed", "object", "from", "a", "KMIP", "appliance", "." ]
python
test
pysathq/pysat
examples/hitman.py
https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/examples/hitman.py#L296-L315
def get(self): """ This method computes and returns a hitting set. The hitting set is obtained using the underlying oracle operating the MaxSAT problem formulation. The computed solution is mapped back to objects of the problem domain. :rtype: list(obj) """ model = self.oracle.compute() if model: if self.htype == 'rc2': # extracting a hitting set self.hset = filter(lambda v: v > 0, model) else: self.hset = model return list(map(lambda vid: self.idpool.id2obj[vid], self.hset))
[ "def", "get", "(", "self", ")", ":", "model", "=", "self", ".", "oracle", ".", "compute", "(", ")", "if", "model", ":", "if", "self", ".", "htype", "==", "'rc2'", ":", "# extracting a hitting set", "self", ".", "hset", "=", "filter", "(", "lambda", "...
This method computes and returns a hitting set. The hitting set is obtained using the underlying oracle operating the MaxSAT problem formulation. The computed solution is mapped back to objects of the problem domain. :rtype: list(obj)
[ "This", "method", "computes", "and", "returns", "a", "hitting", "set", ".", "The", "hitting", "set", "is", "obtained", "using", "the", "underlying", "oracle", "operating", "the", "MaxSAT", "problem", "formulation", ".", "The", "computed", "solution", "is", "ma...
python
train
saltstack/salt
salt/returners/couchdb_return.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/couchdb_return.py#L215-L269
def returner(ret): ''' Take in the return and shove it into the couchdb database. ''' options = _get_options(ret) # Check to see if the database exists. _response = _request("GET", options['url'] + "_all_dbs", user=options['user'], passwd=options['passwd']) if options['db'] not in _response: # Make a PUT request to create the database. _response = _request("PUT", options['url'] + options['db'], user=options['user'], passwd=options['passwd']) # Confirm that the response back was simple 'ok': true. if 'ok' not in _response or _response['ok'] is not True: log.error('Nothing logged! Lost data. Unable to create database "%s"', options['db']) log.debug('_response object is: %s', _response) return log.info('Created database "%s"', options['db']) if boltons_lib: # redact all passwords if options['redact_pws'] is True if options['redact_pws']: ret_remap_pws = remap(ret, visit=_redact_passwords) else: ret_remap_pws = ret # remove all return values starting with '__pub' if options['minimum_return'] is True if options['minimum_return']: ret_remapped = remap(ret_remap_pws, visit=_minimize_return) else: ret_remapped = ret_remap_pws else: log.info('boltons library not installed. pip install boltons. https://github.com/mahmoud/boltons.') ret_remapped = ret # Call _generate_doc to get a dict object of the document we're going to shove into the database. doc = _generate_doc(ret_remapped) # Make the actual HTTP PUT request to create the doc. _response = _request("PUT", options['url'] + options['db'] + "/" + doc['_id'], 'application/json', salt.utils.json.dumps(doc)) # Sanity check regarding the response.. if 'ok' not in _response or _response['ok'] is not True: log.error('Nothing logged! Lost data. Unable to create document: "%s"', _response)
[ "def", "returner", "(", "ret", ")", ":", "options", "=", "_get_options", "(", "ret", ")", "# Check to see if the database exists.", "_response", "=", "_request", "(", "\"GET\"", ",", "options", "[", "'url'", "]", "+", "\"_all_dbs\"", ",", "user", "=", "options...
Take in the return and shove it into the couchdb database.
[ "Take", "in", "the", "return", "and", "shove", "it", "into", "the", "couchdb", "database", "." ]
python
train
Qiskit/qiskit-terra
qiskit/visualization/utils.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/visualization/utils.py#L40-L48
def _trim(image): """Trim a PIL image and remove white space.""" background = PIL.Image.new(image.mode, image.size, image.getpixel((0, 0))) diff = PIL.ImageChops.difference(image, background) diff = PIL.ImageChops.add(diff, diff, 2.0, -100) bbox = diff.getbbox() if bbox: image = image.crop(bbox) return image
[ "def", "_trim", "(", "image", ")", ":", "background", "=", "PIL", ".", "Image", ".", "new", "(", "image", ".", "mode", ",", "image", ".", "size", ",", "image", ".", "getpixel", "(", "(", "0", ",", "0", ")", ")", ")", "diff", "=", "PIL", ".", ...
Trim a PIL image and remove white space.
[ "Trim", "a", "PIL", "image", "and", "remove", "white", "space", "." ]
python
test
nuagenetworks/monolithe
monolithe/specifications/specification.py
https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/specifications/specification.py#L101-L131
def to_dict(self): """ Transform the current specification to a dictionary """ data = {"model": {}} data["model"]["description"] = self.description data["model"]["entity_name"] = self.entity_name data["model"]["package"] = self.package data["model"]["resource_name"] = self.resource_name data["model"]["rest_name"] = self.rest_name data["model"]["extends"] = self.extends data["model"]["get"] = self.allows_get data["model"]["update"] = self.allows_update data["model"]["create"] = self.allows_create data["model"]["delete"] = self.allows_delete data["model"]["root"] = self.is_root data["model"]["userlabel"] = self.userlabel data["model"]["template"] = self.template data["model"]["allowed_job_commands"] = self.allowed_job_commands data["attributes"] = [] for attribute in self.attributes: data["attributes"].append(attribute.to_dict()) data["children"] = [] for api in self.child_apis: data["children"].append(api.to_dict()) return data
[ "def", "to_dict", "(", "self", ")", ":", "data", "=", "{", "\"model\"", ":", "{", "}", "}", "data", "[", "\"model\"", "]", "[", "\"description\"", "]", "=", "self", ".", "description", "data", "[", "\"model\"", "]", "[", "\"entity_name\"", "]", "=", ...
Transform the current specification to a dictionary
[ "Transform", "the", "current", "specification", "to", "a", "dictionary" ]
python
train
saltstack/salt
salt/pillar/hg_pillar.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/hg_pillar.py#L69-L84
def ext_pillar(minion_id, pillar, repo, branch='default', root=None): ''' Extract pillar from an hg repository ''' with Repo(repo) as repo: repo.update(branch) envname = 'base' if branch == 'default' else branch if root: path = os.path.normpath(os.path.join(repo.working_dir, root)) else: path = repo.working_dir opts = copy.deepcopy(__opts__) opts['pillar_roots'][envname] = [path] pil = salt.pillar.Pillar(opts, __grains__, minion_id, envname) return pil.compile_pillar(ext=False)
[ "def", "ext_pillar", "(", "minion_id", ",", "pillar", ",", "repo", ",", "branch", "=", "'default'", ",", "root", "=", "None", ")", ":", "with", "Repo", "(", "repo", ")", "as", "repo", ":", "repo", ".", "update", "(", "branch", ")", "envname", "=", ...
Extract pillar from an hg repository
[ "Extract", "pillar", "from", "an", "hg", "repository" ]
python
train
cloudsight/cloudsight-python
cloudsight/api.py
https://github.com/cloudsight/cloudsight-python/blob/f9bb43dfd468d5f5d50cc89bfcfb12d5c4abdb1e/cloudsight/api.py#L92-L114
def image_request(self, image, filename, params=None): """ Send an image for classification. The image is a file-like object. The params parameter is optional. On success this method will immediately return a job information. Its status will initially be :py:data:`cloudsight.STATUS_NOT_COMPLETED` as it usually takes 6-12 seconds for the server to process an image. In order to retrieve the annotation data, you need to keep updating the job status using the :py:meth:`cloudsight.API.image_response` method until the status changes. You may also use the :py:meth:`cloudsight.API.wait` method which does this automatically. :param image: File-like object containing the image data. :param filename: The file name. :param params: Additional parameters for CloudSight API. """ data = self._init_data(params) response = requests.post(REQUESTS_URL, headers={ 'Authorization': self.auth.authorize('POST', REQUESTS_URL, params), 'User-Agent': USER_AGENT, }, data=data, files={'image_request[image]': (filename, image)}) return self._unwrap_error(response)
[ "def", "image_request", "(", "self", ",", "image", ",", "filename", ",", "params", "=", "None", ")", ":", "data", "=", "self", ".", "_init_data", "(", "params", ")", "response", "=", "requests", ".", "post", "(", "REQUESTS_URL", ",", "headers", "=", "{...
Send an image for classification. The image is a file-like object. The params parameter is optional. On success this method will immediately return a job information. Its status will initially be :py:data:`cloudsight.STATUS_NOT_COMPLETED` as it usually takes 6-12 seconds for the server to process an image. In order to retrieve the annotation data, you need to keep updating the job status using the :py:meth:`cloudsight.API.image_response` method until the status changes. You may also use the :py:meth:`cloudsight.API.wait` method which does this automatically. :param image: File-like object containing the image data. :param filename: The file name. :param params: Additional parameters for CloudSight API.
[ "Send", "an", "image", "for", "classification", ".", "The", "image", "is", "a", "file", "-", "like", "object", ".", "The", "params", "parameter", "is", "optional", ".", "On", "success", "this", "method", "will", "immediately", "return", "a", "job", "inform...
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/distribute-0.6.31-py2.7.egg/setuptools/command/easy_install.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/distribute-0.6.31-py2.7.egg/setuptools/command/easy_install.py#L738-L763
def install_script(self, dist, script_name, script_text, dev_path=None): """Generate a legacy script wrapper and install it""" spec = str(dist.as_requirement()) is_script = is_python_script(script_text, script_name) def get_template(filename): """ There are a couple of template scripts in the package. This function loads one of them and prepares it for use. These templates use triple-quotes to escape variable substitutions so the scripts get the 2to3 treatment when build on Python 3. The templates cannot use triple-quotes naturally. """ raw_bytes = resource_string('setuptools', template_name) template_str = raw_bytes.decode('utf-8') clean_template = template_str.replace('"""', '') return clean_template if is_script: template_name = 'script template.py' if dev_path: template_name = template_name.replace('.py', ' (dev).py') script_text = (get_script_header(script_text) + get_template(template_name) % locals()) self.write_script(script_name, _to_ascii(script_text), 'b')
[ "def", "install_script", "(", "self", ",", "dist", ",", "script_name", ",", "script_text", ",", "dev_path", "=", "None", ")", ":", "spec", "=", "str", "(", "dist", ".", "as_requirement", "(", ")", ")", "is_script", "=", "is_python_script", "(", "script_tex...
Generate a legacy script wrapper and install it
[ "Generate", "a", "legacy", "script", "wrapper", "and", "install", "it" ]
python
test
opendatateam/udata
udata/utils.py
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/utils.py#L30-L35
def multi_to_dict(multi): '''Transform a Werkzeug multidictionnary into a flat dictionnary''' return dict( (key, value[0] if len(value) == 1 else value) for key, value in multi.to_dict(False).items() )
[ "def", "multi_to_dict", "(", "multi", ")", ":", "return", "dict", "(", "(", "key", ",", "value", "[", "0", "]", "if", "len", "(", "value", ")", "==", "1", "else", "value", ")", "for", "key", ",", "value", "in", "multi", ".", "to_dict", "(", "Fals...
Transform a Werkzeug multidictionnary into a flat dictionnary
[ "Transform", "a", "Werkzeug", "multidictionnary", "into", "a", "flat", "dictionnary" ]
python
train
dropbox/stone
ez_setup.py
https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/ez_setup.py#L321-L329
def main(): """Install or upgrade setuptools and EasyInstall""" options = _parse_args() archive = download_setuptools( version=options.version, download_base=options.download_base, downloader_factory=options.downloader_factory, ) return _install(archive, _build_install_args(options))
[ "def", "main", "(", ")", ":", "options", "=", "_parse_args", "(", ")", "archive", "=", "download_setuptools", "(", "version", "=", "options", ".", "version", ",", "download_base", "=", "options", ".", "download_base", ",", "downloader_factory", "=", "options",...
Install or upgrade setuptools and EasyInstall
[ "Install", "or", "upgrade", "setuptools", "and", "EasyInstall" ]
python
train
thiezn/iperf3-python
iperf3/iperf3.py
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L311-L326
def json_output(self): """Toggles json output of libiperf Turning this off will output the iperf3 instance results to stdout/stderr :rtype: bool """ enabled = self.lib.iperf_get_test_json_output(self._test) if enabled: self._json_output = True else: self._json_output = False return self._json_output
[ "def", "json_output", "(", "self", ")", ":", "enabled", "=", "self", ".", "lib", ".", "iperf_get_test_json_output", "(", "self", ".", "_test", ")", "if", "enabled", ":", "self", ".", "_json_output", "=", "True", "else", ":", "self", ".", "_json_output", ...
Toggles json output of libiperf Turning this off will output the iperf3 instance results to stdout/stderr :rtype: bool
[ "Toggles", "json", "output", "of", "libiperf" ]
python
train
ilgarm/pyzimbra
pyzimbra/z/client.py
https://github.com/ilgarm/pyzimbra/blob/c397bc7497554d260ec6fd1a80405aed872a70cc/pyzimbra/z/client.py#L72-L90
def get_account_info(self): """ Gets account info. @return: AccountInfo """ attrs = {sconstant.A_BY: sconstant.V_NAME} account = SOAPpy.Types.stringType(data=self.auth_token.account_name, attrs=attrs) params = {sconstant.E_ACCOUNT: account} res = self.invoke(zconstant.NS_ZIMBRA_ACC_URL, sconstant.GetAccountInfoRequest, params) info = AccountInfo() info.parse(res) return info
[ "def", "get_account_info", "(", "self", ")", ":", "attrs", "=", "{", "sconstant", ".", "A_BY", ":", "sconstant", ".", "V_NAME", "}", "account", "=", "SOAPpy", ".", "Types", ".", "stringType", "(", "data", "=", "self", ".", "auth_token", ".", "account_nam...
Gets account info. @return: AccountInfo
[ "Gets", "account", "info", "." ]
python
train
brainiak/brainiak
brainiak/image.py
https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/image.py#L107-L137
def mask_image(image: SpatialImage, mask: np.ndarray, data_type: type = None ) -> np.ndarray: """Mask image after optionally casting its type. Parameters ---------- image Image to mask. Can include time as the last dimension. mask Mask to apply. Must have the same shape as the image data. data_type Type to cast image to. Returns ------- np.ndarray Masked image. Raises ------ ValueError Image data and masks have different shapes. """ image_data = image.get_data() if image_data.shape[:3] != mask.shape: raise ValueError("Image data and mask have different shapes.") if data_type is not None: cast_data = image_data.astype(data_type) else: cast_data = image_data return cast_data[mask]
[ "def", "mask_image", "(", "image", ":", "SpatialImage", ",", "mask", ":", "np", ".", "ndarray", ",", "data_type", ":", "type", "=", "None", ")", "->", "np", ".", "ndarray", ":", "image_data", "=", "image", ".", "get_data", "(", ")", "if", "image_data",...
Mask image after optionally casting its type. Parameters ---------- image Image to mask. Can include time as the last dimension. mask Mask to apply. Must have the same shape as the image data. data_type Type to cast image to. Returns ------- np.ndarray Masked image. Raises ------ ValueError Image data and masks have different shapes.
[ "Mask", "image", "after", "optionally", "casting", "its", "type", "." ]
python
train
xi/ldif3
ldif3.py
https://github.com/xi/ldif3/blob/debc4222bb48492de0d3edcc3c71fdae5bc612a4/ldif3.py#L43-L48
def is_dn(s): """Return True if s is a LDAP DN.""" if s == '': return True rm = DN_REGEX.match(s) return rm is not None and rm.group(0) == s
[ "def", "is_dn", "(", "s", ")", ":", "if", "s", "==", "''", ":", "return", "True", "rm", "=", "DN_REGEX", ".", "match", "(", "s", ")", "return", "rm", "is", "not", "None", "and", "rm", ".", "group", "(", "0", ")", "==", "s" ]
Return True if s is a LDAP DN.
[ "Return", "True", "if", "s", "is", "a", "LDAP", "DN", "." ]
python
train
alexa/alexa-skills-kit-sdk-for-python
django-ask-sdk/django_ask_sdk/skill_adapter.py
https://github.com/alexa/alexa-skills-kit-sdk-for-python/blob/097b6406aa12d5ca0b825b00c936861b530cbf39/django-ask-sdk/django_ask_sdk/skill_adapter.py#L152-L173
def dispatch(self, request, *args, **kwargs): # type: (HttpRequest, object, object) -> HttpResponse """Inspect the HTTP method and delegate to the view method. This is the default implementation of the :py:class:`django.views.View` method, which will inspect the HTTP method in the input request and delegate it to the corresponding method in the view. The only allowed method on this view is ``post``. :param request: The input request sent to the view :type request: django.http.HttpRequest :return: The response from the view :rtype: django.http.HttpResponse :raises: :py:class:`django.http.HttpResponseNotAllowed` if the method is invoked for other than HTTP POST request. :py:class:`django.http.HttpResponseBadRequest` if the request verification fails. :py:class:`django.http.HttpResponseServerError` for any internal exception. """ return super(SkillAdapter, self).dispatch(request)
[ "def", "dispatch", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# type: (HttpRequest, object, object) -> HttpResponse", "return", "super", "(", "SkillAdapter", ",", "self", ")", ".", "dispatch", "(", "request", ")" ]
Inspect the HTTP method and delegate to the view method. This is the default implementation of the :py:class:`django.views.View` method, which will inspect the HTTP method in the input request and delegate it to the corresponding method in the view. The only allowed method on this view is ``post``. :param request: The input request sent to the view :type request: django.http.HttpRequest :return: The response from the view :rtype: django.http.HttpResponse :raises: :py:class:`django.http.HttpResponseNotAllowed` if the method is invoked for other than HTTP POST request. :py:class:`django.http.HttpResponseBadRequest` if the request verification fails. :py:class:`django.http.HttpResponseServerError` for any internal exception.
[ "Inspect", "the", "HTTP", "method", "and", "delegate", "to", "the", "view", "method", "." ]
python
train
dagster-io/dagster
python_modules/dagster/dagster/core/execution.py
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/execution.py#L731-L757
def execute_pipeline_iterator(pipeline, environment_dict=None, run_config=None): '''Returns iterator that yields :py:class:`SolidExecutionResult` for each solid executed in the pipeline. This is intended to allow the caller to do things between each executed node. For the 'synchronous' API, see :py:func:`execute_pipeline`. Parameters: pipeline (PipelineDefinition): Pipeline to run environment_dict (dict): The enviroment configuration that parameterizes this run run_config (RunConfig): Configuration for how this pipeline will be executed Returns: Iterator[DagsterEvent] ''' check.inst_param(pipeline, 'pipeline', PipelineDefinition) environment_dict = check.opt_dict_param(environment_dict, 'environment_dict') run_config = check_run_config_param(run_config) environment_config = create_environment_config(pipeline, environment_dict) intermediates_manager = construct_intermediates_manager( run_config, environment_config, pipeline ) with _pipeline_execution_context_manager( pipeline, environment_config, run_config, intermediates_manager ) as pipeline_context: return _execute_pipeline_iterator(pipeline_context)
[ "def", "execute_pipeline_iterator", "(", "pipeline", ",", "environment_dict", "=", "None", ",", "run_config", "=", "None", ")", ":", "check", ".", "inst_param", "(", "pipeline", ",", "'pipeline'", ",", "PipelineDefinition", ")", "environment_dict", "=", "check", ...
Returns iterator that yields :py:class:`SolidExecutionResult` for each solid executed in the pipeline. This is intended to allow the caller to do things between each executed node. For the 'synchronous' API, see :py:func:`execute_pipeline`. Parameters: pipeline (PipelineDefinition): Pipeline to run environment_dict (dict): The enviroment configuration that parameterizes this run run_config (RunConfig): Configuration for how this pipeline will be executed Returns: Iterator[DagsterEvent]
[ "Returns", "iterator", "that", "yields", ":", "py", ":", "class", ":", "SolidExecutionResult", "for", "each", "solid", "executed", "in", "the", "pipeline", "." ]
python
test
celery/django-celery
djcelery/backends/database.py
https://github.com/celery/django-celery/blob/5d1ecb09c6304d22cc447c7c08fba0bd1febc2ef/djcelery/backends/database.py#L46-L50
def _restore_group(self, group_id): """Get group metadata for a group by id.""" meta = self.TaskSetModel._default_manager.restore_taskset(group_id) if meta: return meta.to_dict()
[ "def", "_restore_group", "(", "self", ",", "group_id", ")", ":", "meta", "=", "self", ".", "TaskSetModel", ".", "_default_manager", ".", "restore_taskset", "(", "group_id", ")", "if", "meta", ":", "return", "meta", ".", "to_dict", "(", ")" ]
Get group metadata for a group by id.
[ "Get", "group", "metadata", "for", "a", "group", "by", "id", "." ]
python
train
openvax/varcode
varcode/variant.py
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/variant.py#L382-L391
def is_deletion(self): """ Does this variant represent the deletion of nucleotides from the reference genome? """ # A deletion would appear in a VCF like CT>C, so that the # reference allele starts with the alternate nucleotides. # This is true even in the normalized case, where the alternate # nucleotides are an empty string. return (len(self.ref) > len(self.alt)) and self.ref.startswith(self.alt)
[ "def", "is_deletion", "(", "self", ")", ":", "# A deletion would appear in a VCF like CT>C, so that the", "# reference allele starts with the alternate nucleotides.", "# This is true even in the normalized case, where the alternate", "# nucleotides are an empty string.", "return", "(", "len"...
Does this variant represent the deletion of nucleotides from the reference genome?
[ "Does", "this", "variant", "represent", "the", "deletion", "of", "nucleotides", "from", "the", "reference", "genome?" ]
python
train
merll/docker-map
dockermap/build/buffer.py
https://github.com/merll/docker-map/blob/e14fe86a6ff5c33d121eb2f9157e9359cb80dd02/dockermap/build/buffer.py#L102-L114
def save(self, name): """ Save the string buffer to a file. Finalizes prior to saving. :param name: File path. :type name: unicode | str """ self.finalize() with open(name, 'wb+') as f: if six.PY3: f.write(self.fileobj.getbuffer()) else: f.write(self.fileobj.getvalue().encode('utf-8'))
[ "def", "save", "(", "self", ",", "name", ")", ":", "self", ".", "finalize", "(", ")", "with", "open", "(", "name", ",", "'wb+'", ")", "as", "f", ":", "if", "six", ".", "PY3", ":", "f", ".", "write", "(", "self", ".", "fileobj", ".", "getbuffer"...
Save the string buffer to a file. Finalizes prior to saving. :param name: File path. :type name: unicode | str
[ "Save", "the", "string", "buffer", "to", "a", "file", ".", "Finalizes", "prior", "to", "saving", "." ]
python
train
pyviz/holoviews
holoviews/core/spaces.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/spaces.py#L85-L101
def layout(self, dimensions=None, **kwargs): """Group by supplied dimension(s) and lay out groups Groups data by supplied dimension(s) laying the groups along the dimension(s) out in a NdLayout. Args: dimensions: Dimension(s) to group by Returns: NdLayout with supplied dimensions """ dimensions = self._valid_dimensions(dimensions) if len(dimensions) == self.ndims: with item_check(False): return NdLayout(self, **kwargs).reindex(dimensions) return self.groupby(dimensions, container_type=NdLayout, **kwargs)
[ "def", "layout", "(", "self", ",", "dimensions", "=", "None", ",", "*", "*", "kwargs", ")", ":", "dimensions", "=", "self", ".", "_valid_dimensions", "(", "dimensions", ")", "if", "len", "(", "dimensions", ")", "==", "self", ".", "ndims", ":", "with", ...
Group by supplied dimension(s) and lay out groups Groups data by supplied dimension(s) laying the groups along the dimension(s) out in a NdLayout. Args: dimensions: Dimension(s) to group by Returns: NdLayout with supplied dimensions
[ "Group", "by", "supplied", "dimension", "(", "s", ")", "and", "lay", "out", "groups" ]
python
train
tanghaibao/jcvi
jcvi/assembly/unitig.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/unitig.py#L161-L192
def cnsfix(args): """ %prog cnsfix consensus-fix.out.FAILED > blacklist.ids Parse consensus-fix.out to extract layouts for fixed unitigs. This will mark all the failed fragments detected by utgcnsfix and pop them out of the existing unitigs. """ from jcvi.formats.base import read_block p = OptionParser(cnsfix.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) cnsfixout, = args fp = open(cnsfixout) utgs = [] saves = [] for header, contents in read_block(fp, "Evaluating"): contents = list(contents) utg = header.split()[2] utgs.append(utg) # Look for this line: # save fragment idx=388 ident=206054426 for next pass for c in contents: if not c.startswith("save"): continue ident = c.split()[3].split("=")[-1] saves.append(ident) print("\n".join(saves))
[ "def", "cnsfix", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "base", "import", "read_block", "p", "=", "OptionParser", "(", "cnsfix", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len"...
%prog cnsfix consensus-fix.out.FAILED > blacklist.ids Parse consensus-fix.out to extract layouts for fixed unitigs. This will mark all the failed fragments detected by utgcnsfix and pop them out of the existing unitigs.
[ "%prog", "cnsfix", "consensus", "-", "fix", ".", "out", ".", "FAILED", ">", "blacklist", ".", "ids" ]
python
train
fedora-python/pyp2rpm
pyp2rpm/dependency_parser.py
https://github.com/fedora-python/pyp2rpm/blob/853eb3d226689a5ccdcdb9358b1a3394fafbd2b5/pyp2rpm/dependency_parser.py#L75-L123
def deps_from_pydit_json(requires, runtime=True): """Parses dependencies returned by pydist.json, since versions uses brackets we can't use pkg_resources to parse and we need a separate method Args: requires: list of dependencies as written in pydist.json of the package runtime: are the dependencies runtime (True) or build time (False) Returns: List of semi-SPECFILE dependecies (see dependency_to_rpm for format) """ parsed = [] for req in requires: # req looks like 'some-name (>=X.Y,!=Y.X)' or 'someme-name' where # 'some-name' is the name of required package and '(>=X.Y,!=Y.X)' # are specs name, specs = None, None # len(reqs) == 1 if there are not specified versions, 2 otherwise reqs = req.split(' ') name = reqs[0] if len(reqs) == 2: specs = reqs[1] # try if there are more specs in spec part of the requires specs = specs.split(",") # strip brackets specs = [re.sub('[()]', '', spec) for spec in specs] # this will divide (>=0.1.2) to ['>=', '0', '.1.2'] # or (0.1.2) into ['', '0', '.1.2'] specs = [re.split('([0-9])', spec, 1) for spec in specs] # we have separated specs based on number as delimiter # so we need to join it back to rest of version number # e.g ['>=', '0', '.1.2'] to ['>=', '0.1.2'] for spec in specs: spec[1:3] = [''.join(spec[1:3])] if specs: for spec in specs: if '!' in spec[0]: parsed.append(['Conflicts', name, '=', spec[1]]) elif specs[0] == '==': parsed.append(['Requires', name, '=', spec[1]]) else: parsed.append(['Requires', name, spec[0], spec[1]]) else: parsed.append(['Requires', name]) if not runtime: for pars in parsed: pars[0] = 'Build' + pars[0] return parsed
[ "def", "deps_from_pydit_json", "(", "requires", ",", "runtime", "=", "True", ")", ":", "parsed", "=", "[", "]", "for", "req", "in", "requires", ":", "# req looks like 'some-name (>=X.Y,!=Y.X)' or 'someme-name' where", "# 'some-name' is the name of required package and '(>=X.Y...
Parses dependencies returned by pydist.json, since versions uses brackets we can't use pkg_resources to parse and we need a separate method Args: requires: list of dependencies as written in pydist.json of the package runtime: are the dependencies runtime (True) or build time (False) Returns: List of semi-SPECFILE dependecies (see dependency_to_rpm for format)
[ "Parses", "dependencies", "returned", "by", "pydist", ".", "json", "since", "versions", "uses", "brackets", "we", "can", "t", "use", "pkg_resources", "to", "parse", "and", "we", "need", "a", "separate", "method", "Args", ":", "requires", ":", "list", "of", ...
python
train
shazow/unstdlib.py
unstdlib/standard/string_.py
https://github.com/shazow/unstdlib.py/blob/e0632fe165cfbfdb5a7e4bc7b412c9d6f2ebad83/unstdlib/standard/string_.py#L294-L329
def to_float(s, default=0.0, allow_nan=False): """ Return input converted into a float. If failed, then return ``default``. Note that, by default, ``allow_nan=False``, so ``to_float`` will not return ``nan``, ``inf``, or ``-inf``. Examples:: >>> to_float('1.5') 1.5 >>> to_float(1) 1.0 >>> to_float('') 0.0 >>> to_float('nan') 0.0 >>> to_float('inf') 0.0 >>> to_float('-inf', allow_nan=True) -inf >>> to_float(None) 0.0 >>> to_float(0, default='Empty') 0.0 >>> to_float(None, default='Empty') 'Empty' """ try: f = float(s) except (TypeError, ValueError): return default if not allow_nan: if f != f or f in _infs: return default return f
[ "def", "to_float", "(", "s", ",", "default", "=", "0.0", ",", "allow_nan", "=", "False", ")", ":", "try", ":", "f", "=", "float", "(", "s", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "return", "default", "if", "not", "allow_nan", ...
Return input converted into a float. If failed, then return ``default``. Note that, by default, ``allow_nan=False``, so ``to_float`` will not return ``nan``, ``inf``, or ``-inf``. Examples:: >>> to_float('1.5') 1.5 >>> to_float(1) 1.0 >>> to_float('') 0.0 >>> to_float('nan') 0.0 >>> to_float('inf') 0.0 >>> to_float('-inf', allow_nan=True) -inf >>> to_float(None) 0.0 >>> to_float(0, default='Empty') 0.0 >>> to_float(None, default='Empty') 'Empty'
[ "Return", "input", "converted", "into", "a", "float", ".", "If", "failed", "then", "return", "default", "." ]
python
train
Iotic-Labs/py-IoticAgent
src/IoticAgent/Core/Validation.py
https://github.com/Iotic-Labs/py-IoticAgent/blob/893e8582ad1dacfe32dfc0ee89452bbd6f57d28d/src/IoticAgent/Core/Validation.py#L248-L257
def search_location_check(cls, location): """Core.Client.request_search location parameter should be a dictionary that contains lat, lon and radius floats """ if not (isinstance(location, Mapping) and set(location.keys()) == _LOCATION_SEARCH_ARGS): raise ValueError('Search location should be mapping with keys: %s' % _LOCATION_SEARCH_ARGS) cls.location_check(location['lat'], location['long']) radius = location['radius'] if not (isinstance(radius, number_types) and 0 < radius <= 20038): # half circumference raise ValueError("Radius: '{radius}' is invalid".format(radius=radius))
[ "def", "search_location_check", "(", "cls", ",", "location", ")", ":", "if", "not", "(", "isinstance", "(", "location", ",", "Mapping", ")", "and", "set", "(", "location", ".", "keys", "(", ")", ")", "==", "_LOCATION_SEARCH_ARGS", ")", ":", "raise", "Val...
Core.Client.request_search location parameter should be a dictionary that contains lat, lon and radius floats
[ "Core", ".", "Client", ".", "request_search", "location", "parameter", "should", "be", "a", "dictionary", "that", "contains", "lat", "lon", "and", "radius", "floats" ]
python
train
dshean/pygeotools
pygeotools/lib/geolib.py
https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L1922-L1938
def get_xy_ma(bma, gt, stride=1, origmask=True, newmask=None): """Return arrays of x and y map coordinates for input array and geotransform """ pX = np.arange(0, bma.shape[1], stride) pY = np.arange(0, bma.shape[0], stride) psamp = np.meshgrid(pX, pY) #if origmask: # psamp = np.ma.array(psamp, mask=np.ma.getmaskarray(bma), fill_value=0) mX, mY = pixelToMap(psamp[0], psamp[1], gt) mask = None if origmask: mask = np.ma.getmaskarray(bma)[::stride] if newmask is not None: mask = newmask[::stride] mX = np.ma.array(mX, mask=mask, fill_value=0) mY = np.ma.array(mY, mask=mask, fill_value=0) return mX, mY
[ "def", "get_xy_ma", "(", "bma", ",", "gt", ",", "stride", "=", "1", ",", "origmask", "=", "True", ",", "newmask", "=", "None", ")", ":", "pX", "=", "np", ".", "arange", "(", "0", ",", "bma", ".", "shape", "[", "1", "]", ",", "stride", ")", "p...
Return arrays of x and y map coordinates for input array and geotransform
[ "Return", "arrays", "of", "x", "and", "y", "map", "coordinates", "for", "input", "array", "and", "geotransform" ]
python
train
QUANTAXIS/QUANTAXIS
QUANTAXIS/QASetting/executor.py
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QASetting/executor.py#L33-L71
def execute(command, shell=None, working_dir=".", echo=False, echo_indent=0): """Execute a command on the command-line. :param str,list command: The command to run :param bool shell: Whether or not to use the shell. This is optional; if ``command`` is a basestring, shell will be set to True, otherwise it will be false. You can override this behavior by setting this parameter directly. :param str working_dir: The directory in which to run the command. :param bool echo: Whether or not to print the output from the command to stdout. :param int echo_indent: Any number of spaces to indent the echo for clarity :returns: tuple: (return code, stdout) Example >>> from executor import execute >>> return_code, text = execute("dir") """ if shell is None: shell = True if isinstance(command, str) else False p = Popen(command, stdin=PIPE, stdout=PIPE, stderr=STDOUT, shell=shell, cwd=working_dir) if echo: stdout = "" while p.poll() is None: # This blocks until it receives a newline. line = p.stdout.readline() print(" " * echo_indent, line, end="") stdout += line # Read any last bits line = p.stdout.read() print(" " * echo_indent, line, end="") print() stdout += line else: stdout, _ = p.communicate() return (p.returncode, stdout)
[ "def", "execute", "(", "command", ",", "shell", "=", "None", ",", "working_dir", "=", "\".\"", ",", "echo", "=", "False", ",", "echo_indent", "=", "0", ")", ":", "if", "shell", "is", "None", ":", "shell", "=", "True", "if", "isinstance", "(", "comman...
Execute a command on the command-line. :param str,list command: The command to run :param bool shell: Whether or not to use the shell. This is optional; if ``command`` is a basestring, shell will be set to True, otherwise it will be false. You can override this behavior by setting this parameter directly. :param str working_dir: The directory in which to run the command. :param bool echo: Whether or not to print the output from the command to stdout. :param int echo_indent: Any number of spaces to indent the echo for clarity :returns: tuple: (return code, stdout) Example >>> from executor import execute >>> return_code, text = execute("dir")
[ "Execute", "a", "command", "on", "the", "command", "-", "line", ".", ":", "param", "str", "list", "command", ":", "The", "command", "to", "run", ":", "param", "bool", "shell", ":", "Whether", "or", "not", "to", "use", "the", "shell", ".", "This", "is...
python
train
CityOfZion/neo-python-core
neocore/Cryptography/Helper.py
https://github.com/CityOfZion/neo-python-core/blob/786c02cc2f41712d70b1f064ae3d67f86167107f/neocore/Cryptography/Helper.py#L28-L42
def double_sha256(ba): """ Perform two SHA256 operations on the input. Args: ba (bytes): data to hash. Returns: str: hash as a double digit hex string. """ d1 = hashlib.sha256(ba) d2 = hashlib.sha256() d1.hexdigest() d2.update(d1.digest()) return d2.hexdigest()
[ "def", "double_sha256", "(", "ba", ")", ":", "d1", "=", "hashlib", ".", "sha256", "(", "ba", ")", "d2", "=", "hashlib", ".", "sha256", "(", ")", "d1", ".", "hexdigest", "(", ")", "d2", ".", "update", "(", "d1", ".", "digest", "(", ")", ")", "re...
Perform two SHA256 operations on the input. Args: ba (bytes): data to hash. Returns: str: hash as a double digit hex string.
[ "Perform", "two", "SHA256", "operations", "on", "the", "input", "." ]
python
train
raymontag/kppy
kppy/database.py
https://github.com/raymontag/kppy/blob/a43f1fff7d49da1da4b3d8628a1b3ebbaf47f43a/kppy/database.py#L461-L473
def lock(self): """This method locks the database.""" self.password = None self.keyfile = None self.groups[:] = [] self.entries[:] = [] self._group_order[:] = [] self._entry_order[:] = [] self.root_group = v1Group() self._num_groups = 1 self._num_entries = 0 return True
[ "def", "lock", "(", "self", ")", ":", "self", ".", "password", "=", "None", "self", ".", "keyfile", "=", "None", "self", ".", "groups", "[", ":", "]", "=", "[", "]", "self", ".", "entries", "[", ":", "]", "=", "[", "]", "self", ".", "_group_ord...
This method locks the database.
[ "This", "method", "locks", "the", "database", "." ]
python
train
spyder-ide/spyder
spyder/plugins/variableexplorer/widgets/importwizard.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/variableexplorer/widgets/importwizard.py#L423-L426
def contextMenuEvent(self, event): """Reimplement Qt method""" self.opt_menu.popup(event.globalPos()) event.accept()
[ "def", "contextMenuEvent", "(", "self", ",", "event", ")", ":", "self", ".", "opt_menu", ".", "popup", "(", "event", ".", "globalPos", "(", ")", ")", "event", ".", "accept", "(", ")" ]
Reimplement Qt method
[ "Reimplement", "Qt", "method" ]
python
train
pallets/werkzeug
src/werkzeug/wrappers/base_response.py
https://github.com/pallets/werkzeug/blob/a220671d66755a94630a212378754bb432811158/src/werkzeug/wrappers/base_response.py#L366-L372
def calculate_content_length(self): """Returns the content length if available or `None` otherwise.""" try: self._ensure_sequence() except RuntimeError: return None return sum(len(x) for x in self.iter_encoded())
[ "def", "calculate_content_length", "(", "self", ")", ":", "try", ":", "self", ".", "_ensure_sequence", "(", ")", "except", "RuntimeError", ":", "return", "None", "return", "sum", "(", "len", "(", "x", ")", "for", "x", "in", "self", ".", "iter_encoded", "...
Returns the content length if available or `None` otherwise.
[ "Returns", "the", "content", "length", "if", "available", "or", "None", "otherwise", "." ]
python
train
ricobl/django-importer
sample_project/tasks/importers.py
https://github.com/ricobl/django-importer/blob/6967adfa7a286be7aaf59d3f33c6637270bd9df6/sample_project/tasks/importers.py#L64-L88
def parse_date(self, item, field_name, source_name): """ Converts the date in the format: Thu 03. As only the day is provided, tries to find the best match based on the current date, considering that dates are on the past. """ # Get the current date now = datetime.now().date() # Get the date from the source val = self.get_value(item, source_name) week_day, day = val.split() day = int(day) # If the current date is minor than the item date # go back one month if now.day < day: if now.month == 1: now = now.replace(month=12, year=now.year-1) else: now = now.replace(month=now.month-1) # Finally, replace the source day in the current date # and return now = now.replace(day=day) return now
[ "def", "parse_date", "(", "self", ",", "item", ",", "field_name", ",", "source_name", ")", ":", "# Get the current date", "now", "=", "datetime", ".", "now", "(", ")", ".", "date", "(", ")", "# Get the date from the source", "val", "=", "self", ".", "get_val...
Converts the date in the format: Thu 03. As only the day is provided, tries to find the best match based on the current date, considering that dates are on the past.
[ "Converts", "the", "date", "in", "the", "format", ":", "Thu", "03", "." ]
python
test
Nekroze/partpy
partpy/sourcestring.py
https://github.com/Nekroze/partpy/blob/dbb7d2fb285464fc43d85bc31f5af46192d301f6/partpy/sourcestring.py#L322-L345
def match_any_string(self, strings, word=0, offset=0): """Attempts to match each string in strings in order. Will return the string that matches or an empty string if no match. If word arg >= 1 then only match if string is followed by a whitespace which is much higher performance. If word is 0 then you should sort the strings argument yourself by length. """ if word: current = self.get_string(offset) return current if current in strings else '' current = '' currentlength = 0 length = 0 for string in strings: length = len(string) if length != currentlength: current = self.get_length(length, offset) if string == current: return string return ''
[ "def", "match_any_string", "(", "self", ",", "strings", ",", "word", "=", "0", ",", "offset", "=", "0", ")", ":", "if", "word", ":", "current", "=", "self", ".", "get_string", "(", "offset", ")", "return", "current", "if", "current", "in", "strings", ...
Attempts to match each string in strings in order. Will return the string that matches or an empty string if no match. If word arg >= 1 then only match if string is followed by a whitespace which is much higher performance. If word is 0 then you should sort the strings argument yourself by length.
[ "Attempts", "to", "match", "each", "string", "in", "strings", "in", "order", ".", "Will", "return", "the", "string", "that", "matches", "or", "an", "empty", "string", "if", "no", "match", "." ]
python
train
wgnet/webium
webium/controls/select.py
https://github.com/wgnet/webium/blob/ccb09876a201e75f5c5810392d4db7a8708b90cb/webium/controls/select.py#L57-L87
def select_by_visible_text(self, text): """ Performs search of selected item from Web List @params text - string visible text """ xpath = './/option[normalize-space(.) = {0}]'.format(self._escape_string(text)) opts = self.find_elements_by_xpath(xpath) matched = False for opt in opts: self._set_selected(opt) if not self.is_multiple: return matched = True # in case the target option isn't found by xpath # attempt to find it by direct comparison among options which contain at least the longest token from the text if len(opts) == 0 and ' ' in text: sub_string_without_space = self._get_longest_token(text) if sub_string_without_space == "": candidates = self.get_options() else: xpath = ".//option[contains(.,{0})]".format(self._escape_string(sub_string_without_space)) candidates = self.find_elements_by_xpath(xpath) for candidate in candidates: if text == candidate.text: self._set_selected(candidate) if not self.is_multiple: return matched = True if not matched: raise NoSuchElementException("Could not locate element with visible text: " + str(text))
[ "def", "select_by_visible_text", "(", "self", ",", "text", ")", ":", "xpath", "=", "'.//option[normalize-space(.) = {0}]'", ".", "format", "(", "self", ".", "_escape_string", "(", "text", ")", ")", "opts", "=", "self", ".", "find_elements_by_xpath", "(", "xpath"...
Performs search of selected item from Web List @params text - string visible text
[ "Performs", "search", "of", "selected", "item", "from", "Web", "List" ]
python
train