Dataset columns (type and observed value range):
repository_name: string, length 7 to 54
func_path_in_repository: string, length 4 to 175
func_name: string, length 1 to 129
whole_func_string: string, length 91 to 50.9k
language: string, 1 class
func_code_string: string, length 91 to 50.9k
func_code_tokens: sequence
func_documentation_string: string, length 1 to 31.6k
func_documentation_tokens: sequence
split_name: string, 1 class
func_code_url: string, length 89 to 268
score: float64, 0 to 0.09
saltstack/salt
salt/modules/opsgenie.py
post_data
def post_data(api_key=None, name='OpsGenie Execution Module', reason=None, action_type=None): ''' Post data to OpsGenie. It's designed for Salt's Event Reactor. After configuring the sls reaction file as shown above, you can trigger the module with your designated tag (og-tag in this case). CLI Example: .. code-block:: bash salt-call event.send 'og-tag' '{"reason" : "Overheating CPU!"}' Required parameters: api_key It's the API Key you've copied while adding integration in OpsGenie. reason It will be used as alert's default message in OpsGenie. action_type OpsGenie supports the default values Create/Close for action_type. You can customize this field with OpsGenie's custom actions for other purposes like adding notes or acknowledging alerts. Optional parameters: name It will be used as alert's alias. If you want to use the close functionality you must provide name field for both states like in this case. ''' if api_key is None or reason is None: raise salt.exceptions.SaltInvocationError( 'API Key or Reason cannot be None.') data = dict() data['alias'] = name data['message'] = reason # data['actions'] = action_type data['cpuModel'] = __grains__['cpu_model'] data['cpuArch'] = __grains__['cpuarch'] data['fqdn'] = __grains__['fqdn'] data['host'] = __grains__['host'] data['id'] = __grains__['id'] data['kernel'] = __grains__['kernel'] data['kernelRelease'] = __grains__['kernelrelease'] data['master'] = __grains__['master'] data['os'] = __grains__['os'] data['saltPath'] = __grains__['saltpath'] data['saltVersion'] = __grains__['saltversion'] data['username'] = __grains__['username'] data['uuid'] = __grains__['uuid'] log.debug('Below data will be posted:\n%s', data) log.debug('API Key: %s \t API Endpoint: %s', api_key, API_ENDPOINT) if action_type == "Create": response = requests.post( url=API_ENDPOINT, data=salt.utils.json.dumps(data), headers={'Content-Type': 'application/json', 'Authorization': 'GenieKey ' + api_key}) else: response = requests.post( url=API_ENDPOINT + "/" + name + "/close?identifierType=alias", data=salt.utils.json.dumps(data), headers={'Content-Type': 'application/json', 'Authorization': 'GenieKey ' + api_key}) return response.status_code, response.text
python
def post_data(api_key=None, name='OpsGenie Execution Module', reason=None, action_type=None): ''' Post data to OpsGenie. It's designed for Salt's Event Reactor. After configuring the sls reaction file as shown above, you can trigger the module with your designated tag (og-tag in this case). CLI Example: .. code-block:: bash salt-call event.send 'og-tag' '{"reason" : "Overheating CPU!"}' Required parameters: api_key It's the API Key you've copied while adding integration in OpsGenie. reason It will be used as alert's default message in OpsGenie. action_type OpsGenie supports the default values Create/Close for action_type. You can customize this field with OpsGenie's custom actions for other purposes like adding notes or acknowledging alerts. Optional parameters: name It will be used as alert's alias. If you want to use the close functionality you must provide name field for both states like in this case. ''' if api_key is None or reason is None: raise salt.exceptions.SaltInvocationError( 'API Key or Reason cannot be None.') data = dict() data['alias'] = name data['message'] = reason # data['actions'] = action_type data['cpuModel'] = __grains__['cpu_model'] data['cpuArch'] = __grains__['cpuarch'] data['fqdn'] = __grains__['fqdn'] data['host'] = __grains__['host'] data['id'] = __grains__['id'] data['kernel'] = __grains__['kernel'] data['kernelRelease'] = __grains__['kernelrelease'] data['master'] = __grains__['master'] data['os'] = __grains__['os'] data['saltPath'] = __grains__['saltpath'] data['saltVersion'] = __grains__['saltversion'] data['username'] = __grains__['username'] data['uuid'] = __grains__['uuid'] log.debug('Below data will be posted:\n%s', data) log.debug('API Key: %s \t API Endpoint: %s', api_key, API_ENDPOINT) if action_type == "Create": response = requests.post( url=API_ENDPOINT, data=salt.utils.json.dumps(data), headers={'Content-Type': 'application/json', 'Authorization': 'GenieKey ' + api_key}) else: response = requests.post( url=API_ENDPOINT + "/" + name + "/close?identifierType=alias", data=salt.utils.json.dumps(data), headers={'Content-Type': 'application/json', 'Authorization': 'GenieKey ' + api_key}) return response.status_code, response.text
[ "def", "post_data", "(", "api_key", "=", "None", ",", "name", "=", "'OpsGenie Execution Module'", ",", "reason", "=", "None", ",", "action_type", "=", "None", ")", ":", "if", "api_key", "is", "None", "or", "reason", "is", "None", ":", "raise", "salt", ".", "exceptions", ".", "SaltInvocationError", "(", "'API Key or Reason cannot be None.'", ")", "data", "=", "dict", "(", ")", "data", "[", "'alias'", "]", "=", "name", "data", "[", "'message'", "]", "=", "reason", "# data['actions'] = action_type", "data", "[", "'cpuModel'", "]", "=", "__grains__", "[", "'cpu_model'", "]", "data", "[", "'cpuArch'", "]", "=", "__grains__", "[", "'cpuarch'", "]", "data", "[", "'fqdn'", "]", "=", "__grains__", "[", "'fqdn'", "]", "data", "[", "'host'", "]", "=", "__grains__", "[", "'host'", "]", "data", "[", "'id'", "]", "=", "__grains__", "[", "'id'", "]", "data", "[", "'kernel'", "]", "=", "__grains__", "[", "'kernel'", "]", "data", "[", "'kernelRelease'", "]", "=", "__grains__", "[", "'kernelrelease'", "]", "data", "[", "'master'", "]", "=", "__grains__", "[", "'master'", "]", "data", "[", "'os'", "]", "=", "__grains__", "[", "'os'", "]", "data", "[", "'saltPath'", "]", "=", "__grains__", "[", "'saltpath'", "]", "data", "[", "'saltVersion'", "]", "=", "__grains__", "[", "'saltversion'", "]", "data", "[", "'username'", "]", "=", "__grains__", "[", "'username'", "]", "data", "[", "'uuid'", "]", "=", "__grains__", "[", "'uuid'", "]", "log", ".", "debug", "(", "'Below data will be posted:\\n%s'", ",", "data", ")", "log", ".", "debug", "(", "'API Key: %s \\t API Endpoint: %s'", ",", "api_key", ",", "API_ENDPOINT", ")", "if", "action_type", "==", "\"Create\"", ":", "response", "=", "requests", ".", "post", "(", "url", "=", "API_ENDPOINT", ",", "data", "=", "salt", ".", "utils", ".", "json", ".", "dumps", "(", "data", ")", ",", "headers", "=", "{", "'Content-Type'", ":", "'application/json'", ",", "'Authorization'", ":", "'GenieKey '", "+", "api_key", "}", ")", "else", ":", "response", "=", "requests", ".", "post", "(", "url", "=", "API_ENDPOINT", "+", "\"/\"", "+", "name", "+", "\"/close?identifierType=alias\"", ",", "data", "=", "salt", ".", "utils", ".", "json", ".", "dumps", "(", "data", ")", ",", "headers", "=", "{", "'Content-Type'", ":", "'application/json'", ",", "'Authorization'", ":", "'GenieKey '", "+", "api_key", "}", ")", "return", "response", ".", "status_code", ",", "response", ".", "text" ]
Post data to OpsGenie. It's designed for Salt's Event Reactor. After configuring the sls reaction file as shown above, you can trigger the module with your designated tag (og-tag in this case). CLI Example: .. code-block:: bash salt-call event.send 'og-tag' '{"reason" : "Overheating CPU!"}' Required parameters: api_key It's the API Key you've copied while adding integration in OpsGenie. reason It will be used as alert's default message in OpsGenie. action_type OpsGenie supports the default values Create/Close for action_type. You can customize this field with OpsGenie's custom actions for other purposes like adding notes or acknowledging alerts. Optional parameters: name It will be used as alert's alias. If you want to use the close functionality you must provide name field for both states like in this case.
[ "Post", "data", "to", "OpsGenie", ".", "It", "s", "designed", "for", "Salt", "s", "Event", "Reactor", "." ]
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/opsgenie.py#L37-L109
0.000771
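A standalone sketch of the HTTP call this module ends up making, with placeholder values and an assumed public OpsGenie alerts endpoint; the real module builds the payload from Salt grains and takes API_ENDPOINT from its own configuration:

```python
import json
import requests  # third-party HTTP client, same one the module uses

# Placeholder values -- the real module derives these from Salt grains/config.
API_ENDPOINT = "https://api.opsgenie.com/v2/alerts"  # assumed endpoint URL
api_key = "YOUR-GENIE-KEY"

payload = {"alias": "minion-web01", "message": "Overheating CPU!"}

# "Create" action: POST the alert to the base endpoint with a GenieKey header.
resp = requests.post(
    API_ENDPOINT,
    data=json.dumps(payload),
    headers={"Content-Type": "application/json",
             "Authorization": "GenieKey " + api_key},
)
print(resp.status_code, resp.text)
```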
DLR-RM/RAFCON
source/rafcon/gui/mygaphas/utils/gap_helper.py
get_state_id_for_port
def get_state_id_for_port(port): """This method returns the state ID of the state containing the given port :param port: Port to check for containing state ID :return: State ID of state containing port """ parent = port.parent from rafcon.gui.mygaphas.items.state import StateView if isinstance(parent, StateView): return parent.model.state.state_id
python
def get_state_id_for_port(port): """This method returns the state ID of the state containing the given port :param port: Port to check for containing state ID :return: State ID of state containing port """ parent = port.parent from rafcon.gui.mygaphas.items.state import StateView if isinstance(parent, StateView): return parent.model.state.state_id
[ "def", "get_state_id_for_port", "(", "port", ")", ":", "parent", "=", "port", ".", "parent", "from", "rafcon", ".", "gui", ".", "mygaphas", ".", "items", ".", "state", "import", "StateView", "if", "isinstance", "(", "parent", ",", "StateView", ")", ":", "return", "parent", ".", "model", ".", "state", ".", "state_id" ]
This method returns the state ID of the state containing the given port :param port: Port to check for containing state ID :return: State ID of state containing port
[ "This", "method", "returns", "the", "state", "ID", "of", "the", "state", "containing", "the", "given", "port" ]
train
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/mygaphas/utils/gap_helper.py#L76-L85
0.002591
RudolfCardinal/pythonlib
cardinal_pythonlib/rnc_db.py
DatabaseSupporter.connect_to_database_odbc_mysql
def connect_to_database_odbc_mysql(self, database: str, user: str, password: str, server: str = "localhost", port: int = 3306, driver: str = "{MySQL ODBC 5.1 Driver}", autocommit: bool = True) -> None: """Connects to a MySQL database via ODBC.""" self.connect(engine=ENGINE_MYSQL, interface=INTERFACE_ODBC, database=database, user=user, password=password, host=server, port=port, driver=driver, autocommit=autocommit)
python
def connect_to_database_odbc_mysql(self, database: str, user: str, password: str, server: str = "localhost", port: int = 3306, driver: str = "{MySQL ODBC 5.1 Driver}", autocommit: bool = True) -> None: """Connects to a MySQL database via ODBC.""" self.connect(engine=ENGINE_MYSQL, interface=INTERFACE_ODBC, database=database, user=user, password=password, host=server, port=port, driver=driver, autocommit=autocommit)
[ "def", "connect_to_database_odbc_mysql", "(", "self", ",", "database", ":", "str", ",", "user", ":", "str", ",", "password", ":", "str", ",", "server", ":", "str", "=", "\"localhost\"", ",", "port", ":", "int", "=", "3306", ",", "driver", ":", "str", "=", "\"{MySQL ODBC 5.1 Driver}\"", ",", "autocommit", ":", "bool", "=", "True", ")", "->", "None", ":", "self", ".", "connect", "(", "engine", "=", "ENGINE_MYSQL", ",", "interface", "=", "INTERFACE_ODBC", ",", "database", "=", "database", ",", "user", "=", "user", ",", "password", "=", "password", ",", "host", "=", "server", ",", "port", "=", "port", ",", "driver", "=", "driver", ",", "autocommit", "=", "autocommit", ")" ]
Connects to a MySQL database via ODBC.
[ "Connects", "to", "a", "MySQL", "database", "via", "ODBC", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/rnc_db.py#L1969-L1981
0.011704
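A hypothetical call to the wrapper above; the credentials, host and driver string are placeholders, and it assumes DatabaseSupporter can be instantiated without arguments:

```python
from cardinal_pythonlib.rnc_db import DatabaseSupporter

db = DatabaseSupporter()  # assumed no-argument constructor
db.connect_to_database_odbc_mysql(
    database="research",          # placeholder database name
    user="analyst",               # placeholder credentials
    password="s3cret",
    server="db.example.com",      # placeholder host
    port=3306,
    driver="{MySQL ODBC 5.1 Driver}",
    autocommit=True,
)
```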
dnanexus/dx-toolkit
src/python/dxpy/__init__.py
get_auth_server_name
def get_auth_server_name(host_override=None, port_override=None, protocol='https'): """ Chooses the auth server name from the currently configured API server name. Raises DXError if the auth server name cannot be guessed and the overrides are not provided (or improperly provided). """ if host_override is not None or port_override is not None: if host_override is None or port_override is None: raise exceptions.DXError("Both host and port must be specified if either is specified") return protocol + '://' + host_override + ':' + str(port_override) elif APISERVER_HOST == 'stagingapi.dnanexus.com': return 'https://stagingauth.dnanexus.com' elif APISERVER_HOST == 'api.dnanexus.com': return 'https://auth.dnanexus.com' elif APISERVER_HOST == 'stagingapi.cn.dnanexus.com': return 'https://stagingauth.cn.dnanexus.com:7001' elif APISERVER_HOST == 'api.cn.dnanexus.com': return 'https://auth.cn.dnanexus.com:8001' elif APISERVER_HOST == "localhost" or APISERVER_HOST == "127.0.0.1": if "DX_AUTHSERVER_HOST" not in os.environ or "DX_AUTHSERVER_PORT" not in os.environ: err_msg = "Must set authserver env vars (DX_AUTHSERVER_HOST, DX_AUTHSERVER_PORT) if apiserver is {apiserver}." raise exceptions.DXError(err_msg.format(apiserver=APISERVER_HOST)) else: return os.environ["DX_AUTHSERVER_HOST"] + ":" + os.environ["DX_AUTHSERVER_PORT"] else: err_msg = "Could not determine which auth server is associated with {apiserver}." raise exceptions.DXError(err_msg.format(apiserver=APISERVER_HOST))
python
def get_auth_server_name(host_override=None, port_override=None, protocol='https'): """ Chooses the auth server name from the currently configured API server name. Raises DXError if the auth server name cannot be guessed and the overrides are not provided (or improperly provided). """ if host_override is not None or port_override is not None: if host_override is None or port_override is None: raise exceptions.DXError("Both host and port must be specified if either is specified") return protocol + '://' + host_override + ':' + str(port_override) elif APISERVER_HOST == 'stagingapi.dnanexus.com': return 'https://stagingauth.dnanexus.com' elif APISERVER_HOST == 'api.dnanexus.com': return 'https://auth.dnanexus.com' elif APISERVER_HOST == 'stagingapi.cn.dnanexus.com': return 'https://stagingauth.cn.dnanexus.com:7001' elif APISERVER_HOST == 'api.cn.dnanexus.com': return 'https://auth.cn.dnanexus.com:8001' elif APISERVER_HOST == "localhost" or APISERVER_HOST == "127.0.0.1": if "DX_AUTHSERVER_HOST" not in os.environ or "DX_AUTHSERVER_PORT" not in os.environ: err_msg = "Must set authserver env vars (DX_AUTHSERVER_HOST, DX_AUTHSERVER_PORT) if apiserver is {apiserver}." raise exceptions.DXError(err_msg.format(apiserver=APISERVER_HOST)) else: return os.environ["DX_AUTHSERVER_HOST"] + ":" + os.environ["DX_AUTHSERVER_PORT"] else: err_msg = "Could not determine which auth server is associated with {apiserver}." raise exceptions.DXError(err_msg.format(apiserver=APISERVER_HOST))
[ "def", "get_auth_server_name", "(", "host_override", "=", "None", ",", "port_override", "=", "None", ",", "protocol", "=", "'https'", ")", ":", "if", "host_override", "is", "not", "None", "or", "port_override", "is", "not", "None", ":", "if", "host_override", "is", "None", "or", "port_override", "is", "None", ":", "raise", "exceptions", ".", "DXError", "(", "\"Both host and port must be specified if either is specified\"", ")", "return", "protocol", "+", "'://'", "+", "host_override", "+", "':'", "+", "str", "(", "port_override", ")", "elif", "APISERVER_HOST", "==", "'stagingapi.dnanexus.com'", ":", "return", "'https://stagingauth.dnanexus.com'", "elif", "APISERVER_HOST", "==", "'api.dnanexus.com'", ":", "return", "'https://auth.dnanexus.com'", "elif", "APISERVER_HOST", "==", "'stagingapi.cn.dnanexus.com'", ":", "return", "'https://stagingauth.cn.dnanexus.com:7001'", "elif", "APISERVER_HOST", "==", "'api.cn.dnanexus.com'", ":", "return", "'https://auth.cn.dnanexus.com:8001'", "elif", "APISERVER_HOST", "==", "\"localhost\"", "or", "APISERVER_HOST", "==", "\"127.0.0.1\"", ":", "if", "\"DX_AUTHSERVER_HOST\"", "not", "in", "os", ".", "environ", "or", "\"DX_AUTHSERVER_PORT\"", "not", "in", "os", ".", "environ", ":", "err_msg", "=", "\"Must set authserver env vars (DX_AUTHSERVER_HOST, DX_AUTHSERVER_PORT) if apiserver is {apiserver}.\"", "raise", "exceptions", ".", "DXError", "(", "err_msg", ".", "format", "(", "apiserver", "=", "APISERVER_HOST", ")", ")", "else", ":", "return", "os", ".", "environ", "[", "\"DX_AUTHSERVER_HOST\"", "]", "+", "\":\"", "+", "os", ".", "environ", "[", "\"DX_AUTHSERVER_PORT\"", "]", "else", ":", "err_msg", "=", "\"Could not determine which auth server is associated with {apiserver}.\"", "raise", "exceptions", ".", "DXError", "(", "err_msg", ".", "format", "(", "apiserver", "=", "APISERVER_HOST", ")", ")" ]
Chooses the auth server name from the currently configured API server name. Raises DXError if the auth server name cannot be guessed and the overrides are not provided (or improperly provided).
[ "Chooses", "the", "auth", "server", "name", "from", "the", "currently", "configured", "API", "server", "name", "." ]
train
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/__init__.py#L964-L991
0.004209
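A minimal standalone sketch of the override branch only, using made-up names; the real function additionally maps the known APISERVER_HOST values and, for localhost, falls back to the DX_AUTHSERVER_HOST/DX_AUTHSERVER_PORT environment variables:

```python
def auth_url_from_override(host_override, port_override, protocol="https"):
    # Mirrors the first branch above: both overrides must be given together.
    if host_override is None or port_override is None:
        raise ValueError("Both host and port must be specified if either is specified")
    return protocol + "://" + host_override + ":" + str(port_override)

print(auth_url_from_override("auth.example.com", 8443))
# -> https://auth.example.com:8443
```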
google/grr
grr/server/grr_response_server/bin/config_updater_util.py
ImportConfig
def ImportConfig(filename, config): """Reads an old config file and imports keys and user accounts.""" sections_to_import = ["PrivateKeys"] entries_to_import = [ "Client.executable_signing_public_key", "CA.certificate", "Frontend.certificate" ] options_imported = 0 old_config = grr_config.CONFIG.MakeNewConfig() old_config.Initialize(filename) for entry in old_config.raw_data: try: section = entry.split(".")[0] if section in sections_to_import or entry in entries_to_import: config.Set(entry, old_config.Get(entry)) print("Imported %s." % entry) options_imported += 1 except Exception as e: # pylint: disable=broad-except print("Exception during import of %s: %s" % (entry, e)) return options_imported
python
def ImportConfig(filename, config): """Reads an old config file and imports keys and user accounts.""" sections_to_import = ["PrivateKeys"] entries_to_import = [ "Client.executable_signing_public_key", "CA.certificate", "Frontend.certificate" ] options_imported = 0 old_config = grr_config.CONFIG.MakeNewConfig() old_config.Initialize(filename) for entry in old_config.raw_data: try: section = entry.split(".")[0] if section in sections_to_import or entry in entries_to_import: config.Set(entry, old_config.Get(entry)) print("Imported %s." % entry) options_imported += 1 except Exception as e: # pylint: disable=broad-except print("Exception during import of %s: %s" % (entry, e)) return options_imported
[ "def", "ImportConfig", "(", "filename", ",", "config", ")", ":", "sections_to_import", "=", "[", "\"PrivateKeys\"", "]", "entries_to_import", "=", "[", "\"Client.executable_signing_public_key\"", ",", "\"CA.certificate\"", ",", "\"Frontend.certificate\"", "]", "options_imported", "=", "0", "old_config", "=", "grr_config", ".", "CONFIG", ".", "MakeNewConfig", "(", ")", "old_config", ".", "Initialize", "(", "filename", ")", "for", "entry", "in", "old_config", ".", "raw_data", ":", "try", ":", "section", "=", "entry", ".", "split", "(", "\".\"", ")", "[", "0", "]", "if", "section", "in", "sections_to_import", "or", "entry", "in", "entries_to_import", ":", "config", ".", "Set", "(", "entry", ",", "old_config", ".", "Get", "(", "entry", ")", ")", "print", "(", "\"Imported %s.\"", "%", "entry", ")", "options_imported", "+=", "1", "except", "Exception", "as", "e", ":", "# pylint: disable=broad-except", "print", "(", "\"Exception during import of %s: %s\"", "%", "(", "entry", ",", "e", ")", ")", "return", "options_imported" ]
Reads an old config file and imports keys and user accounts.
[ "Reads", "an", "old", "config", "file", "and", "imports", "keys", "and", "user", "accounts", "." ]
train
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/bin/config_updater_util.py#L87-L108
0.015248
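A self-contained sketch of the filtering rule ImportConfig applies, using a plain dict in place of GRR's config objects (all entries here are made up):

```python
sections_to_import = ["PrivateKeys"]
entries_to_import = ["Client.executable_signing_public_key",
                     "CA.certificate", "Frontend.certificate"]

old_entries = {                      # stand-in for old_config.raw_data
    "PrivateKeys.ca_key": "<pem>",
    "CA.certificate": "<pem>",
    "Logging.verbose": True,         # skipped: wrong section, not whitelisted
}

imported = {key: value for key, value in old_entries.items()
            if key.split(".")[0] in sections_to_import
            or key in entries_to_import}
print(sorted(imported))              # ['CA.certificate', 'PrivateKeys.ca_key']
```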
JNRowe/upoints
upoints/utils.py
dump_xearth_markers
def dump_xearth_markers(markers, name='identifier'): """Generate an Xearth compatible marker file. ``dump_xearth_markers()`` writes a simple Xearth_ marker file from a dictionary of :class:`trigpoints.Trigpoint` objects. It expects a dictionary in one of the following formats. For support of :class:`Trigpoint` that is:: {500936: Trigpoint(52.066035, -0.281449, 37.0, "Broom Farm"), 501097: Trigpoint(52.010585, -0.173443, 97.0, "Bygrave"), 505392: Trigpoint(51.910886, -0.186462, 136.0, "Sish Lane")} And generates output of the form:: 52.066035 -0.281449 "500936" # Broom Farm, alt 37m 52.010585 -0.173443 "501097" # Bygrave, alt 97m 51.910886 -0.186462 "205392" # Sish Lane, alt 136m Or similar to the following if the ``name`` parameter is set to ``name``:: 52.066035 -0.281449 "Broom Farm" # 500936 alt 37m 52.010585 -0.173443 "Bygrave" # 501097 alt 97m 51.910886 -0.186462 "Sish Lane" # 205392 alt 136m Point objects should be provided in the following format:: {"Broom Farm": Point(52.066035, -0.281449), "Bygrave": Point(52.010585, -0.173443), "Sish Lane": Point(51.910886, -0.186462)} And generates output of the form:: 52.066035 -0.281449 "Broom Farm" 52.010585 -0.173443 "Bygrave" 51.910886 -0.186462 "Sish Lane" Note: xplanet_ also supports xearth marker files, and as such can use the output from this function. See also: upoints.xearth.Xearths.import_locations Args: markers (dict): Dictionary of identifier keys, with :class:`Trigpoint` values name (str): Value to use as Xearth display string Returns: list: List of strings representing an Xearth marker file Raises: ValueError: Unsupported value for ``name`` .. _xearth: http://hewgill.com/xearth/original/ .. _xplanet: http://xplanet.sourceforge.net/ """ output = [] for identifier, point in markers.items(): line = ['%f %f ' % (point.latitude, point.longitude), ] if hasattr(point, 'name') and point.name: if name == 'identifier': line.append('"%s" # %s' % (identifier, point.name)) elif name == 'name': line.append('"%s" # %s' % (point.name, identifier)) elif name == 'comment': line.append('"%s" # %s' % (identifier, point.comment)) else: raise ValueError('Unknown name type %r' % name) if hasattr(point, 'altitude') and point.altitude: line.append(', alt %im' % point.altitude) else: line.append('"%s"' % identifier) output.append(''.join(line)) # Return the list sorted on the marker name return sorted(output, key=lambda x: x.split()[2])
python
def dump_xearth_markers(markers, name='identifier'): """Generate an Xearth compatible marker file. ``dump_xearth_markers()`` writes a simple Xearth_ marker file from a dictionary of :class:`trigpoints.Trigpoint` objects. It expects a dictionary in one of the following formats. For support of :class:`Trigpoint` that is:: {500936: Trigpoint(52.066035, -0.281449, 37.0, "Broom Farm"), 501097: Trigpoint(52.010585, -0.173443, 97.0, "Bygrave"), 505392: Trigpoint(51.910886, -0.186462, 136.0, "Sish Lane")} And generates output of the form:: 52.066035 -0.281449 "500936" # Broom Farm, alt 37m 52.010585 -0.173443 "501097" # Bygrave, alt 97m 51.910886 -0.186462 "205392" # Sish Lane, alt 136m Or similar to the following if the ``name`` parameter is set to ``name``:: 52.066035 -0.281449 "Broom Farm" # 500936 alt 37m 52.010585 -0.173443 "Bygrave" # 501097 alt 97m 51.910886 -0.186462 "Sish Lane" # 205392 alt 136m Point objects should be provided in the following format:: {"Broom Farm": Point(52.066035, -0.281449), "Bygrave": Point(52.010585, -0.173443), "Sish Lane": Point(51.910886, -0.186462)} And generates output of the form:: 52.066035 -0.281449 "Broom Farm" 52.010585 -0.173443 "Bygrave" 51.910886 -0.186462 "Sish Lane" Note: xplanet_ also supports xearth marker files, and as such can use the output from this function. See also: upoints.xearth.Xearths.import_locations Args: markers (dict): Dictionary of identifier keys, with :class:`Trigpoint` values name (str): Value to use as Xearth display string Returns: list: List of strings representing an Xearth marker file Raises: ValueError: Unsupported value for ``name`` .. _xearth: http://hewgill.com/xearth/original/ .. _xplanet: http://xplanet.sourceforge.net/ """ output = [] for identifier, point in markers.items(): line = ['%f %f ' % (point.latitude, point.longitude), ] if hasattr(point, 'name') and point.name: if name == 'identifier': line.append('"%s" # %s' % (identifier, point.name)) elif name == 'name': line.append('"%s" # %s' % (point.name, identifier)) elif name == 'comment': line.append('"%s" # %s' % (identifier, point.comment)) else: raise ValueError('Unknown name type %r' % name) if hasattr(point, 'altitude') and point.altitude: line.append(', alt %im' % point.altitude) else: line.append('"%s"' % identifier) output.append(''.join(line)) # Return the list sorted on the marker name return sorted(output, key=lambda x: x.split()[2])
[ "def", "dump_xearth_markers", "(", "markers", ",", "name", "=", "'identifier'", ")", ":", "output", "=", "[", "]", "for", "identifier", ",", "point", "in", "markers", ".", "items", "(", ")", ":", "line", "=", "[", "'%f %f '", "%", "(", "point", ".", "latitude", ",", "point", ".", "longitude", ")", ",", "]", "if", "hasattr", "(", "point", ",", "'name'", ")", "and", "point", ".", "name", ":", "if", "name", "==", "'identifier'", ":", "line", ".", "append", "(", "'\"%s\" # %s'", "%", "(", "identifier", ",", "point", ".", "name", ")", ")", "elif", "name", "==", "'name'", ":", "line", ".", "append", "(", "'\"%s\" # %s'", "%", "(", "point", ".", "name", ",", "identifier", ")", ")", "elif", "name", "==", "'comment'", ":", "line", ".", "append", "(", "'\"%s\" # %s'", "%", "(", "identifier", ",", "point", ".", "comment", ")", ")", "else", ":", "raise", "ValueError", "(", "'Unknown name type %r'", "%", "name", ")", "if", "hasattr", "(", "point", ",", "'altitude'", ")", "and", "point", ".", "altitude", ":", "line", ".", "append", "(", "', alt %im'", "%", "point", ".", "altitude", ")", "else", ":", "line", ".", "append", "(", "'\"%s\"'", "%", "identifier", ")", "output", ".", "append", "(", "''", ".", "join", "(", "line", ")", ")", "# Return the list sorted on the marker name", "return", "sorted", "(", "output", ",", "key", "=", "lambda", "x", ":", "x", ".", "split", "(", ")", "[", "2", "]", ")" ]
Generate an Xearth compatible marker file. ``dump_xearth_markers()`` writes a simple Xearth_ marker file from a dictionary of :class:`trigpoints.Trigpoint` objects. It expects a dictionary in one of the following formats. For support of :class:`Trigpoint` that is:: {500936: Trigpoint(52.066035, -0.281449, 37.0, "Broom Farm"), 501097: Trigpoint(52.010585, -0.173443, 97.0, "Bygrave"), 505392: Trigpoint(51.910886, -0.186462, 136.0, "Sish Lane")} And generates output of the form:: 52.066035 -0.281449 "500936" # Broom Farm, alt 37m 52.010585 -0.173443 "501097" # Bygrave, alt 97m 51.910886 -0.186462 "205392" # Sish Lane, alt 136m Or similar to the following if the ``name`` parameter is set to ``name``:: 52.066035 -0.281449 "Broom Farm" # 500936 alt 37m 52.010585 -0.173443 "Bygrave" # 501097 alt 97m 51.910886 -0.186462 "Sish Lane" # 205392 alt 136m Point objects should be provided in the following format:: {"Broom Farm": Point(52.066035, -0.281449), "Bygrave": Point(52.010585, -0.173443), "Sish Lane": Point(51.910886, -0.186462)} And generates output of the form:: 52.066035 -0.281449 "Broom Farm" 52.010585 -0.173443 "Bygrave" 51.910886 -0.186462 "Sish Lane" Note: xplanet_ also supports xearth marker files, and as such can use the output from this function. See also: upoints.xearth.Xearths.import_locations Args: markers (dict): Dictionary of identifier keys, with :class:`Trigpoint` values name (str): Value to use as Xearth display string Returns: list: List of strings representing an Xearth marker file Raises: ValueError: Unsupported value for ``name`` .. _xearth: http://hewgill.com/xearth/original/ .. _xplanet: http://xplanet.sourceforge.net/
[ "Generate", "an", "Xearth", "compatible", "marker", "file", "." ]
train
https://github.com/JNRowe/upoints/blob/1e4b7a53ed2a06cd854523d54c36aabdccea3830/upoints/utils.py#L1031-L1107
0.000346
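A usage sketch built from the docstring above; the Trigpoint stand-in is a namedtuple carrying only the attributes the function reads (the real class lives in upoints.trigpoints):

```python
from collections import namedtuple

from upoints.utils import dump_xearth_markers

Trigpoint = namedtuple("Trigpoint", "latitude longitude altitude name")

markers = {
    500936: Trigpoint(52.066035, -0.281449, 37.0, "Broom Farm"),
    501097: Trigpoint(52.010585, -0.173443, 97.0, "Bygrave"),
}
for line in dump_xearth_markers(markers):
    print(line)
# 52.066035 -0.281449 "500936" # Broom Farm, alt 37m
# 52.010585 -0.173443 "501097" # Bygrave, alt 97m
```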
PmagPy/PmagPy
pmagpy/pmag.py
doeqdi
def doeqdi(x, y, UP=False): """ Takes digitized x,y, data and returns the dec,inc, assuming an equal area projection Parameters __________________ x : array of digitized x from point on equal area projection y : array of igitized y from point on equal area projection UP : if True, is an upper hemisphere projection Output : dec : declination inc : inclination """ xp, yp = y, x # need to switch into geographic convention r = np.sqrt(xp**2+yp**2) z = 1.-r**2 t = np.arcsin(z) if UP == 1: t = -t p = np.arctan2(yp, xp) dec, inc = np.degrees(p) % 360, np.degrees(t) return dec, inc
python
def doeqdi(x, y, UP=False): """ Takes digitized x,y, data and returns the dec,inc, assuming an equal area projection Parameters __________________ x : array of digitized x from point on equal area projection y : array of igitized y from point on equal area projection UP : if True, is an upper hemisphere projection Output : dec : declination inc : inclination """ xp, yp = y, x # need to switch into geographic convention r = np.sqrt(xp**2+yp**2) z = 1.-r**2 t = np.arcsin(z) if UP == 1: t = -t p = np.arctan2(yp, xp) dec, inc = np.degrees(p) % 360, np.degrees(t) return dec, inc
[ "def", "doeqdi", "(", "x", ",", "y", ",", "UP", "=", "False", ")", ":", "xp", ",", "yp", "=", "y", ",", "x", "# need to switch into geographic convention", "r", "=", "np", ".", "sqrt", "(", "xp", "**", "2", "+", "yp", "**", "2", ")", "z", "=", "1.", "-", "r", "**", "2", "t", "=", "np", ".", "arcsin", "(", "z", ")", "if", "UP", "==", "1", ":", "t", "=", "-", "t", "p", "=", "np", ".", "arctan2", "(", "yp", ",", "xp", ")", "dec", ",", "inc", "=", "np", ".", "degrees", "(", "p", ")", "%", "360", ",", "np", ".", "degrees", "(", "t", ")", "return", "dec", ",", "inc" ]
Takes digitized x,y, data and returns the dec,inc, assuming an equal area projection Parameters __________________ x : array of digitized x from point on equal area projection y : array of igitized y from point on equal area projection UP : if True, is an upper hemisphere projection Output : dec : declination inc : inclination
[ "Takes", "digitized", "x", "y", "data", "and", "returns", "the", "dec", "inc", "assuming", "an", "equal", "area", "projection", "Parameters", "__________________", "x", ":", "array", "of", "digitized", "x", "from", "point", "on", "equal", "area", "projection", "y", ":", "array", "of", "igitized", "y", "from", "point", "on", "equal", "area", "projection", "UP", ":", "if", "True", "is", "an", "upper", "hemisphere", "projection", "Output", ":", "dec", ":", "declination", "inc", ":", "inclination" ]
train
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L10678-L10699
0.001449
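A quick standalone check of the arithmetic above with a hypothetical digitized point; numpy is the only dependency:

```python
import numpy as np

x, y = np.array([0.3]), np.array([0.4])   # made-up digitized coordinates

xp, yp = y, x                             # switch into geographic convention
r = np.sqrt(xp**2 + yp**2)                # radial distance on the net
inc = np.degrees(np.arcsin(1.0 - r**2))   # z = 1 - r**2, lower hemisphere
dec = np.degrees(np.arctan2(yp, xp)) % 360

print(dec, inc)                           # [36.87] [48.59] (approximately)
```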
GMadorell/abris
abris_transform/transformations/transformer.py
Transformer.prepare
def prepare(self, dataframe): """ Takes the already cleaned dataframe, splits it into train and test and returns the train and test as numpy arrays. If the problem is supervised, the target column will be that last one of the returned arrays. """ mapping = DataFrameMapCreator().get_mapping_from_config(self.__config) self.__mapper = DataFrameMapper(mapping) train, test = split_dataframe_train_test(dataframe, self.__config.get_option_parameter("split", "train_percentage")) return self.__get_correct_return_parameters(train, test)
python
def prepare(self, dataframe): """ Takes the already cleaned dataframe, splits it into train and test and returns the train and test as numpy arrays. If the problem is supervised, the target column will be that last one of the returned arrays. """ mapping = DataFrameMapCreator().get_mapping_from_config(self.__config) self.__mapper = DataFrameMapper(mapping) train, test = split_dataframe_train_test(dataframe, self.__config.get_option_parameter("split", "train_percentage")) return self.__get_correct_return_parameters(train, test)
[ "def", "prepare", "(", "self", ",", "dataframe", ")", ":", "mapping", "=", "DataFrameMapCreator", "(", ")", ".", "get_mapping_from_config", "(", "self", ".", "__config", ")", "self", ".", "__mapper", "=", "DataFrameMapper", "(", "mapping", ")", "train", ",", "test", "=", "split_dataframe_train_test", "(", "dataframe", ",", "self", ".", "__config", ".", "get_option_parameter", "(", "\"split\"", ",", "\"train_percentage\"", ")", ")", "return", "self", ".", "__get_correct_return_parameters", "(", "train", ",", "test", ")" ]
Takes the already cleaned dataframe, splits it into train and test and returns the train and test as numpy arrays. If the problem is supervised, the target column will be that last one of the returned arrays.
[ "Takes", "the", "already", "cleaned", "dataframe", "splits", "it", "into", "train", "and", "test", "and", "returns", "the", "train", "and", "test", "as", "numpy", "arrays", ".", "If", "the", "problem", "is", "supervised", "the", "target", "column", "will", "be", "that", "last", "one", "of", "the", "returned", "arrays", "." ]
train
https://github.com/GMadorell/abris/blob/0d8ab7ec506835a45fae6935d129f5d7e6937bb2/abris_transform/transformations/transformer.py#L24-L34
0.004902
HazyResearch/pdftotree
pdftotree/TreeVisualizer.py
TreeVisualizer.display_boxes
def display_boxes(self, tree, html_path, filename_prefix, alternate_colors=False): """ Displays each of the bounding boxes passed in 'boxes' on images of the pdf pointed to by pdf_file boxes is a list of 5-tuples (page, top, left, bottom, right) """ imgs = [] colors = { "section_header": Color("blue"), "figure": Color("green"), "figure_caption": Color("green"), "table_caption": Color("red"), "list": Color("yellow"), "paragraph": Color("gray"), "table": Color("red"), "header": Color("brown"), } for i, page_num in enumerate(tree.keys()): img = self.pdf_to_img(page_num) draw = Drawing() draw.fill_color = Color("rgba(0, 0, 0, 0.0)") for clust in tree[page_num]: for (pnum, pwidth, pheight, top, left, bottom, right) in tree[page_num][ clust ]: draw.stroke_color = colors[clust] draw.rectangle(left=left, top=top, right=right, bottom=bottom) draw.push() draw.font_size = 20 draw.font_weight = 10 draw.fill_color = colors[clust] if int(left) > 0 and int(top) > 0: draw.text(x=int(left), y=int(top), body=clust) draw.pop() draw(img) img.save(filename=html_path + filename_prefix + "_page_" + str(i) + ".png") imgs.append(img) return imgs
python
def display_boxes(self, tree, html_path, filename_prefix, alternate_colors=False): """ Displays each of the bounding boxes passed in 'boxes' on images of the pdf pointed to by pdf_file boxes is a list of 5-tuples (page, top, left, bottom, right) """ imgs = [] colors = { "section_header": Color("blue"), "figure": Color("green"), "figure_caption": Color("green"), "table_caption": Color("red"), "list": Color("yellow"), "paragraph": Color("gray"), "table": Color("red"), "header": Color("brown"), } for i, page_num in enumerate(tree.keys()): img = self.pdf_to_img(page_num) draw = Drawing() draw.fill_color = Color("rgba(0, 0, 0, 0.0)") for clust in tree[page_num]: for (pnum, pwidth, pheight, top, left, bottom, right) in tree[page_num][ clust ]: draw.stroke_color = colors[clust] draw.rectangle(left=left, top=top, right=right, bottom=bottom) draw.push() draw.font_size = 20 draw.font_weight = 10 draw.fill_color = colors[clust] if int(left) > 0 and int(top) > 0: draw.text(x=int(left), y=int(top), body=clust) draw.pop() draw(img) img.save(filename=html_path + filename_prefix + "_page_" + str(i) + ".png") imgs.append(img) return imgs
[ "def", "display_boxes", "(", "self", ",", "tree", ",", "html_path", ",", "filename_prefix", ",", "alternate_colors", "=", "False", ")", ":", "imgs", "=", "[", "]", "colors", "=", "{", "\"section_header\"", ":", "Color", "(", "\"blue\"", ")", ",", "\"figure\"", ":", "Color", "(", "\"green\"", ")", ",", "\"figure_caption\"", ":", "Color", "(", "\"green\"", ")", ",", "\"table_caption\"", ":", "Color", "(", "\"red\"", ")", ",", "\"list\"", ":", "Color", "(", "\"yellow\"", ")", ",", "\"paragraph\"", ":", "Color", "(", "\"gray\"", ")", ",", "\"table\"", ":", "Color", "(", "\"red\"", ")", ",", "\"header\"", ":", "Color", "(", "\"brown\"", ")", ",", "}", "for", "i", ",", "page_num", "in", "enumerate", "(", "tree", ".", "keys", "(", ")", ")", ":", "img", "=", "self", ".", "pdf_to_img", "(", "page_num", ")", "draw", "=", "Drawing", "(", ")", "draw", ".", "fill_color", "=", "Color", "(", "\"rgba(0, 0, 0, 0.0)\"", ")", "for", "clust", "in", "tree", "[", "page_num", "]", ":", "for", "(", "pnum", ",", "pwidth", ",", "pheight", ",", "top", ",", "left", ",", "bottom", ",", "right", ")", "in", "tree", "[", "page_num", "]", "[", "clust", "]", ":", "draw", ".", "stroke_color", "=", "colors", "[", "clust", "]", "draw", ".", "rectangle", "(", "left", "=", "left", ",", "top", "=", "top", ",", "right", "=", "right", ",", "bottom", "=", "bottom", ")", "draw", ".", "push", "(", ")", "draw", ".", "font_size", "=", "20", "draw", ".", "font_weight", "=", "10", "draw", ".", "fill_color", "=", "colors", "[", "clust", "]", "if", "int", "(", "left", ")", ">", "0", "and", "int", "(", "top", ")", ">", "0", ":", "draw", ".", "text", "(", "x", "=", "int", "(", "left", ")", ",", "y", "=", "int", "(", "top", ")", ",", "body", "=", "clust", ")", "draw", ".", "pop", "(", ")", "draw", "(", "img", ")", "img", ".", "save", "(", "filename", "=", "html_path", "+", "filename_prefix", "+", "\"_page_\"", "+", "str", "(", "i", ")", "+", "\".png\"", ")", "imgs", ".", "append", "(", "img", ")", "return", "imgs" ]
Displays each of the bounding boxes passed in 'boxes' on images of the pdf pointed to by pdf_file boxes is a list of 5-tuples (page, top, left, bottom, right)
[ "Displays", "each", "of", "the", "bounding", "boxes", "passed", "in", "boxes", "on", "images", "of", "the", "pdf", "pointed", "to", "by", "pdf_file", "boxes", "is", "a", "list", "of", "5", "-", "tuples", "(", "page", "top", "left", "bottom", "right", ")" ]
train
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/TreeVisualizer.py#L22-L59
0.004284
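For reference, a minimal Wand sketch of drawing one labelled box the way the loop above does; it assumes the wand package and ImageMagick are installed, and a blank canvas stands in for the rasterised PDF page:

```python
from wand.color import Color
from wand.drawing import Drawing
from wand.image import Image

# Draw one labelled bounding box on a blank canvas -- a stand-in for the
# per-page loop above, which rasterises a PDF page instead.
with Image(width=400, height=300, background=Color("white")) as img:
    with Drawing() as draw:
        draw.fill_color = Color("rgba(0, 0, 0, 0.0)")    # transparent fill
        draw.stroke_color = Color("red")                  # e.g. the "table" colour
        draw.rectangle(left=50, top=60, right=350, bottom=200)
        draw.fill_color = Color("red")
        draw.font_size = 20
        draw.text(x=50, y=55, body="table")
        draw(img)
    img.save(filename="boxes_page_0.png")
```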
ifduyue/urlfetch
urlfetch.py
request
def request(url, method="GET", params=None, data=None, headers={}, timeout=None, files={}, randua=False, auth=None, length_limit=None, proxies=None, trust_env=True, max_redirects=0, source_address=None, **kwargs): """request an URL :arg string url: URL to be fetched. :arg string method: (optional) HTTP method, one of ``GET``, ``DELETE``, ``HEAD``, ``OPTIONS``, ``PUT``, ``POST``, ``TRACE``, ``PATCH``. ``GET`` is the default. :arg dict/string params: (optional) Dict or string to attach to url as querystring. :arg dict headers: (optional) HTTP request headers. :arg float timeout: (optional) Timeout in seconds :arg files: (optional) Files to be sended :arg randua: (optional) If ``True`` or ``path string``, use a random user-agent in headers, instead of ``'urlfetch/' + __version__`` :arg tuple auth: (optional) (username, password) for basic authentication :arg int length_limit: (optional) If ``None``, no limits on content length, if the limit reached raised exception 'Content length is more than ...' :arg dict proxies: (optional) HTTP proxy, like {'http': '127.0.0.1:8888', 'https': '127.0.0.1:563'} :arg bool trust_env: (optional) If ``True``, urlfetch will get infomations from env, such as HTTP_PROXY, HTTPS_PROXY :arg int max_redirects: (integer, optional) Max redirects allowed within a request. Default is 0, which means redirects are not allowed. :arg tuple source_address: (optional) A tuple of (host, port) to specify the source_address to bind to. This argument is ignored if you're using Python prior to 2.7/3.2. :returns: A :class:`~urlfetch.Response` object :raises: :class:`URLError`, :class:`UrlfetchException`, :class:`TooManyRedirects`, """ def make_connection(conn_type, host, port, timeout, source_address): """Return HTTP or HTTPS connection.""" if support_source_address: kwargs = {'timeout': timeout, 'source_address': source_address} else: kwargs = {'timeout': timeout} if source_address is not None: raise UrlfetchException('source_address requires' 'Python 2.7/3.2 or newer versions') if conn_type == 'http': conn = HTTPConnection(host, port, **kwargs) elif conn_type == 'https': conn = HTTPSConnection(host, port, **kwargs) else: raise URLError('Unknown Connection Type: %s' % conn_type) return conn via_proxy = False method = method.upper() if method not in ALLOWED_METHODS: raise UrlfetchException("Method should be one of " + ", ".join(ALLOWED_METHODS)) if params: if isinstance(params, dict): url = url_concat(url, params) elif isinstance(params, basestring): if url[-1] not in ('?', '&'): url += '&' if ('?' in url) else '?' 
url += params parsed_url = parse_url(url) reqheaders = { 'Accept': '*/*', 'Accept-Encoding': 'gzip, deflate, compress, identity, *', 'User-Agent': random_useragent(randua), 'Host': parsed_url['http_host'] } # Proxy support scheme = parsed_url['scheme'] if proxies is None and trust_env: proxies = PROXIES ignore_hosts = PROXY_IGNORE_HOSTS if trust_env: no_proxy = os.getenv('no_proxy') or os.getenv('NO_PROXY') if no_proxy: ignore_hosts = no_proxy.split(",") proxy = proxies.get(scheme) if proxy and not any(match_no_proxy(parsed_url['host'], host) for host in ignore_hosts): via_proxy = True if '://' not in proxy: proxy = '%s://%s' % (scheme, proxy) parsed_proxy = parse_url(proxy) # Proxy-Authorization if parsed_proxy['username'] and parsed_proxy['password']: proxyauth = '%s:%s' % (parsed_proxy['username'], parsed_proxy['password']) proxyauth = base64.b64encode(proxyauth.encode('utf-8')) reqheaders['Proxy-Authorization'] = 'Basic ' + \ proxyauth.decode('utf-8') conn = make_connection(scheme, parsed_proxy['host'], parsed_proxy['port'], timeout, source_address) else: conn = make_connection(scheme, parsed_url['host'], parsed_url['port'], timeout, source_address) if not auth and parsed_url['username'] and parsed_url['password']: auth = (parsed_url['username'], parsed_url['password']) if auth: if isinstance(auth, (list, tuple)): auth = '%s:%s' % tuple(auth) auth = base64.b64encode(auth.encode('utf-8')) reqheaders['Authorization'] = 'Basic ' + auth.decode('utf-8') if files: content_type, data = encode_multipart(data, files) reqheaders['Content-Type'] = content_type elif isinstance(data, dict): data = urlencode(data, 1) if isinstance(data, basestring) and not files: # httplib will set 'Content-Length', also you can set it by yourself reqheaders["Content-Type"] = "application/x-www-form-urlencoded" # what if the method is GET, HEAD or DELETE # just do not make so much decisions for users reqheaders.update(headers) start_time = time.time() try: request_url = url if via_proxy else parsed_url['uri'] conn.request(method, request_url, data, reqheaders) resp = conn.getresponse() except socket.timeout as e: raise Timeout(e) except Exception as e: raise UrlfetchException(e) end_time = time.time() total_time = end_time - start_time history = [] response = Response.from_httplib(resp, reqheaders=reqheaders, length_limit=length_limit, history=history[:], url=url, total_time=total_time, start_time=start_time) while (response.status in (301, 302, 303, 307) and 'location' in response.headers and max_redirects): response.body, response.close(), history.append(response) if len(history) > max_redirects: raise TooManyRedirects('max_redirects exceeded') method = method if response.status == 307 else 'GET' location = response.headers['location'] if location[:2] == '//': url = parsed_url['scheme'] + ':' + location else: url = urlparse.urljoin(url, location) parsed_url = parse_url(url) reqheaders['Host'] = parsed_url['http_host'] reqheaders['Referer'] = response.url # Proxy scheme = parsed_url['scheme'] proxy = proxies.get(scheme) if proxy and parsed_url['host'] not in PROXY_IGNORE_HOSTS: via_proxy = True if '://' not in proxy: proxy = '%s://%s' % (parsed_url['scheme'], proxy) parsed_proxy = parse_url(proxy) # Proxy-Authorization if parsed_proxy['username'] and parsed_proxy['password']: proxyauth = '%s:%s' % (parsed_proxy['username'], parsed_proxy['username']) proxyauth = base64.b64encode(proxyauth.encode('utf-8')) reqheaders['Proxy-Authorization'] = 'Basic ' + \ proxyauth.decode('utf-8') conn = make_connection(scheme, 
parsed_proxy['host'], parsed_proxy['port'], timeout, source_address) else: via_proxy = False reqheaders.pop('Proxy-Authorization', None) conn = make_connection(scheme, parsed_url['host'], parsed_url['port'], timeout, source_address) try: request_url = url if via_proxy else parsed_url['uri'] conn.request(method, request_url, data, reqheaders) resp = conn.getresponse() except socket.timeout as e: raise Timeout(e) except Exception as e: raise UrlfetchException(e) response = Response.from_httplib(resp, reqheaders=reqheaders, length_limit=length_limit, history=history[:], url=url, total_time=total_time, start_time=start_time) return response
python
def request(url, method="GET", params=None, data=None, headers={}, timeout=None, files={}, randua=False, auth=None, length_limit=None, proxies=None, trust_env=True, max_redirects=0, source_address=None, **kwargs): """request an URL :arg string url: URL to be fetched. :arg string method: (optional) HTTP method, one of ``GET``, ``DELETE``, ``HEAD``, ``OPTIONS``, ``PUT``, ``POST``, ``TRACE``, ``PATCH``. ``GET`` is the default. :arg dict/string params: (optional) Dict or string to attach to url as querystring. :arg dict headers: (optional) HTTP request headers. :arg float timeout: (optional) Timeout in seconds :arg files: (optional) Files to be sended :arg randua: (optional) If ``True`` or ``path string``, use a random user-agent in headers, instead of ``'urlfetch/' + __version__`` :arg tuple auth: (optional) (username, password) for basic authentication :arg int length_limit: (optional) If ``None``, no limits on content length, if the limit reached raised exception 'Content length is more than ...' :arg dict proxies: (optional) HTTP proxy, like {'http': '127.0.0.1:8888', 'https': '127.0.0.1:563'} :arg bool trust_env: (optional) If ``True``, urlfetch will get infomations from env, such as HTTP_PROXY, HTTPS_PROXY :arg int max_redirects: (integer, optional) Max redirects allowed within a request. Default is 0, which means redirects are not allowed. :arg tuple source_address: (optional) A tuple of (host, port) to specify the source_address to bind to. This argument is ignored if you're using Python prior to 2.7/3.2. :returns: A :class:`~urlfetch.Response` object :raises: :class:`URLError`, :class:`UrlfetchException`, :class:`TooManyRedirects`, """ def make_connection(conn_type, host, port, timeout, source_address): """Return HTTP or HTTPS connection.""" if support_source_address: kwargs = {'timeout': timeout, 'source_address': source_address} else: kwargs = {'timeout': timeout} if source_address is not None: raise UrlfetchException('source_address requires' 'Python 2.7/3.2 or newer versions') if conn_type == 'http': conn = HTTPConnection(host, port, **kwargs) elif conn_type == 'https': conn = HTTPSConnection(host, port, **kwargs) else: raise URLError('Unknown Connection Type: %s' % conn_type) return conn via_proxy = False method = method.upper() if method not in ALLOWED_METHODS: raise UrlfetchException("Method should be one of " + ", ".join(ALLOWED_METHODS)) if params: if isinstance(params, dict): url = url_concat(url, params) elif isinstance(params, basestring): if url[-1] not in ('?', '&'): url += '&' if ('?' in url) else '?' 
url += params parsed_url = parse_url(url) reqheaders = { 'Accept': '*/*', 'Accept-Encoding': 'gzip, deflate, compress, identity, *', 'User-Agent': random_useragent(randua), 'Host': parsed_url['http_host'] } # Proxy support scheme = parsed_url['scheme'] if proxies is None and trust_env: proxies = PROXIES ignore_hosts = PROXY_IGNORE_HOSTS if trust_env: no_proxy = os.getenv('no_proxy') or os.getenv('NO_PROXY') if no_proxy: ignore_hosts = no_proxy.split(",") proxy = proxies.get(scheme) if proxy and not any(match_no_proxy(parsed_url['host'], host) for host in ignore_hosts): via_proxy = True if '://' not in proxy: proxy = '%s://%s' % (scheme, proxy) parsed_proxy = parse_url(proxy) # Proxy-Authorization if parsed_proxy['username'] and parsed_proxy['password']: proxyauth = '%s:%s' % (parsed_proxy['username'], parsed_proxy['password']) proxyauth = base64.b64encode(proxyauth.encode('utf-8')) reqheaders['Proxy-Authorization'] = 'Basic ' + \ proxyauth.decode('utf-8') conn = make_connection(scheme, parsed_proxy['host'], parsed_proxy['port'], timeout, source_address) else: conn = make_connection(scheme, parsed_url['host'], parsed_url['port'], timeout, source_address) if not auth and parsed_url['username'] and parsed_url['password']: auth = (parsed_url['username'], parsed_url['password']) if auth: if isinstance(auth, (list, tuple)): auth = '%s:%s' % tuple(auth) auth = base64.b64encode(auth.encode('utf-8')) reqheaders['Authorization'] = 'Basic ' + auth.decode('utf-8') if files: content_type, data = encode_multipart(data, files) reqheaders['Content-Type'] = content_type elif isinstance(data, dict): data = urlencode(data, 1) if isinstance(data, basestring) and not files: # httplib will set 'Content-Length', also you can set it by yourself reqheaders["Content-Type"] = "application/x-www-form-urlencoded" # what if the method is GET, HEAD or DELETE # just do not make so much decisions for users reqheaders.update(headers) start_time = time.time() try: request_url = url if via_proxy else parsed_url['uri'] conn.request(method, request_url, data, reqheaders) resp = conn.getresponse() except socket.timeout as e: raise Timeout(e) except Exception as e: raise UrlfetchException(e) end_time = time.time() total_time = end_time - start_time history = [] response = Response.from_httplib(resp, reqheaders=reqheaders, length_limit=length_limit, history=history[:], url=url, total_time=total_time, start_time=start_time) while (response.status in (301, 302, 303, 307) and 'location' in response.headers and max_redirects): response.body, response.close(), history.append(response) if len(history) > max_redirects: raise TooManyRedirects('max_redirects exceeded') method = method if response.status == 307 else 'GET' location = response.headers['location'] if location[:2] == '//': url = parsed_url['scheme'] + ':' + location else: url = urlparse.urljoin(url, location) parsed_url = parse_url(url) reqheaders['Host'] = parsed_url['http_host'] reqheaders['Referer'] = response.url # Proxy scheme = parsed_url['scheme'] proxy = proxies.get(scheme) if proxy and parsed_url['host'] not in PROXY_IGNORE_HOSTS: via_proxy = True if '://' not in proxy: proxy = '%s://%s' % (parsed_url['scheme'], proxy) parsed_proxy = parse_url(proxy) # Proxy-Authorization if parsed_proxy['username'] and parsed_proxy['password']: proxyauth = '%s:%s' % (parsed_proxy['username'], parsed_proxy['username']) proxyauth = base64.b64encode(proxyauth.encode('utf-8')) reqheaders['Proxy-Authorization'] = 'Basic ' + \ proxyauth.decode('utf-8') conn = make_connection(scheme, 
parsed_proxy['host'], parsed_proxy['port'], timeout, source_address) else: via_proxy = False reqheaders.pop('Proxy-Authorization', None) conn = make_connection(scheme, parsed_url['host'], parsed_url['port'], timeout, source_address) try: request_url = url if via_proxy else parsed_url['uri'] conn.request(method, request_url, data, reqheaders) resp = conn.getresponse() except socket.timeout as e: raise Timeout(e) except Exception as e: raise UrlfetchException(e) response = Response.from_httplib(resp, reqheaders=reqheaders, length_limit=length_limit, history=history[:], url=url, total_time=total_time, start_time=start_time) return response
[ "def", "request", "(", "url", ",", "method", "=", "\"GET\"", ",", "params", "=", "None", ",", "data", "=", "None", ",", "headers", "=", "{", "}", ",", "timeout", "=", "None", ",", "files", "=", "{", "}", ",", "randua", "=", "False", ",", "auth", "=", "None", ",", "length_limit", "=", "None", ",", "proxies", "=", "None", ",", "trust_env", "=", "True", ",", "max_redirects", "=", "0", ",", "source_address", "=", "None", ",", "*", "*", "kwargs", ")", ":", "def", "make_connection", "(", "conn_type", ",", "host", ",", "port", ",", "timeout", ",", "source_address", ")", ":", "\"\"\"Return HTTP or HTTPS connection.\"\"\"", "if", "support_source_address", ":", "kwargs", "=", "{", "'timeout'", ":", "timeout", ",", "'source_address'", ":", "source_address", "}", "else", ":", "kwargs", "=", "{", "'timeout'", ":", "timeout", "}", "if", "source_address", "is", "not", "None", ":", "raise", "UrlfetchException", "(", "'source_address requires'", "'Python 2.7/3.2 or newer versions'", ")", "if", "conn_type", "==", "'http'", ":", "conn", "=", "HTTPConnection", "(", "host", ",", "port", ",", "*", "*", "kwargs", ")", "elif", "conn_type", "==", "'https'", ":", "conn", "=", "HTTPSConnection", "(", "host", ",", "port", ",", "*", "*", "kwargs", ")", "else", ":", "raise", "URLError", "(", "'Unknown Connection Type: %s'", "%", "conn_type", ")", "return", "conn", "via_proxy", "=", "False", "method", "=", "method", ".", "upper", "(", ")", "if", "method", "not", "in", "ALLOWED_METHODS", ":", "raise", "UrlfetchException", "(", "\"Method should be one of \"", "+", "\", \"", ".", "join", "(", "ALLOWED_METHODS", ")", ")", "if", "params", ":", "if", "isinstance", "(", "params", ",", "dict", ")", ":", "url", "=", "url_concat", "(", "url", ",", "params", ")", "elif", "isinstance", "(", "params", ",", "basestring", ")", ":", "if", "url", "[", "-", "1", "]", "not", "in", "(", "'?'", ",", "'&'", ")", ":", "url", "+=", "'&'", "if", "(", "'?'", "in", "url", ")", "else", "'?'", "url", "+=", "params", "parsed_url", "=", "parse_url", "(", "url", ")", "reqheaders", "=", "{", "'Accept'", ":", "'*/*'", ",", "'Accept-Encoding'", ":", "'gzip, deflate, compress, identity, *'", ",", "'User-Agent'", ":", "random_useragent", "(", "randua", ")", ",", "'Host'", ":", "parsed_url", "[", "'http_host'", "]", "}", "# Proxy support", "scheme", "=", "parsed_url", "[", "'scheme'", "]", "if", "proxies", "is", "None", "and", "trust_env", ":", "proxies", "=", "PROXIES", "ignore_hosts", "=", "PROXY_IGNORE_HOSTS", "if", "trust_env", ":", "no_proxy", "=", "os", ".", "getenv", "(", "'no_proxy'", ")", "or", "os", ".", "getenv", "(", "'NO_PROXY'", ")", "if", "no_proxy", ":", "ignore_hosts", "=", "no_proxy", ".", "split", "(", "\",\"", ")", "proxy", "=", "proxies", ".", "get", "(", "scheme", ")", "if", "proxy", "and", "not", "any", "(", "match_no_proxy", "(", "parsed_url", "[", "'host'", "]", ",", "host", ")", "for", "host", "in", "ignore_hosts", ")", ":", "via_proxy", "=", "True", "if", "'://'", "not", "in", "proxy", ":", "proxy", "=", "'%s://%s'", "%", "(", "scheme", ",", "proxy", ")", "parsed_proxy", "=", "parse_url", "(", "proxy", ")", "# Proxy-Authorization", "if", "parsed_proxy", "[", "'username'", "]", "and", "parsed_proxy", "[", "'password'", "]", ":", "proxyauth", "=", "'%s:%s'", "%", "(", "parsed_proxy", "[", "'username'", "]", ",", "parsed_proxy", "[", "'password'", "]", ")", "proxyauth", "=", "base64", ".", "b64encode", "(", "proxyauth", ".", "encode", "(", "'utf-8'", ")", ")", "reqheaders", "[", "'Proxy-Authorization'", "]", "=", "'Basic '", "+", 
"proxyauth", ".", "decode", "(", "'utf-8'", ")", "conn", "=", "make_connection", "(", "scheme", ",", "parsed_proxy", "[", "'host'", "]", ",", "parsed_proxy", "[", "'port'", "]", ",", "timeout", ",", "source_address", ")", "else", ":", "conn", "=", "make_connection", "(", "scheme", ",", "parsed_url", "[", "'host'", "]", ",", "parsed_url", "[", "'port'", "]", ",", "timeout", ",", "source_address", ")", "if", "not", "auth", "and", "parsed_url", "[", "'username'", "]", "and", "parsed_url", "[", "'password'", "]", ":", "auth", "=", "(", "parsed_url", "[", "'username'", "]", ",", "parsed_url", "[", "'password'", "]", ")", "if", "auth", ":", "if", "isinstance", "(", "auth", ",", "(", "list", ",", "tuple", ")", ")", ":", "auth", "=", "'%s:%s'", "%", "tuple", "(", "auth", ")", "auth", "=", "base64", ".", "b64encode", "(", "auth", ".", "encode", "(", "'utf-8'", ")", ")", "reqheaders", "[", "'Authorization'", "]", "=", "'Basic '", "+", "auth", ".", "decode", "(", "'utf-8'", ")", "if", "files", ":", "content_type", ",", "data", "=", "encode_multipart", "(", "data", ",", "files", ")", "reqheaders", "[", "'Content-Type'", "]", "=", "content_type", "elif", "isinstance", "(", "data", ",", "dict", ")", ":", "data", "=", "urlencode", "(", "data", ",", "1", ")", "if", "isinstance", "(", "data", ",", "basestring", ")", "and", "not", "files", ":", "# httplib will set 'Content-Length', also you can set it by yourself", "reqheaders", "[", "\"Content-Type\"", "]", "=", "\"application/x-www-form-urlencoded\"", "# what if the method is GET, HEAD or DELETE", "# just do not make so much decisions for users", "reqheaders", ".", "update", "(", "headers", ")", "start_time", "=", "time", ".", "time", "(", ")", "try", ":", "request_url", "=", "url", "if", "via_proxy", "else", "parsed_url", "[", "'uri'", "]", "conn", ".", "request", "(", "method", ",", "request_url", ",", "data", ",", "reqheaders", ")", "resp", "=", "conn", ".", "getresponse", "(", ")", "except", "socket", ".", "timeout", "as", "e", ":", "raise", "Timeout", "(", "e", ")", "except", "Exception", "as", "e", ":", "raise", "UrlfetchException", "(", "e", ")", "end_time", "=", "time", ".", "time", "(", ")", "total_time", "=", "end_time", "-", "start_time", "history", "=", "[", "]", "response", "=", "Response", ".", "from_httplib", "(", "resp", ",", "reqheaders", "=", "reqheaders", ",", "length_limit", "=", "length_limit", ",", "history", "=", "history", "[", ":", "]", ",", "url", "=", "url", ",", "total_time", "=", "total_time", ",", "start_time", "=", "start_time", ")", "while", "(", "response", ".", "status", "in", "(", "301", ",", "302", ",", "303", ",", "307", ")", "and", "'location'", "in", "response", ".", "headers", "and", "max_redirects", ")", ":", "response", ".", "body", ",", "response", ".", "close", "(", ")", ",", "history", ".", "append", "(", "response", ")", "if", "len", "(", "history", ")", ">", "max_redirects", ":", "raise", "TooManyRedirects", "(", "'max_redirects exceeded'", ")", "method", "=", "method", "if", "response", ".", "status", "==", "307", "else", "'GET'", "location", "=", "response", ".", "headers", "[", "'location'", "]", "if", "location", "[", ":", "2", "]", "==", "'//'", ":", "url", "=", "parsed_url", "[", "'scheme'", "]", "+", "':'", "+", "location", "else", ":", "url", "=", "urlparse", ".", "urljoin", "(", "url", ",", "location", ")", "parsed_url", "=", "parse_url", "(", "url", ")", "reqheaders", "[", "'Host'", "]", "=", "parsed_url", "[", "'http_host'", "]", "reqheaders", "[", "'Referer'", "]", "=", "response", ".", "url", "# 
Proxy", "scheme", "=", "parsed_url", "[", "'scheme'", "]", "proxy", "=", "proxies", ".", "get", "(", "scheme", ")", "if", "proxy", "and", "parsed_url", "[", "'host'", "]", "not", "in", "PROXY_IGNORE_HOSTS", ":", "via_proxy", "=", "True", "if", "'://'", "not", "in", "proxy", ":", "proxy", "=", "'%s://%s'", "%", "(", "parsed_url", "[", "'scheme'", "]", ",", "proxy", ")", "parsed_proxy", "=", "parse_url", "(", "proxy", ")", "# Proxy-Authorization", "if", "parsed_proxy", "[", "'username'", "]", "and", "parsed_proxy", "[", "'password'", "]", ":", "proxyauth", "=", "'%s:%s'", "%", "(", "parsed_proxy", "[", "'username'", "]", ",", "parsed_proxy", "[", "'username'", "]", ")", "proxyauth", "=", "base64", ".", "b64encode", "(", "proxyauth", ".", "encode", "(", "'utf-8'", ")", ")", "reqheaders", "[", "'Proxy-Authorization'", "]", "=", "'Basic '", "+", "proxyauth", ".", "decode", "(", "'utf-8'", ")", "conn", "=", "make_connection", "(", "scheme", ",", "parsed_proxy", "[", "'host'", "]", ",", "parsed_proxy", "[", "'port'", "]", ",", "timeout", ",", "source_address", ")", "else", ":", "via_proxy", "=", "False", "reqheaders", ".", "pop", "(", "'Proxy-Authorization'", ",", "None", ")", "conn", "=", "make_connection", "(", "scheme", ",", "parsed_url", "[", "'host'", "]", ",", "parsed_url", "[", "'port'", "]", ",", "timeout", ",", "source_address", ")", "try", ":", "request_url", "=", "url", "if", "via_proxy", "else", "parsed_url", "[", "'uri'", "]", "conn", ".", "request", "(", "method", ",", "request_url", ",", "data", ",", "reqheaders", ")", "resp", "=", "conn", ".", "getresponse", "(", ")", "except", "socket", ".", "timeout", "as", "e", ":", "raise", "Timeout", "(", "e", ")", "except", "Exception", "as", "e", ":", "raise", "UrlfetchException", "(", "e", ")", "response", "=", "Response", ".", "from_httplib", "(", "resp", ",", "reqheaders", "=", "reqheaders", ",", "length_limit", "=", "length_limit", ",", "history", "=", "history", "[", ":", "]", ",", "url", "=", "url", ",", "total_time", "=", "total_time", ",", "start_time", "=", "start_time", ")", "return", "response" ]
request an URL :arg string url: URL to be fetched. :arg string method: (optional) HTTP method, one of ``GET``, ``DELETE``, ``HEAD``, ``OPTIONS``, ``PUT``, ``POST``, ``TRACE``, ``PATCH``. ``GET`` is the default. :arg dict/string params: (optional) Dict or string to attach to url as querystring. :arg dict headers: (optional) HTTP request headers. :arg float timeout: (optional) Timeout in seconds. :arg files: (optional) Files to be sent. :arg randua: (optional) If ``True`` or ``path string``, use a random user-agent in headers, instead of ``'urlfetch/' + __version__`` :arg tuple auth: (optional) (username, password) for basic authentication :arg int length_limit: (optional) If ``None``, no limits on content length; if the limit is reached, the exception 'Content length is more than ...' is raised. :arg dict proxies: (optional) HTTP proxy, like {'http': '127.0.0.1:8888', 'https': '127.0.0.1:563'} :arg bool trust_env: (optional) If ``True``, urlfetch will get information from env, such as HTTP_PROXY, HTTPS_PROXY :arg int max_redirects: (integer, optional) Max redirects allowed within a request. Default is 0, which means redirects are not allowed. :arg tuple source_address: (optional) A tuple of (host, port) to specify the source_address to bind to. This argument is ignored if you're using Python prior to 2.7/3.2. :returns: A :class:`~urlfetch.Response` object :raises: :class:`URLError`, :class:`UrlfetchException`, :class:`TooManyRedirects`,
[ "request", "an", "URL" ]
train
https://github.com/ifduyue/urlfetch/blob/e0ea4673367c157eb832ba4ba2635306c81a61be/urlfetch.py#L558-L763
0.00022
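A minimal usage sketch for the request() function in this record, using only parameters and Response attributes named in the docstring and code above; the URL and credentials are placeholders:

import urlfetch

# GET with a querystring, a custom header, basic auth and a 5-second timeout;
# follow up to three redirects (the default of 0 disallows redirects).
response = urlfetch.request(
    'http://httpbin.org/get',        # placeholder URL
    method='GET',
    params={'q': 'hello'},
    headers={'X-Example': '1'},
    auth=('user', 'passwd'),
    timeout=5,
    max_redirects=3,
)
print(response.status)      # HTTP status code
print(response.headers)     # response headers
print(response.body[:200])  # first bytes of the response body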
libtcod/python-tcod
tcod/libtcodpy.py
console_save_apf
def console_save_apf(con: tcod.console.Console, filename: str) -> bool: """Save a console to an ASCII Paint `.apf` file.""" return bool( lib.TCOD_console_save_apf(_console(con), filename.encode("utf-8")) )
python
def console_save_apf(con: tcod.console.Console, filename: str) -> bool: """Save a console to an ASCII Paint `.apf` file.""" return bool( lib.TCOD_console_save_apf(_console(con), filename.encode("utf-8")) )
[ "def", "console_save_apf", "(", "con", ":", "tcod", ".", "console", ".", "Console", ",", "filename", ":", "str", ")", "->", "bool", ":", "return", "bool", "(", "lib", ".", "TCOD_console_save_apf", "(", "_console", "(", "con", ")", ",", "filename", ".", "encode", "(", "\"utf-8\"", ")", ")", ")" ]
Save a console to an ASCII Paint `.apf` file.
[ "Save", "a", "console", "to", "an", "ASCII", "Paint", ".", "apf", "file", "." ]
train
https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tcod/libtcodpy.py#L1902-L1906
0.004444
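A short sketch of calling console_save_apf; the tcod.console.Console constructor signature is an assumption based on recent python-tcod releases, and the output filename is a placeholder:

import tcod.console
import tcod.libtcodpy

# Build an empty 80x50 console and write it out as an ASCII Paint file.
console = tcod.console.Console(80, 50)   # assumed (width, height) constructor
ok = tcod.libtcodpy.console_save_apf(console, "screen.apf")
print("saved:", ok)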
dmlc/gluon-nlp
src/gluonnlp/model/elmo.py
elmo_2x1024_128_2048cnn_1xhighway
def elmo_2x1024_128_2048cnn_1xhighway(dataset_name=None, pretrained=False, ctx=mx.cpu(), root=os.path.join(get_home_dir(), 'models'), **kwargs): r"""ELMo 2-layer BiLSTM with 1024 hidden units, 128 projection size, 1 highway layer. Parameters ---------- dataset_name : str or None, default None The dataset name on which the pre-trained model is trained. Options are 'gbw'. pretrained : bool, default False Whether to load the pre-trained weights for model. ctx : Context, default CPU The context in which to load the pre-trained weights. root : str, default '$MXNET_HOME/models' Location for keeping the model parameters. MXNET_HOME defaults to '~/.mxnet'. Returns ------- gluon.Block """ predefined_args = {'rnn_type': 'lstmpc', 'output_size': 128, 'filters': [[1, 32], [2, 32], [3, 64], [4, 128], [5, 256], [6, 512], [7, 1024]], 'char_embed_size': 16, 'num_highway': 1, 'conv_layer_activation': 'relu', 'max_chars_per_token': 50, 'input_size': 128, 'hidden_size': 1024, 'proj_size': 128, 'num_layers': 2, 'cell_clip': 3, 'proj_clip': 3, 'skip_connection': True} assert all((k not in kwargs) for k in predefined_args), \ 'Cannot override predefined model settings.' predefined_args.update(kwargs) return _get_elmo_model(ELMoBiLM, 'elmo_2x1024_128_2048cnn_1xhighway', dataset_name, pretrained, ctx, root, **predefined_args)
python
def elmo_2x1024_128_2048cnn_1xhighway(dataset_name=None, pretrained=False, ctx=mx.cpu(), root=os.path.join(get_home_dir(), 'models'), **kwargs): r"""ELMo 2-layer BiLSTM with 1024 hidden units, 128 projection size, 1 highway layer. Parameters ---------- dataset_name : str or None, default None The dataset name on which the pre-trained model is trained. Options are 'gbw'. pretrained : bool, default False Whether to load the pre-trained weights for model. ctx : Context, default CPU The context in which to load the pre-trained weights. root : str, default '$MXNET_HOME/models' Location for keeping the model parameters. MXNET_HOME defaults to '~/.mxnet'. Returns ------- gluon.Block """ predefined_args = {'rnn_type': 'lstmpc', 'output_size': 128, 'filters': [[1, 32], [2, 32], [3, 64], [4, 128], [5, 256], [6, 512], [7, 1024]], 'char_embed_size': 16, 'num_highway': 1, 'conv_layer_activation': 'relu', 'max_chars_per_token': 50, 'input_size': 128, 'hidden_size': 1024, 'proj_size': 128, 'num_layers': 2, 'cell_clip': 3, 'proj_clip': 3, 'skip_connection': True} assert all((k not in kwargs) for k in predefined_args), \ 'Cannot override predefined model settings.' predefined_args.update(kwargs) return _get_elmo_model(ELMoBiLM, 'elmo_2x1024_128_2048cnn_1xhighway', dataset_name, pretrained, ctx, root, **predefined_args)
[ "def", "elmo_2x1024_128_2048cnn_1xhighway", "(", "dataset_name", "=", "None", ",", "pretrained", "=", "False", ",", "ctx", "=", "mx", ".", "cpu", "(", ")", ",", "root", "=", "os", ".", "path", ".", "join", "(", "get_home_dir", "(", ")", ",", "'models'", ")", ",", "*", "*", "kwargs", ")", ":", "predefined_args", "=", "{", "'rnn_type'", ":", "'lstmpc'", ",", "'output_size'", ":", "128", ",", "'filters'", ":", "[", "[", "1", ",", "32", "]", ",", "[", "2", ",", "32", "]", ",", "[", "3", ",", "64", "]", ",", "[", "4", ",", "128", "]", ",", "[", "5", ",", "256", "]", ",", "[", "6", ",", "512", "]", ",", "[", "7", ",", "1024", "]", "]", ",", "'char_embed_size'", ":", "16", ",", "'num_highway'", ":", "1", ",", "'conv_layer_activation'", ":", "'relu'", ",", "'max_chars_per_token'", ":", "50", ",", "'input_size'", ":", "128", ",", "'hidden_size'", ":", "1024", ",", "'proj_size'", ":", "128", ",", "'num_layers'", ":", "2", ",", "'cell_clip'", ":", "3", ",", "'proj_clip'", ":", "3", ",", "'skip_connection'", ":", "True", "}", "assert", "all", "(", "(", "k", "not", "in", "kwargs", ")", "for", "k", "in", "predefined_args", ")", ",", "'Cannot override predefined model settings.'", "predefined_args", ".", "update", "(", "kwargs", ")", "return", "_get_elmo_model", "(", "ELMoBiLM", ",", "'elmo_2x1024_128_2048cnn_1xhighway'", ",", "dataset_name", ",", "pretrained", ",", "ctx", ",", "root", ",", "*", "*", "predefined_args", ")" ]
r"""ELMo 2-layer BiLSTM with 1024 hidden units, 128 projection size, 1 highway layer. Parameters ---------- dataset_name : str or None, default None The dataset name on which the pre-trained model is trained. Options are 'gbw'. pretrained : bool, default False Whether to load the pre-trained weights for model. ctx : Context, default CPU The context in which to load the pre-trained weights. root : str, default '$MXNET_HOME/models' Location for keeping the model parameters. MXNET_HOME defaults to '~/.mxnet'. Returns ------- gluon.Block
[ "r", "ELMo", "2", "-", "layer", "BiLSTM", "with", "1024", "hidden", "units", "128", "projection", "size", "1", "highway", "layer", "." ]
train
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/model/elmo.py#L308-L349
0.002711
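A usage sketch for this model constructor; it assumes the function is importable from gluonnlp.model.elmo (the module path in this record) and that downloading the pre-trained weights is acceptable:

import mxnet as mx
from gluonnlp.model.elmo import elmo_2x1024_128_2048cnn_1xhighway

# Load the weights pre-trained on the Google Billion Words corpus ('gbw' is the
# only documented dataset) and keep the network on the CPU.
elmo = elmo_2x1024_128_2048cnn_1xhighway(dataset_name='gbw', pretrained=True, ctx=mx.cpu())
print(elmo)  # prints the BiLSTM/char-CNN block structure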
frictionlessdata/goodtables-py
goodtables/validate.py
init_datapackage
def init_datapackage(resource_paths): """Create tabular data package with resources. It will also infer the tabular resources' schemas. Args: resource_paths (List[str]): Paths to the data package resources. Returns: datapackage.Package: The data package. """ dp = datapackage.Package({ 'name': 'change-me', 'schema': 'tabular-data-package', }) for path in resource_paths: dp.infer(path) return dp
python
def init_datapackage(resource_paths): """Create tabular data package with resources. It will also infer the tabular resources' schemas. Args: resource_paths (List[str]): Paths to the data package resources. Returns: datapackage.Package: The data package. """ dp = datapackage.Package({ 'name': 'change-me', 'schema': 'tabular-data-package', }) for path in resource_paths: dp.infer(path) return dp
[ "def", "init_datapackage", "(", "resource_paths", ")", ":", "dp", "=", "datapackage", ".", "Package", "(", "{", "'name'", ":", "'change-me'", ",", "'schema'", ":", "'tabular-data-package'", ",", "}", ")", "for", "path", "in", "resource_paths", ":", "dp", ".", "infer", "(", "path", ")", "return", "dp" ]
Create tabular data package with resources. It will also infer the tabular resources' schemas. Args: resource_paths (List[str]): Paths to the data package resources. Returns: datapackage.Package: The data package.
[ "Create", "tabular", "data", "package", "with", "resources", "." ]
train
https://github.com/frictionlessdata/goodtables-py/blob/3e7d6891d2f4e342dfafbe0e951e204ccc252a44/goodtables/validate.py#L90-L109
0.002096
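A usage sketch for init_datapackage, assuming the datapackage.Package API exposes descriptor and resources as in datapackage-py; the CSV paths are placeholders:

from goodtables.validate import init_datapackage

# Build a tabular data package wrapping two CSV resources and infer their schemas.
dp = init_datapackage(['data/invoices.csv', 'data/customers.csv'])
print(dp.descriptor['name'])                      # 'change-me', as set in the function above
print([resource.name for resource in dp.resources])  # one resource per inferred CSV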
rigetti/pyquil
pyquil/noise.py
_create_kraus_pragmas
def _create_kraus_pragmas(name, qubit_indices, kraus_ops): """ Generate the pragmas to define a Kraus map for a specific gate on some qubits. :param str name: The name of the gate. :param list|tuple qubit_indices: The qubits :param list|tuple kraus_ops: The Kraus operators as matrices. :return: A QUIL string with PRAGMA ADD-KRAUS ... statements. :rtype: str """ pragmas = [Pragma("ADD-KRAUS", [name] + list(qubit_indices), "({})".format(" ".join(map(format_parameter, np.ravel(k))))) for k in kraus_ops] return pragmas
python
def _create_kraus_pragmas(name, qubit_indices, kraus_ops): """ Generate the pragmas to define a Kraus map for a specific gate on some qubits. :param str name: The name of the gate. :param list|tuple qubit_indices: The qubits :param list|tuple kraus_ops: The Kraus operators as matrices. :return: A QUIL string with PRAGMA ADD-KRAUS ... statements. :rtype: str """ pragmas = [Pragma("ADD-KRAUS", [name] + list(qubit_indices), "({})".format(" ".join(map(format_parameter, np.ravel(k))))) for k in kraus_ops] return pragmas
[ "def", "_create_kraus_pragmas", "(", "name", ",", "qubit_indices", ",", "kraus_ops", ")", ":", "pragmas", "=", "[", "Pragma", "(", "\"ADD-KRAUS\"", ",", "[", "name", "]", "+", "list", "(", "qubit_indices", ")", ",", "\"({})\"", ".", "format", "(", "\" \"", ".", "join", "(", "map", "(", "format_parameter", ",", "np", ".", "ravel", "(", "k", ")", ")", ")", ")", ")", "for", "k", "in", "kraus_ops", "]", "return", "pragmas" ]
Generate the pragmas to define a Kraus map for a specific gate on some qubits. :param str name: The name of the gate. :param list|tuple qubit_indices: The qubits :param list|tuple kraus_ops: The Kraus operators as matrices. :return: A QUIL string with PRAGMA ADD-KRAUS ... statements. :rtype: str
[ "Generate", "the", "pragmas", "to", "define", "a", "Kraus", "map", "for", "a", "specific", "gate", "on", "some", "qubits", "." ]
train
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/noise.py#L202-L217
0.004831
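Although _create_kraus_pragmas is a private helper, a small sketch makes its inputs and outputs concrete; here the Kraus operators describe a single-qubit bit-flip channel:

import numpy as np
from pyquil.noise import _create_kraus_pragmas

# Kraus operators for a bit-flip channel with flip probability p.
p = 0.1
k_no_flip = np.sqrt(1 - p) * np.eye(2)
k_flip = np.sqrt(p) * np.array([[0, 1], [1, 0]])

# One "PRAGMA ADD-KRAUS X 0 (...)" instruction per Kraus operator,
# attached to the X gate on qubit 0.
pragmas = _create_kraus_pragmas("X", [0], [k_no_flip, k_flip])
for pragma in pragmas:
    print(pragma)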
galaxyproject/pulsar
pulsar/client/manager.py
ClientManager.get_client
def get_client(self, destination_params, job_id, **kwargs): """Build a client given specific destination parameters and job_id.""" destination_params = _parse_destination_params(destination_params) destination_params.update(**kwargs) job_manager_interface_class = self.job_manager_interface_class job_manager_interface_args = dict(destination_params=destination_params, **self.job_manager_interface_args) job_manager_interface = job_manager_interface_class(**job_manager_interface_args) return self.client_class(destination_params, job_id, job_manager_interface, **self.extra_client_kwds)
python
def get_client(self, destination_params, job_id, **kwargs): """Build a client given specific destination parameters and job_id.""" destination_params = _parse_destination_params(destination_params) destination_params.update(**kwargs) job_manager_interface_class = self.job_manager_interface_class job_manager_interface_args = dict(destination_params=destination_params, **self.job_manager_interface_args) job_manager_interface = job_manager_interface_class(**job_manager_interface_args) return self.client_class(destination_params, job_id, job_manager_interface, **self.extra_client_kwds)
[ "def", "get_client", "(", "self", ",", "destination_params", ",", "job_id", ",", "*", "*", "kwargs", ")", ":", "destination_params", "=", "_parse_destination_params", "(", "destination_params", ")", "destination_params", ".", "update", "(", "*", "*", "kwargs", ")", "job_manager_interface_class", "=", "self", ".", "job_manager_interface_class", "job_manager_interface_args", "=", "dict", "(", "destination_params", "=", "destination_params", ",", "*", "*", "self", ".", "job_manager_interface_args", ")", "job_manager_interface", "=", "job_manager_interface_class", "(", "*", "*", "job_manager_interface_args", ")", "return", "self", ".", "client_class", "(", "destination_params", ",", "job_id", ",", "job_manager_interface", ",", "*", "*", "self", ".", "extra_client_kwds", ")" ]
Build a client given specific destination parameters and job_id.
[ "Build", "a", "client", "given", "specific", "destination", "parameters", "and", "job_id", "." ]
train
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/client/manager.py#L83-L90
0.007764
angr/angr
angr/analyses/cfg/cfb.py
CFBlanket._mark_unknowns
def _mark_unknowns(self): """ Mark all unmapped regions. :return: None """ for obj in self.project.loader.all_objects: if isinstance(obj, cle.ELF): # sections? if obj.sections: for section in obj.sections: if not section.memsize or not section.vaddr: continue min_addr, max_addr = section.min_addr, section.max_addr self._mark_unknowns_core(min_addr, max_addr + 1, obj=obj, section=section) elif obj.segments: for segment in obj.segments: if not segment.memsize: continue min_addr, max_addr = segment.min_addr, segment.max_addr self._mark_unknowns_core(min_addr, max_addr + 1, obj=obj, segment=segment) else: # is it empty? _l.warning("Empty ELF object %s.", repr(obj)) elif isinstance(obj, cle.PE): if obj.sections: for section in obj.sections: if not section.memsize: continue min_addr, max_addr = section.min_addr, section.max_addr self._mark_unknowns_core(min_addr, max_addr + 1, obj=obj, section=section) else: # is it empty? _l.warning("Empty PE object %s.", repr(obj)) else: min_addr, max_addr = obj.min_addr, obj.max_addr self._mark_unknowns_core(min_addr, max_addr + 1, obj=obj)
python
def _mark_unknowns(self): """ Mark all unmapped regions. :return: None """ for obj in self.project.loader.all_objects: if isinstance(obj, cle.ELF): # sections? if obj.sections: for section in obj.sections: if not section.memsize or not section.vaddr: continue min_addr, max_addr = section.min_addr, section.max_addr self._mark_unknowns_core(min_addr, max_addr + 1, obj=obj, section=section) elif obj.segments: for segment in obj.segments: if not segment.memsize: continue min_addr, max_addr = segment.min_addr, segment.max_addr self._mark_unknowns_core(min_addr, max_addr + 1, obj=obj, segment=segment) else: # is it empty? _l.warning("Empty ELF object %s.", repr(obj)) elif isinstance(obj, cle.PE): if obj.sections: for section in obj.sections: if not section.memsize: continue min_addr, max_addr = section.min_addr, section.max_addr self._mark_unknowns_core(min_addr, max_addr + 1, obj=obj, section=section) else: # is it empty? _l.warning("Empty PE object %s.", repr(obj)) else: min_addr, max_addr = obj.min_addr, obj.max_addr self._mark_unknowns_core(min_addr, max_addr + 1, obj=obj)
[ "def", "_mark_unknowns", "(", "self", ")", ":", "for", "obj", "in", "self", ".", "project", ".", "loader", ".", "all_objects", ":", "if", "isinstance", "(", "obj", ",", "cle", ".", "ELF", ")", ":", "# sections?", "if", "obj", ".", "sections", ":", "for", "section", "in", "obj", ".", "sections", ":", "if", "not", "section", ".", "memsize", "or", "not", "section", ".", "vaddr", ":", "continue", "min_addr", ",", "max_addr", "=", "section", ".", "min_addr", ",", "section", ".", "max_addr", "self", ".", "_mark_unknowns_core", "(", "min_addr", ",", "max_addr", "+", "1", ",", "obj", "=", "obj", ",", "section", "=", "section", ")", "elif", "obj", ".", "segments", ":", "for", "segment", "in", "obj", ".", "segments", ":", "if", "not", "segment", ".", "memsize", ":", "continue", "min_addr", ",", "max_addr", "=", "segment", ".", "min_addr", ",", "segment", ".", "max_addr", "self", ".", "_mark_unknowns_core", "(", "min_addr", ",", "max_addr", "+", "1", ",", "obj", "=", "obj", ",", "segment", "=", "segment", ")", "else", ":", "# is it empty?", "_l", ".", "warning", "(", "\"Empty ELF object %s.\"", ",", "repr", "(", "obj", ")", ")", "elif", "isinstance", "(", "obj", ",", "cle", ".", "PE", ")", ":", "if", "obj", ".", "sections", ":", "for", "section", "in", "obj", ".", "sections", ":", "if", "not", "section", ".", "memsize", ":", "continue", "min_addr", ",", "max_addr", "=", "section", ".", "min_addr", ",", "section", ".", "max_addr", "self", ".", "_mark_unknowns_core", "(", "min_addr", ",", "max_addr", "+", "1", ",", "obj", "=", "obj", ",", "section", "=", "section", ")", "else", ":", "# is it empty?", "_l", ".", "warning", "(", "\"Empty PE object %s.\"", ",", "repr", "(", "obj", ")", ")", "else", ":", "min_addr", ",", "max_addr", "=", "obj", ".", "min_addr", ",", "obj", ".", "max_addr", "self", ".", "_mark_unknowns_core", "(", "min_addr", ",", "max_addr", "+", "1", ",", "obj", "=", "obj", ")" ]
Mark all unmapped regions. :return: None
[ "Mark", "all", "unmapped", "regions", "." ]
train
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/cfg/cfb.py#L228-L265
0.002885
rodluger/everest
everest/missions/k2/sysrem.py
GetChunk
def GetChunk(time, breakpoints, b, mask=[]): ''' Returns the indices corresponding to a given light curve chunk. :param int b: The index of the chunk to return ''' M = np.delete(np.arange(len(time)), mask, axis=0) if b > 0: res = M[(M > breakpoints[b - 1]) & (M <= breakpoints[b])] else: res = M[M <= breakpoints[b]] return res
python
def GetChunk(time, breakpoints, b, mask=[]): ''' Returns the indices corresponding to a given light curve chunk. :param int b: The index of the chunk to return ''' M = np.delete(np.arange(len(time)), mask, axis=0) if b > 0: res = M[(M > breakpoints[b - 1]) & (M <= breakpoints[b])] else: res = M[M <= breakpoints[b]] return res
[ "def", "GetChunk", "(", "time", ",", "breakpoints", ",", "b", ",", "mask", "=", "[", "]", ")", ":", "M", "=", "np", ".", "delete", "(", "np", ".", "arange", "(", "len", "(", "time", ")", ")", ",", "mask", ",", "axis", "=", "0", ")", "if", "b", ">", "0", ":", "res", "=", "M", "[", "(", "M", ">", "breakpoints", "[", "b", "-", "1", "]", ")", "&", "(", "M", "<=", "breakpoints", "[", "b", "]", ")", "]", "else", ":", "res", "=", "M", "[", "M", "<=", "breakpoints", "[", "b", "]", "]", "return", "res" ]
Returns the indices corresponding to a given light curve chunk. :param int b: The index of the chunk to return
[ "Returns", "the", "indices", "corresponding", "to", "a", "given", "light", "curve", "chunk", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/sysrem.py#L25-L38
0.002646
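A worked example of the chunk indexing, assuming the import path follows the file path in this record:

import numpy as np
from everest.missions.k2.sysrem import GetChunk

# Ten cadences split into two chunks at index 4, with cadences 2 and 7 masked out.
time = np.arange(10, dtype=float)
breakpoints = [4, 9]
mask = [2, 7]

print(GetChunk(time, breakpoints, 0, mask=mask))   # -> [0 1 3 4]
print(GetChunk(time, breakpoints, 1, mask=mask))   # -> [5 6 8 9]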
theelous3/asks
asks/utils.py
unquote_unreserved
def unquote_unreserved(uri): """Un-escape any percent-escape sequences in a URI that are unreserved characters. This leaves all reserved, illegal and non-ASCII bytes encoded. :rtype: str """ parts = uri.split('%') for i in range(1, len(parts)): h = parts[i][0:2] if len(h) == 2 and h.isalnum(): try: c = chr(int(h, 16)) except ValueError: raise ValueError("Invalid percent-escape sequence: '%s'" % h) if c in UNRESERVED_SET: parts[i] = c + parts[i][2:] else: parts[i] = '%' + parts[i] else: parts[i] = '%' + parts[i] return ''.join(parts)
python
def unquote_unreserved(uri): """Un-escape any percent-escape sequences in a URI that are unreserved characters. This leaves all reserved, illegal and non-ASCII bytes encoded. :rtype: str """ parts = uri.split('%') for i in range(1, len(parts)): h = parts[i][0:2] if len(h) == 2 and h.isalnum(): try: c = chr(int(h, 16)) except ValueError: raise ValueError("Invalid percent-escape sequence: '%s'" % h) if c in UNRESERVED_SET: parts[i] = c + parts[i][2:] else: parts[i] = '%' + parts[i] else: parts[i] = '%' + parts[i] return ''.join(parts)
[ "def", "unquote_unreserved", "(", "uri", ")", ":", "parts", "=", "uri", ".", "split", "(", "'%'", ")", "for", "i", "in", "range", "(", "1", ",", "len", "(", "parts", ")", ")", ":", "h", "=", "parts", "[", "i", "]", "[", "0", ":", "2", "]", "if", "len", "(", "h", ")", "==", "2", "and", "h", ".", "isalnum", "(", ")", ":", "try", ":", "c", "=", "chr", "(", "int", "(", "h", ",", "16", ")", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"Invalid percent-escape sequence: '%s'\"", "%", "h", ")", "if", "c", "in", "UNRESERVED_SET", ":", "parts", "[", "i", "]", "=", "c", "+", "parts", "[", "i", "]", "[", "2", ":", "]", "else", ":", "parts", "[", "i", "]", "=", "'%'", "+", "parts", "[", "i", "]", "else", ":", "parts", "[", "i", "]", "=", "'%'", "+", "parts", "[", "i", "]", "return", "''", ".", "join", "(", "parts", ")" ]
Un-escape any percent-escape sequences in a URI that are unreserved characters. This leaves all reserved, illegal and non-ASCII bytes encoded. :rtype: str
[ "Un", "-", "escape", "any", "percent", "-", "escape", "sequences", "in", "a", "URI", "that", "are", "unreserved", "characters", ".", "This", "leaves", "all", "reserved", "illegal", "and", "non", "-", "ASCII", "bytes", "encoded", ".", ":", "rtype", ":", "str" ]
train
https://github.com/theelous3/asks/blob/ea522ea971ecb031d488a6301dc2718516cadcd6/asks/utils.py#L38-L58
0.001399
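A small sketch of the behaviour, assuming UNRESERVED_SET holds the RFC 3986 unreserved characters (letters, digits, "-._~"):

from asks.utils import unquote_unreserved

# 0x7E ('~') is unreserved, so '%7E' is decoded; 0x2F ('/') is reserved,
# so '%2F' stays percent-encoded.
print(unquote_unreserved('http://example.com/%7Euser%2Fdocs'))
# -> 'http://example.com/~user%2Fdocs'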
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/runsnakerun/homedirectory.py
_winreg_getShellFolder
def _winreg_getShellFolder( name ): """Get a shell folder by string name from the registry""" k = _winreg.OpenKey( _winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders" ) try: # should check that it's valid? How? return _winreg.QueryValueEx( k, name )[0] finally: _winreg.CloseKey( k )
python
def _winreg_getShellFolder( name ): """Get a shell folder by string name from the registry""" k = _winreg.OpenKey( _winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders" ) try: # should check that it's valid? How? return _winreg.QueryValueEx( k, name )[0] finally: _winreg.CloseKey( k )
[ "def", "_winreg_getShellFolder", "(", "name", ")", ":", "k", "=", "_winreg", ".", "OpenKey", "(", "_winreg", ".", "HKEY_CURRENT_USER", ",", "r\"Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders\"", ")", "try", ":", "# should check that it's valid? How?", "return", "_winreg", ".", "QueryValueEx", "(", "k", ",", "name", ")", "[", "0", "]", "finally", ":", "_winreg", ".", "CloseKey", "(", "k", ")" ]
Get a shell folder by string name from the registry
[ "Get", "a", "shell", "folder", "by", "string", "name", "from", "the", "registry" ]
train
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/homedirectory.py#L18-L28
0.018135
wonambi-python/wonambi
wonambi/ioeeg/openephys.py
_read_n_samples
def _read_n_samples(channel_file): """Calculate the number of samples based on the file size Parameters ---------- channel_file : Path path to single filename with the header Returns ------- int number of blocks (i.e. records, in which the data is cut) int number of samples """ n_blocks = int((channel_file.stat().st_size - HDR_LENGTH) / BLK_SIZE) n_samples = n_blocks * BLK_LENGTH return n_blocks, n_samples
python
def _read_n_samples(channel_file): """Calculate the number of samples based on the file size Parameters ---------- channel_file : Path path to single filename with the header Returns ------- int number of blocks (i.e. records, in which the data is cut) int number of samples """ n_blocks = int((channel_file.stat().st_size - HDR_LENGTH) / BLK_SIZE) n_samples = n_blocks * BLK_LENGTH return n_blocks, n_samples
[ "def", "_read_n_samples", "(", "channel_file", ")", ":", "n_blocks", "=", "int", "(", "(", "channel_file", ".", "stat", "(", ")", ".", "st_size", "-", "HDR_LENGTH", ")", "/", "BLK_SIZE", ")", "n_samples", "=", "n_blocks", "*", "BLK_LENGTH", "return", "n_blocks", ",", "n_samples" ]
Calculate the number of samples based on the file size Parameters ---------- channel_file : Path path to single filename with the header Returns ------- int number of blocks (i.e. records, in which the data is cut) int number of samples
[ "Calculate", "the", "number", "of", "samples", "based", "on", "the", "file", "size" ]
train
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/ioeeg/openephys.py#L222-L239
0.00207
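The block arithmetic can be checked by hand; the constants below are illustrative assumptions for the Open Ephys .continuous layout (fixed-size header followed by fixed-size records):

HDR_LENGTH = 1024   # assumed header size in bytes
BLK_SIZE = 2070     # assumed bytes per record
BLK_LENGTH = 1024   # samples per record

file_size = HDR_LENGTH + 100 * BLK_SIZE           # a file holding exactly 100 records
n_blocks = int((file_size - HDR_LENGTH) / BLK_SIZE)
n_samples = n_blocks * BLK_LENGTH
print(n_blocks, n_samples)                        # -> 100 102400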
IDSIA/sacred
sacred/utils.py
set_by_dotted_path
def set_by_dotted_path(d, path, value): """ Set an entry in a nested dict using a dotted path. Will create dictionaries as needed. Examples -------- >>> d = {'foo': {'bar': 7}} >>> set_by_dotted_path(d, 'foo.bar', 10) >>> d {'foo': {'bar': 10}} >>> set_by_dotted_path(d, 'foo.d.baz', 3) >>> d {'foo': {'bar': 10, 'd': {'baz': 3}}} """ split_path = path.split('.') current_option = d for p in split_path[:-1]: if p not in current_option: current_option[p] = dict() current_option = current_option[p] current_option[split_path[-1]] = value
python
def set_by_dotted_path(d, path, value): """ Set an entry in a nested dict using a dotted path. Will create dictionaries as needed. Examples -------- >>> d = {'foo': {'bar': 7}} >>> set_by_dotted_path(d, 'foo.bar', 10) >>> d {'foo': {'bar': 10}} >>> set_by_dotted_path(d, 'foo.d.baz', 3) >>> d {'foo': {'bar': 10, 'd': {'baz': 3}}} """ split_path = path.split('.') current_option = d for p in split_path[:-1]: if p not in current_option: current_option[p] = dict() current_option = current_option[p] current_option[split_path[-1]] = value
[ "def", "set_by_dotted_path", "(", "d", ",", "path", ",", "value", ")", ":", "split_path", "=", "path", ".", "split", "(", "'.'", ")", "current_option", "=", "d", "for", "p", "in", "split_path", "[", ":", "-", "1", "]", ":", "if", "p", "not", "in", "current_option", ":", "current_option", "[", "p", "]", "=", "dict", "(", ")", "current_option", "=", "current_option", "[", "p", "]", "current_option", "[", "split_path", "[", "-", "1", "]", "]", "=", "value" ]
Set an entry in a nested dict using a dotted path. Will create dictionaries as needed. Examples -------- >>> d = {'foo': {'bar': 7}} >>> set_by_dotted_path(d, 'foo.bar', 10) >>> d {'foo': {'bar': 10}} >>> set_by_dotted_path(d, 'foo.d.baz', 3) >>> d {'foo': {'bar': 10, 'd': {'baz': 3}}}
[ "Set", "an", "entry", "in", "a", "nested", "dict", "using", "a", "dotted", "path", "." ]
train
https://github.com/IDSIA/sacred/blob/72633776bed9b5bddf93ae7d215188e61970973a/sacred/utils.py#L372-L395
0.001572
rpcope1/PythonConfluenceAPI
PythonConfluenceAPI/cfapi.py
ConfluenceFuturesAPI._start_http_session
def _start_http_session(self): """ Start a new requests HTTP session, clearing cookies and session data. :return: None """ api_logger.debug("Starting new HTTP session...") self.session = FuturesSession(executor=self.executor, max_workers=self.max_workers) self.session.headers.update({"User-Agent": self.user_agent}) if self.username and self.password: api_logger.debug("Requests will use authorization.") self.session.auth = HTTPBasicAuth(self.username, self.password)
python
def _start_http_session(self): """ Start a new requests HTTP session, clearing cookies and session data. :return: None """ api_logger.debug("Starting new HTTP session...") self.session = FuturesSession(executor=self.executor, max_workers=self.max_workers) self.session.headers.update({"User-Agent": self.user_agent}) if self.username and self.password: api_logger.debug("Requests will use authorization.") self.session.auth = HTTPBasicAuth(self.username, self.password)
[ "def", "_start_http_session", "(", "self", ")", ":", "api_logger", ".", "debug", "(", "\"Starting new HTTP session...\"", ")", "self", ".", "session", "=", "FuturesSession", "(", "executor", "=", "self", ".", "executor", ",", "max_workers", "=", "self", ".", "max_workers", ")", "self", ".", "session", ".", "headers", ".", "update", "(", "{", "\"User-Agent\"", ":", "self", ".", "user_agent", "}", ")", "if", "self", ".", "username", "and", "self", ".", "password", ":", "api_logger", ".", "debug", "(", "\"Requests will use authorization.\"", ")", "self", ".", "session", ".", "auth", "=", "HTTPBasicAuth", "(", "self", ".", "username", ",", "self", ".", "password", ")" ]
Start a new requests HTTP session, clearing cookies and session data. :return: None
[ "Start", "a", "new", "requests", "HTTP", "session", "clearing", "cookies", "and", "session", "data", ".", ":", "return", ":", "None" ]
train
https://github.com/rpcope1/PythonConfluenceAPI/blob/b7f0ca2a390f964715fdf3a60b5b0c5ef7116d40/PythonConfluenceAPI/cfapi.py#L57-L67
0.005386
optimizely/python-sdk
optimizely/project_config.py
ProjectConfig.get_event
def get_event(self, event_key): """ Get event for the provided event key. Args: event_key: Event key for which event is to be determined. Returns: Event corresponding to the provided event key. """ event = self.event_key_map.get(event_key) if event: return event self.logger.error('Event "%s" is not in datafile.' % event_key) self.error_handler.handle_error(exceptions.InvalidEventException(enums.Errors.INVALID_EVENT_KEY_ERROR)) return None
python
def get_event(self, event_key): """ Get event for the provided event key. Args: event_key: Event key for which event is to be determined. Returns: Event corresponding to the provided event key. """ event = self.event_key_map.get(event_key) if event: return event self.logger.error('Event "%s" is not in datafile.' % event_key) self.error_handler.handle_error(exceptions.InvalidEventException(enums.Errors.INVALID_EVENT_KEY_ERROR)) return None
[ "def", "get_event", "(", "self", ",", "event_key", ")", ":", "event", "=", "self", ".", "event_key_map", ".", "get", "(", "event_key", ")", "if", "event", ":", "return", "event", "self", ".", "logger", ".", "error", "(", "'Event \"%s\" is not in datafile.'", "%", "event_key", ")", "self", ".", "error_handler", ".", "handle_error", "(", "exceptions", ".", "InvalidEventException", "(", "enums", ".", "Errors", ".", "INVALID_EVENT_KEY_ERROR", ")", ")", "return", "None" ]
Get event for the provided event key. Args: event_key: Event key for which event is to be determined. Returns: Event corresponding to the provided event key.
[ "Get", "event", "for", "the", "provided", "event", "key", "." ]
train
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/project_config.py#L354-L371
0.005988
vtkiorg/vtki
vtki/pointset.py
UnstructuredGrid.extract_cells
def extract_cells(self, ind): """ Returns a subset of the grid Parameters ---------- ind : np.ndarray Numpy array of cell indices to be extracted. Returns ------- subgrid : vtki.UnstructuredGrid Subselected grid """ if not isinstance(ind, np.ndarray): ind = np.array(ind, np.ndarray) if ind.dtype == np.bool: ind = ind.nonzero()[0].astype(vtki.ID_TYPE) if ind.dtype != vtki.ID_TYPE: ind = ind.astype(vtki.ID_TYPE) if not ind.flags.c_contiguous: ind = np.ascontiguousarray(ind) vtk_ind = numpy_to_vtkIdTypeArray(ind, deep=False) # Create selection objects selectionNode = vtk.vtkSelectionNode() selectionNode.SetFieldType(vtk.vtkSelectionNode.CELL) selectionNode.SetContentType(vtk.vtkSelectionNode.INDICES) selectionNode.SetSelectionList(vtk_ind) selection = vtk.vtkSelection() selection.AddNode(selectionNode) # extract extract_sel = vtk.vtkExtractSelection() extract_sel.SetInputData(0, self) extract_sel.SetInputData(1, selection) extract_sel.Update() subgrid = _get_output(extract_sel) # extracts only in float32 if self.points.dtype is not np.dtype('float32'): ind = subgrid.point_arrays['vtkOriginalPointIds'] subgrid.points = self.points[ind] return subgrid
python
def extract_cells(self, ind): """ Returns a subset of the grid Parameters ---------- ind : np.ndarray Numpy array of cell indices to be extracted. Returns ------- subgrid : vtki.UnstructuredGrid Subselected grid """ if not isinstance(ind, np.ndarray): ind = np.array(ind, np.ndarray) if ind.dtype == np.bool: ind = ind.nonzero()[0].astype(vtki.ID_TYPE) if ind.dtype != vtki.ID_TYPE: ind = ind.astype(vtki.ID_TYPE) if not ind.flags.c_contiguous: ind = np.ascontiguousarray(ind) vtk_ind = numpy_to_vtkIdTypeArray(ind, deep=False) # Create selection objects selectionNode = vtk.vtkSelectionNode() selectionNode.SetFieldType(vtk.vtkSelectionNode.CELL) selectionNode.SetContentType(vtk.vtkSelectionNode.INDICES) selectionNode.SetSelectionList(vtk_ind) selection = vtk.vtkSelection() selection.AddNode(selectionNode) # extract extract_sel = vtk.vtkExtractSelection() extract_sel.SetInputData(0, self) extract_sel.SetInputData(1, selection) extract_sel.Update() subgrid = _get_output(extract_sel) # extracts only in float32 if self.points.dtype is not np.dtype('float32'): ind = subgrid.point_arrays['vtkOriginalPointIds'] subgrid.points = self.points[ind] return subgrid
[ "def", "extract_cells", "(", "self", ",", "ind", ")", ":", "if", "not", "isinstance", "(", "ind", ",", "np", ".", "ndarray", ")", ":", "ind", "=", "np", ".", "array", "(", "ind", ",", "np", ".", "ndarray", ")", "if", "ind", ".", "dtype", "==", "np", ".", "bool", ":", "ind", "=", "ind", ".", "nonzero", "(", ")", "[", "0", "]", ".", "astype", "(", "vtki", ".", "ID_TYPE", ")", "if", "ind", ".", "dtype", "!=", "vtki", ".", "ID_TYPE", ":", "ind", "=", "ind", ".", "astype", "(", "vtki", ".", "ID_TYPE", ")", "if", "not", "ind", ".", "flags", ".", "c_contiguous", ":", "ind", "=", "np", ".", "ascontiguousarray", "(", "ind", ")", "vtk_ind", "=", "numpy_to_vtkIdTypeArray", "(", "ind", ",", "deep", "=", "False", ")", "# Create selection objects", "selectionNode", "=", "vtk", ".", "vtkSelectionNode", "(", ")", "selectionNode", ".", "SetFieldType", "(", "vtk", ".", "vtkSelectionNode", ".", "CELL", ")", "selectionNode", ".", "SetContentType", "(", "vtk", ".", "vtkSelectionNode", ".", "INDICES", ")", "selectionNode", ".", "SetSelectionList", "(", "vtk_ind", ")", "selection", "=", "vtk", ".", "vtkSelection", "(", ")", "selection", ".", "AddNode", "(", "selectionNode", ")", "# extract", "extract_sel", "=", "vtk", ".", "vtkExtractSelection", "(", ")", "extract_sel", ".", "SetInputData", "(", "0", ",", "self", ")", "extract_sel", ".", "SetInputData", "(", "1", ",", "selection", ")", "extract_sel", ".", "Update", "(", ")", "subgrid", "=", "_get_output", "(", "extract_sel", ")", "# extracts only in float32", "if", "self", ".", "points", ".", "dtype", "is", "not", "np", ".", "dtype", "(", "'float32'", ")", ":", "ind", "=", "subgrid", ".", "point_arrays", "[", "'vtkOriginalPointIds'", "]", "subgrid", ".", "points", "=", "self", ".", "points", "[", "ind", "]", "return", "subgrid" ]
Returns a subset of the grid Parameters ---------- ind : np.ndarray Numpy array of cell indices to be extracted. Returns ------- subgrid : vtki.UnstructuredGrid Subselected grid
[ "Returns", "a", "subset", "of", "the", "grid" ]
train
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/pointset.py#L2087-L2137
0.001323
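A usage sketch for extract_cells; loading an UnstructuredGrid from a file path and the n_cells attribute are assumptions based on the vtki/pyvista API, and the filename is a placeholder:

import numpy as np
import vtki

grid = vtki.UnstructuredGrid('mesh.vtu')   # any existing unstructured grid

# Extract the first ten cells by index ...
sub_a = grid.extract_cells(np.arange(10))

# ... or every other cell with a boolean mask (one entry per cell).
mask = np.zeros(grid.n_cells, dtype=bool)
mask[::2] = True
sub_b = grid.extract_cells(mask)
print(sub_a.n_cells, sub_b.n_cells)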
Alveo/pyalveo
pyalveo/objects.py
Document.get_content
def get_content(self, force_download=False): """ Retrieve the content for this Document from the server :type force_download: Boolean :param force_download: True to download from the server regardless of the cache's contents :rtype: String :returns: the content data :raises: APIError if the API request is not successful """ return self.client.get_document(self.url(), force_download)
python
def get_content(self, force_download=False): """ Retrieve the content for this Document from the server :type force_download: Boolean :param force_download: True to download from the server regardless of the cache's contents :rtype: String :returns: the content data :raises: APIError if the API request is not successful """ return self.client.get_document(self.url(), force_download)
[ "def", "get_content", "(", "self", ",", "force_download", "=", "False", ")", ":", "return", "self", ".", "client", ".", "get_document", "(", "self", ".", "url", "(", ")", ",", "force_download", ")" ]
Retrieve the content for this Document from the server :type force_download: Boolean :param force_download: True to download from the server regardless of the cache's contents :rtype: String :returns: the content data :raises: APIError if the API request is not successful
[ "Retrieve", "the", "content", "for", "this", "Document", "from", "the", "server" ]
train
https://github.com/Alveo/pyalveo/blob/1e9eec22bc031bc9a08066f9966565a546e6242e/pyalveo/objects.py#L702-L716
0.004292
hthiery/python-fritzhome
pyfritzhome/cli.py
list_all
def list_all(fritz, args): """Command that prints all device information.""" devices = fritz.get_devices() for device in devices: print('#' * 30) print('name=%s' % device.name) print(' ain=%s' % device.ain) print(' id=%s' % device.identifier) print(' productname=%s' % device.productname) print(' manufacturer=%s' % device.manufacturer) print(" present=%s" % device.present) print(" lock=%s" % device.lock) print(" devicelock=%s" % device.device_lock) if device.present is False: continue if device.has_switch: print(" Switch:") print(" switch_state=%s" % device.switch_state) if device.has_switch: print(" Powermeter:") print(" power=%s" % device.power) print(" energy=%s" % device.energy) print(" voltage=%s" % device.voltage) if device.has_temperature_sensor: print(" Temperature:") print(" temperature=%s" % device.temperature) print(" offset=%s" % device.offset) if device.has_thermostat: print(" Thermostat:") print(" battery_low=%s" % device.battery_low) print(" battery_level=%s" % device.battery_level) print(" actual=%s" % device.actual_temperature) print(" target=%s" % device.target_temperature) print(" comfort=%s" % device.comfort_temperature) print(" eco=%s" % device.eco_temperature) print(" window=%s" % device.window_open) print(" summer=%s" % device.summer_active) print(" holiday=%s" % device.holiday_active) if device.has_alarm: print(" Alert:") print(" alert=%s" % device.alert_state)
python
def list_all(fritz, args): """Command that prints all device information.""" devices = fritz.get_devices() for device in devices: print('#' * 30) print('name=%s' % device.name) print(' ain=%s' % device.ain) print(' id=%s' % device.identifier) print(' productname=%s' % device.productname) print(' manufacturer=%s' % device.manufacturer) print(" present=%s" % device.present) print(" lock=%s" % device.lock) print(" devicelock=%s" % device.device_lock) if device.present is False: continue if device.has_switch: print(" Switch:") print(" switch_state=%s" % device.switch_state) if device.has_switch: print(" Powermeter:") print(" power=%s" % device.power) print(" energy=%s" % device.energy) print(" voltage=%s" % device.voltage) if device.has_temperature_sensor: print(" Temperature:") print(" temperature=%s" % device.temperature) print(" offset=%s" % device.offset) if device.has_thermostat: print(" Thermostat:") print(" battery_low=%s" % device.battery_low) print(" battery_level=%s" % device.battery_level) print(" actual=%s" % device.actual_temperature) print(" target=%s" % device.target_temperature) print(" comfort=%s" % device.comfort_temperature) print(" eco=%s" % device.eco_temperature) print(" window=%s" % device.window_open) print(" summer=%s" % device.summer_active) print(" holiday=%s" % device.holiday_active) if device.has_alarm: print(" Alert:") print(" alert=%s" % device.alert_state)
[ "def", "list_all", "(", "fritz", ",", "args", ")", ":", "devices", "=", "fritz", ".", "get_devices", "(", ")", "for", "device", "in", "devices", ":", "print", "(", "'#'", "*", "30", ")", "print", "(", "'name=%s'", "%", "device", ".", "name", ")", "print", "(", "' ain=%s'", "%", "device", ".", "ain", ")", "print", "(", "' id=%s'", "%", "device", ".", "identifier", ")", "print", "(", "' productname=%s'", "%", "device", ".", "productname", ")", "print", "(", "' manufacturer=%s'", "%", "device", ".", "manufacturer", ")", "print", "(", "\" present=%s\"", "%", "device", ".", "present", ")", "print", "(", "\" lock=%s\"", "%", "device", ".", "lock", ")", "print", "(", "\" devicelock=%s\"", "%", "device", ".", "device_lock", ")", "if", "device", ".", "present", "is", "False", ":", "continue", "if", "device", ".", "has_switch", ":", "print", "(", "\" Switch:\"", ")", "print", "(", "\" switch_state=%s\"", "%", "device", ".", "switch_state", ")", "if", "device", ".", "has_switch", ":", "print", "(", "\" Powermeter:\"", ")", "print", "(", "\" power=%s\"", "%", "device", ".", "power", ")", "print", "(", "\" energy=%s\"", "%", "device", ".", "energy", ")", "print", "(", "\" voltage=%s\"", "%", "device", ".", "voltage", ")", "if", "device", ".", "has_temperature_sensor", ":", "print", "(", "\" Temperature:\"", ")", "print", "(", "\" temperature=%s\"", "%", "device", ".", "temperature", ")", "print", "(", "\" offset=%s\"", "%", "device", ".", "offset", ")", "if", "device", ".", "has_thermostat", ":", "print", "(", "\" Thermostat:\"", ")", "print", "(", "\" battery_low=%s\"", "%", "device", ".", "battery_low", ")", "print", "(", "\" battery_level=%s\"", "%", "device", ".", "battery_level", ")", "print", "(", "\" actual=%s\"", "%", "device", ".", "actual_temperature", ")", "print", "(", "\" target=%s\"", "%", "device", ".", "target_temperature", ")", "print", "(", "\" comfort=%s\"", "%", "device", ".", "comfort_temperature", ")", "print", "(", "\" eco=%s\"", "%", "device", ".", "eco_temperature", ")", "print", "(", "\" window=%s\"", "%", "device", ".", "window_open", ")", "print", "(", "\" summer=%s\"", "%", "device", ".", "summer_active", ")", "print", "(", "\" holiday=%s\"", "%", "device", ".", "holiday_active", ")", "if", "device", ".", "has_alarm", ":", "print", "(", "\" Alert:\"", ")", "print", "(", "\" alert=%s\"", "%", "device", ".", "alert_state", ")" ]
Command that prints all device information.
[ "Command", "that", "prints", "all", "device", "information", "." ]
train
https://github.com/hthiery/python-fritzhome/blob/c74bd178d08a305028f316f7da35202da3526f61/pyfritzhome/cli.py#L18-L61
0.000547
konomae/lastpass-python
lastpass/parser.py
parse_PRIK
def parse_PRIK(chunk, encryption_key): """Parse PRIK chunk which contains private RSA key""" decrypted = decode_aes256('cbc', encryption_key[:16], decode_hex(chunk.payload), encryption_key) hex_key = re.match(br'^LastPassPrivateKey<(?P<hex_key>.*)>LastPassPrivateKey$', decrypted).group('hex_key') rsa_key = RSA.importKey(decode_hex(hex_key)) rsa_key.dmp1 = rsa_key.d % (rsa_key.p - 1) rsa_key.dmq1 = rsa_key.d % (rsa_key.q - 1) rsa_key.iqmp = number.inverse(rsa_key.q, rsa_key.p) return rsa_key
python
def parse_PRIK(chunk, encryption_key): """Parse PRIK chunk which contains private RSA key""" decrypted = decode_aes256('cbc', encryption_key[:16], decode_hex(chunk.payload), encryption_key) hex_key = re.match(br'^LastPassPrivateKey<(?P<hex_key>.*)>LastPassPrivateKey$', decrypted).group('hex_key') rsa_key = RSA.importKey(decode_hex(hex_key)) rsa_key.dmp1 = rsa_key.d % (rsa_key.p - 1) rsa_key.dmq1 = rsa_key.d % (rsa_key.q - 1) rsa_key.iqmp = number.inverse(rsa_key.q, rsa_key.p) return rsa_key
[ "def", "parse_PRIK", "(", "chunk", ",", "encryption_key", ")", ":", "decrypted", "=", "decode_aes256", "(", "'cbc'", ",", "encryption_key", "[", ":", "16", "]", ",", "decode_hex", "(", "chunk", ".", "payload", ")", ",", "encryption_key", ")", "hex_key", "=", "re", ".", "match", "(", "br'^LastPassPrivateKey<(?P<hex_key>.*)>LastPassPrivateKey$'", ",", "decrypted", ")", ".", "group", "(", "'hex_key'", ")", "rsa_key", "=", "RSA", ".", "importKey", "(", "decode_hex", "(", "hex_key", ")", ")", "rsa_key", ".", "dmp1", "=", "rsa_key", ".", "d", "%", "(", "rsa_key", ".", "p", "-", "1", ")", "rsa_key", ".", "dmq1", "=", "rsa_key", ".", "d", "%", "(", "rsa_key", ".", "q", "-", "1", ")", "rsa_key", ".", "iqmp", "=", "number", ".", "inverse", "(", "rsa_key", ".", "q", ",", "rsa_key", ".", "p", ")", "return", "rsa_key" ]
Parse PRIK chunk which contains private RSA key
[ "Parse", "PRIK", "chunk", "which", "contains", "private", "RSA", "key" ]
train
https://github.com/konomae/lastpass-python/blob/5063911b789868a1fd9db9922db82cdf156b938a/lastpass/parser.py#L73-L87
0.003226
veripress/veripress
veripress/model/storages.py
FileStorage.search_file
def search_file(search_root, search_filename, instance_relative_root=False): """ Search for a filename in a specific search root dir. :param search_root: root dir to search :param search_filename: filename to search (no extension) :param instance_relative_root: search root is relative to instance path :return: tuple(full_file_path, extension without heading dot) """ if instance_relative_root: search_root = os.path.join(current_app.instance_path, search_root) file_path = None file_ext = None for file in os.listdir(search_root): filename, ext = os.path.splitext(file) if filename == search_filename and ext and ext != '.': file_path = os.path.join(search_root, filename + ext) file_ext = ext[1:] # remove heading '.' (dot) break return file_path, file_ext
python
def search_file(search_root, search_filename, instance_relative_root=False): """ Search for a filename in a specific search root dir. :param search_root: root dir to search :param search_filename: filename to search (no extension) :param instance_relative_root: search root is relative to instance path :return: tuple(full_file_path, extension without heading dot) """ if instance_relative_root: search_root = os.path.join(current_app.instance_path, search_root) file_path = None file_ext = None for file in os.listdir(search_root): filename, ext = os.path.splitext(file) if filename == search_filename and ext and ext != '.': file_path = os.path.join(search_root, filename + ext) file_ext = ext[1:] # remove heading '.' (dot) break return file_path, file_ext
[ "def", "search_file", "(", "search_root", ",", "search_filename", ",", "instance_relative_root", "=", "False", ")", ":", "if", "instance_relative_root", ":", "search_root", "=", "os", ".", "path", ".", "join", "(", "current_app", ".", "instance_path", ",", "search_root", ")", "file_path", "=", "None", "file_ext", "=", "None", "for", "file", "in", "os", ".", "listdir", "(", "search_root", ")", ":", "filename", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "file", ")", "if", "filename", "==", "search_filename", "and", "ext", "and", "ext", "!=", "'.'", ":", "file_path", "=", "os", ".", "path", ".", "join", "(", "search_root", ",", "filename", "+", "ext", ")", "file_ext", "=", "ext", "[", "1", ":", "]", "# remove heading '.' (dot)", "break", "return", "file_path", ",", "file_ext" ]
Search for a filename in a specific search root dir. :param search_root: root dir to search :param search_filename: filename to search (no extension) :param instance_relative_root: search root is relative to instance path :return: tuple(full_file_path, extension without heading dot)
[ "Search", "for", "a", "filename", "in", "a", "specific", "search", "root", "dir", "." ]
train
https://github.com/veripress/veripress/blob/9e3df3a10eb1db32da596bf52118fe6acbe4b14a/veripress/model/storages.py#L303-L323
0.003122
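A usage sketch for search_file, assuming it is exposed as a static method on FileStorage; the directory and filename are placeholders:

from veripress.model.storages import FileStorage

# Look for 'my-first-post.*' (any extension) under a content directory.
file_path, ext = FileStorage.search_file('content/posts', 'my-first-post')
print(file_path)   # e.g. 'content/posts/my-first-post.md', or None if nothing matched
print(ext)         # e.g. 'md' (extension without the leading dot), or None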
googlefonts/glyphsLib
Lib/glyphsLib/types.py
parse_color
def parse_color(src=None): # type: (Optional[str]) -> Optional[Union[Tuple[int, ...], int]] """Parse a string representing a color value. Color is either a fixed color (when coloring something from the UI, see the GLYPHS_COLORS constant) or a list of the format [u8, u8, u8, u8], Glyphs does not support an alpha channel as of 2.5.1 (confirmed by Georg Seifert), and always writes a 1 to it. This was brought up and is probably corrected in the next versions. https://github.com/googlei18n/glyphsLib/pull/363#issuecomment-390418497 """ if src is None: return None # Tuple. if src[0] == "(": rgba = tuple(int(v) for v in src[1:-1].split(",") if v) if not (len(rgba) == 4 and all(0 <= v < 256 for v in rgba)): raise ValueError( "Broken color tuple: {}. Must have four values from 0 to 255.".format( src ) ) return rgba # Constant. return int(src)
python
def parse_color(src=None): # type: (Optional[str]) -> Optional[Union[Tuple[int, ...], int]] """Parse a string representing a color value. Color is either a fixed color (when coloring something from the UI, see the GLYPHS_COLORS constant) or a list of the format [u8, u8, u8, u8], Glyphs does not support an alpha channel as of 2.5.1 (confirmed by Georg Seifert), and always writes a 1 to it. This was brought up and is probably corrected in the next versions. https://github.com/googlei18n/glyphsLib/pull/363#issuecomment-390418497 """ if src is None: return None # Tuple. if src[0] == "(": rgba = tuple(int(v) for v in src[1:-1].split(",") if v) if not (len(rgba) == 4 and all(0 <= v < 256 for v in rgba)): raise ValueError( "Broken color tuple: {}. Must have four values from 0 to 255.".format( src ) ) return rgba # Constant. return int(src)
[ "def", "parse_color", "(", "src", "=", "None", ")", ":", "# type: (Optional[str]) -> Optional[Union[Tuple[int, ...], int]]", "if", "src", "is", "None", ":", "return", "None", "# Tuple.", "if", "src", "[", "0", "]", "==", "\"(\"", ":", "rgba", "=", "tuple", "(", "int", "(", "v", ")", "for", "v", "in", "src", "[", "1", ":", "-", "1", "]", ".", "split", "(", "\",\"", ")", "if", "v", ")", "if", "not", "(", "len", "(", "rgba", ")", "==", "4", "and", "all", "(", "0", "<=", "v", "<", "256", "for", "v", "in", "rgba", ")", ")", ":", "raise", "ValueError", "(", "\"Broken color tuple: {}. Must have four values from 0 to 255.\"", ".", "format", "(", "src", ")", ")", "return", "rgba", "# Constant.", "return", "int", "(", "src", ")" ]
Parse a string representing a color value. Color is either a fixed color (when coloring something from the UI, see the GLYPHS_COLORS constant) or a list of the format [u8, u8, u8, u8], Glyphs does not support an alpha channel as of 2.5.1 (confirmed by Georg Seifert), and always writes a 1 to it. This was brought up and is probably corrected in the next versions. https://github.com/googlei18n/glyphsLib/pull/363#issuecomment-390418497
[ "Parse", "a", "string", "representing", "a", "color", "value", "." ]
train
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/types.py#L305-L334
0.001972
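A few example calls showing the two accepted color forms described in the docstring:

from glyphsLib.types import parse_color

print(parse_color('(244,0,138,1)'))   # -> (244, 0, 138, 1): an RGBA-style tuple
print(parse_color('5'))               # -> 5: an index into the fixed UI colors
print(parse_color())                  # -> None (no source string)
# parse_color('(300,0,0,1)') raises ValueError: four values, each 0..255, are required.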
osrg/ryu
ryu/services/protocols/bgp/api/prefix.py
add_evpn_local
def add_evpn_local(route_type, route_dist, next_hop, **kwargs): """Adds EVPN route from VRF identified by *route_dist*. """ if(route_type in [EVPN_ETH_AUTO_DISCOVERY, EVPN_ETH_SEGMENT] and kwargs['esi'] == 0): raise ConfigValueError(conf_name=EVPN_ESI, conf_value=kwargs['esi']) try: # Create new path and insert into appropriate VRF table. tm = CORE_MANAGER.get_core_service().table_manager label = tm.update_vrf_table(route_dist, next_hop=next_hop, route_family=VRF_RF_L2_EVPN, route_type=route_type, **kwargs) # Currently we only allocate one label per local route, # so we share first label from the list. if label: label = label[0] # Send success response with new label. return [{EVPN_ROUTE_TYPE: route_type, ROUTE_DISTINGUISHER: route_dist, VRF_RF: VRF_RF_L2_EVPN, VPN_LABEL: label}.update(kwargs)] except BgpCoreError as e: raise PrefixError(desc=e)
python
def add_evpn_local(route_type, route_dist, next_hop, **kwargs): """Adds EVPN route from VRF identified by *route_dist*. """ if(route_type in [EVPN_ETH_AUTO_DISCOVERY, EVPN_ETH_SEGMENT] and kwargs['esi'] == 0): raise ConfigValueError(conf_name=EVPN_ESI, conf_value=kwargs['esi']) try: # Create new path and insert into appropriate VRF table. tm = CORE_MANAGER.get_core_service().table_manager label = tm.update_vrf_table(route_dist, next_hop=next_hop, route_family=VRF_RF_L2_EVPN, route_type=route_type, **kwargs) # Currently we only allocate one label per local route, # so we share first label from the list. if label: label = label[0] # Send success response with new label. return [{EVPN_ROUTE_TYPE: route_type, ROUTE_DISTINGUISHER: route_dist, VRF_RF: VRF_RF_L2_EVPN, VPN_LABEL: label}.update(kwargs)] except BgpCoreError as e: raise PrefixError(desc=e)
[ "def", "add_evpn_local", "(", "route_type", ",", "route_dist", ",", "next_hop", ",", "*", "*", "kwargs", ")", ":", "if", "(", "route_type", "in", "[", "EVPN_ETH_AUTO_DISCOVERY", ",", "EVPN_ETH_SEGMENT", "]", "and", "kwargs", "[", "'esi'", "]", "==", "0", ")", ":", "raise", "ConfigValueError", "(", "conf_name", "=", "EVPN_ESI", ",", "conf_value", "=", "kwargs", "[", "'esi'", "]", ")", "try", ":", "# Create new path and insert into appropriate VRF table.", "tm", "=", "CORE_MANAGER", ".", "get_core_service", "(", ")", ".", "table_manager", "label", "=", "tm", ".", "update_vrf_table", "(", "route_dist", ",", "next_hop", "=", "next_hop", ",", "route_family", "=", "VRF_RF_L2_EVPN", ",", "route_type", "=", "route_type", ",", "*", "*", "kwargs", ")", "# Currently we only allocate one label per local route,", "# so we share first label from the list.", "if", "label", ":", "label", "=", "label", "[", "0", "]", "# Send success response with new label.", "return", "[", "{", "EVPN_ROUTE_TYPE", ":", "route_type", ",", "ROUTE_DISTINGUISHER", ":", "route_dist", ",", "VRF_RF", ":", "VRF_RF_L2_EVPN", ",", "VPN_LABEL", ":", "label", "}", ".", "update", "(", "kwargs", ")", "]", "except", "BgpCoreError", "as", "e", ":", "raise", "PrefixError", "(", "desc", "=", "e", ")" ]
Adds EVPN route from VRF identified by *route_dist*.
[ "Adds", "EVPN", "route", "from", "VRF", "identified", "by", "*", "route_dist", "*", "." ]
train
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/services/protocols/bgp/api/prefix.py#L364-L390
0.001765
numenta/htmresearch
htmresearch/regions/RawSensor.py
RawSensor.getSpec
def getSpec(cls): """Return base spec for this region. See base class method for more info.""" spec = { "description":"Sensor for sending sparse data to an HTM network.", "singleNodeOnly":True, "outputs":{ "dataOut":{ "description":"Encoded text", "dataType":"Real32", "count":0, "regionLevel":True, "isDefaultOutput":True, }, "resetOut":{ "description":"Boolean reset output.", "dataType":"Real32", "count":1, "regionLevel":True, "isDefaultOutput":False, }, "sequenceIdOut":{ "description":"Sequence ID", "dataType":'Real32', "count":1, "regionLevel":True, "isDefaultOutput":False, }, }, "inputs":{}, "parameters":{ "verbosity":{ "description":"Verbosity level", "dataType":"UInt32", "accessMode":"ReadWrite", "count":1, "constraints":"", }, "outputWidth":{ "description":"Size of output vector", "dataType":"UInt32", "accessMode":"ReadWrite", "count":1, "defaultValue": 2048, "constraints":"", }, }, "commands":{ "addDataToQueue": { "description": "Add data", } }, } return spec
python
def getSpec(cls):
    """Return base spec for this region. See base class method for more info."""
    spec = {
      "description":"Sensor for sending sparse data to an HTM network.",
      "singleNodeOnly":True,
      "outputs":{
        "dataOut":{
          "description":"Encoded text",
          "dataType":"Real32",
          "count":0,
          "regionLevel":True,
          "isDefaultOutput":True,
          },
        "resetOut":{
          "description":"Boolean reset output.",
          "dataType":"Real32",
          "count":1,
          "regionLevel":True,
          "isDefaultOutput":False,
          },
        "sequenceIdOut":{
          "description":"Sequence ID",
          "dataType":'Real32',
          "count":1,
          "regionLevel":True,
          "isDefaultOutput":False,
          },
      },
      "inputs":{},
      "parameters":{
        "verbosity":{
          "description":"Verbosity level",
          "dataType":"UInt32",
          "accessMode":"ReadWrite",
          "count":1,
          "constraints":"",
          },
        "outputWidth":{
          "description":"Size of output vector",
          "dataType":"UInt32",
          "accessMode":"ReadWrite",
          "count":1,
          "defaultValue": 2048,
          "constraints":"",
          },
      },
      "commands":{
        "addDataToQueue": {
          "description": "Add data",
        }
      },
    }
    return spec
[ "def", "getSpec", "(", "cls", ")", ":", "spec", "=", "{", "\"description\"", ":", "\"Sensor for sending sparse data to an HTM network.\"", ",", "\"singleNodeOnly\"", ":", "True", ",", "\"outputs\"", ":", "{", "\"dataOut\"", ":", "{", "\"description\"", ":", "\"Encoded text\"", ",", "\"dataType\"", ":", "\"Real32\"", ",", "\"count\"", ":", "0", ",", "\"regionLevel\"", ":", "True", ",", "\"isDefaultOutput\"", ":", "True", ",", "}", ",", "\"resetOut\"", ":", "{", "\"description\"", ":", "\"Boolean reset output.\"", ",", "\"dataType\"", ":", "\"Real32\"", ",", "\"count\"", ":", "1", ",", "\"regionLevel\"", ":", "True", ",", "\"isDefaultOutput\"", ":", "False", ",", "}", ",", "\"sequenceIdOut\"", ":", "{", "\"description\"", ":", "\"Sequence ID\"", ",", "\"dataType\"", ":", "'Real32'", ",", "\"count\"", ":", "1", ",", "\"regionLevel\"", ":", "True", ",", "\"isDefaultOutput\"", ":", "False", ",", "}", ",", "}", ",", "\"inputs\"", ":", "{", "}", ",", "\"parameters\"", ":", "{", "\"verbosity\"", ":", "{", "\"description\"", ":", "\"Verbosity level\"", ",", "\"dataType\"", ":", "\"UInt32\"", ",", "\"accessMode\"", ":", "\"ReadWrite\"", ",", "\"count\"", ":", "1", ",", "\"constraints\"", ":", "\"\"", ",", "}", ",", "\"outputWidth\"", ":", "{", "\"description\"", ":", "\"Size of output vector\"", ",", "\"dataType\"", ":", "\"UInt32\"", ",", "\"accessMode\"", ":", "\"ReadWrite\"", ",", "\"count\"", ":", "1", ",", "\"defaultValue\"", ":", "2048", ",", "\"constraints\"", ":", "\"\"", ",", "}", ",", "}", ",", "\"commands\"", ":", "{", "\"addDataToQueue\"", ":", "{", "\"description\"", ":", "\"Add data\"", ",", "}", "}", ",", "}", "return", "spec" ]
Return base spec for this region. See base class method for more info.
[ "Return", "base", "spec", "for", "this", "region", ".", "See", "base", "class", "method", "for", "more", "info", "." ]
train
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/regions/RawSensor.py#L48-L101
0.026742
ska-sa/katcp-python
katcp/inspecting_client.py
InspectingClientAsync.inform_hook_client_factory
def inform_hook_client_factory(self, host, port, *args, **kwargs):
    """Return an instance of :class:`_InformHookDeviceClient` or similar

    Provided to ease testing. Dynamically overriding this method after
    instantiation but before start() is called allows for deep brain surgery.
    See :class:`katcp.fake_clients.TBD`

    """
    return _InformHookDeviceClient(host, port, *args, **kwargs)
python
def inform_hook_client_factory(self, host, port, *args, **kwargs):
    """Return an instance of :class:`_InformHookDeviceClient` or similar

    Provided to ease testing. Dynamically overriding this method after
    instantiation but before start() is called allows for deep brain surgery.
    See :class:`katcp.fake_clients.TBD`

    """
    return _InformHookDeviceClient(host, port, *args, **kwargs)
[ "def", "inform_hook_client_factory", "(", "self", ",", "host", ",", "port", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_InformHookDeviceClient", "(", "host", ",", "port", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Return an instance of :class:`_InformHookDeviceClient` or similar Provided to ease testing. Dynamically overriding this method after instantiation but before start() is called allows for deep brain surgery. See :class:`katcp.fake_clients.TBD`
[ "Return", "an", "instance", "of", ":", "class", ":", "_InformHookDeviceClient", "or", "similar" ]
train
https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/inspecting_client.py#L260-L268
0.007042
IndicoDataSolutions/IndicoIo-python
indicoio/utils/preprocessing.py
get_element_type
def get_element_type(_list, dimens):
    """
    Given the dimensions of a nested list and the list,
    returns the type of the elements in the inner list.
    """
    elem = _list
    for _ in range(len(dimens)):
        elem = elem[0]
    return type(elem)
python
def get_element_type(_list, dimens):
    """
    Given the dimensions of a nested list and the list,
    returns the type of the elements in the inner list.
    """
    elem = _list
    for _ in range(len(dimens)):
        elem = elem[0]
    return type(elem)
[ "def", "get_element_type", "(", "_list", ",", "dimens", ")", ":", "elem", "=", "_list", "for", "_", "in", "range", "(", "len", "(", "dimens", ")", ")", ":", "elem", "=", "elem", "[", "0", "]", "return", "type", "(", "elem", ")" ]
Given the dimensions of a nested list and the list, returns the type of the elements in the inner list.
[ "Given", "the", "dimensions", "of", "a", "nested", "list", "and", "the", "list", "returns", "the", "type", "of", "the", "elements", "in", "the", "inner", "list", "." ]
train
https://github.com/IndicoDataSolutions/IndicoIo-python/blob/6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa/indicoio/utils/preprocessing.py#L109-L117
0.003861
dwwkelly/note
note/server.py
Note_Server.Run
def Run(self):
    """ Wait for clients to connect and service them

    :returns: None
    """
    while True:
        try:
            events = self.poller.poll()
        except KeyboardInterrupt:
            self.context.destroy()
            sys.exit()

        self.Handle_Events(events)
python
def Run(self):
    """ Wait for clients to connect and service them

    :returns: None
    """
    while True:
        try:
            events = self.poller.poll()
        except KeyboardInterrupt:
            self.context.destroy()
            sys.exit()

        self.Handle_Events(events)
[ "def", "Run", "(", "self", ")", ":", "while", "True", ":", "try", ":", "events", "=", "self", ".", "poller", ".", "poll", "(", ")", "except", "KeyboardInterrupt", ":", "self", ".", "context", ".", "destroy", "(", ")", "sys", ".", "exit", "(", ")", "self", ".", "Handle_Events", "(", "events", ")" ]
Wait for clients to connect and service them :returns: None
[ "Wait", "for", "clients", "to", "connect", "and", "service", "them" ]
train
https://github.com/dwwkelly/note/blob/b41d5fe1e4a3b67b50285168dd58f903bf219e6c/note/server.py#L31-L46
0.005848
djgagne/hagelslag
hagelslag/evaluation/ObjectEvaluator.py
ObjectEvaluator.sample_forecast_max_hail
def sample_forecast_max_hail(self, dist_model_name, condition_model_name, num_samples,
                             condition_threshold=0.5, query=None):
    """
    Samples every forecast hail object and returns an empirical distribution of possible maximum hail sizes.

    Hail sizes are sampled from each predicted gamma distribution. The total number of samples equals
    num_samples * area of the hail object. To get the maximum hail size for each realization, the maximum
    value within each area sample is used.

    Args:
        dist_model_name: Name of the distribution machine learning model being evaluated
        condition_model_name: Name of the hail/no-hail model being evaluated
        num_samples: Number of maximum hail samples to draw
        condition_threshold: Threshold for drawing hail samples
        query: A str that selects a subset of the data for evaluation

    Returns:
        A numpy array containing maximum hail samples for each forecast object.
    """
    if query is not None:
        dist_forecasts = self.matched_forecasts["dist"][dist_model_name].query(query)
        dist_forecasts = dist_forecasts.reset_index(drop=True)
        condition_forecasts = self.matched_forecasts["condition"][condition_model_name].query(query)
        condition_forecasts = condition_forecasts.reset_index(drop=True)
    else:
        dist_forecasts = self.matched_forecasts["dist"][dist_model_name]
        condition_forecasts = self.matched_forecasts["condition"][condition_model_name]
    max_hail_samples = np.zeros((dist_forecasts.shape[0], num_samples))
    areas = dist_forecasts["Area"].values
    for f in np.arange(dist_forecasts.shape[0]):
        condition_prob = condition_forecasts.loc[f, self.forecast_bins["condition"][0]]
        if condition_prob >= condition_threshold:
            max_hail_samples[f] = np.sort(gamma.rvs(*dist_forecasts.loc[f, self.forecast_bins["dist"]].values,
                                                    size=(num_samples, areas[f])).max(axis=1))
    return max_hail_samples
python
def sample_forecast_max_hail(self, dist_model_name, condition_model_name, num_samples,
                             condition_threshold=0.5, query=None):
    """
    Samples every forecast hail object and returns an empirical distribution of possible maximum hail sizes.

    Hail sizes are sampled from each predicted gamma distribution. The total number of samples equals
    num_samples * area of the hail object. To get the maximum hail size for each realization, the maximum
    value within each area sample is used.

    Args:
        dist_model_name: Name of the distribution machine learning model being evaluated
        condition_model_name: Name of the hail/no-hail model being evaluated
        num_samples: Number of maximum hail samples to draw
        condition_threshold: Threshold for drawing hail samples
        query: A str that selects a subset of the data for evaluation

    Returns:
        A numpy array containing maximum hail samples for each forecast object.
    """
    if query is not None:
        dist_forecasts = self.matched_forecasts["dist"][dist_model_name].query(query)
        dist_forecasts = dist_forecasts.reset_index(drop=True)
        condition_forecasts = self.matched_forecasts["condition"][condition_model_name].query(query)
        condition_forecasts = condition_forecasts.reset_index(drop=True)
    else:
        dist_forecasts = self.matched_forecasts["dist"][dist_model_name]
        condition_forecasts = self.matched_forecasts["condition"][condition_model_name]
    max_hail_samples = np.zeros((dist_forecasts.shape[0], num_samples))
    areas = dist_forecasts["Area"].values
    for f in np.arange(dist_forecasts.shape[0]):
        condition_prob = condition_forecasts.loc[f, self.forecast_bins["condition"][0]]
        if condition_prob >= condition_threshold:
            max_hail_samples[f] = np.sort(gamma.rvs(*dist_forecasts.loc[f, self.forecast_bins["dist"]].values,
                                                    size=(num_samples, areas[f])).max(axis=1))
    return max_hail_samples
[ "def", "sample_forecast_max_hail", "(", "self", ",", "dist_model_name", ",", "condition_model_name", ",", "num_samples", ",", "condition_threshold", "=", "0.5", ",", "query", "=", "None", ")", ":", "if", "query", "is", "not", "None", ":", "dist_forecasts", "=", "self", ".", "matched_forecasts", "[", "\"dist\"", "]", "[", "dist_model_name", "]", ".", "query", "(", "query", ")", "dist_forecasts", "=", "dist_forecasts", ".", "reset_index", "(", "drop", "=", "True", ")", "condition_forecasts", "=", "self", ".", "matched_forecasts", "[", "\"condition\"", "]", "[", "condition_model_name", "]", ".", "query", "(", "query", ")", "condition_forecasts", "=", "condition_forecasts", ".", "reset_index", "(", "drop", "=", "True", ")", "else", ":", "dist_forecasts", "=", "self", ".", "matched_forecasts", "[", "\"dist\"", "]", "[", "dist_model_name", "]", "condition_forecasts", "=", "self", ".", "matched_forecasts", "[", "\"condition\"", "]", "[", "condition_model_name", "]", "max_hail_samples", "=", "np", ".", "zeros", "(", "(", "dist_forecasts", ".", "shape", "[", "0", "]", ",", "num_samples", ")", ")", "areas", "=", "dist_forecasts", "[", "\"Area\"", "]", ".", "values", "for", "f", "in", "np", ".", "arange", "(", "dist_forecasts", ".", "shape", "[", "0", "]", ")", ":", "condition_prob", "=", "condition_forecasts", ".", "loc", "[", "f", ",", "self", ".", "forecast_bins", "[", "\"condition\"", "]", "[", "0", "]", "]", "if", "condition_prob", ">=", "condition_threshold", ":", "max_hail_samples", "[", "f", "]", "=", "np", ".", "sort", "(", "gamma", ".", "rvs", "(", "*", "dist_forecasts", ".", "loc", "[", "f", ",", "self", ".", "forecast_bins", "[", "\"dist\"", "]", "]", ".", "values", ",", "size", "=", "(", "num_samples", ",", "areas", "[", "f", "]", ")", ")", ".", "max", "(", "axis", "=", "1", ")", ")", "return", "max_hail_samples" ]
Samples every forecast hail object and returns an empirical distribution of possible maximum hail sizes. Hail sizes are sampled from each predicted gamma distribution. The total number of samples equals num_samples * area of the hail object. To get the maximum hail size for each realization, the maximum value within each area sample is used. Args: dist_model_name: Name of the distribution machine learning model being evaluated condition_model_name: Name of the hail/no-hail model being evaluated num_samples: Number of maximum hail samples to draw condition_threshold: Threshold for drawing hail samples query: A str that selects a subset of the data for evaluation Returns: A numpy array containing maximum hail samples for each forecast object.
[ "Samples", "every", "forecast", "hail", "object", "and", "returns", "an", "empirical", "distribution", "of", "possible", "maximum", "hail", "sizes", "." ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/evaluation/ObjectEvaluator.py#L248-L282
0.00736
datastore/datastore
datastore/core/query.py
Query.from_dict
def from_dict(cls, dictionary):
    '''Constructs a query from a dictionary.'''
    query = cls(Key(dictionary['key']))

    for key, value in dictionary.items():

        if key == 'order':
            for order in value:
                query.order(order)

        elif key == 'filter':
            for filter in value:
                if not isinstance(filter, Filter):
                    filter = Filter(*filter)
                query.filter(filter)

        elif key in ['limit', 'offset', 'offset_key']:
            setattr(query, key, value)

    return query
python
def from_dict(cls, dictionary):
    '''Constructs a query from a dictionary.'''
    query = cls(Key(dictionary['key']))

    for key, value in dictionary.items():

        if key == 'order':
            for order in value:
                query.order(order)

        elif key == 'filter':
            for filter in value:
                if not isinstance(filter, Filter):
                    filter = Filter(*filter)
                query.filter(filter)

        elif key in ['limit', 'offset', 'offset_key']:
            setattr(query, key, value)

    return query
[ "def", "from_dict", "(", "cls", ",", "dictionary", ")", ":", "query", "=", "cls", "(", "Key", "(", "dictionary", "[", "'key'", "]", ")", ")", "for", "key", ",", "value", "in", "dictionary", ".", "items", "(", ")", ":", "if", "key", "==", "'order'", ":", "for", "order", "in", "value", ":", "query", ".", "order", "(", "order", ")", "elif", "key", "==", "'filter'", ":", "for", "filter", "in", "value", ":", "if", "not", "isinstance", "(", "filter", ",", "Filter", ")", ":", "filter", "=", "Filter", "(", "*", "filter", ")", "query", ".", "filter", "(", "filter", ")", "elif", "key", "in", "[", "'limit'", ",", "'offset'", ",", "'offset_key'", "]", ":", "setattr", "(", "query", ",", "key", ",", "value", ")", "return", "query" ]
Constructs a query from a dictionary.
[ "Constructs", "a", "query", "from", "a", "dictionary", "." ]
train
https://github.com/datastore/datastore/blob/7ccf0cd4748001d3dbf5e6dda369b0f63e0269d3/datastore/core/query.py#L444-L462
0.01341
crackinglandia/pype32
pype32/directories.py
ImageExportTable.parse
def parse(readDataInstance):
    """
    Returns a new L{ImageExportTable} object.

    @type readDataInstance: L{ReadData}
    @param readDataInstance: A L{ReadData} object with data to be parsed as a L{ImageExportTable} object.

    @rtype: L{ImageExportTable}
    @return: A new L{ImageExportTable} object.
    """
    et = ImageExportTable()

    et.characteristics.value = readDataInstance.readDword()
    et.timeDateStamp.value = readDataInstance.readDword()
    et.majorVersion.value = readDataInstance.readWord()
    et.minorVersion.value = readDataInstance.readWord()
    et.name.value = readDataInstance.readDword()
    et.base.value = readDataInstance.readDword()
    et.numberOfFunctions.value = readDataInstance.readDword()
    et.numberOfNames.value = readDataInstance.readDword()
    et.addressOfFunctions.value = readDataInstance.readDword()
    et.addressOfNames.value = readDataInstance.readDword()
    et.addressOfNameOrdinals.value = readDataInstance.readDword()
    return et
python
def parse(readDataInstance):
    """
    Returns a new L{ImageExportTable} object.

    @type readDataInstance: L{ReadData}
    @param readDataInstance: A L{ReadData} object with data to be parsed as a L{ImageExportTable} object.

    @rtype: L{ImageExportTable}
    @return: A new L{ImageExportTable} object.
    """
    et = ImageExportTable()

    et.characteristics.value = readDataInstance.readDword()
    et.timeDateStamp.value = readDataInstance.readDword()
    et.majorVersion.value = readDataInstance.readWord()
    et.minorVersion.value = readDataInstance.readWord()
    et.name.value = readDataInstance.readDword()
    et.base.value = readDataInstance.readDword()
    et.numberOfFunctions.value = readDataInstance.readDword()
    et.numberOfNames.value = readDataInstance.readDword()
    et.addressOfFunctions.value = readDataInstance.readDword()
    et.addressOfNames.value = readDataInstance.readDword()
    et.addressOfNameOrdinals.value = readDataInstance.readDword()
    return et
[ "def", "parse", "(", "readDataInstance", ")", ":", "et", "=", "ImageExportTable", "(", ")", "et", ".", "characteristics", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "et", ".", "timeDateStamp", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "et", ".", "majorVersion", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "et", ".", "minorVersion", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "et", ".", "name", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "et", ".", "base", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "et", ".", "numberOfFunctions", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "et", ".", "numberOfNames", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "et", ".", "addressOfFunctions", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "et", ".", "addressOfNames", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "et", ".", "addressOfNameOrdinals", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "return", "et" ]
Returns a new L{ImageExportTable} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with data to be parsed as a L{ImageExportTable} object. @rtype: L{ImageExportTable} @return: A new L{ImageExportTable} object.
[ "Returns", "a", "new", "L", "{", "ImageExportTable", "}", "object", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/directories.py#L902-L925
0.005455
apple/turicreate
src/unity/python/turicreate/toolkits/_tree_model_mixin.py
TreeModelMixin.extract_features
def extract_features(self, dataset, missing_value_action='auto'):
    """
    For each example in the dataset, extract the leaf indices of
    each tree as features.

    For multiclass classification, each leaf index contains #num_class
    numbers.

    The returned feature vectors can be used as input to train another
    supervised learning model such as a
    :py:class:`~turicreate.logistic_classifier.LogisticClassifier`,
    an :py:class:`~turicreate.svm_classifier.SVMClassifier`, or a

    Parameters
    ----------
    dataset : SFrame
        Dataset of new observations. Must include columns with the same
        names as the features used for model training, but does not require
        a target column. Additional columns are ignored.

    missing_value_action: str, optional
        Action to perform when missing values are encountered. This can be
        one of:

        - 'auto': Choose a model dependent missing value policy.
        - 'impute': Proceed with evaluation by filling in the missing
          values with the mean of the training data. Missing
          values are also imputed if an entire column of data is
          missing during evaluation.
        - 'none': Treat missing value as is. Model must be able to handle
          missing value.
        - 'error' : Do not proceed with prediction and terminate with
          an error message.

    Returns
    -------
    out : SArray
        An SArray of dtype array.array containing extracted features.

    Examples
    --------
    >>> data = turicreate.SFrame(
        'https://static.turi.com/datasets/regression/houses.csv')

    >>> # Regression Tree Models
    >>> data['regression_tree_features'] = model.extract_features(data)

    >>> # Classification Tree Models
    >>> data['classification_tree_features'] = model.extract_features(data)
    """
    _raise_error_if_not_sframe(dataset, "dataset")
    if missing_value_action == 'auto':
        missing_value_action = select_default_missing_value_policy(self, 'extract_features')
    return self.__proxy__.extract_features(dataset, missing_value_action)
python
def extract_features(self, dataset, missing_value_action='auto'):
    """
    For each example in the dataset, extract the leaf indices of
    each tree as features.

    For multiclass classification, each leaf index contains #num_class
    numbers.

    The returned feature vectors can be used as input to train another
    supervised learning model such as a
    :py:class:`~turicreate.logistic_classifier.LogisticClassifier`,
    an :py:class:`~turicreate.svm_classifier.SVMClassifier`, or a

    Parameters
    ----------
    dataset : SFrame
        Dataset of new observations. Must include columns with the same
        names as the features used for model training, but does not require
        a target column. Additional columns are ignored.

    missing_value_action: str, optional
        Action to perform when missing values are encountered. This can be
        one of:

        - 'auto': Choose a model dependent missing value policy.
        - 'impute': Proceed with evaluation by filling in the missing
          values with the mean of the training data. Missing
          values are also imputed if an entire column of data is
          missing during evaluation.
        - 'none': Treat missing value as is. Model must be able to handle
          missing value.
        - 'error' : Do not proceed with prediction and terminate with
          an error message.

    Returns
    -------
    out : SArray
        An SArray of dtype array.array containing extracted features.

    Examples
    --------
    >>> data = turicreate.SFrame(
        'https://static.turi.com/datasets/regression/houses.csv')

    >>> # Regression Tree Models
    >>> data['regression_tree_features'] = model.extract_features(data)

    >>> # Classification Tree Models
    >>> data['classification_tree_features'] = model.extract_features(data)
    """
    _raise_error_if_not_sframe(dataset, "dataset")
    if missing_value_action == 'auto':
        missing_value_action = select_default_missing_value_policy(self, 'extract_features')
    return self.__proxy__.extract_features(dataset, missing_value_action)
[ "def", "extract_features", "(", "self", ",", "dataset", ",", "missing_value_action", "=", "'auto'", ")", ":", "_raise_error_if_not_sframe", "(", "dataset", ",", "\"dataset\"", ")", "if", "missing_value_action", "==", "'auto'", ":", "missing_value_action", "=", "select_default_missing_value_policy", "(", "self", ",", "'extract_features'", ")", "return", "self", ".", "__proxy__", ".", "extract_features", "(", "dataset", ",", "missing_value_action", ")" ]
For each example in the dataset, extract the leaf indices of each tree as features. For multiclass classification, each leaf index contains #num_class numbers. The returned feature vectors can be used as input to train another supervised learning model such as a :py:class:`~turicreate.logistic_classifier.LogisticClassifier`, an :py:class:`~turicreate.svm_classifier.SVMClassifier`, or a Parameters ---------- dataset : SFrame Dataset of new observations. Must include columns with the same names as the features used for model training, but does not require a target column. Additional columns are ignored. missing_value_action: str, optional Action to perform when missing values are encountered. This can be one of: - 'auto': Choose a model dependent missing value policy. - 'impute': Proceed with evaluation by filling in the missing values with the mean of the training data. Missing values are also imputed if an entire column of data is missing during evaluation. - 'none': Treat missing value as is. Model must be able to handle missing value. - 'error' : Do not proceed with prediction and terminate with an error message. Returns ------- out : SArray An SArray of dtype array.array containing extracted features. Examples -------- >>> data = turicreate.SFrame( 'https://static.turi.com/datasets/regression/houses.csv') >>> # Regression Tree Models >>> data['regression_tree_features'] = model.extract_features(data) >>> # Classification Tree Models >>> data['classification_tree_features'] = model.extract_features(data)
[ "For", "each", "example", "in", "the", "dataset", "extract", "the", "leaf", "indices", "of", "each", "tree", "as", "features", "." ]
train
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_tree_model_mixin.py#L65-L120
0.001276
rainwoodman/kdcount
kdcount/utils.py
bincount
def bincount(dig, weight, minlength):
    """ bincount supporting scalar and vector weight """
    if numpy.isscalar(weight):
        return numpy.bincount(dig, minlength=minlength) * weight
    else:
        return numpy.bincount(dig, weight, minlength)
python
def bincount(dig, weight, minlength):
    """ bincount supporting scalar and vector weight """
    if numpy.isscalar(weight):
        return numpy.bincount(dig, minlength=minlength) * weight
    else:
        return numpy.bincount(dig, weight, minlength)
[ "def", "bincount", "(", "dig", ",", "weight", ",", "minlength", ")", ":", "if", "numpy", ".", "isscalar", "(", "weight", ")", ":", "return", "numpy", ".", "bincount", "(", "dig", ",", "minlength", "=", "minlength", ")", "*", "weight", "else", ":", "return", "numpy", ".", "bincount", "(", "dig", ",", "weight", ",", "minlength", ")" ]
bincount supporting scalar and vector weight
[ "bincount", "supporting", "scalar", "and", "vector", "weight" ]
train
https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/utils.py#L27-L32
0.003937
aws/sagemaker-python-sdk
src/sagemaker/amazon/hyperparameter.py
Hyperparameter.serialize_all
def serialize_all(obj):
    """Return all non-None ``hyperparameter`` values on ``obj`` as a ``dict[str,str].``"""
    if '_hyperparameters' not in dir(obj):
        return {}
    return {k: str(v) for k, v in obj._hyperparameters.items() if v is not None}
python
def serialize_all(obj):
    """Return all non-None ``hyperparameter`` values on ``obj`` as a ``dict[str,str].``"""
    if '_hyperparameters' not in dir(obj):
        return {}
    return {k: str(v) for k, v in obj._hyperparameters.items() if v is not None}
[ "def", "serialize_all", "(", "obj", ")", ":", "if", "'_hyperparameters'", "not", "in", "dir", "(", "obj", ")", ":", "return", "{", "}", "return", "{", "k", ":", "str", "(", "v", ")", "for", "k", ",", "v", "in", "obj", ".", "_hyperparameters", ".", "items", "(", ")", "if", "v", "is", "not", "None", "}" ]
Return all non-None ``hyperparameter`` values on ``obj`` as a ``dict[str,str].``
[ "Return", "all", "non", "-", "None", "hyperparameter", "values", "on", "obj", "as", "a", "dict", "[", "str", "str", "]", "." ]
train
https://github.com/aws/sagemaker-python-sdk/blob/a9e724c7d3f5572b68c3903548c792a59d99799a/src/sagemaker/amazon/hyperparameter.py#L66-L70
0.014706
notifiers/notifiers
notifiers/utils/schema/formats.py
is_timestamp
def is_timestamp(instance):
    """Validates data is a timestamp"""
    if not isinstance(instance, (int, str)):
        return True
    return datetime.fromtimestamp(int(instance))
python
def is_timestamp(instance):
    """Validates data is a timestamp"""
    if not isinstance(instance, (int, str)):
        return True
    return datetime.fromtimestamp(int(instance))
[ "def", "is_timestamp", "(", "instance", ")", ":", "if", "not", "isinstance", "(", "instance", ",", "(", "int", ",", "str", ")", ")", ":", "return", "True", "return", "datetime", ".", "fromtimestamp", "(", "int", "(", "instance", ")", ")" ]
Validates data is a timestamp
[ "Validates", "data", "is", "a", "timestamp" ]
train
https://github.com/notifiers/notifiers/blob/6dd8aafff86935dbb4763db9c56f9cdd7fc08b65/notifiers/utils/schema/formats.py#L65-L69
0.005525
arne-cl/discoursegraphs
src/discoursegraphs/readwrite/paulaxml/paula.py
PaulaDocument.__gen_rel_anno_file
def __gen_rel_anno_file(self, top_level_layer):
    """
    A rel annotation file contains edge (rel) attributes. It is e.g. used
    to annotate the type of a dependency relation (subj, obj etc.).

    See also: __gen_hierarchy_file()
    """
    paula_id = '{0}.{1}.{2}_{3}_rel'.format(top_level_layer,
                                            self.corpus_name, self.name,
                                            top_level_layer)
    E, tree = gen_paula_etree(paula_id)

    dominance_edges = select_edges_by(
        self.dg, layer=top_level_layer,
        edge_type=EdgeTypes.dominance_relation, data=True)
    dominance_dict = defaultdict(lambda: defaultdict(str))
    for source_id, target_id, edge_attrs in dominance_edges:
        if source_id != top_level_layer+':root_node':
            dominance_dict[source_id][target_id] = edge_attrs

    base_paula_id = self.paulamap['hierarchy'][top_level_layer]
    mflist = E('multiFeatList',
               {XMLBASE: base_paula_id+'.xml'})

    for source_id in dominance_dict:
        for target_id in dominance_dict[source_id]:
            rel_href = '#rel_{0}_{1}'.format(source_id, target_id)
            mfeat = E('multiFeat',
                      {XLINKHREF: rel_href})
            edge_attrs = dominance_dict[source_id][target_id]
            for edge_attr in edge_attrs:
                if edge_attr not in IGNORED_EDGE_ATTRIBS:
                    mfeat.append(E('feat',
                                   {'name': edge_attr,
                                    'value': edge_attrs[edge_attr]}))

            if self.human_readable:  # adds edge label as a <!--comment-->
                source_label = self.dg.node[source_id].get('label')
                target_label = self.dg.node[target_id].get('label')
                mfeat.append(Comment(u'{0} - {1}'.format(source_label,
                                                         target_label)))
            mflist.append(mfeat)

    tree.append(mflist)
    self.files[paula_id] = tree
    self.file2dtd[paula_id] = PaulaDTDs.multifeat
    return paula_id
python
def __gen_rel_anno_file(self, top_level_layer):
    """
    A rel annotation file contains edge (rel) attributes. It is e.g. used
    to annotate the type of a dependency relation (subj, obj etc.).

    See also: __gen_hierarchy_file()
    """
    paula_id = '{0}.{1}.{2}_{3}_rel'.format(top_level_layer,
                                            self.corpus_name, self.name,
                                            top_level_layer)
    E, tree = gen_paula_etree(paula_id)

    dominance_edges = select_edges_by(
        self.dg, layer=top_level_layer,
        edge_type=EdgeTypes.dominance_relation, data=True)
    dominance_dict = defaultdict(lambda: defaultdict(str))
    for source_id, target_id, edge_attrs in dominance_edges:
        if source_id != top_level_layer+':root_node':
            dominance_dict[source_id][target_id] = edge_attrs

    base_paula_id = self.paulamap['hierarchy'][top_level_layer]
    mflist = E('multiFeatList',
               {XMLBASE: base_paula_id+'.xml'})

    for source_id in dominance_dict:
        for target_id in dominance_dict[source_id]:
            rel_href = '#rel_{0}_{1}'.format(source_id, target_id)
            mfeat = E('multiFeat',
                      {XLINKHREF: rel_href})
            edge_attrs = dominance_dict[source_id][target_id]
            for edge_attr in edge_attrs:
                if edge_attr not in IGNORED_EDGE_ATTRIBS:
                    mfeat.append(E('feat',
                                   {'name': edge_attr,
                                    'value': edge_attrs[edge_attr]}))

            if self.human_readable:  # adds edge label as a <!--comment-->
                source_label = self.dg.node[source_id].get('label')
                target_label = self.dg.node[target_id].get('label')
                mfeat.append(Comment(u'{0} - {1}'.format(source_label,
                                                         target_label)))
            mflist.append(mfeat)

    tree.append(mflist)
    self.files[paula_id] = tree
    self.file2dtd[paula_id] = PaulaDTDs.multifeat
    return paula_id
[ "def", "__gen_rel_anno_file", "(", "self", ",", "top_level_layer", ")", ":", "paula_id", "=", "'{0}.{1}.{2}_{3}_rel'", ".", "format", "(", "top_level_layer", ",", "self", ".", "corpus_name", ",", "self", ".", "name", ",", "top_level_layer", ")", "E", ",", "tree", "=", "gen_paula_etree", "(", "paula_id", ")", "dominance_edges", "=", "select_edges_by", "(", "self", ".", "dg", ",", "layer", "=", "top_level_layer", ",", "edge_type", "=", "EdgeTypes", ".", "dominance_relation", ",", "data", "=", "True", ")", "dominance_dict", "=", "defaultdict", "(", "lambda", ":", "defaultdict", "(", "str", ")", ")", "for", "source_id", ",", "target_id", ",", "edge_attrs", "in", "dominance_edges", ":", "if", "source_id", "!=", "top_level_layer", "+", "':root_node'", ":", "dominance_dict", "[", "source_id", "]", "[", "target_id", "]", "=", "edge_attrs", "base_paula_id", "=", "self", ".", "paulamap", "[", "'hierarchy'", "]", "[", "top_level_layer", "]", "mflist", "=", "E", "(", "'multiFeatList'", ",", "{", "XMLBASE", ":", "base_paula_id", "+", "'.xml'", "}", ")", "for", "source_id", "in", "dominance_dict", ":", "for", "target_id", "in", "dominance_dict", "[", "source_id", "]", ":", "rel_href", "=", "'#rel_{0}_{1}'", ".", "format", "(", "source_id", ",", "target_id", ")", "mfeat", "=", "E", "(", "'multiFeat'", ",", "{", "XLINKHREF", ":", "rel_href", "}", ")", "edge_attrs", "=", "dominance_dict", "[", "source_id", "]", "[", "target_id", "]", "for", "edge_attr", "in", "edge_attrs", ":", "if", "edge_attr", "not", "in", "IGNORED_EDGE_ATTRIBS", ":", "mfeat", ".", "append", "(", "E", "(", "'feat'", ",", "{", "'name'", ":", "edge_attr", ",", "'value'", ":", "edge_attrs", "[", "edge_attr", "]", "}", ")", ")", "if", "self", ".", "human_readable", ":", "# adds edge label as a <!--comment-->", "source_label", "=", "self", ".", "dg", ".", "node", "[", "source_id", "]", ".", "get", "(", "'label'", ")", "target_label", "=", "self", ".", "dg", ".", "node", "[", "target_id", "]", ".", "get", "(", "'label'", ")", "mfeat", ".", "append", "(", "Comment", "(", "u'{0} - {1}'", ".", "format", "(", "source_label", ",", "target_label", ")", ")", ")", "mflist", ".", "append", "(", "mfeat", ")", "tree", ".", "append", "(", "mflist", ")", "self", ".", "files", "[", "paula_id", "]", "=", "tree", "self", ".", "file2dtd", "[", "paula_id", "]", "=", "PaulaDTDs", ".", "multifeat", "return", "paula_id" ]
A rel annotation file contains edge (rel) attributes. It is e.g. used to annotate the type of a dependency relation (subj, obj etc.). See also: __gen_hierarchy_file()
[ "A", "rel", "annotation", "file", "contains", "edge", "(", "rel", ")", "attributes", ".", "It", "is", "e", ".", "g", ".", "used", "to", "annotate", "the", "type", "of", "a", "dependency", "relation", "(", "subj", "obj", "etc", ".", ")", "." ]
train
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/paulaxml/paula.py#L400-L445
0.00228
mitsei/dlkit
dlkit/records/assessment/orthographic_visualization/multi_choice_records.py
MultiChoiceOrthoQuestionFormRecord._init_map
def _init_map(self):
    """stub"""
    QuestionFilesFormRecord._init_map(self)
    FirstAngleProjectionFormRecord._init_map(self)
    super(MultiChoiceOrthoQuestionFormRecord, self)._init_map()
python
def _init_map(self):
    """stub"""
    QuestionFilesFormRecord._init_map(self)
    FirstAngleProjectionFormRecord._init_map(self)
    super(MultiChoiceOrthoQuestionFormRecord, self)._init_map()
[ "def", "_init_map", "(", "self", ")", ":", "QuestionFilesFormRecord", ".", "_init_map", "(", "self", ")", "FirstAngleProjectionFormRecord", ".", "_init_map", "(", "self", ")", "super", "(", "MultiChoiceOrthoQuestionFormRecord", ",", "self", ")", ".", "_init_map", "(", ")" ]
stub
[ "stub" ]
train
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/assessment/orthographic_visualization/multi_choice_records.py#L143-L147
0.009524
QualiSystems/vCenterShell
package/cloudshell/cp/vcenter/vm/deploy.py
VirtualMachineDeployer._deploy_a_clone
def _deploy_a_clone(self, si, logger, app_name, template_name, other_params, vcenter_data_model,
                    reservation_id, cancellation_context, snapshot=''):
    """
    :rtype DeployAppResult:
    """
    # generate unique name
    vm_name = self.name_generator(app_name, reservation_id)

    VCenterDetailsFactory.set_deplyment_vcenter_params(
        vcenter_resource_model=vcenter_data_model, deploy_params=other_params)

    template_name = VMLocation.combine([other_params.default_datacenter, template_name])

    params = self.pv_service.CloneVmParameters(si=si,
                                               template_name=template_name,
                                               vm_name=vm_name,
                                               vm_folder=other_params.vm_location,
                                               datastore_name=other_params.vm_storage,
                                               cluster_name=other_params.vm_cluster,
                                               resource_pool=other_params.vm_resource_pool,
                                               power_on=False,
                                               snapshot=snapshot)

    if cancellation_context.is_cancelled:
        raise Exception("Action 'Clone VM' was cancelled.")

    clone_vm_result = self.pv_service.clone_vm(clone_params=params,
                                               logger=logger,
                                               cancellation_context=cancellation_context)
    if clone_vm_result.error:
        raise Exception(clone_vm_result.error)

    # remove a new created vm due to cancellation
    if cancellation_context.is_cancelled:
        self.pv_service.destroy_vm(vm=clone_vm_result.vm, logger=logger)
        raise Exception("Action 'Clone VM' was cancelled.")

    vm_details_data = self._safely_get_vm_details(clone_vm_result.vm, vm_name, vcenter_data_model,
                                                  other_params, logger)

    return DeployAppResult(vmName=vm_name,
                           vmUuid=clone_vm_result.vm.summary.config.uuid,
                           vmDetailsData=vm_details_data,
                           deployedAppAdditionalData={'ip_regex': other_params.ip_regex,
                                                      'refresh_ip_timeout': other_params.refresh_ip_timeout,
                                                      'auto_power_off': convert_to_bool(other_params.auto_power_off),
                                                      'auto_delete': convert_to_bool(other_params.auto_delete)})
python
def _deploy_a_clone(self, si, logger, app_name, template_name, other_params, vcenter_data_model,
                    reservation_id, cancellation_context, snapshot=''):
    """
    :rtype DeployAppResult:
    """
    # generate unique name
    vm_name = self.name_generator(app_name, reservation_id)

    VCenterDetailsFactory.set_deplyment_vcenter_params(
        vcenter_resource_model=vcenter_data_model, deploy_params=other_params)

    template_name = VMLocation.combine([other_params.default_datacenter, template_name])

    params = self.pv_service.CloneVmParameters(si=si,
                                               template_name=template_name,
                                               vm_name=vm_name,
                                               vm_folder=other_params.vm_location,
                                               datastore_name=other_params.vm_storage,
                                               cluster_name=other_params.vm_cluster,
                                               resource_pool=other_params.vm_resource_pool,
                                               power_on=False,
                                               snapshot=snapshot)

    if cancellation_context.is_cancelled:
        raise Exception("Action 'Clone VM' was cancelled.")

    clone_vm_result = self.pv_service.clone_vm(clone_params=params,
                                               logger=logger,
                                               cancellation_context=cancellation_context)
    if clone_vm_result.error:
        raise Exception(clone_vm_result.error)

    # remove a new created vm due to cancellation
    if cancellation_context.is_cancelled:
        self.pv_service.destroy_vm(vm=clone_vm_result.vm, logger=logger)
        raise Exception("Action 'Clone VM' was cancelled.")

    vm_details_data = self._safely_get_vm_details(clone_vm_result.vm, vm_name, vcenter_data_model,
                                                  other_params, logger)

    return DeployAppResult(vmName=vm_name,
                           vmUuid=clone_vm_result.vm.summary.config.uuid,
                           vmDetailsData=vm_details_data,
                           deployedAppAdditionalData={'ip_regex': other_params.ip_regex,
                                                      'refresh_ip_timeout': other_params.refresh_ip_timeout,
                                                      'auto_power_off': convert_to_bool(other_params.auto_power_off),
                                                      'auto_delete': convert_to_bool(other_params.auto_delete)})
[ "def", "_deploy_a_clone", "(", "self", ",", "si", ",", "logger", ",", "app_name", ",", "template_name", ",", "other_params", ",", "vcenter_data_model", ",", "reservation_id", ",", "cancellation_context", ",", "snapshot", "=", "''", ")", ":", "# generate unique name", "vm_name", "=", "self", ".", "name_generator", "(", "app_name", ",", "reservation_id", ")", "VCenterDetailsFactory", ".", "set_deplyment_vcenter_params", "(", "vcenter_resource_model", "=", "vcenter_data_model", ",", "deploy_params", "=", "other_params", ")", "template_name", "=", "VMLocation", ".", "combine", "(", "[", "other_params", ".", "default_datacenter", ",", "template_name", "]", ")", "params", "=", "self", ".", "pv_service", ".", "CloneVmParameters", "(", "si", "=", "si", ",", "template_name", "=", "template_name", ",", "vm_name", "=", "vm_name", ",", "vm_folder", "=", "other_params", ".", "vm_location", ",", "datastore_name", "=", "other_params", ".", "vm_storage", ",", "cluster_name", "=", "other_params", ".", "vm_cluster", ",", "resource_pool", "=", "other_params", ".", "vm_resource_pool", ",", "power_on", "=", "False", ",", "snapshot", "=", "snapshot", ")", "if", "cancellation_context", ".", "is_cancelled", ":", "raise", "Exception", "(", "\"Action 'Clone VM' was cancelled.\"", ")", "clone_vm_result", "=", "self", ".", "pv_service", ".", "clone_vm", "(", "clone_params", "=", "params", ",", "logger", "=", "logger", ",", "cancellation_context", "=", "cancellation_context", ")", "if", "clone_vm_result", ".", "error", ":", "raise", "Exception", "(", "clone_vm_result", ".", "error", ")", "# remove a new created vm due to cancellation", "if", "cancellation_context", ".", "is_cancelled", ":", "self", ".", "pv_service", ".", "destroy_vm", "(", "vm", "=", "clone_vm_result", ".", "vm", ",", "logger", "=", "logger", ")", "raise", "Exception", "(", "\"Action 'Clone VM' was cancelled.\"", ")", "vm_details_data", "=", "self", ".", "_safely_get_vm_details", "(", "clone_vm_result", ".", "vm", ",", "vm_name", ",", "vcenter_data_model", ",", "other_params", ",", "logger", ")", "return", "DeployAppResult", "(", "vmName", "=", "vm_name", ",", "vmUuid", "=", "clone_vm_result", ".", "vm", ".", "summary", ".", "config", ".", "uuid", ",", "vmDetailsData", "=", "vm_details_data", ",", "deployedAppAdditionalData", "=", "{", "'ip_regex'", ":", "other_params", ".", "ip_regex", ",", "'refresh_ip_timeout'", ":", "other_params", ".", "refresh_ip_timeout", ",", "'auto_power_off'", ":", "convert_to_bool", "(", "other_params", ".", "auto_power_off", ")", ",", "'auto_delete'", ":", "convert_to_bool", "(", "other_params", ".", "auto_delete", ")", "}", ")" ]
:rtype DeployAppResult:
[ ":", "rtype", "DeployAppResult", ":" ]
train
https://github.com/QualiSystems/vCenterShell/blob/e2e24cd938a92a68f4a8e6a860810d3ef72aae6d/package/cloudshell/cp/vcenter/vm/deploy.py#L109-L156
0.006139
bitesofcode/projexui
projexui/widgets/xviewwidget/xviewwidget.py
XViewWidget.restoreSettings
def restoreSettings(self, settings):
    """
    Restores the current structure of the view widget from the inputed \
    settings instance.

    :param settings | <QSettings>
    """
    key = self.objectName()
    value = unwrapVariant(settings.value('%s/profile' % key))

    if not value:
        self.reset(force = True)
        return False

    profile = value

    # restore the view type settings
    for viewType in self.viewTypes():
        viewType.restoreGlobalSettings(settings)

    # restore the profile
    self.restoreProfile(XViewProfile.fromString(profile))

    if not self.views():
        self.reset(force = True)

    return True
python
def restoreSettings(self, settings):
    """
    Restores the current structure of the view widget from the inputed \
    settings instance.

    :param settings | <QSettings>
    """
    key = self.objectName()
    value = unwrapVariant(settings.value('%s/profile' % key))

    if not value:
        self.reset(force = True)
        return False

    profile = value

    # restore the view type settings
    for viewType in self.viewTypes():
        viewType.restoreGlobalSettings(settings)

    # restore the profile
    self.restoreProfile(XViewProfile.fromString(profile))

    if not self.views():
        self.reset(force = True)

    return True
[ "def", "restoreSettings", "(", "self", ",", "settings", ")", ":", "key", "=", "self", ".", "objectName", "(", ")", "value", "=", "unwrapVariant", "(", "settings", ".", "value", "(", "'%s/profile'", "%", "key", ")", ")", "if", "not", "value", ":", "self", ".", "reset", "(", "force", "=", "True", ")", "return", "False", "profile", "=", "value", "# restore the view type settings", "for", "viewType", "in", "self", ".", "viewTypes", "(", ")", ":", "viewType", ".", "restoreGlobalSettings", "(", "settings", ")", "# restore the profile", "self", ".", "restoreProfile", "(", "XViewProfile", ".", "fromString", "(", "profile", ")", ")", "if", "not", "self", ".", "views", "(", ")", ":", "self", ".", "reset", "(", "force", "=", "True", ")", "return", "True" ]
Restores the current structure of the view widget from the inputed \ settings instance. :param settings | <QSettings>
[ "Restores", "the", "current", "structure", "of", "the", "view", "widget", "from", "the", "inputed", "\\", "settings", "instance", ".", ":", "param", "settings", "|", "<QSettings", ">" ]
train
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xviewwidget/xviewwidget.py#L295-L321
0.01875
Nachtfeuer/pipeline
spline/components/stage.py
Stage.process
def process(self, stage):
    """Processing one stage."""
    self.logger.info("Processing pipeline stage '%s'", self.title)
    output = []
    for entry in stage:
        key = list(entry.keys())[0]
        if key == "env":
            self.pipeline.data.env_list[1].update(entry[key])
            self.logger.debug("Updating environment at level 1 with %s",
                              self.pipeline.data.env_list[1])
            continue

        # if not "env" then it must be "tasks" (schema):
        tasks = Tasks(self.pipeline, re.match(r"tasks\(parallel\)", key) is not None)
        result = tasks.process(entry[key])
        for line in result['output']:
            output.append(line)
        if not result['success']:
            self.event.failed()
            return {'success': False, 'output': output}

    self.event.succeeded()
    return {'success': True, 'output': output}
python
def process(self, stage):
    """Processing one stage."""
    self.logger.info("Processing pipeline stage '%s'", self.title)
    output = []
    for entry in stage:
        key = list(entry.keys())[0]
        if key == "env":
            self.pipeline.data.env_list[1].update(entry[key])
            self.logger.debug("Updating environment at level 1 with %s",
                              self.pipeline.data.env_list[1])
            continue

        # if not "env" then it must be "tasks" (schema):
        tasks = Tasks(self.pipeline, re.match(r"tasks\(parallel\)", key) is not None)
        result = tasks.process(entry[key])
        for line in result['output']:
            output.append(line)
        if not result['success']:
            self.event.failed()
            return {'success': False, 'output': output}

    self.event.succeeded()
    return {'success': True, 'output': output}
[ "def", "process", "(", "self", ",", "stage", ")", ":", "self", ".", "logger", ".", "info", "(", "\"Processing pipeline stage '%s'\"", ",", "self", ".", "title", ")", "output", "=", "[", "]", "for", "entry", "in", "stage", ":", "key", "=", "list", "(", "entry", ".", "keys", "(", ")", ")", "[", "0", "]", "if", "key", "==", "\"env\"", ":", "self", ".", "pipeline", ".", "data", ".", "env_list", "[", "1", "]", ".", "update", "(", "entry", "[", "key", "]", ")", "self", ".", "logger", ".", "debug", "(", "\"Updating environment at level 1 with %s\"", ",", "self", ".", "pipeline", ".", "data", ".", "env_list", "[", "1", "]", ")", "continue", "# if not \"env\" then it must be \"tasks\" (schema):", "tasks", "=", "Tasks", "(", "self", ".", "pipeline", ",", "re", ".", "match", "(", "r\"tasks\\(parallel\\)\"", ",", "key", ")", "is", "not", "None", ")", "result", "=", "tasks", ".", "process", "(", "entry", "[", "key", "]", ")", "for", "line", "in", "result", "[", "'output'", "]", ":", "output", ".", "append", "(", "line", ")", "if", "not", "result", "[", "'success'", "]", ":", "self", ".", "event", ".", "failed", "(", ")", "return", "{", "'success'", ":", "False", ",", "'output'", ":", "output", "}", "self", ".", "event", ".", "succeeded", "(", ")", "return", "{", "'success'", ":", "True", ",", "'output'", ":", "output", "}" ]
Processing one stage.
[ "Processing", "one", "stage", "." ]
train
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/stage.py#L47-L69
0.003071
spacetelescope/drizzlepac
drizzlepac/processInput.py
_process_input_wcs
def _process_input_wcs(infiles, wcskey, updatewcs):
    """
    This is a subset of process_input(), for internal use only.  This is the
    portion of input handling which sets/updates WCS data, and is a performance
    hit - a target for parallelization. Returns the expanded list of filenames.
    """

    # Run parseinput though it's likely already been done in processFilenames
    outfiles = parseinput.parseinput(infiles)[0]

    # Disable parallel processing here for now until hardware I/O gets "wider".
    # Since this part is IO bound, parallelizing doesn't help more than a little
    # in most cases, and may actually slow this down on some desktop nodes.
    # cfgval_num_cores = None # get this from paramDict
    # pool_size = util.get_pool_size(cfgval_num_cores, len(outfiles))
    pool_size = 1

    # do the WCS updating
    if wcskey in ['', ' ', 'INDEF', None]:
        if updatewcs:
            log.info('Updating input WCS using "updatewcs"')
    else:
        log.info('Resetting input WCS to be based on WCS key = %s' % wcskey)

    if pool_size > 1:
        log.info('Executing %d parallel workers' % pool_size)
        subprocs = []
        for fname in outfiles:
            p = multiprocessing.Process(target=_process_input_wcs_single,
                                        name='processInput._process_input_wcs()',  # for err msgs
                                        args=(fname, wcskey, updatewcs)
                                        )
            subprocs.append(p)
        mputil.launch_and_wait(subprocs, pool_size)  # blocks till all done
    else:
        log.info('Executing serially')
        for fname in outfiles:
            _process_input_wcs_single(fname, wcskey, updatewcs)

    return outfiles
python
def _process_input_wcs(infiles, wcskey, updatewcs):
    """
    This is a subset of process_input(), for internal use only.  This is the
    portion of input handling which sets/updates WCS data, and is a performance
    hit - a target for parallelization. Returns the expanded list of filenames.
    """

    # Run parseinput though it's likely already been done in processFilenames
    outfiles = parseinput.parseinput(infiles)[0]

    # Disable parallel processing here for now until hardware I/O gets "wider".
    # Since this part is IO bound, parallelizing doesn't help more than a little
    # in most cases, and may actually slow this down on some desktop nodes.
    # cfgval_num_cores = None # get this from paramDict
    # pool_size = util.get_pool_size(cfgval_num_cores, len(outfiles))
    pool_size = 1

    # do the WCS updating
    if wcskey in ['', ' ', 'INDEF', None]:
        if updatewcs:
            log.info('Updating input WCS using "updatewcs"')
    else:
        log.info('Resetting input WCS to be based on WCS key = %s' % wcskey)

    if pool_size > 1:
        log.info('Executing %d parallel workers' % pool_size)
        subprocs = []
        for fname in outfiles:
            p = multiprocessing.Process(target=_process_input_wcs_single,
                                        name='processInput._process_input_wcs()',  # for err msgs
                                        args=(fname, wcskey, updatewcs)
                                        )
            subprocs.append(p)
        mputil.launch_and_wait(subprocs, pool_size)  # blocks till all done
    else:
        log.info('Executing serially')
        for fname in outfiles:
            _process_input_wcs_single(fname, wcskey, updatewcs)

    return outfiles
[ "def", "_process_input_wcs", "(", "infiles", ",", "wcskey", ",", "updatewcs", ")", ":", "# Run parseinput though it's likely already been done in processFilenames", "outfiles", "=", "parseinput", ".", "parseinput", "(", "infiles", ")", "[", "0", "]", "# Disable parallel processing here for now until hardware I/O gets \"wider\".", "# Since this part is IO bound, parallelizing doesn't help more than a little", "# in most cases, and may actually slow this down on some desktop nodes.", "# cfgval_num_cores = None # get this from paramDict", "# pool_size = util.get_pool_size(cfgval_num_cores, len(outfiles))", "pool_size", "=", "1", "# do the WCS updating", "if", "wcskey", "in", "[", "''", ",", "' '", ",", "'INDEF'", ",", "None", "]", ":", "if", "updatewcs", ":", "log", ".", "info", "(", "'Updating input WCS using \"updatewcs\"'", ")", "else", ":", "log", ".", "info", "(", "'Resetting input WCS to be based on WCS key = %s'", "%", "wcskey", ")", "if", "pool_size", ">", "1", ":", "log", ".", "info", "(", "'Executing %d parallel workers'", "%", "pool_size", ")", "subprocs", "=", "[", "]", "for", "fname", "in", "outfiles", ":", "p", "=", "multiprocessing", ".", "Process", "(", "target", "=", "_process_input_wcs_single", ",", "name", "=", "'processInput._process_input_wcs()'", ",", "# for err msgs", "args", "=", "(", "fname", ",", "wcskey", ",", "updatewcs", ")", ")", "subprocs", ".", "append", "(", "p", ")", "mputil", ".", "launch_and_wait", "(", "subprocs", ",", "pool_size", ")", "# blocks till all done", "else", ":", "log", ".", "info", "(", "'Executing serially'", ")", "for", "fname", "in", "outfiles", ":", "_process_input_wcs_single", "(", "fname", ",", "wcskey", ",", "updatewcs", ")", "return", "outfiles" ]
This is a subset of process_input(), for internal use only. This is the portion of input handling which sets/updates WCS data, and is a performance hit - a target for parallelization. Returns the expanded list of filenames.
[ "This", "is", "a", "subset", "of", "process_input", "()", "for", "internal", "use", "only", ".", "This", "is", "the", "portion", "of", "input", "handling", "which", "sets", "/", "updates", "WCS", "data", "and", "is", "a", "performance", "hit", "-", "a", "target", "for", "parallelization", ".", "Returns", "the", "expanded", "list", "of", "filenames", "." ]
train
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/processInput.py#L572-L610
0.004227
google/prettytensor
prettytensor/layers.py
he_init
def he_init(n_inputs, n_outputs, activation_fn, uniform=True):
  """Sets the parameter initialization using the method described.

  This method is designed to keep the scale of the gradients roughly the
  same in all layers with ReLU activations.

  He et al. (2015):
      Delving deep into rectifiers: surpassing human-level performance on
      imageNet classification. International Conference on Computer Vision.

  For activations other than ReLU and ReLU6, this method uses Xavier
  initialization as in xavier_init().

  Args:
    n_inputs: The number of input nodes into each output.
    n_outputs: The number of output nodes for each input.
    activation_fn: Activation function used in this layer.
    uniform: If uniform distribution will be used for Xavier initialization.
      Normal distribution will be used if False.

  Returns:
    An initializer.
  """
  def in_relu_family(activation_fn):
    if isinstance(activation_fn, collections.Sequence):
      activation_fn = activation_fn[0]
    return activation_fn in (tf.nn.relu, tf.nn.relu6)

  if in_relu_family(activation_fn):
    stddev = math.sqrt(2.0 / n_inputs)
    # TODO(): Evaluates truncated_normal_initializer.
    return tf.random_normal_initializer(stddev=stddev)
  else:
    return xavier_init(n_inputs, n_outputs, uniform)
python
def he_init(n_inputs, n_outputs, activation_fn, uniform=True):
  """Sets the parameter initialization using the method described.

  This method is designed to keep the scale of the gradients roughly the
  same in all layers with ReLU activations.

  He et al. (2015):
      Delving deep into rectifiers: surpassing human-level performance on
      imageNet classification. International Conference on Computer Vision.

  For activations other than ReLU and ReLU6, this method uses Xavier
  initialization as in xavier_init().

  Args:
    n_inputs: The number of input nodes into each output.
    n_outputs: The number of output nodes for each input.
    activation_fn: Activation function used in this layer.
    uniform: If uniform distribution will be used for Xavier initialization.
      Normal distribution will be used if False.

  Returns:
    An initializer.
  """
  def in_relu_family(activation_fn):
    if isinstance(activation_fn, collections.Sequence):
      activation_fn = activation_fn[0]
    return activation_fn in (tf.nn.relu, tf.nn.relu6)

  if in_relu_family(activation_fn):
    stddev = math.sqrt(2.0 / n_inputs)
    # TODO(): Evaluates truncated_normal_initializer.
    return tf.random_normal_initializer(stddev=stddev)
  else:
    return xavier_init(n_inputs, n_outputs, uniform)
[ "def", "he_init", "(", "n_inputs", ",", "n_outputs", ",", "activation_fn", ",", "uniform", "=", "True", ")", ":", "def", "in_relu_family", "(", "activation_fn", ")", ":", "if", "isinstance", "(", "activation_fn", ",", "collections", ".", "Sequence", ")", ":", "activation_fn", "=", "activation_fn", "[", "0", "]", "return", "activation_fn", "in", "(", "tf", ".", "nn", ".", "relu", ",", "tf", ".", "nn", ".", "relu6", ")", "if", "in_relu_family", "(", "activation_fn", ")", ":", "stddev", "=", "math", ".", "sqrt", "(", "2.0", "/", "n_inputs", ")", "# TODO(): Evaluates truncated_normal_initializer.", "return", "tf", ".", "random_normal_initializer", "(", "stddev", "=", "stddev", ")", "else", ":", "return", "xavier_init", "(", "n_inputs", ",", "n_outputs", ",", "uniform", ")" ]
Sets the parameter initialization using the method described. This method is designed to keep the scale of the gradients roughly the same in all layers with ReLU activations. He et al. (2015): Delving deep into rectifiers: surpassing human-level performance on imageNet classification. International Conference on Computer Vision. For activations other than ReLU and ReLU6, this method uses Xavier initialization as in xavier_init(). Args: n_inputs: The number of input nodes into each output. n_outputs: The number of output nodes for each input. activation_fn: Activation function used in this layer. uniform: If uniform distribution will be used for Xavier initialization. Normal distribution will be used if False. Returns: An initializer.
[ "Sets", "the", "parameter", "initialization", "using", "the", "method", "described", "." ]
train
https://github.com/google/prettytensor/blob/75daa0b11252590f548da5647addc0ea610c4c45/prettytensor/layers.py#L84-L116
0.005295
bitesofcode/projexui
projexui/widgets/xorbrecordbox.py
XOrbRecordBox.records
def records( self ):
    """
    Returns the record list that ist linked with this combo box.

    :return [<orb.Table>, ..]
    """
    records = []
    for i in range(self.count()):
        record = self.recordAt(i)
        if record:
            records.append(record)
    return records
python
def records( self ):
    """
    Returns the record list that is linked with this combo box.

    :return     [<orb.Table>, ..]
    """
    records = []
    for i in range(self.count()):
        record = self.recordAt(i)
        if record:
            records.append(record)
    return records
[ "def", "records", "(", "self", ")", ":", "records", "=", "[", "]", "for", "i", "in", "range", "(", "self", ".", "count", "(", ")", ")", ":", "record", "=", "self", ".", "recordAt", "(", "i", ")", "if", "record", ":", "records", ".", "append", "(", "record", ")", "return", "records" ]
Returns the record list that is linked with this combo box. :return [<orb.Table>, ..]
[ "Returns", "the", "record", "list", "that", "is", "linked", "with", "this", "combo", "box", ".", ":", "return", "[", "<orb", ".", "Table", ">", "..", "]" ]
train
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xorbrecordbox.py#L644-L655
0.014164
janpipek/physt
physt/histogram_nd.py
HistogramND.projection
def projection(self, *axes: AxisIdentifier, **kwargs) -> HistogramBase: """Reduce dimensionality by summing along axis/axes. Parameters ---------- axes: Iterable[int or str] List of axes for the new histogram. Could be either numbers or names. Must contain at least one axis. name: Optional[str] # TODO: Check Name for the projected histogram (default: same) type: Optional[type] # TODO: Check If set, predefined class for the projection Returns ------- HistogramND or Histogram2D or Histogram1D (or others in special cases) """ # TODO: rename to project in 0.5 axes, invert = self._get_projection_axes(*axes) frequencies = self.frequencies.sum(axis=invert) errors2 = self.errors2.sum(axis=invert) return self._reduce_dimension(axes, frequencies, errors2, **kwargs)
python
def projection(self, *axes: AxisIdentifier, **kwargs) -> HistogramBase: """Reduce dimensionality by summing along axis/axes. Parameters ---------- axes: Iterable[int or str] List of axes for the new histogram. Could be either numbers or names. Must contain at least one axis. name: Optional[str] # TODO: Check Name for the projected histogram (default: same) type: Optional[type] # TODO: Check If set, predefined class for the projection Returns ------- HistogramND or Histogram2D or Histogram1D (or others in special cases) """ # TODO: rename to project in 0.5 axes, invert = self._get_projection_axes(*axes) frequencies = self.frequencies.sum(axis=invert) errors2 = self.errors2.sum(axis=invert) return self._reduce_dimension(axes, frequencies, errors2, **kwargs)
[ "def", "projection", "(", "self", ",", "*", "axes", ":", "AxisIdentifier", ",", "*", "*", "kwargs", ")", "->", "HistogramBase", ":", "# TODO: rename to project in 0.5", "axes", ",", "invert", "=", "self", ".", "_get_projection_axes", "(", "*", "axes", ")", "frequencies", "=", "self", ".", "frequencies", ".", "sum", "(", "axis", "=", "invert", ")", "errors2", "=", "self", ".", "errors2", ".", "sum", "(", "axis", "=", "invert", ")", "return", "self", ".", "_reduce_dimension", "(", "axes", ",", "frequencies", ",", "errors2", ",", "*", "*", "kwargs", ")" ]
Reduce dimensionality by summing along axis/axes. Parameters ---------- axes: Iterable[int or str] List of axes for the new histogram. Could be either numbers or names. Must contain at least one axis. name: Optional[str] # TODO: Check Name for the projected histogram (default: same) type: Optional[type] # TODO: Check If set, predefined class for the projection Returns ------- HistogramND or Histogram2D or Histogram1D (or others in special cases)
[ "Reduce", "dimensionality", "by", "summing", "along", "axis", "/", "axes", "." ]
train
https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/histogram_nd.py#L353-L374
0.002139
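The projection docstring above reduces dimensionality by summing frequencies over the dropped axes. A standalone NumPy sketch of that idea (not the physt API; the array and axis choices are made up):

import numpy as np

freq = np.arange(24).reshape(2, 3, 4)                  # 3-D frequency table
keep = (0, 2)                                          # axes to keep in the projection
invert = tuple(i for i in range(freq.ndim) if i not in keep)
projected = freq.sum(axis=invert)                      # sum over the dropped axes
print(projected.shape)                                 # (2, 4)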
gitpython-developers/GitPython
git/util.py
BlockingLockFile._obtain_lock
def _obtain_lock(self): """This method blocks until it obtained the lock, or raises IOError if it ran out of time or if the parent directory was not available anymore. If this method returns, you are guaranteed to own the lock""" starttime = time.time() maxtime = starttime + float(self._max_block_time) while True: try: super(BlockingLockFile, self)._obtain_lock() except IOError: # synity check: if the directory leading to the lockfile is not # readable anymore, raise an exception curtime = time.time() if not osp.isdir(osp.dirname(self._lock_file_path())): msg = "Directory containing the lockfile %r was not readable anymore after waiting %g seconds" % ( self._lock_file_path(), curtime - starttime) raise IOError(msg) # END handle missing directory if curtime >= maxtime: msg = "Waited %g seconds for lock at %r" % (maxtime - starttime, self._lock_file_path()) raise IOError(msg) # END abort if we wait too long time.sleep(self._check_interval) else: break
python
def _obtain_lock(self): """This method blocks until it obtained the lock, or raises IOError if it ran out of time or if the parent directory was not available anymore. If this method returns, you are guaranteed to own the lock""" starttime = time.time() maxtime = starttime + float(self._max_block_time) while True: try: super(BlockingLockFile, self)._obtain_lock() except IOError: # synity check: if the directory leading to the lockfile is not # readable anymore, raise an exception curtime = time.time() if not osp.isdir(osp.dirname(self._lock_file_path())): msg = "Directory containing the lockfile %r was not readable anymore after waiting %g seconds" % ( self._lock_file_path(), curtime - starttime) raise IOError(msg) # END handle missing directory if curtime >= maxtime: msg = "Waited %g seconds for lock at %r" % (maxtime - starttime, self._lock_file_path()) raise IOError(msg) # END abort if we wait too long time.sleep(self._check_interval) else: break
[ "def", "_obtain_lock", "(", "self", ")", ":", "starttime", "=", "time", ".", "time", "(", ")", "maxtime", "=", "starttime", "+", "float", "(", "self", ".", "_max_block_time", ")", "while", "True", ":", "try", ":", "super", "(", "BlockingLockFile", ",", "self", ")", ".", "_obtain_lock", "(", ")", "except", "IOError", ":", "# synity check: if the directory leading to the lockfile is not", "# readable anymore, raise an exception", "curtime", "=", "time", ".", "time", "(", ")", "if", "not", "osp", ".", "isdir", "(", "osp", ".", "dirname", "(", "self", ".", "_lock_file_path", "(", ")", ")", ")", ":", "msg", "=", "\"Directory containing the lockfile %r was not readable anymore after waiting %g seconds\"", "%", "(", "self", ".", "_lock_file_path", "(", ")", ",", "curtime", "-", "starttime", ")", "raise", "IOError", "(", "msg", ")", "# END handle missing directory", "if", "curtime", ">=", "maxtime", ":", "msg", "=", "\"Waited %g seconds for lock at %r\"", "%", "(", "maxtime", "-", "starttime", ",", "self", ".", "_lock_file_path", "(", ")", ")", "raise", "IOError", "(", "msg", ")", "# END abort if we wait too long", "time", ".", "sleep", "(", "self", ".", "_check_interval", ")", "else", ":", "break" ]
This method blocks until it obtained the lock, or raises IOError if it ran out of time or if the parent directory was not available anymore. If this method returns, you are guaranteed to own the lock
[ "This", "method", "blocks", "until", "it", "obtained", "the", "lock", "or", "raises", "IOError", "if", "it", "ran", "out", "of", "time", "or", "if", "the", "parent", "directory", "was", "not", "available", "anymore", ".", "If", "this", "method", "returns", "you", "are", "guaranteed", "to", "own", "the", "lock" ]
train
https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/util.py#L811-L836
0.003811
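The _obtain_lock method above is a poll-until-deadline pattern: retry, check that the watched directory still exists, and give up after max_block_time. A standard-library-only sketch of the same loop; wait_for, condition, and watch_dir are invented names, not GitPython API:

import os
import time

def wait_for(condition, max_block_time=10.0, check_interval=0.3, watch_dir="."):
    start = time.time()
    deadline = start + max_block_time
    while not condition():
        now = time.time()
        if not os.path.isdir(watch_dir):
            raise IOError("directory %r vanished after %g seconds" % (watch_dir, now - start))
        if now >= deadline:
            raise IOError("waited %g seconds without success" % (now - start))
        time.sleep(check_interval)

wait_for(lambda: True)   # returns immediately; a real condition would try to acquire the lock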
pyqt/python-qt5
PyQt5/uic/icon_cache.py
IconCache.get_icon
def get_icon(self, iconset): """Return an icon described by the given iconset tag.""" # Handle a themed icon. theme = iconset.attrib.get('theme') if theme is not None: return self._object_factory.createQObject("QIcon.fromTheme", 'icon', (self._object_factory.asString(theme), ), is_attribute=False) # Handle an empty iconset property. if iconset.text is None: return None iset = _IconSet(iconset, self._base_dir) try: idx = self._cache.index(iset) except ValueError: idx = -1 if idx >= 0: # Return the icon from the cache. iset = self._cache[idx] else: # Follow uic's naming convention. name = 'icon' idx = len(self._cache) if idx > 0: name += str(idx) icon = self._object_factory.createQObject("QIcon", name, (), is_attribute=False) iset.set_icon(icon, self._qtgui_module) self._cache.append(iset) return iset.icon
python
def get_icon(self, iconset): """Return an icon described by the given iconset tag.""" # Handle a themed icon. theme = iconset.attrib.get('theme') if theme is not None: return self._object_factory.createQObject("QIcon.fromTheme", 'icon', (self._object_factory.asString(theme), ), is_attribute=False) # Handle an empty iconset property. if iconset.text is None: return None iset = _IconSet(iconset, self._base_dir) try: idx = self._cache.index(iset) except ValueError: idx = -1 if idx >= 0: # Return the icon from the cache. iset = self._cache[idx] else: # Follow uic's naming convention. name = 'icon' idx = len(self._cache) if idx > 0: name += str(idx) icon = self._object_factory.createQObject("QIcon", name, (), is_attribute=False) iset.set_icon(icon, self._qtgui_module) self._cache.append(iset) return iset.icon
[ "def", "get_icon", "(", "self", ",", "iconset", ")", ":", "# Handle a themed icon.", "theme", "=", "iconset", ".", "attrib", ".", "get", "(", "'theme'", ")", "if", "theme", "is", "not", "None", ":", "return", "self", ".", "_object_factory", ".", "createQObject", "(", "\"QIcon.fromTheme\"", ",", "'icon'", ",", "(", "self", ".", "_object_factory", ".", "asString", "(", "theme", ")", ",", ")", ",", "is_attribute", "=", "False", ")", "# Handle an empty iconset property.", "if", "iconset", ".", "text", "is", "None", ":", "return", "None", "iset", "=", "_IconSet", "(", "iconset", ",", "self", ".", "_base_dir", ")", "try", ":", "idx", "=", "self", ".", "_cache", ".", "index", "(", "iset", ")", "except", "ValueError", ":", "idx", "=", "-", "1", "if", "idx", ">=", "0", ":", "# Return the icon from the cache.", "iset", "=", "self", ".", "_cache", "[", "idx", "]", "else", ":", "# Follow uic's naming convention.", "name", "=", "'icon'", "idx", "=", "len", "(", "self", ".", "_cache", ")", "if", "idx", ">", "0", ":", "name", "+=", "str", "(", "idx", ")", "icon", "=", "self", ".", "_object_factory", ".", "createQObject", "(", "\"QIcon\"", ",", "name", ",", "(", ")", ",", "is_attribute", "=", "False", ")", "iset", ".", "set_icon", "(", "icon", ",", "self", ".", "_qtgui_module", ")", "self", ".", "_cache", ".", "append", "(", "iset", ")", "return", "iset", ".", "icon" ]
Return an icon described by the given iconset tag.
[ "Return", "an", "icon", "described", "by", "the", "given", "iconset", "tag", "." ]
train
https://github.com/pyqt/python-qt5/blob/c9ed180c56f6fd3521ffe5fb70904bc5d3f50e5f/PyQt5/uic/icon_cache.py#L44-L81
0.003475
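get_icon above caches by equality: look the candidate up with list.index, otherwise create it, name it following uic's icon/icon1/... convention, and append. A generic sketch of that lookup, detached from Qt (IconSet below is a stand-in, not uic's class):

class IconSet:
    def __init__(self, key):
        self.key = key
        self.icon = None
    def __eq__(self, other):
        return isinstance(other, IconSet) and self.key == other.key

cache = []

def get_or_create(key):
    candidate = IconSet(key)
    try:
        return cache[cache.index(candidate)]           # cache hit via __eq__
    except ValueError:
        candidate.icon = f"icon{len(cache) or ''}"     # uic-style naming: icon, icon1, ...
        cache.append(candidate)
        return candidate

print(get_or_create("a").icon, get_or_create("a").icon, get_or_create("b").icon)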
ikalnytskyi/dooku
dooku/datetime.py
Local.dst
def dst(self, dt):
    """
    Returns a difference in seconds between standard offset and dst offset.
    """
    if not self._is_dst(dt):
        return datetime.timedelta(0)

    offset = time.timezone - time.altzone
    return datetime.timedelta(seconds=-offset)
python
def dst(self, dt):
    """
    Returns a difference in seconds between standard offset and dst offset.
    """
    if not self._is_dst(dt):
        return datetime.timedelta(0)

    offset = time.timezone - time.altzone
    return datetime.timedelta(seconds=-offset)
[ "def", "dst", "(", "self", ",", "dt", ")", ":", "if", "not", "self", ".", "_is_dst", "(", "dt", ")", ":", "return", "datetime", ".", "timedelta", "(", "0", ")", "offset", "=", "time", ".", "timezone", "-", "time", ".", "altzone", "return", "datetime", ".", "timedelta", "(", "seconds", "=", "-", "offset", ")" ]
Returns a difference in seconds between standard offset and dst offset.
[ "Returns", "a", "difference", "in", "seconds", "between", "standard", "offset", "and", "dst", "offset", "." ]
train
https://github.com/ikalnytskyi/dooku/blob/77e6c82c9c41211c86ee36ae5e591d477945fedf/dooku/datetime.py#L118-L127
0.006623
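The dst method above derives the daylight-saving shift from the interpreter's time.timezone and time.altzone. The same computation in a tiny standalone form (the is_dst flag is passed in directly here instead of being derived from a datetime):

import time
import datetime

def local_dst_offset(is_dst):
    # time.timezone: offset of local standard time, seconds west of UTC
    # time.altzone: offset of local DST time, seconds west of UTC
    if not is_dst:
        return datetime.timedelta(0)
    return datetime.timedelta(seconds=-(time.timezone - time.altzone))

print(local_dst_offset(True))   # typically 1:00:00 where DST applies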
bioidiap/gridtk
gridtk/sge.py
JobManagerSGE._queue
def _queue(self, kwargs):
    """The hard resource_list comes like this: '<qname>=TRUE,mem=128M'.
    To process it we have to split it twice (',' and then on '='),
    create a dictionary and extract just the qname"""
    if not 'hard resource_list' in kwargs:
        return 'all.q'
    d = dict([k.split('=') for k in kwargs['hard resource_list'].split(',')])
    for k in d:
        if k[0] == 'q' and d[k] == 'TRUE':
            return k
    return 'all.q'
python
def _queue(self, kwargs):
    """The hard resource_list comes like this: '<qname>=TRUE,mem=128M'.
    To process it we have to split it twice (',' and then on '='),
    create a dictionary and extract just the qname"""
    if not 'hard resource_list' in kwargs:
        return 'all.q'
    d = dict([k.split('=') for k in kwargs['hard resource_list'].split(',')])
    for k in d:
        if k[0] == 'q' and d[k] == 'TRUE':
            return k
    return 'all.q'
[ "def", "_queue", "(", "self", ",", "kwargs", ")", ":", "if", "not", "'hard resource_list'", "in", "kwargs", ":", "return", "'all.q'", "d", "=", "dict", "(", "[", "k", ".", "split", "(", "'='", ")", "for", "k", "in", "kwargs", "[", "'hard resource_list'", "]", ".", "split", "(", "','", ")", "]", ")", "for", "k", "in", "d", ":", "if", "k", "[", "0", "]", "==", "'q'", "and", "d", "[", "k", "]", "==", "'TRUE'", ":", "return", "k", "return", "'all.q'" ]
The hard resource_list comes like this: '<qname>=TRUE,mem=128M'. To process it we have to split it twice (',' and then on '='), create a dictionary and extract just the qname
[ "The", "hard", "resource_list", "comes", "like", "this", ":", "<qname", ">", "=", "TRUE", "mem", "=", "128M", ".", "To", "process", "it", "we", "have", "to", "split", "it", "twice", "(", "and", "then", "on", "=", ")", "create", "a", "dictionary", "and", "extract", "just", "the", "qname" ]
train
https://github.com/bioidiap/gridtk/blob/9e3291b8b50388682908927231b2730db1da147d/gridtk/sge.py#L43-L51
0.01139
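The docstring above describes the parsing directly: split the resource list on ',' then on '=', build a dictionary, and pick the q* key set to TRUE. The same steps over a made-up resource string:

resource_list = "q_gpu=TRUE,mem=128M,pe_mth=4"
d = dict(item.split("=") for item in resource_list.split(","))
queue = next((k for k, v in d.items() if k.startswith("q") and v == "TRUE"), "all.q")
print(queue)   # q_gpu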
geronimp/graftM
graftm/sequence_searcher.py
SequenceSearcher.align
def align(self, input_path, output_path, directions, pipeline, filter_minimum): '''align - Takes input path to fasta of unaligned reads, aligns them to a HMM, and returns the aligned reads in the output path Parameters ---------- input_path : str output_path : str reverse_direction : dict A dictionary of read names, with the entries being the complement strand of the read (True = forward, False = reverse) pipeline : str Either "P" or "D" corresponding to the protein and nucleotide (DNA) pipelines, respectively. Returns ------- N/A - output alignment path known. ''' # HMMalign the forward reads, and reverse complement reads. with tempfile.NamedTemporaryFile(prefix='for_conv_file', suffix='.fa') as fwd_fh: fwd_conv_file = fwd_fh.name with tempfile.NamedTemporaryFile(prefix='rev_conv_file', suffix='.fa') as rev_fh: rev_conv_file = rev_fh.name alignments = self._hmmalign( input_path, directions, pipeline, fwd_conv_file, rev_conv_file) alignment_result = self.alignment_correcter(alignments, output_path, filter_minimum) return alignment_result
python
def align(self, input_path, output_path, directions, pipeline, filter_minimum): '''align - Takes input path to fasta of unaligned reads, aligns them to a HMM, and returns the aligned reads in the output path Parameters ---------- input_path : str output_path : str reverse_direction : dict A dictionary of read names, with the entries being the complement strand of the read (True = forward, False = reverse) pipeline : str Either "P" or "D" corresponding to the protein and nucleotide (DNA) pipelines, respectively. Returns ------- N/A - output alignment path known. ''' # HMMalign the forward reads, and reverse complement reads. with tempfile.NamedTemporaryFile(prefix='for_conv_file', suffix='.fa') as fwd_fh: fwd_conv_file = fwd_fh.name with tempfile.NamedTemporaryFile(prefix='rev_conv_file', suffix='.fa') as rev_fh: rev_conv_file = rev_fh.name alignments = self._hmmalign( input_path, directions, pipeline, fwd_conv_file, rev_conv_file) alignment_result = self.alignment_correcter(alignments, output_path, filter_minimum) return alignment_result
[ "def", "align", "(", "self", ",", "input_path", ",", "output_path", ",", "directions", ",", "pipeline", ",", "filter_minimum", ")", ":", "# HMMalign the forward reads, and reverse complement reads.", "with", "tempfile", ".", "NamedTemporaryFile", "(", "prefix", "=", "'for_conv_file'", ",", "suffix", "=", "'.fa'", ")", "as", "fwd_fh", ":", "fwd_conv_file", "=", "fwd_fh", ".", "name", "with", "tempfile", ".", "NamedTemporaryFile", "(", "prefix", "=", "'rev_conv_file'", ",", "suffix", "=", "'.fa'", ")", "as", "rev_fh", ":", "rev_conv_file", "=", "rev_fh", ".", "name", "alignments", "=", "self", ".", "_hmmalign", "(", "input_path", ",", "directions", ",", "pipeline", ",", "fwd_conv_file", ",", "rev_conv_file", ")", "alignment_result", "=", "self", ".", "alignment_correcter", "(", "alignments", ",", "output_path", ",", "filter_minimum", ")", "return", "alignment_result" ]
align - Takes input path to fasta of unaligned reads, aligns them to a HMM, and returns the aligned reads in the output path Parameters ---------- input_path : str output_path : str reverse_direction : dict A dictionary of read names, with the entries being the complement strand of the read (True = forward, False = reverse) pipeline : str Either "P" or "D" corresponding to the protein and nucleotide (DNA) pipelines, respectively. Returns ------- N/A - output alignment path known.
[ "align", "-", "Takes", "input", "path", "to", "fasta", "of", "unaligned", "reads", "aligns", "them", "to", "a", "HMM", "and", "returns", "the", "aligned", "reads", "in", "the", "output", "path" ]
train
https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/sequence_searcher.py#L1111-L1147
0.003896
vmware/pyvmomi
sample/getallvms.py
main
def main(): """ Simple command-line program for listing the virtual machines on a system. """ args = GetArgs() if args.password: password = args.password else: password = getpass.getpass(prompt='Enter password for host %s and ' 'user %s: ' % (args.host,args.user)) context = None if hasattr(ssl, '_create_unverified_context'): context = ssl._create_unverified_context() si = SmartConnect(host=args.host, user=args.user, pwd=password, port=int(args.port), sslContext=context) if not si: print("Could not connect to the specified host using specified " "username and password") return -1 atexit.register(Disconnect, si) content = si.RetrieveContent() for child in content.rootFolder.childEntity: if hasattr(child, 'vmFolder'): datacenter = child vmFolder = datacenter.vmFolder vmList = vmFolder.childEntity for vm in vmList: PrintVmInfo(vm) return 0
python
def main(): """ Simple command-line program for listing the virtual machines on a system. """ args = GetArgs() if args.password: password = args.password else: password = getpass.getpass(prompt='Enter password for host %s and ' 'user %s: ' % (args.host,args.user)) context = None if hasattr(ssl, '_create_unverified_context'): context = ssl._create_unverified_context() si = SmartConnect(host=args.host, user=args.user, pwd=password, port=int(args.port), sslContext=context) if not si: print("Could not connect to the specified host using specified " "username and password") return -1 atexit.register(Disconnect, si) content = si.RetrieveContent() for child in content.rootFolder.childEntity: if hasattr(child, 'vmFolder'): datacenter = child vmFolder = datacenter.vmFolder vmList = vmFolder.childEntity for vm in vmList: PrintVmInfo(vm) return 0
[ "def", "main", "(", ")", ":", "args", "=", "GetArgs", "(", ")", "if", "args", ".", "password", ":", "password", "=", "args", ".", "password", "else", ":", "password", "=", "getpass", ".", "getpass", "(", "prompt", "=", "'Enter password for host %s and '", "'user %s: '", "%", "(", "args", ".", "host", ",", "args", ".", "user", ")", ")", "context", "=", "None", "if", "hasattr", "(", "ssl", ",", "'_create_unverified_context'", ")", ":", "context", "=", "ssl", ".", "_create_unverified_context", "(", ")", "si", "=", "SmartConnect", "(", "host", "=", "args", ".", "host", ",", "user", "=", "args", ".", "user", ",", "pwd", "=", "password", ",", "port", "=", "int", "(", "args", ".", "port", ")", ",", "sslContext", "=", "context", ")", "if", "not", "si", ":", "print", "(", "\"Could not connect to the specified host using specified \"", "\"username and password\"", ")", "return", "-", "1", "atexit", ".", "register", "(", "Disconnect", ",", "si", ")", "content", "=", "si", ".", "RetrieveContent", "(", ")", "for", "child", "in", "content", ".", "rootFolder", ".", "childEntity", ":", "if", "hasattr", "(", "child", ",", "'vmFolder'", ")", ":", "datacenter", "=", "child", "vmFolder", "=", "datacenter", ".", "vmFolder", "vmList", "=", "vmFolder", ".", "childEntity", "for", "vm", "in", "vmList", ":", "PrintVmInfo", "(", "vm", ")", "return", "0" ]
Simple command-line program for listing the virtual machines on a system.
[ "Simple", "command", "-", "line", "program", "for", "listing", "the", "virtual", "machines", "on", "a", "system", "." ]
train
https://github.com/vmware/pyvmomi/blob/3ffcb23bf77d757175c0d5216ba9a25345d824cd/sample/getallvms.py#L90-L125
0.021505
moonso/loqusdb
loqusdb/utils/profiling.py
update_profiles
def update_profiles(adapter): """ For all cases having vcf_path, update the profile string for the samples Args: adapter (MongoAdapter): Adapter to mongodb """ for case in adapter.cases(): #If the case has a vcf_path, get the profiles and update the #case with new profiled individuals. if case.get('profile_path'): profiles = get_profiles(adapter, case['profile_path']) profiled_individuals = deepcopy(case['individuals']) for individual in profiled_individuals: ind_id = individual['ind_id'] try: profile = profiles[ind_id] individual['profile'] = profile except KeyError: LOG.warning(f"sample IDs in vcf does not match for case {case['case_id']}") updated_case = deepcopy(case) updated_case['individuals'] = profiled_individuals adapter.add_case(updated_case, update=True)
python
def update_profiles(adapter): """ For all cases having vcf_path, update the profile string for the samples Args: adapter (MongoAdapter): Adapter to mongodb """ for case in adapter.cases(): #If the case has a vcf_path, get the profiles and update the #case with new profiled individuals. if case.get('profile_path'): profiles = get_profiles(adapter, case['profile_path']) profiled_individuals = deepcopy(case['individuals']) for individual in profiled_individuals: ind_id = individual['ind_id'] try: profile = profiles[ind_id] individual['profile'] = profile except KeyError: LOG.warning(f"sample IDs in vcf does not match for case {case['case_id']}") updated_case = deepcopy(case) updated_case['individuals'] = profiled_individuals adapter.add_case(updated_case, update=True)
[ "def", "update_profiles", "(", "adapter", ")", ":", "for", "case", "in", "adapter", ".", "cases", "(", ")", ":", "#If the case has a vcf_path, get the profiles and update the", "#case with new profiled individuals.", "if", "case", ".", "get", "(", "'profile_path'", ")", ":", "profiles", "=", "get_profiles", "(", "adapter", ",", "case", "[", "'profile_path'", "]", ")", "profiled_individuals", "=", "deepcopy", "(", "case", "[", "'individuals'", "]", ")", "for", "individual", "in", "profiled_individuals", ":", "ind_id", "=", "individual", "[", "'ind_id'", "]", "try", ":", "profile", "=", "profiles", "[", "ind_id", "]", "individual", "[", "'profile'", "]", "=", "profile", "except", "KeyError", ":", "LOG", ".", "warning", "(", "f\"sample IDs in vcf does not match for case {case['case_id']}\"", ")", "updated_case", "=", "deepcopy", "(", "case", ")", "updated_case", "[", "'individuals'", "]", "=", "profiled_individuals", "adapter", ".", "add_case", "(", "updated_case", ",", "update", "=", "True", ")" ]
For all cases having vcf_path, update the profile string for the samples Args: adapter (MongoAdapter): Adapter to mongodb
[ "For", "all", "cases", "having", "vcf_path", "update", "the", "profile", "string", "for", "the", "samples" ]
train
https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/profiling.py#L154-L186
0.003929
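update_profiles above follows a copy-modify-write pattern: deepcopy the case document, attach profiles to its individuals by ind_id, and store the updated copy. A plain-dict sketch of that pattern; nothing below is loqusdb's API and the sample data is invented:

from copy import deepcopy

case = {"case_id": "c1", "individuals": [{"ind_id": "A"}, {"ind_id": "B"}]}
profiles = {"A": "profileA"}   # e.g. computed from a VCF

updated = deepcopy(case)
for ind in updated["individuals"]:
    try:
        ind["profile"] = profiles[ind["ind_id"]]
    except KeyError:
        print("no profile for", ind["ind_id"])
print(updated)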
mrcagney/gtfstk
gtfstk/validators.py
check_for_invalid_columns
def check_for_invalid_columns( problems: List, table: str, df: DataFrame ) -> List: """ Check for invalid columns in the given GTFS DataFrame. Parameters ---------- problems : list A four-tuple containing 1. A problem type (string) equal to ``'error'`` or ``'warning'``; ``'error'`` means the GTFS is violated; ``'warning'`` means there is a problem but it is not a GTFS violation 2. A message (string) that describes the problem 3. A GTFS table name, e.g. ``'routes'``, in which the problem occurs 4. A list of rows (integers) of the table's DataFrame where the problem occurs table : string Name of a GTFS table df : DataFrame The GTFS table corresponding to ``table`` Returns ------- list The ``problems`` list extended as follows. Check whether the DataFrame contains extra columns not in the GTFS and append to the problems list one warning for each extra column. """ r = cs.GTFS_REF valid_columns = r.loc[r["table"] == table, "column"].values for col in df.columns: if col not in valid_columns: problems.append( ["warning", f"Unrecognized column {col}", table, []] ) return problems
python
def check_for_invalid_columns( problems: List, table: str, df: DataFrame ) -> List: """ Check for invalid columns in the given GTFS DataFrame. Parameters ---------- problems : list A four-tuple containing 1. A problem type (string) equal to ``'error'`` or ``'warning'``; ``'error'`` means the GTFS is violated; ``'warning'`` means there is a problem but it is not a GTFS violation 2. A message (string) that describes the problem 3. A GTFS table name, e.g. ``'routes'``, in which the problem occurs 4. A list of rows (integers) of the table's DataFrame where the problem occurs table : string Name of a GTFS table df : DataFrame The GTFS table corresponding to ``table`` Returns ------- list The ``problems`` list extended as follows. Check whether the DataFrame contains extra columns not in the GTFS and append to the problems list one warning for each extra column. """ r = cs.GTFS_REF valid_columns = r.loc[r["table"] == table, "column"].values for col in df.columns: if col not in valid_columns: problems.append( ["warning", f"Unrecognized column {col}", table, []] ) return problems
[ "def", "check_for_invalid_columns", "(", "problems", ":", "List", ",", "table", ":", "str", ",", "df", ":", "DataFrame", ")", "->", "List", ":", "r", "=", "cs", ".", "GTFS_REF", "valid_columns", "=", "r", ".", "loc", "[", "r", "[", "\"table\"", "]", "==", "table", ",", "\"column\"", "]", ".", "values", "for", "col", "in", "df", ".", "columns", ":", "if", "col", "not", "in", "valid_columns", ":", "problems", ".", "append", "(", "[", "\"warning\"", ",", "f\"Unrecognized column {col}\"", ",", "table", ",", "[", "]", "]", ")", "return", "problems" ]
Check for invalid columns in the given GTFS DataFrame. Parameters ---------- problems : list A four-tuple containing 1. A problem type (string) equal to ``'error'`` or ``'warning'``; ``'error'`` means the GTFS is violated; ``'warning'`` means there is a problem but it is not a GTFS violation 2. A message (string) that describes the problem 3. A GTFS table name, e.g. ``'routes'``, in which the problem occurs 4. A list of rows (integers) of the table's DataFrame where the problem occurs table : string Name of a GTFS table df : DataFrame The GTFS table corresponding to ``table`` Returns ------- list The ``problems`` list extended as follows. Check whether the DataFrame contains extra columns not in the GTFS and append to the problems list one warning for each extra column.
[ "Check", "for", "invalid", "columns", "in", "the", "given", "GTFS", "DataFrame", "." ]
train
https://github.com/mrcagney/gtfstk/blob/c91494e6fefc02523889655a0dc92d1c0eee8d03/gtfstk/validators.py#L184-L227
0.000741
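The validator above appends one warning per DataFrame column that is not in the GTFS reference. A self-contained pandas sketch, with a hard-coded set standing in for the reference table (column names are illustrative):

import pandas as pd

valid_columns = {"route_id", "route_short_name", "route_type"}
df = pd.DataFrame(columns=["route_id", "route_type", "colour"])

problems = []
for col in df.columns:
    if col not in valid_columns:
        problems.append(["warning", f"Unrecognized column {col}", "routes", []])
print(problems)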
saltstack/salt
salt/modules/sysmod.py
list_renderers
def list_renderers(*args): ''' List the renderers loaded on the minion .. versionadded:: 2015.5.0 CLI Example: .. code-block:: bash salt '*' sys.list_renderers Render names can be specified as globs. .. code-block:: bash salt '*' sys.list_renderers 'yaml*' ''' renderers_ = salt.loader.render(__opts__, []) renderers = set() if not args: for rend in six.iterkeys(renderers_): renderers.add(rend) return sorted(renderers) for module in args: for rend in fnmatch.filter(renderers_, module): renderers.add(rend) return sorted(renderers)
python
def list_renderers(*args): ''' List the renderers loaded on the minion .. versionadded:: 2015.5.0 CLI Example: .. code-block:: bash salt '*' sys.list_renderers Render names can be specified as globs. .. code-block:: bash salt '*' sys.list_renderers 'yaml*' ''' renderers_ = salt.loader.render(__opts__, []) renderers = set() if not args: for rend in six.iterkeys(renderers_): renderers.add(rend) return sorted(renderers) for module in args: for rend in fnmatch.filter(renderers_, module): renderers.add(rend) return sorted(renderers)
[ "def", "list_renderers", "(", "*", "args", ")", ":", "renderers_", "=", "salt", ".", "loader", ".", "render", "(", "__opts__", ",", "[", "]", ")", "renderers", "=", "set", "(", ")", "if", "not", "args", ":", "for", "rend", "in", "six", ".", "iterkeys", "(", "renderers_", ")", ":", "renderers", ".", "add", "(", "rend", ")", "return", "sorted", "(", "renderers", ")", "for", "module", "in", "args", ":", "for", "rend", "in", "fnmatch", ".", "filter", "(", "renderers_", ",", "module", ")", ":", "renderers", ".", "add", "(", "rend", ")", "return", "sorted", "(", "renderers", ")" ]
List the renderers loaded on the minion .. versionadded:: 2015.5.0 CLI Example: .. code-block:: bash salt '*' sys.list_renderers Render names can be specified as globs. .. code-block:: bash salt '*' sys.list_renderers 'yaml*'
[ "List", "the", "renderers", "loaded", "on", "the", "minion" ]
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/sysmod.py#L850-L880
0.001517
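list_renderers above is essentially glob matching over loaded module names with fnmatch; a standalone sketch with a made-up name set:

import fnmatch

loaded = {"yaml", "yamlex", "jinja", "json", "mako"}
patterns = ["yaml*", "j*"]

matches = set()
for pattern in patterns:
    matches.update(fnmatch.filter(loaded, pattern))
print(sorted(matches))   # ['jinja', 'json', 'yaml', 'yamlex']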
has2k1/plydata
plydata/dataframe/common.py
Selector._at
def _at(cls, verb): """ A verb with a select text match """ # Named (listed) columns are always included columns = cls.select(verb) final_columns_set = set(cls.select(verb)) groups_set = set(_get_groups(verb)) final_columns_set -= groups_set - set(verb.names) def pred(col): if col not in verb.data: raise KeyError( "Unknown column name, {!r}".format(col)) return col in final_columns_set return [col for col in columns if pred(col)]
python
def _at(cls, verb): """ A verb with a select text match """ # Named (listed) columns are always included columns = cls.select(verb) final_columns_set = set(cls.select(verb)) groups_set = set(_get_groups(verb)) final_columns_set -= groups_set - set(verb.names) def pred(col): if col not in verb.data: raise KeyError( "Unknown column name, {!r}".format(col)) return col in final_columns_set return [col for col in columns if pred(col)]
[ "def", "_at", "(", "cls", ",", "verb", ")", ":", "# Named (listed) columns are always included", "columns", "=", "cls", ".", "select", "(", "verb", ")", "final_columns_set", "=", "set", "(", "cls", ".", "select", "(", "verb", ")", ")", "groups_set", "=", "set", "(", "_get_groups", "(", "verb", ")", ")", "final_columns_set", "-=", "groups_set", "-", "set", "(", "verb", ".", "names", ")", "def", "pred", "(", "col", ")", ":", "if", "col", "not", "in", "verb", ".", "data", ":", "raise", "KeyError", "(", "\"Unknown column name, {!r}\"", ".", "format", "(", "col", ")", ")", "return", "col", "in", "final_columns_set", "return", "[", "col", "for", "col", "in", "columns", "if", "pred", "(", "col", ")", "]" ]
A verb with a select text match
[ "A", "verb", "with", "a", "select", "text", "match" ]
train
https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/dataframe/common.py#L448-L464
0.003478
pycontribs/pyrax
pyrax/resource.py
BaseResource.human_id
def human_id(self):
    """Subclasses may override this to provide a pretty ID which can be
    used for bash completion.
    """
    if self.NAME_ATTR in self.__dict__ and self.HUMAN_ID:
        return utils.to_slug(getattr(self, self.NAME_ATTR))
    return None
python
def human_id(self):
    """Subclasses may override this to provide a pretty ID which can be
    used for bash completion.
    """
    if self.NAME_ATTR in self.__dict__ and self.HUMAN_ID:
        return utils.to_slug(getattr(self, self.NAME_ATTR))
    return None
[ "def", "human_id", "(", "self", ")", ":", "if", "self", ".", "NAME_ATTR", "in", "self", ".", "__dict__", "and", "self", ".", "HUMAN_ID", ":", "return", "utils", ".", "to_slug", "(", "getattr", "(", "self", ",", "self", ".", "NAME_ATTR", ")", ")", "return", "None" ]
Subclasses may override this to provide a pretty ID which can be used for bash completion.
[ "Subclasses", "may", "override", "this", "to", "provide", "a", "pretty", "ID", "which", "can", "be", "used", "for", "bash", "completion", "." ]
train
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/resource.py#L57-L63
0.010453
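human_id above slugifies the resource name via pyrax's internal utils.to_slug. A rough re-implementation of the usual slug pattern with only the standard library; this is a sketch of the idea, not pyrax's exact behaviour:

import re

def to_slug(value):
    value = value.strip().lower()
    value = re.sub(r"[^a-z0-9]+", "-", value)   # collapse runs of punctuation/space
    return value.strip("-")

print(to_slug("My Test Server #1"))   # my-test-server-1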
KelSolaar/Manager
manager/QObject_component.py
QObjectComponent.deactivatable
def deactivatable(self, value):
    """
    Setter for **self.__deactivatable** attribute.

    :param value: Attribute value.
    :type value: bool
    """

    if value is not None:
        assert type(value) is bool, "'{0}' attribute: '{1}' type is not 'bool'!".format("deactivatable", value)
    self.__deactivatable = value
python
def deactivatable(self, value):
    """
    Setter for **self.__deactivatable** attribute.

    :param value: Attribute value.
    :type value: bool
    """

    if value is not None:
        assert type(value) is bool, "'{0}' attribute: '{1}' type is not 'bool'!".format("deactivatable", value)
    self.__deactivatable = value
[ "def", "deactivatable", "(", "self", ",", "value", ")", ":", "if", "value", "is", "not", "None", ":", "assert", "type", "(", "value", ")", "is", "bool", ",", "\"'{0}' attribute: '{1}' type is not 'bool'!\"", ".", "format", "(", "\"deactivatable\"", ",", "value", ")", "self", ".", "__deactivatable", "=", "value" ]
Setter for **self.__deactivatable** attribute. :param value: Attribute value. :type value: bool
[ "Setter", "for", "**", "self", ".", "__deactivatable", "**", "attribute", "." ]
train
https://github.com/KelSolaar/Manager/blob/39c8153fc021fc8a76e345a6e336ec2644f089d1/manager/QObject_component.py#L208-L218
0.008333
Shinichi-Nakagawa/pitchpx
pitchpx/mlbam.py
MlbAm._days
def _days(cls, start, end):
    """
    Scrape a MLBAM Data
    :param start: Start Day(YYYYMMDD)
    :param end: End Day(YYYYMMDD)
    """
    days = []
    # datetime
    start_day, end_day = dt.strptime(start, cls.DATE_FORMAT), dt.strptime(end, cls.DATE_FORMAT)
    delta = end_day - start_day
    for day in range(delta.days+1):
        days.append(start_day + timedelta(days=day))
    return days
python
def _days(cls, start, end):
    """
    Scrape a MLBAM Data
    :param start: Start Day(YYYYMMDD)
    :param end: End Day(YYYYMMDD)
    """
    days = []
    # datetime
    start_day, end_day = dt.strptime(start, cls.DATE_FORMAT), dt.strptime(end, cls.DATE_FORMAT)
    delta = end_day - start_day
    for day in range(delta.days+1):
        days.append(start_day + timedelta(days=day))
    return days
[ "def", "_days", "(", "cls", ",", "start", ",", "end", ")", ":", "days", "=", "[", "]", "# datetime", "start_day", ",", "end_day", "=", "dt", ".", "strptime", "(", "start", ",", "cls", ".", "DATE_FORMAT", ")", ",", "dt", ".", "strptime", "(", "end", ",", "cls", ".", "DATE_FORMAT", ")", "delta", "=", "end_day", "-", "start_day", "for", "day", "in", "range", "(", "delta", ".", "days", "+", "1", ")", ":", "days", ".", "append", "(", "start_day", "+", "timedelta", "(", "days", "=", "day", ")", ")", "return", "days" ]
Scrape a MLBAM Data :param start: Start Day(YYYYMMDD) :param end: End Day(YYYYMMDD)
[ "Scrape", "a", "MLBAM", "Data", ":", "param", "start", ":", "Start", "Day", "(", "YYYYMMDD", ")", ":", "param", "end", ":", "End", "Day", "(", "YYYYMMDD", ")" ]
train
https://github.com/Shinichi-Nakagawa/pitchpx/blob/5747402a0b3416f5e910b479e100df858f0b6440/pitchpx/mlbam.py#L170-L183
0.006667
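_days above expands a YYYYMMDD start/end pair into every date in between, inclusive. The same expansion with only the standard library and hypothetical dates:

from datetime import datetime, timedelta

DATE_FORMAT = "%Y%m%d"
start, end = "20240301", "20240305"
start_day = datetime.strptime(start, DATE_FORMAT)
end_day = datetime.strptime(end, DATE_FORMAT)
days = [start_day + timedelta(days=i) for i in range((end_day - start_day).days + 1)]
print(len(days), days[0].date(), days[-1].date())   # 5 2024-03-01 2024-03-05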
davidcarboni/Flask-B3
b3/__init__.py
_start_subspan
def _start_subspan(headers=None): """ Sets up a new span to contact a downstream service. This is used when making a downstream service call. It returns a dict containing the required sub-span headers. Each downstream call you make is handled as a new span, so call this every time you need to contact another service. This temporarily updates what's returned by values() to match the sub-span, so it can can also be used when calling e.g. a database that doesn't support B3. You'll still be able to record the client side of an interaction, even if the downstream server doesn't use the propagated trace information. You'll need to call end_subspan when you're done. You can do this using the `SubSpan` class: with SubSpan([headers]) as headers_b3: ... log.debug("Client start: calling downstream service") ... requests.get(<downstream service>, headers=headers_b3) ... log.debug("Client receive: downstream service responded") For the specification, see: https://github.com/openzipkin/b3-propagation :param headers: The headers dict. Headers will be added to this as needed. :return: A dict containing header values for a downstream request. This can be passed directly to e.g. requests.get(...). """ b3 = values() g.subspan = { # Propagate the trace ID b3_trace_id: b3[b3_trace_id], # Start a new span for the outgoing request b3_span_id: _generate_identifier(), # Set the current span as the parent span b3_parent_span_id: b3[b3_span_id], b3_sampled: b3[b3_sampled], b3_flags: b3[b3_flags], } # Set up headers # NB dict() ensures we don't alter the value passed in. Maybe that's too conservative? result = dict(headers or {}) result.update({ b3_trace_id: g.subspan[b3_trace_id], b3_span_id: g.subspan[b3_span_id], b3_parent_span_id: g.subspan[b3_parent_span_id], }) # Propagate only if set: if g.subspan[b3_sampled]: result[b3_sampled] = g.subspan[b3_sampled] if g.subspan[b3_flags]: result[b3_flags] = g.subspan[b3_flags] _info("Client start. Starting sub-span") _log.debug("B3 values for sub-span: {b3_headers}".format(b3_headers=values())) _log.debug("All headers for downstream request: {b3_headers}".format(b3_headers=result)) return result
python
def _start_subspan(headers=None): """ Sets up a new span to contact a downstream service. This is used when making a downstream service call. It returns a dict containing the required sub-span headers. Each downstream call you make is handled as a new span, so call this every time you need to contact another service. This temporarily updates what's returned by values() to match the sub-span, so it can can also be used when calling e.g. a database that doesn't support B3. You'll still be able to record the client side of an interaction, even if the downstream server doesn't use the propagated trace information. You'll need to call end_subspan when you're done. You can do this using the `SubSpan` class: with SubSpan([headers]) as headers_b3: ... log.debug("Client start: calling downstream service") ... requests.get(<downstream service>, headers=headers_b3) ... log.debug("Client receive: downstream service responded") For the specification, see: https://github.com/openzipkin/b3-propagation :param headers: The headers dict. Headers will be added to this as needed. :return: A dict containing header values for a downstream request. This can be passed directly to e.g. requests.get(...). """ b3 = values() g.subspan = { # Propagate the trace ID b3_trace_id: b3[b3_trace_id], # Start a new span for the outgoing request b3_span_id: _generate_identifier(), # Set the current span as the parent span b3_parent_span_id: b3[b3_span_id], b3_sampled: b3[b3_sampled], b3_flags: b3[b3_flags], } # Set up headers # NB dict() ensures we don't alter the value passed in. Maybe that's too conservative? result = dict(headers or {}) result.update({ b3_trace_id: g.subspan[b3_trace_id], b3_span_id: g.subspan[b3_span_id], b3_parent_span_id: g.subspan[b3_parent_span_id], }) # Propagate only if set: if g.subspan[b3_sampled]: result[b3_sampled] = g.subspan[b3_sampled] if g.subspan[b3_flags]: result[b3_flags] = g.subspan[b3_flags] _info("Client start. Starting sub-span") _log.debug("B3 values for sub-span: {b3_headers}".format(b3_headers=values())) _log.debug("All headers for downstream request: {b3_headers}".format(b3_headers=result)) return result
[ "def", "_start_subspan", "(", "headers", "=", "None", ")", ":", "b3", "=", "values", "(", ")", "g", ".", "subspan", "=", "{", "# Propagate the trace ID", "b3_trace_id", ":", "b3", "[", "b3_trace_id", "]", ",", "# Start a new span for the outgoing request", "b3_span_id", ":", "_generate_identifier", "(", ")", ",", "# Set the current span as the parent span", "b3_parent_span_id", ":", "b3", "[", "b3_span_id", "]", ",", "b3_sampled", ":", "b3", "[", "b3_sampled", "]", ",", "b3_flags", ":", "b3", "[", "b3_flags", "]", ",", "}", "# Set up headers", "# NB dict() ensures we don't alter the value passed in. Maybe that's too conservative?", "result", "=", "dict", "(", "headers", "or", "{", "}", ")", "result", ".", "update", "(", "{", "b3_trace_id", ":", "g", ".", "subspan", "[", "b3_trace_id", "]", ",", "b3_span_id", ":", "g", ".", "subspan", "[", "b3_span_id", "]", ",", "b3_parent_span_id", ":", "g", ".", "subspan", "[", "b3_parent_span_id", "]", ",", "}", ")", "# Propagate only if set:", "if", "g", ".", "subspan", "[", "b3_sampled", "]", ":", "result", "[", "b3_sampled", "]", "=", "g", ".", "subspan", "[", "b3_sampled", "]", "if", "g", ".", "subspan", "[", "b3_flags", "]", ":", "result", "[", "b3_flags", "]", "=", "g", ".", "subspan", "[", "b3_flags", "]", "_info", "(", "\"Client start. Starting sub-span\"", ")", "_log", ".", "debug", "(", "\"B3 values for sub-span: {b3_headers}\"", ".", "format", "(", "b3_headers", "=", "values", "(", ")", ")", ")", "_log", ".", "debug", "(", "\"All headers for downstream request: {b3_headers}\"", ".", "format", "(", "b3_headers", "=", "result", ")", ")", "return", "result" ]
Sets up a new span to contact a downstream service. This is used when making a downstream service call. It returns a dict containing the required sub-span headers. Each downstream call you make is handled as a new span, so call this every time you need to contact another service. This temporarily updates what's returned by values() to match the sub-span, so it can also be used when calling e.g. a database that doesn't support B3. You'll still be able to record the client side of an interaction, even if the downstream server doesn't use the propagated trace information. You'll need to call end_subspan when you're done. You can do this using the `SubSpan` class: with SubSpan([headers]) as headers_b3: ... log.debug("Client start: calling downstream service") ... requests.get(<downstream service>, headers=headers_b3) ... log.debug("Client receive: downstream service responded") For the specification, see: https://github.com/openzipkin/b3-propagation :param headers: The headers dict. Headers will be added to this as needed. :return: A dict containing header values for a downstream request. This can be passed directly to e.g. requests.get(...).
[ "Sets", "up", "a", "new", "span", "to", "contact", "a", "downstream", "service", ".", "This", "is", "used", "when", "making", "a", "downstream", "service", "call", ".", "It", "returns", "a", "dict", "containing", "the", "required", "sub", "-", "span", "headers", ".", "Each", "downstream", "call", "you", "make", "is", "handled", "as", "a", "new", "span", "so", "call", "this", "every", "time", "you", "need", "to", "contact", "another", "service", "." ]
train
https://github.com/davidcarboni/Flask-B3/blob/55092cb1070568aeecfd2c07c5ad6122e15ca345/b3/__init__.py#L153-L209
0.003724
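_start_subspan above keeps the trace id, mints a new span id, and moves the current span id into the parent header. A framework-free sketch of that header juggling; the header names follow the B3 propagation spec, while the id generator and the `current` dict are simplifications for illustration:

import os

def new_id(n_bytes=8):
    # lower-hex identifier; 8 bytes for span ids, 16 for a 128-bit trace id
    return os.urandom(n_bytes).hex()

current = {"X-B3-TraceId": new_id(16), "X-B3-SpanId": new_id()}

def subspan_headers(current, extra=None):
    headers = dict(extra or {})
    headers["X-B3-TraceId"] = current["X-B3-TraceId"]        # propagate the trace
    headers["X-B3-SpanId"] = new_id()                        # new span for the outgoing call
    headers["X-B3-ParentSpanId"] = current["X-B3-SpanId"]    # current span becomes the parent
    return headers

print(subspan_headers(current, {"Accept": "application/json"}))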
Aslan11/wilos-cli
wilos/writers.py
Stdout.summary
def summary(self, summary): """Prints the ASCII Icon""" if summary is not None: if summary == 'Clear': click.secho(""" ________ \ | / / ____/ /__ ____ ______ .-. / / / / _ \/ __ `/ ___/ ‒ ( ) ‒ / /___/ / __/ /_/ / / `-᾿ \____/_/\___/\__,_/_/ / | \\ """, fg=self.colors.YELLOW) click.echo() elif summary == 'Partly Cloudy': click.secho(""" ____ __ __ ________ __ / __ \____ ______/ /_/ /_ __ / ____/ /___ __ ______/ /_ __ \ | / / /_/ / __ `/ ___/ __/ / / / / / / / / __ \/ / / / __ / / / / .-. / ____/ /_/ / / / /_/ / /_/ / / /___/ / /_/ / /_/ / /_/ / /_/ / ‒ ( .-. /_/ \__,_/_/ \__/_/\__, / \____/_/\____/\__,_/\__,_/\__, / `( ). /____/ /____/ / (___(__) """, fg=self.colors.WHITE) elif summary == 'Flurries': click.secho(""" ________ _ .--. / ____/ /_ ____________(_)__ _____ .-( ). / /_ / / / / / ___/ ___/ / _ \/ ___/ (___.__)__) / __/ / / /_/ / / / / / / __(__ ) * * /_/ /_/\__,_/_/ /_/ /_/\___/____/ * """, fg=self.colors.BLUE) click.echo() elif summary == 'Overcast' or summary == 'Mostly Cloudy': click.secho(""" ____ __ / __ \_ _____ ______________ ______/ /_ .--. / / / / | / / _ \/ ___/ ___/ __ `/ ___/ __/ .( ).-. / /_/ /| |/ / __/ / / /__/ /_/ (__ ) /_ (___.__( ). \____/ |___/\___/_/ \___/\__,_/____/\__/ (___(__) """, fg=self.colors.WHITE) click.echo() elif summary == 'Snow': click.secho(""" _____ .--. / ___/____ ____ _ __ .-( ). \__ \/ __ \/ __ \ | /| / / (___.__)__) ___/ / / / / /_/ / |/ |/ / * * * /____/_/ /_/\____/|__/|__/ * * """, fg=self.colors.BLUE) click.echo() elif summary == 'Light Snow': click.secho(""" __ _ __ __ _____ / / (_)___ _/ /_ / /_ / ___/____ ____ _ __ .--. / / / / __ `/ __ \/ __/ \__ \/ __ \/ __ \ | /| / / .-( ). / /___/ / /_/ / / / / /_ ___/ / / / / /_/ / |/ |/ / (___.__)__) /_____/_/\__, /_/ /_/\__/ /____/_/ /_/\____/|__/|__/ * * * /____/ """, fg=self.colors.BLUE) click.echo() elif summary == 'Light Rain' or summary == 'Drizzle': click.secho(""" __ _ __ __ ____ _ / / (_)___ _/ /_ / /_ / __ \____ _(_)___ .--. / / / / __ `/ __ \/ __/ / /_/ / __ `/ / __ \ .-( ). / /___/ / /_/ / / / / /_ / _, _/ /_/ / / / / / (___.__)__) /_____/_/\__, /_/ /_/\__/ /_/ |_|\__,_/_/_/ /_/ / / / /____/ """, fg=self.colors.BLUE) elif summary == 'Rain': click.secho(""" ____ _ / __ \____ _(_)___ .--. / /_/ / __ `/ / __ \ .-( ). / _, _/ /_/ / / / / / (___.__)__) /_/ |_|\__,_/_/_/ /_/ / / / """, fg=self.colors.BLUE) else: click.secho("{:=^62}".format(str(summary)), fg=self.colors.GREEN)
python
def summary(self, summary): """Prints the ASCII Icon""" if summary is not None: if summary == 'Clear': click.secho(""" ________ \ | / / ____/ /__ ____ ______ .-. / / / / _ \/ __ `/ ___/ ‒ ( ) ‒ / /___/ / __/ /_/ / / `-᾿ \____/_/\___/\__,_/_/ / | \\ """, fg=self.colors.YELLOW) click.echo() elif summary == 'Partly Cloudy': click.secho(""" ____ __ __ ________ __ / __ \____ ______/ /_/ /_ __ / ____/ /___ __ ______/ /_ __ \ | / / /_/ / __ `/ ___/ __/ / / / / / / / / __ \/ / / / __ / / / / .-. / ____/ /_/ / / / /_/ / /_/ / / /___/ / /_/ / /_/ / /_/ / /_/ / ‒ ( .-. /_/ \__,_/_/ \__/_/\__, / \____/_/\____/\__,_/\__,_/\__, / `( ). /____/ /____/ / (___(__) """, fg=self.colors.WHITE) elif summary == 'Flurries': click.secho(""" ________ _ .--. / ____/ /_ ____________(_)__ _____ .-( ). / /_ / / / / / ___/ ___/ / _ \/ ___/ (___.__)__) / __/ / / /_/ / / / / / / __(__ ) * * /_/ /_/\__,_/_/ /_/ /_/\___/____/ * """, fg=self.colors.BLUE) click.echo() elif summary == 'Overcast' or summary == 'Mostly Cloudy': click.secho(""" ____ __ / __ \_ _____ ______________ ______/ /_ .--. / / / / | / / _ \/ ___/ ___/ __ `/ ___/ __/ .( ).-. / /_/ /| |/ / __/ / / /__/ /_/ (__ ) /_ (___.__( ). \____/ |___/\___/_/ \___/\__,_/____/\__/ (___(__) """, fg=self.colors.WHITE) click.echo() elif summary == 'Snow': click.secho(""" _____ .--. / ___/____ ____ _ __ .-( ). \__ \/ __ \/ __ \ | /| / / (___.__)__) ___/ / / / / /_/ / |/ |/ / * * * /____/_/ /_/\____/|__/|__/ * * """, fg=self.colors.BLUE) click.echo() elif summary == 'Light Snow': click.secho(""" __ _ __ __ _____ / / (_)___ _/ /_ / /_ / ___/____ ____ _ __ .--. / / / / __ `/ __ \/ __/ \__ \/ __ \/ __ \ | /| / / .-( ). / /___/ / /_/ / / / / /_ ___/ / / / / /_/ / |/ |/ / (___.__)__) /_____/_/\__, /_/ /_/\__/ /____/_/ /_/\____/|__/|__/ * * * /____/ """, fg=self.colors.BLUE) click.echo() elif summary == 'Light Rain' or summary == 'Drizzle': click.secho(""" __ _ __ __ ____ _ / / (_)___ _/ /_ / /_ / __ \____ _(_)___ .--. / / / / __ `/ __ \/ __/ / /_/ / __ `/ / __ \ .-( ). / /___/ / /_/ / / / / /_ / _, _/ /_/ / / / / / (___.__)__) /_____/_/\__, /_/ /_/\__/ /_/ |_|\__,_/_/_/ /_/ / / / /____/ """, fg=self.colors.BLUE) elif summary == 'Rain': click.secho(""" ____ _ / __ \____ _(_)___ .--. / /_/ / __ `/ / __ \ .-( ). / _, _/ /_/ / / / / / (___.__)__) /_/ |_|\__,_/_/_/ /_/ / / / """, fg=self.colors.BLUE) else: click.secho("{:=^62}".format(str(summary)), fg=self.colors.GREEN)
[ "def", "summary", "(", "self", ",", "summary", ")", ":", "if", "summary", "is", "not", "None", ":", "if", "summary", "==", "'Clear'", ":", "click", ".", "secho", "(", "\"\"\"\n\n ________ \\ | /\n / ____/ /__ ____ ______ .-.\n / / / / _ \\/ __ `/ ___/ ‒ ( ) ‒\n/ /___/ / __/ /_/ / / `-᾿\n\\____/_/\\___/\\__,_/_/ / | \\\\\n\n \"\"\"", ",", "fg", "=", "self", ".", "colors", ".", "YELLOW", ")", "click", ".", "echo", "(", ")", "elif", "summary", "==", "'Partly Cloudy'", ":", "click", ".", "secho", "(", "\"\"\"\n\n ____ __ __ ________ __\n / __ \\____ ______/ /_/ /_ __ / ____/ /___ __ ______/ /_ __ \\ | /\n / /_/ / __ `/ ___/ __/ / / / / / / / / __ \\/ / / / __ / / / / .-.\n / ____/ /_/ / / / /_/ / /_/ / / /___/ / /_/ / /_/ / /_/ / /_/ / ‒ ( .-.\n/_/ \\__,_/_/ \\__/_/\\__, / \\____/_/\\____/\\__,_/\\__,_/\\__, / `( ).\n /____/ /____/ / (___(__)\n\n \"\"\"", ",", "fg", "=", "self", ".", "colors", ".", "WHITE", ")", "elif", "summary", "==", "'Flurries'", ":", "click", ".", "secho", "(", "\"\"\"\n\n ________ _ .--.\n / ____/ /_ ____________(_)__ _____ .-( ).\n / /_ / / / / / ___/ ___/ / _ \\/ ___/ (___.__)__)\n / __/ / / /_/ / / / / / / __(__ ) * *\n/_/ /_/\\__,_/_/ /_/ /_/\\___/____/ *\n\n \"\"\"", ",", "fg", "=", "self", ".", "colors", ".", "BLUE", ")", "click", ".", "echo", "(", ")", "elif", "summary", "==", "'Overcast'", "or", "summary", "==", "'Mostly Cloudy'", ":", "click", ".", "secho", "(", "\"\"\"\n\n ____ __\n / __ \\_ _____ ______________ ______/ /_ .--.\n / / / / | / / _ \\/ ___/ ___/ __ `/ ___/ __/ .( ).-.\n/ /_/ /| |/ / __/ / / /__/ /_/ (__ ) /_ (___.__( ).\n\\____/ |___/\\___/_/ \\___/\\__,_/____/\\__/ (___(__)\n\n \"\"\"", ",", "fg", "=", "self", ".", "colors", ".", "WHITE", ")", "click", ".", "echo", "(", ")", "elif", "summary", "==", "'Snow'", ":", "click", ".", "secho", "(", "\"\"\"\n\n _____ .--.\n / ___/____ ____ _ __ .-( ).\n \\__ \\/ __ \\/ __ \\ | /| / / (___.__)__)\n ___/ / / / / /_/ / |/ |/ / * * *\n/____/_/ /_/\\____/|__/|__/ * *\n\n \"\"\"", ",", "fg", "=", "self", ".", "colors", ".", "BLUE", ")", "click", ".", "echo", "(", ")", "elif", "summary", "==", "'Light Snow'", ":", "click", ".", "secho", "(", "\"\"\"\n\n __ _ __ __ _____\n / / (_)___ _/ /_ / /_ / ___/____ ____ _ __ .--.\n / / / / __ `/ __ \\/ __/ \\__ \\/ __ \\/ __ \\ | /| / / .-( ).\n / /___/ / /_/ / / / / /_ ___/ / / / / /_/ / |/ |/ / (___.__)__)\n/_____/_/\\__, /_/ /_/\\__/ /____/_/ /_/\\____/|__/|__/ * * *\n /____/\n \"\"\"", ",", "fg", "=", "self", ".", "colors", ".", "BLUE", ")", "click", ".", "echo", "(", ")", "elif", "summary", "==", "'Light Rain'", "or", "summary", "==", "'Drizzle'", ":", "click", ".", "secho", "(", "\"\"\"\n\n __ _ __ __ ____ _\n / / (_)___ _/ /_ / /_ / __ \\____ _(_)___ .--.\n / / / / __ `/ __ \\/ __/ / /_/ / __ `/ / __ \\ .-( ).\n / /___/ / /_/ / / / / /_ / _, _/ /_/ / / / / / (___.__)__)\n/_____/_/\\__, /_/ /_/\\__/ /_/ |_|\\__,_/_/_/ /_/ / / /\n /____/\n\n \"\"\"", ",", "fg", "=", "self", ".", "colors", ".", "BLUE", ")", "elif", "summary", "==", "'Rain'", ":", "click", ".", "secho", "(", "\"\"\"\n\n ____ _\n / __ \\____ _(_)___ .--.\n / /_/ / __ `/ / __ \\ .-( ).\n / _, _/ /_/ / / / / / (___.__)__)\n/_/ |_|\\__,_/_/_/ /_/ / / /\n\n \"\"\"", ",", "fg", "=", "self", ".", "colors", ".", "BLUE", ")", "else", ":", "click", ".", "secho", "(", "\"{:=^62}\"", ".", "format", "(", "str", "(", "summary", ")", ")", ",", "fg", "=", "self", ".", "colors", ".", "GREEN", ")" ]
Prints the ASCII Icon
[ "Prints", "the", "ASCII", "Icon" ]
train
https://github.com/Aslan11/wilos-cli/blob/2c3da3589f685e95b4f73237a1bfe56373ea4574/wilos/writers.py#L55-L154
0.015014
f3at/feat
src/feat/models/getter.py
model_getattr
def model_getattr():
    """
    Creates a getter that will drop the current value
    and retrieve the model's attribute with the context key as name.
    """

    def model_getattr(_value, context, **_params):
        value = getattr(context["model"], context["key"])
        return _attr(value)

    return model_getattr
python
def model_getattr():
    """
    Creates a getter that will drop the current value
    and retrieve the model's attribute with the context key as name.
    """

    def model_getattr(_value, context, **_params):
        value = getattr(context["model"], context["key"])
        return _attr(value)

    return model_getattr
[ "def", "model_getattr", "(", ")", ":", "def", "model_getattr", "(", "_value", ",", "context", ",", "*", "*", "_params", ")", ":", "value", "=", "getattr", "(", "context", "[", "\"model\"", "]", ",", "context", "[", "\"key\"", "]", ")", "return", "_attr", "(", "value", ")", "return", "model_getattr" ]
Creates a getter that will drop the current value and retrieve the model's attribute with the context key as name.
[ "Creates", "a", "getter", "that", "will", "drop", "the", "current", "value", "and", "retrieve", "the", "model", "s", "attribute", "with", "the", "context", "key", "as", "name", "." ]
train
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/models/getter.py#L134-L144
0.003096
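model_getattr above is a closure factory: the returned getter ignores the incoming value and reads context["key"] off context["model"]. A standalone sketch without feat's _attr wrapper; the context layout is assumed from the snippet, not from feat's documentation:

def model_getattr():
    def getter(_value, context, **_params):
        return getattr(context["model"], context["key"])
    return getter

class Model:
    status = "running"

get = model_getattr()
print(get(None, {"model": Model(), "key": "status"}))   # running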
olitheolix/qtmacs
qtmacs/auxiliary.py
QtmacsModeBar.qteRemoveMode
def qteRemoveMode(self, mode: str): """ Remove ``mode`` and associated label. If ``mode`` does not exist then nothing happens and the method returns **False**, otherwise **True**. |Args| * ``pos`` (**QRect**): size and position of new window. * ``windowID`` (**str**): unique window ID. |Returns| * **bool**: **True** if the item was removed and **False** if there was an error (most likely ``mode`` does not exist). |Raises| * **QtmacsArgumentError** if at least one argument has an invalid type. """ # Search through the list for ``mode``. for idx, item in enumerate(self._qteModeList): if item[0] == mode: # Remove the record and delete the label. self._qteModeList.remove(item) item[2].hide() item[2].deleteLater() self._qteUpdateLabelWidths() return True return False
python
def qteRemoveMode(self, mode: str): """ Remove ``mode`` and associated label. If ``mode`` does not exist then nothing happens and the method returns **False**, otherwise **True**. |Args| * ``pos`` (**QRect**): size and position of new window. * ``windowID`` (**str**): unique window ID. |Returns| * **bool**: **True** if the item was removed and **False** if there was an error (most likely ``mode`` does not exist). |Raises| * **QtmacsArgumentError** if at least one argument has an invalid type. """ # Search through the list for ``mode``. for idx, item in enumerate(self._qteModeList): if item[0] == mode: # Remove the record and delete the label. self._qteModeList.remove(item) item[2].hide() item[2].deleteLater() self._qteUpdateLabelWidths() return True return False
[ "def", "qteRemoveMode", "(", "self", ",", "mode", ":", "str", ")", ":", "# Search through the list for ``mode``.", "for", "idx", ",", "item", "in", "enumerate", "(", "self", ".", "_qteModeList", ")", ":", "if", "item", "[", "0", "]", "==", "mode", ":", "# Remove the record and delete the label.", "self", ".", "_qteModeList", ".", "remove", "(", "item", ")", "item", "[", "2", "]", ".", "hide", "(", ")", "item", "[", "2", "]", ".", "deleteLater", "(", ")", "self", ".", "_qteUpdateLabelWidths", "(", ")", "return", "True", "return", "False" ]
Remove ``mode`` and associated label. If ``mode`` does not exist then nothing happens and the method returns **False**, otherwise **True**. |Args| * ``mode`` (**str**): name of the mode whose label should be removed. |Returns| * **bool**: **True** if the item was removed and **False** if there was an error (most likely ``mode`` does not exist). |Raises| * **QtmacsArgumentError** if at least one argument has an invalid type.
[ "Remove", "mode", "and", "associated", "label", "." ]
train
https://github.com/olitheolix/qtmacs/blob/36253b082b82590f183fe154b053eb3a1e741be2/qtmacs/auxiliary.py#L1368-L1398
0.001967
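qteRemoveMode above scans a list of (mode, ..., label) records, removes the first match, and reports success. A plain-Python sketch of the same loop without the Qt label bookkeeping; the record layout is invented for illustration:

mode_list = [("insert", 0, "lbl-insert"), ("overwrite", 1, "lbl-overwrite")]

def remove_mode(mode_list, mode):
    for item in mode_list:
        if item[0] == mode:
            mode_list.remove(item)
            return True
    return False

print(remove_mode(mode_list, "insert"), mode_list)   # True [('overwrite', 1, 'lbl-overwrite')]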
hydpy-dev/hydpy
hydpy/core/devicetools.py
Node.get_double
def get_double(self, group: str) -> pointerutils.Double: """Return the |Double| object appropriate for the given |Element| input or output group and the actual |Node.deploymode|. Method |Node.get_double| should be of interest for framework developers only (and eventually for model developers). Let |Node| object `node1` handle different simulation and observation values: >>> from hydpy import Node >>> node = Node('node1') >>> node.sequences.sim = 1.0 >>> node.sequences.obs = 2.0 The following `test` function shows for a given |Node.deploymode| if method |Node.get_double| either returns the |Double| object handling the simulated value (1.0) or the |Double| object handling the observed value (2.0): >>> def test(deploymode): ... node.deploymode = deploymode ... for group in ('inlets', 'receivers', 'outlets', 'senders'): ... print(group, node.get_double(group)) In the default mode, nodes (passively) route simulated values through offering the |Double| object of sequence |Sim| to all |Element| input and output groups: >>> test('newsim') inlets 1.0 receivers 1.0 outlets 1.0 senders 1.0 Setting |Node.deploymode| to `obs` means that a node receives simulated values (from group `outlets` or `senders`), but provides observed values (to group `inlets` or `receivers`): >>> test('obs') inlets 2.0 receivers 2.0 outlets 1.0 senders 1.0 With |Node.deploymode| set to `oldsim`, the node provides (previously) simulated values (to group `inlets` or `receivers`) but does not receive any values. Method |Node.get_double| just returns a dummy |Double| object with value 0.0 in this case (for group `outlets` or `senders`): >>> test('oldsim') inlets 1.0 receivers 1.0 outlets 0.0 senders 0.0 Other |Element| input or output groups are not supported: >>> node.get_double('test') Traceback (most recent call last): ... ValueError: Function `get_double` of class `Node` does not support \ the given group name `test`. """ if group in ('inlets', 'receivers'): if self.deploymode != 'obs': return self.sequences.fastaccess.sim return self.sequences.fastaccess.obs if group in ('outlets', 'senders'): if self.deploymode != 'oldsim': return self.sequences.fastaccess.sim return self.__blackhole raise ValueError( f'Function `get_double` of class `Node` does not ' f'support the given group name `{group}`.')
python
def get_double(self, group: str) -> pointerutils.Double: """Return the |Double| object appropriate for the given |Element| input or output group and the actual |Node.deploymode|. Method |Node.get_double| should be of interest for framework developers only (and eventually for model developers). Let |Node| object `node1` handle different simulation and observation values: >>> from hydpy import Node >>> node = Node('node1') >>> node.sequences.sim = 1.0 >>> node.sequences.obs = 2.0 The following `test` function shows for a given |Node.deploymode| if method |Node.get_double| either returns the |Double| object handling the simulated value (1.0) or the |Double| object handling the observed value (2.0): >>> def test(deploymode): ... node.deploymode = deploymode ... for group in ('inlets', 'receivers', 'outlets', 'senders'): ... print(group, node.get_double(group)) In the default mode, nodes (passively) route simulated values through offering the |Double| object of sequence |Sim| to all |Element| input and output groups: >>> test('newsim') inlets 1.0 receivers 1.0 outlets 1.0 senders 1.0 Setting |Node.deploymode| to `obs` means that a node receives simulated values (from group `outlets` or `senders`), but provides observed values (to group `inlets` or `receivers`): >>> test('obs') inlets 2.0 receivers 2.0 outlets 1.0 senders 1.0 With |Node.deploymode| set to `oldsim`, the node provides (previously) simulated values (to group `inlets` or `receivers`) but does not receive any values. Method |Node.get_double| just returns a dummy |Double| object with value 0.0 in this case (for group `outlets` or `senders`): >>> test('oldsim') inlets 1.0 receivers 1.0 outlets 0.0 senders 0.0 Other |Element| input or output groups are not supported: >>> node.get_double('test') Traceback (most recent call last): ... ValueError: Function `get_double` of class `Node` does not support \ the given group name `test`. """ if group in ('inlets', 'receivers'): if self.deploymode != 'obs': return self.sequences.fastaccess.sim return self.sequences.fastaccess.obs if group in ('outlets', 'senders'): if self.deploymode != 'oldsim': return self.sequences.fastaccess.sim return self.__blackhole raise ValueError( f'Function `get_double` of class `Node` does not ' f'support the given group name `{group}`.')
[ "def", "get_double", "(", "self", ",", "group", ":", "str", ")", "->", "pointerutils", ".", "Double", ":", "if", "group", "in", "(", "'inlets'", ",", "'receivers'", ")", ":", "if", "self", ".", "deploymode", "!=", "'obs'", ":", "return", "self", ".", "sequences", ".", "fastaccess", ".", "sim", "return", "self", ".", "sequences", ".", "fastaccess", ".", "obs", "if", "group", "in", "(", "'outlets'", ",", "'senders'", ")", ":", "if", "self", ".", "deploymode", "!=", "'oldsim'", ":", "return", "self", ".", "sequences", ".", "fastaccess", ".", "sim", "return", "self", ".", "__blackhole", "raise", "ValueError", "(", "f'Function `get_double` of class `Node` does not '", "f'support the given group name `{group}`.'", ")" ]
Return the |Double| object appropriate for the given |Element| input or output group and the actual |Node.deploymode|. Method |Node.get_double| should be of interest for framework developers only (and eventually for model developers). Let |Node| object `node1` handle different simulation and observation values: >>> from hydpy import Node >>> node = Node('node1') >>> node.sequences.sim = 1.0 >>> node.sequences.obs = 2.0 The following `test` function shows for a given |Node.deploymode| if method |Node.get_double| either returns the |Double| object handling the simulated value (1.0) or the |Double| object handling the observed value (2.0): >>> def test(deploymode): ... node.deploymode = deploymode ... for group in ('inlets', 'receivers', 'outlets', 'senders'): ... print(group, node.get_double(group)) In the default mode, nodes (passively) route simulated values through offering the |Double| object of sequence |Sim| to all |Element| input and output groups: >>> test('newsim') inlets 1.0 receivers 1.0 outlets 1.0 senders 1.0 Setting |Node.deploymode| to `obs` means that a node receives simulated values (from group `outlets` or `senders`), but provides observed values (to group `inlets` or `receivers`): >>> test('obs') inlets 2.0 receivers 2.0 outlets 1.0 senders 1.0 With |Node.deploymode| set to `oldsim`, the node provides (previously) simulated values (to group `inlets` or `receivers`) but does not receive any values. Method |Node.get_double| just returns a dummy |Double| object with value 0.0 in this case (for group `outlets` or `senders`): >>> test('oldsim') inlets 1.0 receivers 1.0 outlets 0.0 senders 0.0 Other |Element| input or output groups are not supported: >>> node.get_double('test') Traceback (most recent call last): ... ValueError: Function `get_double` of class `Node` does not support \ the given group name `test`.
[ "Return", "the", "|Double|", "object", "appropriate", "for", "the", "given", "|Element|", "input", "or", "output", "group", "and", "the", "actual", "|Node", ".", "deploymode|", "." ]
train
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/devicetools.py#L1483-L1558
0.0007
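The get_double docstring above already carries doctests; as a compact restatement of just the dispatch rule, here is a hedged, framework-free sketch (function and argument names are made up, only the branching mirrors the method):

def pick_value(deploymode, group, sim, obs, blackhole=0.0):
    # Input groups see the observed value only when deploymode is 'obs';
    # output groups see a dummy value when deploymode is 'oldsim'.
    if group in ('inlets', 'receivers'):
        return obs if deploymode == 'obs' else sim
    if group in ('outlets', 'senders'):
        return blackhole if deploymode == 'oldsim' else sim
    raise ValueError('unsupported group: %s' % group)

print(pick_value('obs', 'inlets', sim=1.0, obs=2.0))       # 2.0
print(pick_value('oldsim', 'outlets', sim=1.0, obs=2.0))   # 0.0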
RadhikaG/markdown-magic
magic/memeAPI.py
processMeme
def processMeme(imgParams): ''' Wrapper function for genMeme() and findMeme() imgParams may be a string of the following forms: * 'text0 | text1' * 'text0' * ' | text1' Fails gracefully when it can't find or generate a meme by returning an appropriate image url with the failure message on it. ''' template_id = findMeme(imgParams) if template_id is None: print("Couldn't find a suitable match for meme :(") return meme_not_supported # if template_id exists imgParams = imgParams.split('|') if len(imgParams) == 2 or len(imgParams) == 1: text0 = imgParams[0] if len(imgParams) == 2: text1 = imgParams[1] # Bottom text text1 exists elif len(imgParams) == 1: text1 = '' # No bottom text imgURL = genMeme(template_id, text0, text1) if imgURL is None: # Couldn't generate meme print("Couldn't generate meme :(") return couldnt_create_meme else: # Success! # print(imgURL) return imgURL elif len(imgParams) > 2: print("Too many lines of captions! Cannot create meme.") return too_many_lines elif len(imgParams) < 1: # No top text text0 exists print("Too few lines of captions! Cannot create meme.") return too_few_lines
python
def processMeme(imgParams): ''' Wrapper function for genMeme() and findMeme() imgParams may be a string of the following forms: * 'text0 | text1' * 'text0' * ' | text1' Fails gracefully when it can't find or generate a meme by returning an appropriate image url with the failure message on it. ''' template_id = findMeme(imgParams) if template_id is None: print("Couldn't find a suitable match for meme :(") return meme_not_supported # if template_id exists imgParams = imgParams.split('|') if len(imgParams) == 2 or len(imgParams) == 1: text0 = imgParams[0] if len(imgParams) == 2: text1 = imgParams[1] # Bottom text text1 exists elif len(imgParams) == 1: text1 = '' # No bottom text imgURL = genMeme(template_id, text0, text1) if imgURL is None: # Couldn't generate meme print("Couldn't generate meme :(") return couldnt_create_meme else: # Success! # print(imgURL) return imgURL elif len(imgParams) > 2: print("Too many lines of captions! Cannot create meme.") return too_many_lines elif len(imgParams) < 1: # No top text text0 exists print("Too few lines of captions! Cannot create meme.") return too_few_lines
[ "def", "processMeme", "(", "imgParams", ")", ":", "template_id", "=", "findMeme", "(", "imgParams", ")", "if", "template_id", "is", "None", ":", "print", "(", "\"Couldn't find a suitable match for meme :(\"", ")", "return", "meme_not_supported", "# if template_id exists", "imgParams", "=", "imgParams", ".", "split", "(", "'|'", ")", "if", "len", "(", "imgParams", ")", "==", "2", "or", "len", "(", "imgParams", ")", "==", "1", ":", "text0", "=", "imgParams", "[", "0", "]", "if", "len", "(", "imgParams", ")", "==", "2", ":", "text1", "=", "imgParams", "[", "1", "]", "# Bottom text text1 exists", "elif", "len", "(", "imgParams", ")", "==", "1", ":", "text1", "=", "''", "# No bottom text", "imgURL", "=", "genMeme", "(", "template_id", ",", "text0", ",", "text1", ")", "if", "imgURL", "is", "None", ":", "# Couldn't generate meme", "print", "(", "\"Couldn't generate meme :(\"", ")", "return", "couldnt_create_meme", "else", ":", "# Success!", "# print(imgURL)", "return", "imgURL", "elif", "len", "(", "imgParams", ")", ">", "2", ":", "print", "(", "\"Too many lines of captions! Cannot create meme.\"", ")", "return", "too_many_lines", "elif", "len", "(", "imgParams", ")", "<", "1", ":", "# No top text text0 exists", "print", "(", "\"Too few lines of captions! Cannot create meme.\"", ")", "return", "too_few_lines" ]
Wrapper function for genMeme() and findMeme() imgParams may be a string of the following forms: * 'text0 | text1' * 'text0' * ' | text1' Fails gracefully when it can't find or generate a meme by returning an appropriate image url with the failure message on it.
[ "Wrapper", "function", "for", "genMeme", "()", "and", "findMeme", "()", "imgParams", "may", "be", "a", "string", "of", "the", "following", "forms", ":", "*", "text0", "|", "text1", "*", "text0", "*", "|", "text1" ]
train
https://github.com/RadhikaG/markdown-magic/blob/af99549b033269d861ea13f0541cb4f894057c47/magic/memeAPI.py#L105-L150
0.000707
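To make the 'text0 | text1' caption format described in the processMeme record concrete, a small illustrative helper (not part of markdown-magic; the whitespace trimming is an addition for readability):

def split_caption(img_params):
    # Split "text0 | text1" into top and bottom captions; the bottom text is
    # optional, mirroring the input forms listed in the docstring above.
    parts = [p.strip() for p in img_params.split('|')]
    text0 = parts[0]
    text1 = parts[1] if len(parts) > 1 else ''
    return text0, text1

print(split_caption('one does not simply | edit markdown'))
# ('one does not simply', 'edit markdown')
print(split_caption('shut up and take my money'))
# ('shut up and take my money', '')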
StanfordBioinformatics/loom
utils/loomengine_utils/file_utils.py
FilePattern
def FilePattern(pattern, settings, **kwargs): """Factory method returns LocalFilePattern or GoogleStorageFilePattern """ url = _urlparse(pattern) if url.scheme == 'gs': return GoogleStorageFilePattern(pattern, settings, **kwargs) else: assert url.scheme == 'file' return LocalFilePattern(pattern, settings, **kwargs)
python
def FilePattern(pattern, settings, **kwargs): """Factory method returns LocalFilePattern or GoogleStorageFilePattern """ url = _urlparse(pattern) if url.scheme == 'gs': return GoogleStorageFilePattern(pattern, settings, **kwargs) else: assert url.scheme == 'file' return LocalFilePattern(pattern, settings, **kwargs)
[ "def", "FilePattern", "(", "pattern", ",", "settings", ",", "*", "*", "kwargs", ")", ":", "url", "=", "_urlparse", "(", "pattern", ")", "if", "url", ".", "scheme", "==", "'gs'", ":", "return", "GoogleStorageFilePattern", "(", "pattern", ",", "settings", ",", "*", "*", "kwargs", ")", "else", ":", "assert", "url", ".", "scheme", "==", "'file'", "return", "LocalFilePattern", "(", "pattern", ",", "settings", ",", "*", "*", "kwargs", ")" ]
Factory method returns LocalFilePattern or GoogleStorageFilePattern
[ "Factory", "method", "returns", "LocalFilePattern", "or", "GoogleStorageFilePattern" ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/utils/loomengine_utils/file_utils.py#L119-L127
0.002778
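The FilePattern record shows a factory that dispatches on the URL scheme. A self-contained sketch of the same dispatch, assuming Python 3's urllib.parse and two stand-in classes invented for the example:

from urllib.parse import urlsplit

class LocalPattern:
    def __init__(self, pattern):
        self.pattern = pattern

class GoogleStoragePattern:
    def __init__(self, pattern):
        self.pattern = pattern

def make_pattern(pattern):
    # Dispatch on the URL scheme, as the FilePattern factory does above.
    scheme = urlsplit(pattern).scheme
    if scheme == 'gs':
        return GoogleStoragePattern(pattern)
    assert scheme == 'file', 'only file:// and gs:// are handled'
    return LocalPattern(pattern)

print(type(make_pattern('gs://bucket/data/*.txt')).__name__)    # GoogleStoragePattern
print(type(make_pattern('file:///tmp/data/*.txt')).__name__)    # LocalPattern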
fastai/fastai
fastai/basic_train.py
fit
def fit(epochs:int, learn:BasicLearner, callbacks:Optional[CallbackList]=None, metrics:OptMetrics=None)->None: "Fit the `model` on `data` and learn using `loss_func` and `opt`." assert len(learn.data.train_dl) != 0, f"""Your training dataloader is empty, can't train a model. Use a smaller batch size (batch size={learn.data.train_dl.batch_size} for {len(learn.data.train_dl.dataset)} elements).""" cb_handler = CallbackHandler(callbacks, metrics) pbar = master_bar(range(epochs)) cb_handler.on_train_begin(epochs, pbar=pbar, metrics=metrics) exception=False try: for epoch in pbar: learn.model.train() cb_handler.set_dl(learn.data.train_dl) cb_handler.on_epoch_begin() for xb,yb in progress_bar(learn.data.train_dl, parent=pbar): xb, yb = cb_handler.on_batch_begin(xb, yb) loss = loss_batch(learn.model, xb, yb, learn.loss_func, learn.opt, cb_handler) if cb_handler.on_batch_end(loss): break if not cb_handler.skip_validate and not learn.data.empty_val: val_loss = validate(learn.model, learn.data.valid_dl, loss_func=learn.loss_func, cb_handler=cb_handler, pbar=pbar) else: val_loss=None if cb_handler.on_epoch_end(val_loss): break except Exception as e: exception = e raise finally: cb_handler.on_train_end(exception)
python
def fit(epochs:int, learn:BasicLearner, callbacks:Optional[CallbackList]=None, metrics:OptMetrics=None)->None: "Fit the `model` on `data` and learn using `loss_func` and `opt`." assert len(learn.data.train_dl) != 0, f"""Your training dataloader is empty, can't train a model. Use a smaller batch size (batch size={learn.data.train_dl.batch_size} for {len(learn.data.train_dl.dataset)} elements).""" cb_handler = CallbackHandler(callbacks, metrics) pbar = master_bar(range(epochs)) cb_handler.on_train_begin(epochs, pbar=pbar, metrics=metrics) exception=False try: for epoch in pbar: learn.model.train() cb_handler.set_dl(learn.data.train_dl) cb_handler.on_epoch_begin() for xb,yb in progress_bar(learn.data.train_dl, parent=pbar): xb, yb = cb_handler.on_batch_begin(xb, yb) loss = loss_batch(learn.model, xb, yb, learn.loss_func, learn.opt, cb_handler) if cb_handler.on_batch_end(loss): break if not cb_handler.skip_validate and not learn.data.empty_val: val_loss = validate(learn.model, learn.data.valid_dl, loss_func=learn.loss_func, cb_handler=cb_handler, pbar=pbar) else: val_loss=None if cb_handler.on_epoch_end(val_loss): break except Exception as e: exception = e raise finally: cb_handler.on_train_end(exception)
[ "def", "fit", "(", "epochs", ":", "int", ",", "learn", ":", "BasicLearner", ",", "callbacks", ":", "Optional", "[", "CallbackList", "]", "=", "None", ",", "metrics", ":", "OptMetrics", "=", "None", ")", "->", "None", ":", "assert", "len", "(", "learn", ".", "data", ".", "train_dl", ")", "!=", "0", ",", "f\"\"\"Your training dataloader is empty, can't train a model.\n Use a smaller batch size (batch size={learn.data.train_dl.batch_size} for {len(learn.data.train_dl.dataset)} elements).\"\"\"", "cb_handler", "=", "CallbackHandler", "(", "callbacks", ",", "metrics", ")", "pbar", "=", "master_bar", "(", "range", "(", "epochs", ")", ")", "cb_handler", ".", "on_train_begin", "(", "epochs", ",", "pbar", "=", "pbar", ",", "metrics", "=", "metrics", ")", "exception", "=", "False", "try", ":", "for", "epoch", "in", "pbar", ":", "learn", ".", "model", ".", "train", "(", ")", "cb_handler", ".", "set_dl", "(", "learn", ".", "data", ".", "train_dl", ")", "cb_handler", ".", "on_epoch_begin", "(", ")", "for", "xb", ",", "yb", "in", "progress_bar", "(", "learn", ".", "data", ".", "train_dl", ",", "parent", "=", "pbar", ")", ":", "xb", ",", "yb", "=", "cb_handler", ".", "on_batch_begin", "(", "xb", ",", "yb", ")", "loss", "=", "loss_batch", "(", "learn", ".", "model", ",", "xb", ",", "yb", ",", "learn", ".", "loss_func", ",", "learn", ".", "opt", ",", "cb_handler", ")", "if", "cb_handler", ".", "on_batch_end", "(", "loss", ")", ":", "break", "if", "not", "cb_handler", ".", "skip_validate", "and", "not", "learn", ".", "data", ".", "empty_val", ":", "val_loss", "=", "validate", "(", "learn", ".", "model", ",", "learn", ".", "data", ".", "valid_dl", ",", "loss_func", "=", "learn", ".", "loss_func", ",", "cb_handler", "=", "cb_handler", ",", "pbar", "=", "pbar", ")", "else", ":", "val_loss", "=", "None", "if", "cb_handler", ".", "on_epoch_end", "(", "val_loss", ")", ":", "break", "except", "Exception", "as", "e", ":", "exception", "=", "e", "raise", "finally", ":", "cb_handler", ".", "on_train_end", "(", "exception", ")" ]
Fit the `model` on `data` and learn using `loss_func` and `opt`.
[ "Fit", "the", "model", "on", "data", "and", "learn", "using", "loss_func", "and", "opt", "." ]
train
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/basic_train.py#L85-L112
0.015572
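The fastai fit record is a callback-driven double loop: batches inside epochs, with callbacks able to stop either loop early. A stripped-down, framework-free sketch of that control flow (all names and the toy "model" are assumptions, not fastai API):

def train_loop(epochs, batches, step,
               on_batch_end=lambda loss: False,
               on_epoch_end=lambda loss: False):
    # Batches inside epochs; either callback may return True to stop early,
    # mirroring the two ``break`` statements in fit() above.
    loss = None
    for _ in range(epochs):
        for xb, yb in batches:
            loss = step(xb, yb)
            if on_batch_end(loss):
                break
        if on_epoch_end(loss):
            break
    return loss

# Toy "model": the loss is just the gap between input and target.
data = [(1, 1.0), (2, 1.5), (3, 3.5)]
print(train_loop(2, data, step=lambda x, y: abs(x - y)))   # 0.5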
ome/omego
omego/upgrade.py
Install._handle_args
def _handle_args(self, cmd, args): """ We need to support deprecated behaviour for now which makes this quite complicated Current behaviour: - install: Installs a new server, existing server causes an error - install --upgrade: Installs or upgrades a server - install --managedb: Automatically initialise or upgrade the db Deprecated: - install --upgradedb --initdb: Replaced by install --managedb - install --upgradedb: upgrade the db, must exist - install --initdb: initialise the db - upgrade: Upgrades a server, must already exist - upgrade --upgradedb: Automatically upgrade the db returns: - Modified args object, flag to indicate new/existing/auto install """ if cmd == 'install': if args.upgrade: # Current behaviour: install or upgrade if args.initdb or args.upgradedb: raise Stop(10, ( 'Deprecated --initdb --upgradedb flags ' 'are incompatible with --upgrade')) newinstall = None else: # Current behaviour: Server must not exist newinstall = True if args.managedb: # Current behaviour if args.initdb or args.upgradedb: raise Stop(10, ( 'Deprecated --initdb --upgradedb flags ' 'are incompatible with --managedb')) args.initdb = True args.upgradedb = True else: if args.initdb or args.upgradedb: log.warn('--initdb and --upgradedb are deprecated, ' 'use --managedb') elif cmd == 'upgrade': # Deprecated behaviour log.warn( '"omero upgrade" is deprecated, use "omego install --upgrade"') cmd = 'install' args.upgrade = True # Deprecated behaviour: Server must exist newinstall = False else: raise Exception('Unexpected command: %s' % cmd) return args, newinstall
python
def _handle_args(self, cmd, args): """ We need to support deprecated behaviour for now which makes this quite complicated Current behaviour: - install: Installs a new server, existing server causes an error - install --upgrade: Installs or upgrades a server - install --managedb: Automatically initialise or upgrade the db Deprecated: - install --upgradedb --initdb: Replaced by install --managedb - install --upgradedb: upgrade the db, must exist - install --initdb: initialise the db - upgrade: Upgrades a server, must already exist - upgrade --upgradedb: Automatically upgrade the db returns: - Modified args object, flag to indicate new/existing/auto install """ if cmd == 'install': if args.upgrade: # Current behaviour: install or upgrade if args.initdb or args.upgradedb: raise Stop(10, ( 'Deprecated --initdb --upgradedb flags ' 'are incompatible with --upgrade')) newinstall = None else: # Current behaviour: Server must not exist newinstall = True if args.managedb: # Current behaviour if args.initdb or args.upgradedb: raise Stop(10, ( 'Deprecated --initdb --upgradedb flags ' 'are incompatible with --managedb')) args.initdb = True args.upgradedb = True else: if args.initdb or args.upgradedb: log.warn('--initdb and --upgradedb are deprecated, ' 'use --managedb') elif cmd == 'upgrade': # Deprecated behaviour log.warn( '"omero upgrade" is deprecated, use "omego install --upgrade"') cmd = 'install' args.upgrade = True # Deprecated behaviour: Server must exist newinstall = False else: raise Exception('Unexpected command: %s' % cmd) return args, newinstall
[ "def", "_handle_args", "(", "self", ",", "cmd", ",", "args", ")", ":", "if", "cmd", "==", "'install'", ":", "if", "args", ".", "upgrade", ":", "# Current behaviour: install or upgrade", "if", "args", ".", "initdb", "or", "args", ".", "upgradedb", ":", "raise", "Stop", "(", "10", ",", "(", "'Deprecated --initdb --upgradedb flags '", "'are incompatible with --upgrade'", ")", ")", "newinstall", "=", "None", "else", ":", "# Current behaviour: Server must not exist", "newinstall", "=", "True", "if", "args", ".", "managedb", ":", "# Current behaviour", "if", "args", ".", "initdb", "or", "args", ".", "upgradedb", ":", "raise", "Stop", "(", "10", ",", "(", "'Deprecated --initdb --upgradedb flags '", "'are incompatible with --managedb'", ")", ")", "args", ".", "initdb", "=", "True", "args", ".", "upgradedb", "=", "True", "else", ":", "if", "args", ".", "initdb", "or", "args", ".", "upgradedb", ":", "log", ".", "warn", "(", "'--initdb and --upgradedb are deprecated, '", "'use --managedb'", ")", "elif", "cmd", "==", "'upgrade'", ":", "# Deprecated behaviour", "log", ".", "warn", "(", "'\"omero upgrade\" is deprecated, use \"omego install --upgrade\"'", ")", "cmd", "=", "'install'", "args", ".", "upgrade", "=", "True", "# Deprecated behaviour: Server must exist", "newinstall", "=", "False", "else", ":", "raise", "Exception", "(", "'Unexpected command: %s'", "%", "cmd", ")", "return", "args", ",", "newinstall" ]
We need to support deprecated behaviour for now which makes this quite complicated Current behaviour: - install: Installs a new server, existing server causes an error - install --upgrade: Installs or upgrades a server - install --managedb: Automatically initialise or upgrade the db Deprecated: - install --upgradedb --initdb: Replaced by install --managedb - install --upgradedb: upgrade the db, must exist - install --initdb: initialise the db - upgrade: Upgrades a server, must already exist - upgrade --upgradedb: Automatically upgrade the db returns: - Modified args object, flag to indicate new/existing/auto install
[ "We", "need", "to", "support", "deprecated", "behaviour", "for", "now", "which", "makes", "this", "quite", "complicated" ]
train
https://github.com/ome/omego/blob/2dadbf3c6342b6c995f9e0dceaf3c0b7fab030fb/omego/upgrade.py#L75-L132
0.000896
sirrice/pygg
pygg/pygg.py
_to_r
def _to_r(o, as_data=False, level=0): """Helper function to convert python data structures to R equivalents TODO: a single model for transforming to r to handle * function args * lists as function args """ if o is None: return "NA" if isinstance(o, basestring): return o if hasattr(o, "r"): # bridge to @property r on GGStatement(s) return o.r elif isinstance(o, bool): return "TRUE" if o else "FALSE" elif isinstance(o, (list, tuple)): inner = ",".join([_to_r(x, True, level+1) for x in o]) return "c({})".format(inner) if as_data else inner elif isinstance(o, dict): inner = ",".join(["{}={}".format(k, _to_r(v, True, level+1)) for k, v in sorted(o.iteritems(), key=lambda x: x[0])]) return "list({})".format(inner) if as_data else inner return str(o)
python
def _to_r(o, as_data=False, level=0): """Helper function to convert python data structures to R equivalents TODO: a single model for transforming to r to handle * function args * lists as function args """ if o is None: return "NA" if isinstance(o, basestring): return o if hasattr(o, "r"): # bridge to @property r on GGStatement(s) return o.r elif isinstance(o, bool): return "TRUE" if o else "FALSE" elif isinstance(o, (list, tuple)): inner = ",".join([_to_r(x, True, level+1) for x in o]) return "c({})".format(inner) if as_data else inner elif isinstance(o, dict): inner = ",".join(["{}={}".format(k, _to_r(v, True, level+1)) for k, v in sorted(o.iteritems(), key=lambda x: x[0])]) return "list({})".format(inner) if as_data else inner return str(o)
[ "def", "_to_r", "(", "o", ",", "as_data", "=", "False", ",", "level", "=", "0", ")", ":", "if", "o", "is", "None", ":", "return", "\"NA\"", "if", "isinstance", "(", "o", ",", "basestring", ")", ":", "return", "o", "if", "hasattr", "(", "o", ",", "\"r\"", ")", ":", "# bridge to @property r on GGStatement(s)", "return", "o", ".", "r", "elif", "isinstance", "(", "o", ",", "bool", ")", ":", "return", "\"TRUE\"", "if", "o", "else", "\"FALSE\"", "elif", "isinstance", "(", "o", ",", "(", "list", ",", "tuple", ")", ")", ":", "inner", "=", "\",\"", ".", "join", "(", "[", "_to_r", "(", "x", ",", "True", ",", "level", "+", "1", ")", "for", "x", "in", "o", "]", ")", "return", "\"c({})\"", ".", "format", "(", "inner", ")", "if", "as_data", "else", "inner", "elif", "isinstance", "(", "o", ",", "dict", ")", ":", "inner", "=", "\",\"", ".", "join", "(", "[", "\"{}={}\"", ".", "format", "(", "k", ",", "_to_r", "(", "v", ",", "True", ",", "level", "+", "1", ")", ")", "for", "k", ",", "v", "in", "sorted", "(", "o", ".", "iteritems", "(", ")", ",", "key", "=", "lambda", "x", ":", "x", "[", "0", "]", ")", "]", ")", "return", "\"list({})\"", ".", "format", "(", "inner", ")", "if", "as_data", "else", "inner", "return", "str", "(", "o", ")" ]
Helper function to convert python data structures to R equivalents TODO: a single model for transforming to r to handle * function args * lists as function args
[ "Helper", "function", "to", "convert", "python", "data", "structures", "to", "R", "equivalents" ]
train
https://github.com/sirrice/pygg/blob/b36e19b3827e0a7d661de660b04d55a73f35896b/pygg/pygg.py#L32-L55
0.002225
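To see what the _to_r rules above produce, here is a rough Python 3 restatement (str/items instead of basestring/iteritems) together with two sample conversions; the printed R snippets follow from the rules, they are not captured from pygg itself:

def to_r(o, as_data=False):
    # Python 3 restatement of the same rules: None -> NA, bool -> TRUE/FALSE,
    # list/tuple -> c(...), dict -> list(key=value, ...), strings pass through.
    if o is None:
        return "NA"
    if isinstance(o, str):
        return o
    if isinstance(o, bool):
        return "TRUE" if o else "FALSE"
    if isinstance(o, (list, tuple)):
        inner = ",".join(to_r(x, True) for x in o)
        return "c({})".format(inner) if as_data else inner
    if isinstance(o, dict):
        inner = ",".join("{}={}".format(k, to_r(v, True)) for k, v in sorted(o.items()))
        return "list({})".format(inner) if as_data else inner
    return str(o)

print(to_r([1, 2, 3], as_data=True))            # c(1,2,3)
print(to_r({"x": [1, 2], "flag": True}, True))  # list(flag=TRUE,x=c(1,2))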
jcrist/skein
skein/ui.py
ProxiedPage.address
def address(self): """The full proxied address to this page""" path = urlsplit(self.target).path suffix = '/' if not path or path.endswith('/') else '' return '%s%s/%s%s' % (self._ui_address[:-1], self._proxy_prefix, self.route, suffix)
python
def address(self): """The full proxied address to this page""" path = urlsplit(self.target).path suffix = '/' if not path or path.endswith('/') else '' return '%s%s/%s%s' % (self._ui_address[:-1], self._proxy_prefix, self.route, suffix)
[ "def", "address", "(", "self", ")", ":", "path", "=", "urlsplit", "(", "self", ".", "target", ")", ".", "path", "suffix", "=", "'/'", "if", "not", "path", "or", "path", ".", "endswith", "(", "'/'", ")", "else", "''", "return", "'%s%s/%s%s'", "%", "(", "self", ".", "_ui_address", "[", ":", "-", "1", "]", ",", "self", ".", "_proxy_prefix", ",", "self", ".", "route", ",", "suffix", ")" ]
The full proxied address to this page
[ "The", "full", "proxied", "address", "to", "this", "page" ]
train
https://github.com/jcrist/skein/blob/16f8b1d3b3d9f79f36e2f152e45893339a1793e8/skein/ui.py#L43-L48
0.006711
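A short sketch of the address construction in the ProxiedPage record, kept as a free function so it can be run stand-alone; the example URLs are invented:

from urllib.parse import urlsplit

def proxied_address(ui_address, proxy_prefix, route, target):
    # Append a trailing slash only when the target path is empty or already
    # ends in one, as ProxiedPage.address does above.
    path = urlsplit(target).path
    suffix = '/' if not path or path.endswith('/') else ''
    return '%s%s/%s%s' % (ui_address[:-1], proxy_prefix, route, suffix)

print(proxied_address('http://host:8088/', '/proxy', 'pages/ui', 'http://worker:9090/'))
# http://host:8088/proxy/pages/ui/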
CalebBell/fluids
fluids/numerics/arrays.py
det
def det(matrix): '''Seem sto work fine. >> from sympy import * >> from sympy.abc import * >> Matrix([[a, b], [c, d]]).det() a*d - b*c >> Matrix([[a, b, c], [d, e, f], [g, h, i]]).det() a*e*i - a*f*h - b*d*i + b*f*g + c*d*h - c*e*g A few terms can be slightly factored out of the 3x dim. >> Matrix([[a, b, c, d], [e, f, g, h], [i, j, k, l], [m, n, o, p]]).det() a*f*k*p - a*f*l*o - a*g*j*p + a*g*l*n + a*h*j*o - a*h*k*n - b*e*k*p + b*e*l*o + b*g*i*p - b*g*l*m - b*h*i*o + b*h*k*m + c*e*j*p - c*e*l*n - c*f*i*p + c*f*l*m + c*h*i*n - c*h*j*m - d*e*j*o + d*e*k*n + d*f*i*o - d*f*k*m - d*g*i*n + d*g*j*m 72 mult vs ~48 in cse'd version' Commented out - takes a few seconds >> #Matrix([[a, b, c, d, e], [f, g, h, i, j], [k, l, m, n, o], [p, q, r, s, t], [u, v, w, x, y]]).det() 260 multiplies with cse; 480 without it. ''' size = len(matrix) if size == 1: return matrix[0] elif size == 2: (a, b), (c, d) = matrix return a*d - c*b elif size == 3: (a, b, c), (d, e, f), (g, h, i) = matrix return a*(e*i - h*f) - d*(b*i - h*c) + g*(b*f - e*c) elif size == 4: (a, b, c, d), (e, f, g, h), (i, j, k, l), (m, n, o, p) = matrix return (a*f*k*p - a*f*l*o - a*g*j*p + a*g*l*n + a*h*j*o - a*h*k*n - b*e*k*p + b*e*l*o + b*g*i*p - b*g*l*m - b*h*i*o + b*h*k*m + c*e*j*p - c*e*l*n - c*f*i*p + c*f*l*m + c*h*i*n - c*h*j*m - d*e*j*o + d*e*k*n + d*f*i*o - d*f*k*m - d*g*i*n + d*g*j*m) elif size == 5: (a, b, c, d, e), (f, g, h, i, j), (k, l, m, n, o), (p, q, r, s, t), (u, v, w, x, y) = matrix x0 = s*y x1 = a*g*m x2 = t*w x3 = a*g*n x4 = r*x x5 = a*g*o x6 = t*x x7 = a*h*l x8 = q*y x9 = a*h*n x10 = s*v x11 = a*h*o x12 = r*y x13 = a*i*l x14 = t*v x15 = a*i*m x16 = q*w x17 = a*i*o x18 = s*w x19 = a*j*l x20 = q*x x21 = a*j*m x22 = r*v x23 = a*j*n x24 = b*f*m x25 = b*f*n x26 = b*f*o x27 = b*h*k x28 = t*u x29 = b*h*n x30 = p*x x31 = b*h*o x32 = b*i*k x33 = p*y x34 = b*i*m x35 = r*u x36 = b*i*o x37 = b*j*k x38 = s*u x39 = b*j*m x40 = p*w x41 = b*j*n x42 = c*f*l x43 = c*f*n x44 = c*f*o x45 = c*g*k x46 = c*g*n x47 = c*g*o x48 = c*i*k x49 = c*i*l x50 = p*v x51 = c*i*o x52 = c*j*k x53 = c*j*l x54 = q*u x55 = c*j*n x56 = d*f*l x57 = d*f*m x58 = d*f*o x59 = d*g*k x60 = d*g*m x61 = d*g*o x62 = d*h*k x63 = d*h*l x64 = d*h*o x65 = d*j*k x66 = d*j*l x67 = d*j*m x68 = e*f*l x69 = e*f*m x70 = e*f*n x71 = e*g*k x72 = e*g*m x73 = e*g*n x74 = e*h*k x75 = e*h*l x76 = e*h*n x77 = e*i*k x78 = e*i*l x79 = e*i*m return (x0*x1 - x0*x24 + x0*x27 + x0*x42 - x0*x45 - x0*x7 - x1*x6 + x10*x11 - x10*x21 - x10*x44 + x10*x52 + x10*x69 - x10*x74 - x11*x20 + x12*x13 + x12*x25 - x12*x3 - x12*x32 - x12*x56 + x12*x59 - x13*x2 + x14*x15 + x14*x43 - x14*x48 - x14*x57 + x14*x62 - x14*x9 - x15*x8 + x16*x17 - x16*x23 - x16*x58 + x16*x65 + x16*x70 - x16*x77 - x17*x22 + x18*x19 + x18*x26 - x18*x37 - x18*x5 - x18*x68 + x18*x71 - x19*x4 - x2*x25 + x2*x3 + x2*x32 + x2*x56 - x2*x59 + x20*x21 + x20*x44 - x20*x52 - x20*x69 + x20*x74 + x22*x23 + x22*x58 - x22*x65 - x22*x70 + x22*x77 + x24*x6 - x26*x4 - x27*x6 + x28*x29 - x28*x34 - x28*x46 + x28*x49 + x28*x60 - x28*x63 - x29*x33 + x30*x31 - x30*x39 - x30*x47 + x30*x53 + x30*x72 - x30*x75 - x31*x38 + x33*x34 + x33*x46 - x33*x49 - x33*x60 + x33*x63 + x35*x36 - x35*x41 - x35*x61 + x35*x66 + x35*x73 - x35*x78 - x36*x40 + x37*x4 + x38*x39 + x38*x47 - x38*x53 - x38*x72 + x38*x75 + x4*x5 + x4*x68 - x4*x71 + x40*x41 + x40*x61 - x40*x66 - x40*x73 + x40*x78 - x42*x6 - x43*x8 + x45*x6 + x48*x8 + x50*x51 - x50*x55 - x50*x64 + x50*x67 + x50*x76 - x50*x79 - x51*x54 + x54*x55 + x54*x64 - x54*x67 - x54*x76 + x54*x79 + x57*x8 + x6*x7 - x62*x8 + x8*x9) else: # 
TODO algorithm?
        import numpy as np
        return float(np.linalg.det(matrix))
python
def det(matrix): '''Seem sto work fine. >> from sympy import * >> from sympy.abc import * >> Matrix([[a, b], [c, d]]).det() a*d - b*c >> Matrix([[a, b, c], [d, e, f], [g, h, i]]).det() a*e*i - a*f*h - b*d*i + b*f*g + c*d*h - c*e*g A few terms can be slightly factored out of the 3x dim. >> Matrix([[a, b, c, d], [e, f, g, h], [i, j, k, l], [m, n, o, p]]).det() a*f*k*p - a*f*l*o - a*g*j*p + a*g*l*n + a*h*j*o - a*h*k*n - b*e*k*p + b*e*l*o + b*g*i*p - b*g*l*m - b*h*i*o + b*h*k*m + c*e*j*p - c*e*l*n - c*f*i*p + c*f*l*m + c*h*i*n - c*h*j*m - d*e*j*o + d*e*k*n + d*f*i*o - d*f*k*m - d*g*i*n + d*g*j*m 72 mult vs ~48 in cse'd version' Commented out - takes a few seconds >> #Matrix([[a, b, c, d, e], [f, g, h, i, j], [k, l, m, n, o], [p, q, r, s, t], [u, v, w, x, y]]).det() 260 multiplies with cse; 480 without it. ''' size = len(matrix) if size == 1: return matrix[0] elif size == 2: (a, b), (c, d) = matrix return a*d - c*b elif size == 3: (a, b, c), (d, e, f), (g, h, i) = matrix return a*(e*i - h*f) - d*(b*i - h*c) + g*(b*f - e*c) elif size == 4: (a, b, c, d), (e, f, g, h), (i, j, k, l), (m, n, o, p) = matrix return (a*f*k*p - a*f*l*o - a*g*j*p + a*g*l*n + a*h*j*o - a*h*k*n - b*e*k*p + b*e*l*o + b*g*i*p - b*g*l*m - b*h*i*o + b*h*k*m + c*e*j*p - c*e*l*n - c*f*i*p + c*f*l*m + c*h*i*n - c*h*j*m - d*e*j*o + d*e*k*n + d*f*i*o - d*f*k*m - d*g*i*n + d*g*j*m) elif size == 5: (a, b, c, d, e), (f, g, h, i, j), (k, l, m, n, o), (p, q, r, s, t), (u, v, w, x, y) = matrix x0 = s*y x1 = a*g*m x2 = t*w x3 = a*g*n x4 = r*x x5 = a*g*o x6 = t*x x7 = a*h*l x8 = q*y x9 = a*h*n x10 = s*v x11 = a*h*o x12 = r*y x13 = a*i*l x14 = t*v x15 = a*i*m x16 = q*w x17 = a*i*o x18 = s*w x19 = a*j*l x20 = q*x x21 = a*j*m x22 = r*v x23 = a*j*n x24 = b*f*m x25 = b*f*n x26 = b*f*o x27 = b*h*k x28 = t*u x29 = b*h*n x30 = p*x x31 = b*h*o x32 = b*i*k x33 = p*y x34 = b*i*m x35 = r*u x36 = b*i*o x37 = b*j*k x38 = s*u x39 = b*j*m x40 = p*w x41 = b*j*n x42 = c*f*l x43 = c*f*n x44 = c*f*o x45 = c*g*k x46 = c*g*n x47 = c*g*o x48 = c*i*k x49 = c*i*l x50 = p*v x51 = c*i*o x52 = c*j*k x53 = c*j*l x54 = q*u x55 = c*j*n x56 = d*f*l x57 = d*f*m x58 = d*f*o x59 = d*g*k x60 = d*g*m x61 = d*g*o x62 = d*h*k x63 = d*h*l x64 = d*h*o x65 = d*j*k x66 = d*j*l x67 = d*j*m x68 = e*f*l x69 = e*f*m x70 = e*f*n x71 = e*g*k x72 = e*g*m x73 = e*g*n x74 = e*h*k x75 = e*h*l x76 = e*h*n x77 = e*i*k x78 = e*i*l x79 = e*i*m return (x0*x1 - x0*x24 + x0*x27 + x0*x42 - x0*x45 - x0*x7 - x1*x6 + x10*x11 - x10*x21 - x10*x44 + x10*x52 + x10*x69 - x10*x74 - x11*x20 + x12*x13 + x12*x25 - x12*x3 - x12*x32 - x12*x56 + x12*x59 - x13*x2 + x14*x15 + x14*x43 - x14*x48 - x14*x57 + x14*x62 - x14*x9 - x15*x8 + x16*x17 - x16*x23 - x16*x58 + x16*x65 + x16*x70 - x16*x77 - x17*x22 + x18*x19 + x18*x26 - x18*x37 - x18*x5 - x18*x68 + x18*x71 - x19*x4 - x2*x25 + x2*x3 + x2*x32 + x2*x56 - x2*x59 + x20*x21 + x20*x44 - x20*x52 - x20*x69 + x20*x74 + x22*x23 + x22*x58 - x22*x65 - x22*x70 + x22*x77 + x24*x6 - x26*x4 - x27*x6 + x28*x29 - x28*x34 - x28*x46 + x28*x49 + x28*x60 - x28*x63 - x29*x33 + x30*x31 - x30*x39 - x30*x47 + x30*x53 + x30*x72 - x30*x75 - x31*x38 + x33*x34 + x33*x46 - x33*x49 - x33*x60 + x33*x63 + x35*x36 - x35*x41 - x35*x61 + x35*x66 + x35*x73 - x35*x78 - x36*x40 + x37*x4 + x38*x39 + x38*x47 - x38*x53 - x38*x72 + x38*x75 + x4*x5 + x4*x68 - x4*x71 + x40*x41 + x40*x61 - x40*x66 - x40*x73 + x40*x78 - x42*x6 - x43*x8 + x45*x6 + x48*x8 + x50*x51 - x50*x55 - x50*x64 + x50*x67 + x50*x76 - x50*x79 - x51*x54 + x54*x55 + x54*x64 - x54*x67 - x54*x76 + x54*x79 + x57*x8 + x6*x7 - x62*x8 + x8*x9) else: # 
TODO algorithm?
        import numpy as np
        return float(np.linalg.det(matrix))
[ "def", "det", "(", "matrix", ")", ":", "size", "=", "len", "(", "matrix", ")", "if", "size", "==", "1", ":", "return", "matrix", "[", "0", "]", "elif", "size", "==", "2", ":", "(", "a", ",", "b", ")", ",", "(", "c", ",", "d", ")", "=", "matrix", "return", "a", "*", "d", "-", "c", "*", "b", "elif", "size", "==", "3", ":", "(", "a", ",", "b", ",", "c", ")", ",", "(", "d", ",", "e", ",", "f", ")", ",", "(", "g", ",", "h", ",", "i", ")", "=", "matrix", "return", "a", "*", "(", "e", "*", "i", "-", "h", "*", "f", ")", "-", "d", "*", "(", "b", "*", "i", "-", "h", "*", "c", ")", "+", "g", "*", "(", "b", "*", "f", "-", "e", "*", "c", ")", "elif", "size", "==", "4", ":", "(", "a", ",", "b", ",", "c", ",", "d", ")", ",", "(", "e", ",", "f", ",", "g", ",", "h", ")", ",", "(", "i", ",", "j", ",", "k", ",", "l", ")", ",", "(", "m", ",", "n", ",", "o", ",", "p", ")", "=", "matrix", "return", "(", "a", "*", "f", "*", "k", "*", "p", "-", "a", "*", "f", "*", "l", "*", "o", "-", "a", "*", "g", "*", "j", "*", "p", "+", "a", "*", "g", "*", "l", "*", "n", "+", "a", "*", "h", "*", "j", "*", "o", "-", "a", "*", "h", "*", "k", "*", "n", "-", "b", "*", "e", "*", "k", "*", "p", "+", "b", "*", "e", "*", "l", "*", "o", "+", "b", "*", "g", "*", "i", "*", "p", "-", "b", "*", "g", "*", "l", "*", "m", "-", "b", "*", "h", "*", "i", "*", "o", "+", "b", "*", "h", "*", "k", "*", "m", "+", "c", "*", "e", "*", "j", "*", "p", "-", "c", "*", "e", "*", "l", "*", "n", "-", "c", "*", "f", "*", "i", "*", "p", "+", "c", "*", "f", "*", "l", "*", "m", "+", "c", "*", "h", "*", "i", "*", "n", "-", "c", "*", "h", "*", "j", "*", "m", "-", "d", "*", "e", "*", "j", "*", "o", "+", "d", "*", "e", "*", "k", "*", "n", "+", "d", "*", "f", "*", "i", "*", "o", "-", "d", "*", "f", "*", "k", "*", "m", "-", "d", "*", "g", "*", "i", "*", "n", "+", "d", "*", "g", "*", "j", "*", "m", ")", "elif", "size", "==", "5", ":", "(", "a", ",", "b", ",", "c", ",", "d", ",", "e", ")", ",", "(", "f", ",", "g", ",", "h", ",", "i", ",", "j", ")", ",", "(", "k", ",", "l", ",", "m", ",", "n", ",", "o", ")", ",", "(", "p", ",", "q", ",", "r", ",", "s", ",", "t", ")", ",", "(", "u", ",", "v", ",", "w", ",", "x", ",", "y", ")", "=", "matrix", "x0", "=", "s", "*", "y", "x1", "=", "a", "*", "g", "*", "m", "x2", "=", "t", "*", "w", "x3", "=", "a", "*", "g", "*", "n", "x4", "=", "r", "*", "x", "x5", "=", "a", "*", "g", "*", "o", "x6", "=", "t", "*", "x", "x7", "=", "a", "*", "h", "*", "l", "x8", "=", "q", "*", "y", "x9", "=", "a", "*", "h", "*", "n", "x10", "=", "s", "*", "v", "x11", "=", "a", "*", "h", "*", "o", "x12", "=", "r", "*", "y", "x13", "=", "a", "*", "i", "*", "l", "x14", "=", "t", "*", "v", "x15", "=", "a", "*", "i", "*", "m", "x16", "=", "q", "*", "w", "x17", "=", "a", "*", "i", "*", "o", "x18", "=", "s", "*", "w", "x19", "=", "a", "*", "j", "*", "l", "x20", "=", "q", "*", "x", "x21", "=", "a", "*", "j", "*", "m", "x22", "=", "r", "*", "v", "x23", "=", "a", "*", "j", "*", "n", "x24", "=", "b", "*", "f", "*", "m", "x25", "=", "b", "*", "f", "*", "n", "x26", "=", "b", "*", "f", "*", "o", "x27", "=", "b", "*", "h", "*", "k", "x28", "=", "t", "*", "u", "x29", "=", "b", "*", "h", "*", "n", "x30", "=", "p", "*", "x", "x31", "=", "b", "*", "h", "*", "o", "x32", "=", "b", "*", "i", "*", "k", "x33", "=", "p", "*", "y", "x34", "=", "b", "*", "i", "*", "m", "x35", "=", "r", "*", "u", "x36", "=", "b", "*", "i", "*", "o", "x37", "=", "b", "*", "j", "*", "k", "x38", "=", "s", "*", "u", "x39", "=", "b", "*", "j", "*", "m", "x40", "=", "p", "*", "w", "x41", "=", "b", "*", "j", 
"*", "n", "x42", "=", "c", "*", "f", "*", "l", "x43", "=", "c", "*", "f", "*", "n", "x44", "=", "c", "*", "f", "*", "o", "x45", "=", "c", "*", "g", "*", "k", "x46", "=", "c", "*", "g", "*", "n", "x47", "=", "c", "*", "g", "*", "o", "x48", "=", "c", "*", "i", "*", "k", "x49", "=", "c", "*", "i", "*", "l", "x50", "=", "p", "*", "v", "x51", "=", "c", "*", "i", "*", "o", "x52", "=", "c", "*", "j", "*", "k", "x53", "=", "c", "*", "j", "*", "l", "x54", "=", "q", "*", "u", "x55", "=", "c", "*", "j", "*", "n", "x56", "=", "d", "*", "f", "*", "l", "x57", "=", "d", "*", "f", "*", "m", "x58", "=", "d", "*", "f", "*", "o", "x59", "=", "d", "*", "g", "*", "k", "x60", "=", "d", "*", "g", "*", "m", "x61", "=", "d", "*", "g", "*", "o", "x62", "=", "d", "*", "h", "*", "k", "x63", "=", "d", "*", "h", "*", "l", "x64", "=", "d", "*", "h", "*", "o", "x65", "=", "d", "*", "j", "*", "k", "x66", "=", "d", "*", "j", "*", "l", "x67", "=", "d", "*", "j", "*", "m", "x68", "=", "e", "*", "f", "*", "l", "x69", "=", "e", "*", "f", "*", "m", "x70", "=", "e", "*", "f", "*", "n", "x71", "=", "e", "*", "g", "*", "k", "x72", "=", "e", "*", "g", "*", "m", "x73", "=", "e", "*", "g", "*", "n", "x74", "=", "e", "*", "h", "*", "k", "x75", "=", "e", "*", "h", "*", "l", "x76", "=", "e", "*", "h", "*", "n", "x77", "=", "e", "*", "i", "*", "k", "x78", "=", "e", "*", "i", "*", "l", "x79", "=", "e", "*", "i", "*", "m", "return", "(", "x0", "*", "x1", "-", "x0", "*", "x24", "+", "x0", "*", "x27", "+", "x0", "*", "x42", "-", "x0", "*", "x45", "-", "x0", "*", "x7", "-", "x1", "*", "x6", "+", "x10", "*", "x11", "-", "x10", "*", "x21", "-", "x10", "*", "x44", "+", "x10", "*", "x52", "+", "x10", "*", "x69", "-", "x10", "*", "x74", "-", "x11", "*", "x20", "+", "x12", "*", "x13", "+", "x12", "*", "x25", "-", "x12", "*", "x3", "-", "x12", "*", "x32", "-", "x12", "*", "x56", "+", "x12", "*", "x59", "-", "x13", "*", "x2", "+", "x14", "*", "x15", "+", "x14", "*", "x43", "-", "x14", "*", "x48", "-", "x14", "*", "x57", "+", "x14", "*", "x62", "-", "x14", "*", "x9", "-", "x15", "*", "x8", "+", "x16", "*", "x17", "-", "x16", "*", "x23", "-", "x16", "*", "x58", "+", "x16", "*", "x65", "+", "x16", "*", "x70", "-", "x16", "*", "x77", "-", "x17", "*", "x22", "+", "x18", "*", "x19", "+", "x18", "*", "x26", "-", "x18", "*", "x37", "-", "x18", "*", "x5", "-", "x18", "*", "x68", "+", "x18", "*", "x71", "-", "x19", "*", "x4", "-", "x2", "*", "x25", "+", "x2", "*", "x3", "+", "x2", "*", "x32", "+", "x2", "*", "x56", "-", "x2", "*", "x59", "+", "x20", "*", "x21", "+", "x20", "*", "x44", "-", "x20", "*", "x52", "-", "x20", "*", "x69", "+", "x20", "*", "x74", "+", "x22", "*", "x23", "+", "x22", "*", "x58", "-", "x22", "*", "x65", "-", "x22", "*", "x70", "+", "x22", "*", "x77", "+", "x24", "*", "x6", "-", "x26", "*", "x4", "-", "x27", "*", "x6", "+", "x28", "*", "x29", "-", "x28", "*", "x34", "-", "x28", "*", "x46", "+", "x28", "*", "x49", "+", "x28", "*", "x60", "-", "x28", "*", "x63", "-", "x29", "*", "x33", "+", "x30", "*", "x31", "-", "x30", "*", "x39", "-", "x30", "*", "x47", "+", "x30", "*", "x53", "+", "x30", "*", "x72", "-", "x30", "*", "x75", "-", "x31", "*", "x38", "+", "x33", "*", "x34", "+", "x33", "*", "x46", "-", "x33", "*", "x49", "-", "x33", "*", "x60", "+", "x33", "*", "x63", "+", "x35", "*", "x36", "-", "x35", "*", "x41", "-", "x35", "*", "x61", "+", "x35", "*", "x66", "+", "x35", "*", "x73", "-", "x35", "*", "x78", "-", "x36", "*", "x40", "+", "x37", "*", "x4", "+", "x38", "*", "x39", "+", "x38", "*", "x47", "-", "x38", "*", "x53", "-", "x38", 
"*", "x72", "+", "x38", "*", "x75", "+", "x4", "*", "x5", "+", "x4", "*", "x68", "-", "x4", "*", "x71", "+", "x40", "*", "x41", "+", "x40", "*", "x61", "-", "x40", "*", "x66", "-", "x40", "*", "x73", "+", "x40", "*", "x78", "-", "x42", "*", "x6", "-", "x43", "*", "x8", "+", "x45", "*", "x6", "+", "x48", "*", "x8", "+", "x50", "*", "x51", "-", "x50", "*", "x55", "-", "x50", "*", "x64", "+", "x50", "*", "x67", "+", "x50", "*", "x76", "-", "x50", "*", "x79", "-", "x51", "*", "x54", "+", "x54", "*", "x55", "+", "x54", "*", "x64", "-", "x54", "*", "x67", "-", "x54", "*", "x76", "+", "x54", "*", "x79", "+", "x57", "*", "x8", "+", "x6", "*", "x7", "-", "x62", "*", "x8", "+", "x8", "*", "x9", ")", "else", ":", "# TODO algorithm?", "import", "numpy", "as", "np", "return", "float", "(", "np", ".", "linalg", ".", "det", "(", "matrix", ")", ")" ]
Seem sto work fine. >> from sympy import * >> from sympy.abc import * >> Matrix([[a, b], [c, d]]).det() a*d - b*c >> Matrix([[a, b, c], [d, e, f], [g, h, i]]).det() a*e*i - a*f*h - b*d*i + b*f*g + c*d*h - c*e*g A few terms can be slightly factored out of the 3x dim. >> Matrix([[a, b, c, d], [e, f, g, h], [i, j, k, l], [m, n, o, p]]).det() a*f*k*p - a*f*l*o - a*g*j*p + a*g*l*n + a*h*j*o - a*h*k*n - b*e*k*p + b*e*l*o + b*g*i*p - b*g*l*m - b*h*i*o + b*h*k*m + c*e*j*p - c*e*l*n - c*f*i*p + c*f*l*m + c*h*i*n - c*h*j*m - d*e*j*o + d*e*k*n + d*f*i*o - d*f*k*m - d*g*i*n + d*g*j*m 72 mult vs ~48 in cse'd version' Commented out - takes a few seconds >> #Matrix([[a, b, c, d, e], [f, g, h, i, j], [k, l, m, n, o], [p, q, r, s, t], [u, v, w, x, y]]).det() 260 multiplies with cse; 480 without it.
[ "Seem", "sto", "work", "fine", ".", ">>", "from", "sympy", "import", "*", ">>", "from", "sympy", ".", "abc", "import", "*", ">>", "Matrix", "(", "[[", "a", "b", "]", "[", "c", "d", "]]", ")", ".", "det", "()", "a", "*", "d", "-", "b", "*", "c", ">>", "Matrix", "(", "[[", "a", "b", "c", "]", "[", "d", "e", "f", "]", "[", "g", "h", "i", "]]", ")", ".", "det", "()", "a", "*", "e", "*", "i", "-", "a", "*", "f", "*", "h", "-", "b", "*", "d", "*", "i", "+", "b", "*", "f", "*", "g", "+", "c", "*", "d", "*", "h", "-", "c", "*", "e", "*", "g", "A", "few", "terms", "can", "be", "slightly", "factored", "out", "of", "the", "3x", "dim", ".", ">>", "Matrix", "(", "[[", "a", "b", "c", "d", "]", "[", "e", "f", "g", "h", "]", "[", "i", "j", "k", "l", "]", "[", "m", "n", "o", "p", "]]", ")", ".", "det", "()", "a", "*", "f", "*", "k", "*", "p", "-", "a", "*", "f", "*", "l", "*", "o", "-", "a", "*", "g", "*", "j", "*", "p", "+", "a", "*", "g", "*", "l", "*", "n", "+", "a", "*", "h", "*", "j", "*", "o", "-", "a", "*", "h", "*", "k", "*", "n", "-", "b", "*", "e", "*", "k", "*", "p", "+", "b", "*", "e", "*", "l", "*", "o", "+", "b", "*", "g", "*", "i", "*", "p", "-", "b", "*", "g", "*", "l", "*", "m", "-", "b", "*", "h", "*", "i", "*", "o", "+", "b", "*", "h", "*", "k", "*", "m", "+", "c", "*", "e", "*", "j", "*", "p", "-", "c", "*", "e", "*", "l", "*", "n", "-", "c", "*", "f", "*", "i", "*", "p", "+", "c", "*", "f", "*", "l", "*", "m", "+", "c", "*", "h", "*", "i", "*", "n", "-", "c", "*", "h", "*", "j", "*", "m", "-", "d", "*", "e", "*", "j", "*", "o", "+", "d", "*", "e", "*", "k", "*", "n", "+", "d", "*", "f", "*", "i", "*", "o", "-", "d", "*", "f", "*", "k", "*", "m", "-", "d", "*", "g", "*", "i", "*", "n", "+", "d", "*", "g", "*", "j", "*", "m" ]
train
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/numerics/arrays.py#L30-L172
0.004539
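As a quick sanity check of the hand-expanded branches in the det record, the 3x3 cofactor expansion can be compared against numpy, which is also what the function falls back to for larger matrices; the test matrix is arbitrary:

import numpy as np

def det3(m):
    # Same cofactor expansion as the size == 3 branch above.
    (a, b, c), (d, e, f), (g, h, i) = m
    return a*(e*i - h*f) - d*(b*i - h*c) + g*(b*f - e*c)

m = [[2.0, 1.0, 0.5], [0.0, 3.0, 1.0], [4.0, 1.0, 2.0]]
print(det3(m), np.linalg.det(np.array(m)))   # both should agree up to rounding (8.0)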
mgedmin/check-manifest
check_manifest.py
file_matches
def file_matches(filename, patterns): """Does this filename match any of the patterns?""" return any(fnmatch.fnmatch(filename, pat) or fnmatch.fnmatch(os.path.basename(filename), pat) for pat in patterns)
python
def file_matches(filename, patterns): """Does this filename match any of the patterns?""" return any(fnmatch.fnmatch(filename, pat) or fnmatch.fnmatch(os.path.basename(filename), pat) for pat in patterns)
[ "def", "file_matches", "(", "filename", ",", "patterns", ")", ":", "return", "any", "(", "fnmatch", ".", "fnmatch", "(", "filename", ",", "pat", ")", "or", "fnmatch", ".", "fnmatch", "(", "os", ".", "path", ".", "basename", "(", "filename", ")", ",", "pat", ")", "for", "pat", "in", "patterns", ")" ]
Does this filename match any of the patterns?
[ "Does", "this", "filename", "match", "any", "of", "the", "patterns?" ]
train
https://github.com/mgedmin/check-manifest/blob/7f787e8272f56c5750670bfb3223509e0df72708/check_manifest.py#L774-L778
0.004132
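A tiny illustration of why file_matches checks both the full path and the basename; the patterns and paths below are made up:

import fnmatch
import os.path

def file_matches(filename, patterns):
    # Same logic as above: a pattern may target either the whole path or
    # just the file name.
    return any(fnmatch.fnmatch(filename, pat)
               or fnmatch.fnmatch(os.path.basename(filename), pat)
               for pat in patterns)

print(file_matches('docs/build/index.html', ['*.html']))           # True
print(file_matches('docs/build/index.html', ['docs/*']))           # True
print(file_matches('src/main.py', ['*.html', 'docs/*']))           # False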
monarch-initiative/dipper
dipper/graph/RDFGraph.py
RDFGraph._getnode
def _getnode(self, curie): # convention is lowercase names """ This is a wrapper for creating a URIRef or Bnode object with a given a curie or iri as a string. If an id starts with an underscore, it assigns it to a BNode, otherwise it creates it with a standard URIRef. Alternatively, self.skolemize_blank_node is True, it will skolemize the blank node :param curie: str identifier formatted as curie or iri :return: node: RDFLib URIRef or BNode object """ node = None if curie[0] == '_': if self.are_bnodes_skized is True: node = self.skolemizeBlankNode(curie) else: # delete the leading underscore to make it cleaner node = BNode(re.sub(r'^_:|^_', '', curie, 1)) # Check if curie string is actually an IRI elif curie[:4] == 'http' or curie[:3] == 'ftp': node = URIRef(curie) else: iri = RDFGraph.curie_util.get_uri(curie) if iri is not None: node = URIRef(RDFGraph.curie_util.get_uri(curie)) # Bind prefix map to graph prefix = curie.split(':')[0] if prefix not in self.namespace_manager.namespaces(): mapped_iri = self.curie_map[prefix] self.bind(prefix, Namespace(mapped_iri)) else: LOG.error("couldn't make URI for %s", curie) return node
python
def _getnode(self, curie): # convention is lowercase names """ This is a wrapper for creating a URIRef or Bnode object with a given a curie or iri as a string. If an id starts with an underscore, it assigns it to a BNode, otherwise it creates it with a standard URIRef. Alternatively, self.skolemize_blank_node is True, it will skolemize the blank node :param curie: str identifier formatted as curie or iri :return: node: RDFLib URIRef or BNode object """ node = None if curie[0] == '_': if self.are_bnodes_skized is True: node = self.skolemizeBlankNode(curie) else: # delete the leading underscore to make it cleaner node = BNode(re.sub(r'^_:|^_', '', curie, 1)) # Check if curie string is actually an IRI elif curie[:4] == 'http' or curie[:3] == 'ftp': node = URIRef(curie) else: iri = RDFGraph.curie_util.get_uri(curie) if iri is not None: node = URIRef(RDFGraph.curie_util.get_uri(curie)) # Bind prefix map to graph prefix = curie.split(':')[0] if prefix not in self.namespace_manager.namespaces(): mapped_iri = self.curie_map[prefix] self.bind(prefix, Namespace(mapped_iri)) else: LOG.error("couldn't make URI for %s", curie) return node
[ "def", "_getnode", "(", "self", ",", "curie", ")", ":", "# convention is lowercase names", "node", "=", "None", "if", "curie", "[", "0", "]", "==", "'_'", ":", "if", "self", ".", "are_bnodes_skized", "is", "True", ":", "node", "=", "self", ".", "skolemizeBlankNode", "(", "curie", ")", "else", ":", "# delete the leading underscore to make it cleaner", "node", "=", "BNode", "(", "re", ".", "sub", "(", "r'^_:|^_'", ",", "''", ",", "curie", ",", "1", ")", ")", "# Check if curie string is actually an IRI", "elif", "curie", "[", ":", "4", "]", "==", "'http'", "or", "curie", "[", ":", "3", "]", "==", "'ftp'", ":", "node", "=", "URIRef", "(", "curie", ")", "else", ":", "iri", "=", "RDFGraph", ".", "curie_util", ".", "get_uri", "(", "curie", ")", "if", "iri", "is", "not", "None", ":", "node", "=", "URIRef", "(", "RDFGraph", ".", "curie_util", ".", "get_uri", "(", "curie", ")", ")", "# Bind prefix map to graph", "prefix", "=", "curie", ".", "split", "(", "':'", ")", "[", "0", "]", "if", "prefix", "not", "in", "self", ".", "namespace_manager", ".", "namespaces", "(", ")", ":", "mapped_iri", "=", "self", ".", "curie_map", "[", "prefix", "]", "self", ".", "bind", "(", "prefix", ",", "Namespace", "(", "mapped_iri", ")", ")", "else", ":", "LOG", ".", "error", "(", "\"couldn't make URI for %s\"", ",", "curie", ")", "return", "node" ]
This is a wrapper for creating a URIRef or Bnode object with a given a curie or iri as a string. If an id starts with an underscore, it assigns it to a BNode, otherwise it creates it with a standard URIRef. Alternatively, self.skolemize_blank_node is True, it will skolemize the blank node :param curie: str identifier formatted as curie or iri :return: node: RDFLib URIRef or BNode object
[ "This", "is", "a", "wrapper", "for", "creating", "a", "URIRef", "or", "Bnode", "object", "with", "a", "given", "a", "curie", "or", "iri", "as", "a", "string", "." ]
train
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/graph/RDFGraph.py#L92-L126
0.001333
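The _getnode record maps a curie or IRI string to an rdflib node. A hedged sketch of that mapping with a plain dict standing in for the curie_util prefix map (the helper name and example prefix are assumptions):

import re
from rdflib import BNode, URIRef

def node_for(curie, prefix_map):
    # '_'-prefixed ids become blank nodes, full IRIs pass through, and
    # curies are expanded via a prefix map standing in for curie_util.
    if curie.startswith('_'):
        return BNode(re.sub(r'^_:|^_', '', curie, 1))
    if curie.startswith(('http', 'ftp')):
        return URIRef(curie)
    prefix, local = curie.split(':', 1)
    return URIRef(prefix_map[prefix] + local)

print(node_for('OMIM:154700', {'OMIM': 'http://omim.org/entry/'}))
# http://omim.org/entry/154700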
r0x0r/pywebview
webview/util.py
base_uri
def base_uri(relative_path=''): """ Get absolute path to resource, works for dev and for PyInstaller """ try: # PyInstaller creates a temp folder and stores path in _MEIPASS base_path = sys._MEIPASS except Exception: if 'pytest' in sys.modules: for arg in reversed(sys.argv): path = os.path.realpath(arg) if os.path.exists(path): base_path = path if os.path.isdir(path) else os.path.dirname(path) break else: base_path = os.path.dirname(os.path.realpath(sys.argv[0])) if not os.path.exists(base_path): raise ValueError('Path %s does not exist' % base_path) return 'file://%s' % os.path.join(base_path, relative_path)
python
def base_uri(relative_path=''): """ Get absolute path to resource, works for dev and for PyInstaller """ try: # PyInstaller creates a temp folder and stores path in _MEIPASS base_path = sys._MEIPASS except Exception: if 'pytest' in sys.modules: for arg in reversed(sys.argv): path = os.path.realpath(arg) if os.path.exists(path): base_path = path if os.path.isdir(path) else os.path.dirname(path) break else: base_path = os.path.dirname(os.path.realpath(sys.argv[0])) if not os.path.exists(base_path): raise ValueError('Path %s does not exist' % base_path) return 'file://%s' % os.path.join(base_path, relative_path)
[ "def", "base_uri", "(", "relative_path", "=", "''", ")", ":", "try", ":", "# PyInstaller creates a temp folder and stores path in _MEIPASS", "base_path", "=", "sys", ".", "_MEIPASS", "except", "Exception", ":", "if", "'pytest'", "in", "sys", ".", "modules", ":", "for", "arg", "in", "reversed", "(", "sys", ".", "argv", ")", ":", "path", "=", "os", ".", "path", ".", "realpath", "(", "arg", ")", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "base_path", "=", "path", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", "else", "os", ".", "path", ".", "dirname", "(", "path", ")", "break", "else", ":", "base_path", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "sys", ".", "argv", "[", "0", "]", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "base_path", ")", ":", "raise", "ValueError", "(", "'Path %s does not exist'", "%", "base_path", ")", "return", "'file://%s'", "%", "os", ".", "path", ".", "join", "(", "base_path", ",", "relative_path", ")" ]
Get absolute path to resource, works for dev and for PyInstaller
[ "Get", "absolute", "path", "to", "resource", "works", "for", "dev", "and", "for", "PyInstaller" ]
train
https://github.com/r0x0r/pywebview/blob/fc44d84656e88f83ca496abb50ee75e95540996e/webview/util.py#L20-L39
0.002581
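The base_uri record resolves resources relative to either PyInstaller's unpack directory or the running script. A simplified sketch that keeps only that core fallback (the pytest handling is dropped, and the function name is invented):

import os
import sys

def resource_uri(relative_path=''):
    # Prefer PyInstaller's unpack directory (_MEIPASS) when present, otherwise
    # fall back to the directory of the running script, as base_uri does above.
    base_path = getattr(sys, '_MEIPASS',
                        os.path.dirname(os.path.realpath(sys.argv[0])))
    return 'file://%s' % os.path.join(base_path, relative_path)

print(resource_uri('assets/index.html'))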
florianpaquet/mease
mease/server.py
MeaseWebSocketServerFactory.run_server
def run_server(self): """ Runs the WebSocket server """ self.protocol = MeaseWebSocketServerProtocol reactor.listenTCP(port=self.port, factory=self, interface=self.host) logger.info("Websocket server listening on {address}".format( address=self.address)) reactor.run()
python
def run_server(self): """ Runs the WebSocket server """ self.protocol = MeaseWebSocketServerProtocol reactor.listenTCP(port=self.port, factory=self, interface=self.host) logger.info("Websocket server listening on {address}".format( address=self.address)) reactor.run()
[ "def", "run_server", "(", "self", ")", ":", "self", ".", "protocol", "=", "MeaseWebSocketServerProtocol", "reactor", ".", "listenTCP", "(", "port", "=", "self", ".", "port", ",", "factory", "=", "self", ",", "interface", "=", "self", ".", "host", ")", "logger", ".", "info", "(", "\"Websocket server listening on {address}\"", ".", "format", "(", "address", "=", "self", ".", "address", ")", ")", "reactor", ".", "run", "(", ")" ]
Runs the WebSocket server
[ "Runs", "the", "WebSocket", "server" ]
train
https://github.com/florianpaquet/mease/blob/b9fbd08bbe162c8890c2a2124674371170c319ef/mease/server.py#L129-L140
0.0059
Kortemme-Lab/klab
klab/bio/clustalo.py
PDBUniParcSequenceAligner._align_with_substrings
def _align_with_substrings(self, chains_to_skip = set()): '''Simple substring-based matching''' for c in self.representative_chains: # Skip specified chains if c not in chains_to_skip: #colortext.pcyan(c) #colortext.warning(self.fasta[c]) fasta_sequence = self.fasta[c] substring_matches = {} for uniparc_id, uniparc_sequence in sorted(self.uniparc_sequences.iteritems()): uniparc_sequence = str(uniparc_sequence) idx = uniparc_sequence.find(fasta_sequence) if idx != -1: substring_matches[uniparc_id] = 0 elif len(fasta_sequence) > 30: idx = uniparc_sequence.find(fasta_sequence[5:-5]) if idx != -1: substring_matches[uniparc_id] = 5 else: idx = uniparc_sequence.find(fasta_sequence[7:-7]) if idx != -1: substring_matches[uniparc_id] = 7 elif len(fasta_sequence) > 15: idx = uniparc_sequence.find(fasta_sequence[3:-3]) if idx != -1: substring_matches[uniparc_id] = 3 self.substring_matches[c] = substring_matches # Restrict the matches to a given set of UniParc IDs. This can be used to remove ambiguity when the correct mapping has been determined e.g. from the SIFTS database. colortext.pcyan('*' * 100) pprint.pprint(self.substring_matches) if self.restrict_to_uniparc_values: for c in self.representative_chains: #print('HERE!') #print(c) if set(map(str, self.substring_matches[c].keys())).intersection(set(self.restrict_to_uniparc_values)) > 0: # Only restrict in cases where there is at least one match in self.restrict_to_uniparc_values # Otherwise, chains which are not considered in self.restrict_to_uniparc_values may throw away valid matches # e.g. when looking for structures related to 1KTZ (A -> P10600 -> UPI000000D8EC, B -> P37173 -> UPI000011DD7E), # we find the close match 2PJY. However, 2PJY has 3 chains: A -> P10600, B -> P37173, and C -> P36897 -> UPI000011D62A restricted_matches = dict((str(k), self.substring_matches[c][k]) for k in self.substring_matches[c].keys() if str(k) in self.restrict_to_uniparc_values) if len(restricted_matches) != len(self.substring_matches[c]): removed_matches = sorted(set(self.substring_matches[c].keys()).difference(set(restricted_matches))) # todo: see above re:quiet colortext.pcyan('Ignoring {0} as those chains were not included in the list self.restrict_to_uniparc_values ({1}).'.format(', '.join(removed_matches), ', '.join(self.restrict_to_uniparc_values))) self.substring_matches[c] = restricted_matches #pprint.pprint(self.substring_matches) #colortext.pcyan('*' * 100) # Use the representatives' alignments for their respective equivalent classes for c_1, related_chains in self.equivalence_fiber.iteritems(): for c_2 in related_chains: self.substring_matches[c_2] = self.substring_matches[c_1]
python
def _align_with_substrings(self, chains_to_skip = set()): '''Simple substring-based matching''' for c in self.representative_chains: # Skip specified chains if c not in chains_to_skip: #colortext.pcyan(c) #colortext.warning(self.fasta[c]) fasta_sequence = self.fasta[c] substring_matches = {} for uniparc_id, uniparc_sequence in sorted(self.uniparc_sequences.iteritems()): uniparc_sequence = str(uniparc_sequence) idx = uniparc_sequence.find(fasta_sequence) if idx != -1: substring_matches[uniparc_id] = 0 elif len(fasta_sequence) > 30: idx = uniparc_sequence.find(fasta_sequence[5:-5]) if idx != -1: substring_matches[uniparc_id] = 5 else: idx = uniparc_sequence.find(fasta_sequence[7:-7]) if idx != -1: substring_matches[uniparc_id] = 7 elif len(fasta_sequence) > 15: idx = uniparc_sequence.find(fasta_sequence[3:-3]) if idx != -1: substring_matches[uniparc_id] = 3 self.substring_matches[c] = substring_matches # Restrict the matches to a given set of UniParc IDs. This can be used to remove ambiguity when the correct mapping has been determined e.g. from the SIFTS database. colortext.pcyan('*' * 100) pprint.pprint(self.substring_matches) if self.restrict_to_uniparc_values: for c in self.representative_chains: #print('HERE!') #print(c) if set(map(str, self.substring_matches[c].keys())).intersection(set(self.restrict_to_uniparc_values)) > 0: # Only restrict in cases where there is at least one match in self.restrict_to_uniparc_values # Otherwise, chains which are not considered in self.restrict_to_uniparc_values may throw away valid matches # e.g. when looking for structures related to 1KTZ (A -> P10600 -> UPI000000D8EC, B -> P37173 -> UPI000011DD7E), # we find the close match 2PJY. However, 2PJY has 3 chains: A -> P10600, B -> P37173, and C -> P36897 -> UPI000011D62A restricted_matches = dict((str(k), self.substring_matches[c][k]) for k in self.substring_matches[c].keys() if str(k) in self.restrict_to_uniparc_values) if len(restricted_matches) != len(self.substring_matches[c]): removed_matches = sorted(set(self.substring_matches[c].keys()).difference(set(restricted_matches))) # todo: see above re:quiet colortext.pcyan('Ignoring {0} as those chains were not included in the list self.restrict_to_uniparc_values ({1}).'.format(', '.join(removed_matches), ', '.join(self.restrict_to_uniparc_values))) self.substring_matches[c] = restricted_matches #pprint.pprint(self.substring_matches) #colortext.pcyan('*' * 100) # Use the representatives' alignments for their respective equivalent classes for c_1, related_chains in self.equivalence_fiber.iteritems(): for c_2 in related_chains: self.substring_matches[c_2] = self.substring_matches[c_1]
[ "def", "_align_with_substrings", "(", "self", ",", "chains_to_skip", "=", "set", "(", ")", ")", ":", "for", "c", "in", "self", ".", "representative_chains", ":", "# Skip specified chains", "if", "c", "not", "in", "chains_to_skip", ":", "#colortext.pcyan(c)", "#colortext.warning(self.fasta[c])", "fasta_sequence", "=", "self", ".", "fasta", "[", "c", "]", "substring_matches", "=", "{", "}", "for", "uniparc_id", ",", "uniparc_sequence", "in", "sorted", "(", "self", ".", "uniparc_sequences", ".", "iteritems", "(", ")", ")", ":", "uniparc_sequence", "=", "str", "(", "uniparc_sequence", ")", "idx", "=", "uniparc_sequence", ".", "find", "(", "fasta_sequence", ")", "if", "idx", "!=", "-", "1", ":", "substring_matches", "[", "uniparc_id", "]", "=", "0", "elif", "len", "(", "fasta_sequence", ")", ">", "30", ":", "idx", "=", "uniparc_sequence", ".", "find", "(", "fasta_sequence", "[", "5", ":", "-", "5", "]", ")", "if", "idx", "!=", "-", "1", ":", "substring_matches", "[", "uniparc_id", "]", "=", "5", "else", ":", "idx", "=", "uniparc_sequence", ".", "find", "(", "fasta_sequence", "[", "7", ":", "-", "7", "]", ")", "if", "idx", "!=", "-", "1", ":", "substring_matches", "[", "uniparc_id", "]", "=", "7", "elif", "len", "(", "fasta_sequence", ")", ">", "15", ":", "idx", "=", "uniparc_sequence", ".", "find", "(", "fasta_sequence", "[", "3", ":", "-", "3", "]", ")", "if", "idx", "!=", "-", "1", ":", "substring_matches", "[", "uniparc_id", "]", "=", "3", "self", ".", "substring_matches", "[", "c", "]", "=", "substring_matches", "# Restrict the matches to a given set of UniParc IDs. This can be used to remove ambiguity when the correct mapping has been determined e.g. from the SIFTS database.", "colortext", ".", "pcyan", "(", "'*'", "*", "100", ")", "pprint", ".", "pprint", "(", "self", ".", "substring_matches", ")", "if", "self", ".", "restrict_to_uniparc_values", ":", "for", "c", "in", "self", ".", "representative_chains", ":", "#print('HERE!')", "#print(c)", "if", "set", "(", "map", "(", "str", ",", "self", ".", "substring_matches", "[", "c", "]", ".", "keys", "(", ")", ")", ")", ".", "intersection", "(", "set", "(", "self", ".", "restrict_to_uniparc_values", ")", ")", ">", "0", ":", "# Only restrict in cases where there is at least one match in self.restrict_to_uniparc_values", "# Otherwise, chains which are not considered in self.restrict_to_uniparc_values may throw away valid matches", "# e.g. when looking for structures related to 1KTZ (A -> P10600 -> UPI000000D8EC, B -> P37173 -> UPI000011DD7E),", "# we find the close match 2PJY. 
However, 2PJY has 3 chains: A -> P10600, B -> P37173, and C -> P36897 -> UPI000011D62A", "restricted_matches", "=", "dict", "(", "(", "str", "(", "k", ")", ",", "self", ".", "substring_matches", "[", "c", "]", "[", "k", "]", ")", "for", "k", "in", "self", ".", "substring_matches", "[", "c", "]", ".", "keys", "(", ")", "if", "str", "(", "k", ")", "in", "self", ".", "restrict_to_uniparc_values", ")", "if", "len", "(", "restricted_matches", ")", "!=", "len", "(", "self", ".", "substring_matches", "[", "c", "]", ")", ":", "removed_matches", "=", "sorted", "(", "set", "(", "self", ".", "substring_matches", "[", "c", "]", ".", "keys", "(", ")", ")", ".", "difference", "(", "set", "(", "restricted_matches", ")", ")", ")", "# todo: see above re:quiet colortext.pcyan('Ignoring {0} as those chains were not included in the list self.restrict_to_uniparc_values ({1}).'.format(', '.join(removed_matches), ', '.join(self.restrict_to_uniparc_values)))", "self", ".", "substring_matches", "[", "c", "]", "=", "restricted_matches", "#pprint.pprint(self.substring_matches)", "#colortext.pcyan('*' * 100)", "# Use the representatives' alignments for their respective equivalent classes", "for", "c_1", ",", "related_chains", "in", "self", ".", "equivalence_fiber", ".", "iteritems", "(", ")", ":", "for", "c_2", "in", "related_chains", ":", "self", ".", "substring_matches", "[", "c_2", "]", "=", "self", ".", "substring_matches", "[", "c_1", "]" ]
Simple substring-based matching
[ "Simple", "substring", "-", "based", "matching" ]
train
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/clustalo.py#L737-L791
0.00622
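A minimal standalone sketch of the trimming strategy used by _align_with_substrings above: try an exact substring match first, then retry with progressively more residues trimmed from each end of the query. The sequences and the helper name substring_offset are hypothetical illustrations, not part of the klab code.

def substring_offset(query, target):
    """Return how many residues were trimmed from each end of ``query``
    before it was found inside ``target``, or None if no match."""
    if target.find(query) != -1:
        return 0
    if len(query) > 30:
        for trim in (5, 7):
            if target.find(query[trim:-trim]) != -1:
                return trim
    elif len(query) > 15:
        if target.find(query[3:-3]) != -1:
            return 3
    return None

# Hypothetical sequences: the query matches the target once 5 residues
# are trimmed from each end.
target = "MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQAPILSRVGDGTQDNLSGAEKAVQ"
query = "XXXXX" + target[10:40] + "YYYYY"
print(substring_offset(query, target))  # -> 5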
tanghaibao/goatools
goatools/parsers/ncbi_gene_file_reader.py
NCBIgeneFileReader.replace_nulls
def replace_nulls(hdrs): """Replace '' in hdrs.""" ret = [] idx = 0 for hdr in hdrs: if hdr == '': ret.append("no_hdr{}".format(idx)) else: ret.append(hdr) return ret
python
def replace_nulls(hdrs): """Replace '' in hdrs.""" ret = [] idx = 0 for hdr in hdrs: if hdr == '': ret.append("no_hdr{}".format(idx)) else: ret.append(hdr) return ret
[ "def", "replace_nulls", "(", "hdrs", ")", ":", "ret", "=", "[", "]", "idx", "=", "0", "for", "hdr", "in", "hdrs", ":", "if", "hdr", "==", "''", ":", "ret", ".", "append", "(", "\"no_hdr{}\"", ".", "format", "(", "idx", ")", ")", "else", ":", "ret", ".", "append", "(", "hdr", ")", "return", "ret" ]
Replace '' in hdrs.
[ "Replace", "in", "hdrs", "." ]
train
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/parsers/ncbi_gene_file_reader.py#L187-L196
0.007634
saltstack/salt
salt/runners/mattermost.py
post_message
def post_message(message, channel=None, username=None, api_url=None, hook=None): ''' Send a message to a Mattermost channel. :param channel: The channel name, either will work. :param username: The username of the poster. :param message: The message to send to the Mattermost channel. :param api_url: The Mattermost api url, if not specified in the configuration. :param hook: The Mattermost hook, if not specified in the configuration. :return: Boolean if message was sent successfully. CLI Example: .. code-block:: bash salt-run mattermost.post_message message='Build is done' ''' if not api_url: api_url = _get_api_url() if not hook: hook = _get_hook() if not username: username = _get_username() if not channel: channel = _get_channel() if not message: log.error('message is a required option.') parameters = dict() if channel: parameters['channel'] = channel if username: parameters['username'] = username parameters['text'] = '```' + message + '```' # pre-formatted, fixed-width text log.debug('Parameters: %s', parameters) data = salt.utils.json.dumps(parameters) result = salt.utils.mattermost.query( api_url=api_url, hook=hook, data=str('payload={0}').format(data)) # future lint: blacklisted-function if result: return True else: return result
python
def post_message(message, channel=None, username=None, api_url=None, hook=None): ''' Send a message to a Mattermost channel. :param channel: The channel name, either will work. :param username: The username of the poster. :param message: The message to send to the Mattermost channel. :param api_url: The Mattermost api url, if not specified in the configuration. :param hook: The Mattermost hook, if not specified in the configuration. :return: Boolean if message was sent successfully. CLI Example: .. code-block:: bash salt-run mattermost.post_message message='Build is done' ''' if not api_url: api_url = _get_api_url() if not hook: hook = _get_hook() if not username: username = _get_username() if not channel: channel = _get_channel() if not message: log.error('message is a required option.') parameters = dict() if channel: parameters['channel'] = channel if username: parameters['username'] = username parameters['text'] = '```' + message + '```' # pre-formatted, fixed-width text log.debug('Parameters: %s', parameters) data = salt.utils.json.dumps(parameters) result = salt.utils.mattermost.query( api_url=api_url, hook=hook, data=str('payload={0}').format(data)) # future lint: blacklisted-function if result: return True else: return result
[ "def", "post_message", "(", "message", ",", "channel", "=", "None", ",", "username", "=", "None", ",", "api_url", "=", "None", ",", "hook", "=", "None", ")", ":", "if", "not", "api_url", ":", "api_url", "=", "_get_api_url", "(", ")", "if", "not", "hook", ":", "hook", "=", "_get_hook", "(", ")", "if", "not", "username", ":", "username", "=", "_get_username", "(", ")", "if", "not", "channel", ":", "channel", "=", "_get_channel", "(", ")", "if", "not", "message", ":", "log", ".", "error", "(", "'message is a required option.'", ")", "parameters", "=", "dict", "(", ")", "if", "channel", ":", "parameters", "[", "'channel'", "]", "=", "channel", "if", "username", ":", "parameters", "[", "'username'", "]", "=", "username", "parameters", "[", "'text'", "]", "=", "'```'", "+", "message", "+", "'```'", "# pre-formatted, fixed-width text", "log", ".", "debug", "(", "'Parameters: %s'", ",", "parameters", ")", "data", "=", "salt", ".", "utils", ".", "json", ".", "dumps", "(", "parameters", ")", "result", "=", "salt", ".", "utils", ".", "mattermost", ".", "query", "(", "api_url", "=", "api_url", ",", "hook", "=", "hook", ",", "data", "=", "str", "(", "'payload={0}'", ")", ".", "format", "(", "data", ")", ")", "# future lint: blacklisted-function", "if", "result", ":", "return", "True", "else", ":", "return", "result" ]
Send a message to a Mattermost channel. :param channel: The channel name, either will work. :param username: The username of the poster. :param message: The message to send to the Mattermost channel. :param api_url: The Mattermost api url, if not specified in the configuration. :param hook: The Mattermost hook, if not specified in the configuration. :return: Boolean if message was sent successfully. CLI Example: .. code-block:: bash salt-run mattermost.post_message message='Build is done'
[ "Send", "a", "message", "to", "a", "Mattermost", "channel", ".", ":", "param", "channel", ":", "The", "channel", "name", "either", "will", "work", ".", ":", "param", "username", ":", "The", "username", "of", "the", "poster", ".", ":", "param", "message", ":", "The", "message", "to", "send", "to", "the", "Mattermost", "channel", ".", ":", "param", "api_url", ":", "The", "Mattermost", "api", "url", "if", "not", "specified", "in", "the", "configuration", ".", ":", "param", "hook", ":", "The", "Mattermost", "hook", "if", "not", "specified", "in", "the", "configuration", ".", ":", "return", ":", "Boolean", "if", "message", "was", "sent", "successfully", "." ]
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/runners/mattermost.py#L95-L146
0.003187
Calysto/calysto
calysto/ai/conx.py
Network.connect
def connect(self, *names): """ Connects a list of names, one to the next. """ fromName, toName, rest = names[0], names[1], names[2:] self.connectAt(fromName, toName) if len(rest) != 0: self.connect(toName, *rest)
python
def connect(self, *names): """ Connects a list of names, one to the next. """ fromName, toName, rest = names[0], names[1], names[2:] self.connectAt(fromName, toName) if len(rest) != 0: self.connect(toName, *rest)
[ "def", "connect", "(", "self", ",", "*", "names", ")", ":", "fromName", ",", "toName", ",", "rest", "=", "names", "[", "0", "]", ",", "names", "[", "1", "]", ",", "names", "[", "2", ":", "]", "self", ".", "connectAt", "(", "fromName", ",", "toName", ")", "if", "len", "(", "rest", ")", "!=", "0", ":", "self", ".", "connect", "(", "toName", ",", "*", "rest", ")" ]
Connects a list of names, one to the next.
[ "Connects", "a", "list", "of", "names", "one", "to", "the", "next", "." ]
train
https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L949-L956
0.007353
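A self-contained sketch of the recursive pairwise linking pattern that connect() uses above; ToyNetwork and connectAt here are hypothetical stand-ins, not the conx implementation.

class ToyNetwork:
    def __init__(self):
        self.connections = []

    def connectAt(self, fromName, toName):
        # Record a single directed connection.
        self.connections.append((fromName, toName))

    def connect(self, *names):
        # Link names pairwise: a->b, b->c, c->d, ...
        fromName, toName, rest = names[0], names[1], names[2:]
        self.connectAt(fromName, toName)
        if len(rest) != 0:
            self.connect(toName, *rest)

net = ToyNetwork()
net.connect("input", "hidden", "output")
print(net.connections)  # [('input', 'hidden'), ('hidden', 'output')]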
wglass/lighthouse
lighthouse/zookeeper.py
ZookeeperDiscovery.stop_watching
def stop_watching(self, cluster): """ Causes the thread that launched the watch of the cluster path to end by setting the proper stop event found in `self.stop_events`. """ znode_path = "/".join([self.base_path, cluster.name]) if znode_path in self.stop_events: self.stop_events[znode_path].set()
python
def stop_watching(self, cluster): """ Causes the thread that launched the watch of the cluster path to end by setting the proper stop event found in `self.stop_events`. """ znode_path = "/".join([self.base_path, cluster.name]) if znode_path in self.stop_events: self.stop_events[znode_path].set()
[ "def", "stop_watching", "(", "self", ",", "cluster", ")", ":", "znode_path", "=", "\"/\"", ".", "join", "(", "[", "self", ".", "base_path", ",", "cluster", ".", "name", "]", ")", "if", "znode_path", "in", "self", ".", "stop_events", ":", "self", ".", "stop_events", "[", "znode_path", "]", ".", "set", "(", ")" ]
Causes the thread that launched the watch of the cluster path to end by setting the proper stop event found in `self.stop_events`.
[ "Causes", "the", "thread", "that", "launched", "the", "watch", "of", "the", "cluster", "path", "to", "end", "by", "setting", "the", "proper", "stop", "event", "found", "in", "self", ".", "stop_events", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/zookeeper.py#L191-L198
0.005618
saltstack/salt
salt/fileclient.py
Client.file_local_list
def file_local_list(self, saltenv='base'): ''' List files in the local minion files and localfiles caches ''' filesdest = os.path.join(self.opts['cachedir'], 'files', saltenv) localfilesdest = os.path.join(self.opts['cachedir'], 'localfiles') fdest = self._file_local_list(filesdest) ldest = self._file_local_list(localfilesdest) return sorted(fdest.union(ldest))
python
def file_local_list(self, saltenv='base'): ''' List files in the local minion files and localfiles caches ''' filesdest = os.path.join(self.opts['cachedir'], 'files', saltenv) localfilesdest = os.path.join(self.opts['cachedir'], 'localfiles') fdest = self._file_local_list(filesdest) ldest = self._file_local_list(localfilesdest) return sorted(fdest.union(ldest))
[ "def", "file_local_list", "(", "self", ",", "saltenv", "=", "'base'", ")", ":", "filesdest", "=", "os", ".", "path", ".", "join", "(", "self", ".", "opts", "[", "'cachedir'", "]", ",", "'files'", ",", "saltenv", ")", "localfilesdest", "=", "os", ".", "path", ".", "join", "(", "self", ".", "opts", "[", "'cachedir'", "]", ",", "'localfiles'", ")", "fdest", "=", "self", ".", "_file_local_list", "(", "filesdest", ")", "ldest", "=", "self", ".", "_file_local_list", "(", "localfilesdest", ")", "return", "sorted", "(", "fdest", ".", "union", "(", "ldest", ")", ")" ]
List files in the local minion files and localfiles caches
[ "List", "files", "in", "the", "local", "minion", "files", "and", "localfiles", "caches" ]
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileclient.py#L285-L294
0.004673
wummel/linkchecker
linkcheck/parser/sitemap.py
XmlTagUrlParser.end_element
def end_element(self, name): """If end tag is our tag, call add_url().""" self.in_tag = False if name == self.tag: self.add_url()
python
def end_element(self, name): """If end tag is our tag, call add_url().""" self.in_tag = False if name == self.tag: self.add_url()
[ "def", "end_element", "(", "self", ",", "name", ")", ":", "self", ".", "in_tag", "=", "False", "if", "name", "==", "self", ".", "tag", ":", "self", ".", "add_url", "(", ")" ]
If end tag is our tag, call add_url().
[ "If", "end", "tag", "is", "our", "tag", "call", "add_url", "()", "." ]
train
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/parser/sitemap.py#L53-L57
0.012121
neuropsychology/NeuroKit.py
neurokit/statistics/plot.py
plot_polarbar
def plot_polarbar(scores, labels=None, labels_size=15, colors="default", distribution_means=None, distribution_sds=None, treshold=1.28, fig_size=(15, 15)): """ Polar bar chart. Parameters ---------- scores : list or dict Scores to plot. labels : list List of labels to be used for ticks. labels_size : int Label's size. colors : list or str List of colors or "default". distribution_means : int or list List of means to add a range ribbon. distribution_sds : int or list List of SDs to add a range ribbon. treshold : float Limits of the range ribbon (in terms of standart deviation from mean). fig_size : tuple Figure size. Returns ---------- plot : matplotlig figure The figure. Example ---------- >>> import neurokit as nk >>> fig = nk.plot_polarbar(scores=[1, 2, 3, 4, 5], labels=["A", "B", "C", "D", "E"], distribution_means=3, distribution_sds=1) >>> fig.show() Notes ---------- *Authors* - `Dominique Makowski <https://dominiquemakowski.github.io/>`_ *Dependencies* - matplotlib - numpy """ # Sanity check if isinstance(scores, dict): if labels is None: labels = list(scores.keys()) try: scores = [scores[key] for key in labels] except KeyError: print("NeuroKit Error: plot_polarbar(): labels and scores keys not matching. Recheck them.") # Parameters if colors == "default": if len(scores) < 9: colors = ["#f44336", "#9C27B0", "#3F51B5","#03A9F4", "#009688", "#8BC34A", "#FFEB3B", "#FF9800", "#795548"] else: colors = None if labels is None: labels = range(len(scores)) N = len(scores) theta = np.linspace(0.0, -2 * np.pi, N, endpoint=False) width = 2 * np.pi / N # Main plot = plt.figure(figsize=fig_size) layer1 = plot.add_subplot(111, projection="polar") bars1 = layer1.bar(theta+np.pi/len(scores), scores, width=width, bottom=0.0) layer1.yaxis.set_ticks(range(11)) layer1.yaxis.set_ticklabels([]) layer1.xaxis.set_ticks(theta+np.pi/len(scores)) layer1.xaxis.set_ticklabels(labels, fontsize=labels_size) for index, bar in enumerate(bars1): if colors is not None: bar.set_facecolor(colors[index]) bar.set_alpha(1) # Layer 2 if distribution_means is not None and distribution_sds is not None: # Sanity check if isinstance(distribution_means, int): distribution_means = [distribution_means]*N if isinstance(distribution_sds, int): distribution_sds = [distribution_sds]*N # TODO: add convertion if those parameter are dict bottoms, tops = normal_range(np.array(distribution_means), np.array(distribution_sds), treshold=treshold) tops = tops - bottoms layer2 = plot.add_subplot(111, polar=True) bars2 = layer2.bar(theta, tops, width=width, bottom=bottoms, linewidth=0) layer2.xaxis.set_ticks(theta+np.pi/len(scores)) layer2.xaxis.set_ticklabels(labels, fontsize=labels_size) for index, bar in enumerate(bars2): bar.set_facecolor("#607D8B") bar.set_alpha(0.3) return(plot)
python
def plot_polarbar(scores, labels=None, labels_size=15, colors="default", distribution_means=None, distribution_sds=None, treshold=1.28, fig_size=(15, 15)): """ Polar bar chart. Parameters ---------- scores : list or dict Scores to plot. labels : list List of labels to be used for ticks. labels_size : int Label's size. colors : list or str List of colors or "default". distribution_means : int or list List of means to add a range ribbon. distribution_sds : int or list List of SDs to add a range ribbon. treshold : float Limits of the range ribbon (in terms of standart deviation from mean). fig_size : tuple Figure size. Returns ---------- plot : matplotlig figure The figure. Example ---------- >>> import neurokit as nk >>> fig = nk.plot_polarbar(scores=[1, 2, 3, 4, 5], labels=["A", "B", "C", "D", "E"], distribution_means=3, distribution_sds=1) >>> fig.show() Notes ---------- *Authors* - `Dominique Makowski <https://dominiquemakowski.github.io/>`_ *Dependencies* - matplotlib - numpy """ # Sanity check if isinstance(scores, dict): if labels is None: labels = list(scores.keys()) try: scores = [scores[key] for key in labels] except KeyError: print("NeuroKit Error: plot_polarbar(): labels and scores keys not matching. Recheck them.") # Parameters if colors == "default": if len(scores) < 9: colors = ["#f44336", "#9C27B0", "#3F51B5","#03A9F4", "#009688", "#8BC34A", "#FFEB3B", "#FF9800", "#795548"] else: colors = None if labels is None: labels = range(len(scores)) N = len(scores) theta = np.linspace(0.0, -2 * np.pi, N, endpoint=False) width = 2 * np.pi / N # Main plot = plt.figure(figsize=fig_size) layer1 = plot.add_subplot(111, projection="polar") bars1 = layer1.bar(theta+np.pi/len(scores), scores, width=width, bottom=0.0) layer1.yaxis.set_ticks(range(11)) layer1.yaxis.set_ticklabels([]) layer1.xaxis.set_ticks(theta+np.pi/len(scores)) layer1.xaxis.set_ticklabels(labels, fontsize=labels_size) for index, bar in enumerate(bars1): if colors is not None: bar.set_facecolor(colors[index]) bar.set_alpha(1) # Layer 2 if distribution_means is not None and distribution_sds is not None: # Sanity check if isinstance(distribution_means, int): distribution_means = [distribution_means]*N if isinstance(distribution_sds, int): distribution_sds = [distribution_sds]*N # TODO: add convertion if those parameter are dict bottoms, tops = normal_range(np.array(distribution_means), np.array(distribution_sds), treshold=treshold) tops = tops - bottoms layer2 = plot.add_subplot(111, polar=True) bars2 = layer2.bar(theta, tops, width=width, bottom=bottoms, linewidth=0) layer2.xaxis.set_ticks(theta+np.pi/len(scores)) layer2.xaxis.set_ticklabels(labels, fontsize=labels_size) for index, bar in enumerate(bars2): bar.set_facecolor("#607D8B") bar.set_alpha(0.3) return(plot)
[ "def", "plot_polarbar", "(", "scores", ",", "labels", "=", "None", ",", "labels_size", "=", "15", ",", "colors", "=", "\"default\"", ",", "distribution_means", "=", "None", ",", "distribution_sds", "=", "None", ",", "treshold", "=", "1.28", ",", "fig_size", "=", "(", "15", ",", "15", ")", ")", ":", "# Sanity check", "if", "isinstance", "(", "scores", ",", "dict", ")", ":", "if", "labels", "is", "None", ":", "labels", "=", "list", "(", "scores", ".", "keys", "(", ")", ")", "try", ":", "scores", "=", "[", "scores", "[", "key", "]", "for", "key", "in", "labels", "]", "except", "KeyError", ":", "print", "(", "\"NeuroKit Error: plot_polarbar(): labels and scores keys not matching. Recheck them.\"", ")", "# Parameters", "if", "colors", "==", "\"default\"", ":", "if", "len", "(", "scores", ")", "<", "9", ":", "colors", "=", "[", "\"#f44336\"", ",", "\"#9C27B0\"", ",", "\"#3F51B5\"", ",", "\"#03A9F4\"", ",", "\"#009688\"", ",", "\"#8BC34A\"", ",", "\"#FFEB3B\"", ",", "\"#FF9800\"", ",", "\"#795548\"", "]", "else", ":", "colors", "=", "None", "if", "labels", "is", "None", ":", "labels", "=", "range", "(", "len", "(", "scores", ")", ")", "N", "=", "len", "(", "scores", ")", "theta", "=", "np", ".", "linspace", "(", "0.0", ",", "-", "2", "*", "np", ".", "pi", ",", "N", ",", "endpoint", "=", "False", ")", "width", "=", "2", "*", "np", ".", "pi", "/", "N", "# Main", "plot", "=", "plt", ".", "figure", "(", "figsize", "=", "fig_size", ")", "layer1", "=", "plot", ".", "add_subplot", "(", "111", ",", "projection", "=", "\"polar\"", ")", "bars1", "=", "layer1", ".", "bar", "(", "theta", "+", "np", ".", "pi", "/", "len", "(", "scores", ")", ",", "scores", ",", "width", "=", "width", ",", "bottom", "=", "0.0", ")", "layer1", ".", "yaxis", ".", "set_ticks", "(", "range", "(", "11", ")", ")", "layer1", ".", "yaxis", ".", "set_ticklabels", "(", "[", "]", ")", "layer1", ".", "xaxis", ".", "set_ticks", "(", "theta", "+", "np", ".", "pi", "/", "len", "(", "scores", ")", ")", "layer1", ".", "xaxis", ".", "set_ticklabels", "(", "labels", ",", "fontsize", "=", "labels_size", ")", "for", "index", ",", "bar", "in", "enumerate", "(", "bars1", ")", ":", "if", "colors", "is", "not", "None", ":", "bar", ".", "set_facecolor", "(", "colors", "[", "index", "]", ")", "bar", ".", "set_alpha", "(", "1", ")", "# Layer 2", "if", "distribution_means", "is", "not", "None", "and", "distribution_sds", "is", "not", "None", ":", "# Sanity check", "if", "isinstance", "(", "distribution_means", ",", "int", ")", ":", "distribution_means", "=", "[", "distribution_means", "]", "*", "N", "if", "isinstance", "(", "distribution_sds", ",", "int", ")", ":", "distribution_sds", "=", "[", "distribution_sds", "]", "*", "N", "# TODO: add convertion if those parameter are dict", "bottoms", ",", "tops", "=", "normal_range", "(", "np", ".", "array", "(", "distribution_means", ")", ",", "np", ".", "array", "(", "distribution_sds", ")", ",", "treshold", "=", "treshold", ")", "tops", "=", "tops", "-", "bottoms", "layer2", "=", "plot", ".", "add_subplot", "(", "111", ",", "polar", "=", "True", ")", "bars2", "=", "layer2", ".", "bar", "(", "theta", ",", "tops", ",", "width", "=", "width", ",", "bottom", "=", "bottoms", ",", "linewidth", "=", "0", ")", "layer2", ".", "xaxis", ".", "set_ticks", "(", "theta", "+", "np", ".", "pi", "/", "len", "(", "scores", ")", ")", "layer2", ".", "xaxis", ".", "set_ticklabels", "(", "labels", ",", "fontsize", "=", "labels_size", ")", "for", "index", ",", "bar", "in", "enumerate", "(", "bars2", ")", ":", "bar", ".", 
"set_facecolor", "(", "\"#607D8B\"", ")", "bar", ".", "set_alpha", "(", "0.3", ")", "return", "(", "plot", ")" ]
Polar bar chart. Parameters ---------- scores : list or dict Scores to plot. labels : list List of labels to be used for ticks. labels_size : int Label's size. colors : list or str List of colors or "default". distribution_means : int or list List of means to add a range ribbon. distribution_sds : int or list List of SDs to add a range ribbon. treshold : float Limits of the range ribbon (in terms of standart deviation from mean). fig_size : tuple Figure size. Returns ---------- plot : matplotlig figure The figure. Example ---------- >>> import neurokit as nk >>> fig = nk.plot_polarbar(scores=[1, 2, 3, 4, 5], labels=["A", "B", "C", "D", "E"], distribution_means=3, distribution_sds=1) >>> fig.show() Notes ---------- *Authors* - `Dominique Makowski <https://dominiquemakowski.github.io/>`_ *Dependencies* - matplotlib - numpy
[ "Polar", "bar", "chart", "." ]
train
https://github.com/neuropsychology/NeuroKit.py/blob/c9589348fbbde0fa7e986048c48f38e6b488adfe/neurokit/statistics/plot.py#L18-L133
0.00419

materialsproject/pymatgen
pymatgen/core/tensors.py
Tensor.voigt_symmetrized
def voigt_symmetrized(self): """ Returns a "voigt"-symmetrized tensor, i. e. a voigt-notation tensor such that it is invariant wrt permutation of indices """ if not (self.rank % 2 == 0 and self.rank >= 2): raise ValueError("V-symmetrization requires rank even and >= 2") v = self.voigt perms = list(itertools.permutations(range(len(v.shape)))) new_v = sum([np.transpose(v, ind) for ind in perms]) / len(perms) return self.__class__.from_voigt(new_v)
python
def voigt_symmetrized(self): """ Returns a "voigt"-symmetrized tensor, i. e. a voigt-notation tensor such that it is invariant wrt permutation of indices """ if not (self.rank % 2 == 0 and self.rank >= 2): raise ValueError("V-symmetrization requires rank even and >= 2") v = self.voigt perms = list(itertools.permutations(range(len(v.shape)))) new_v = sum([np.transpose(v, ind) for ind in perms]) / len(perms) return self.__class__.from_voigt(new_v)
[ "def", "voigt_symmetrized", "(", "self", ")", ":", "if", "not", "(", "self", ".", "rank", "%", "2", "==", "0", "and", "self", ".", "rank", ">=", "2", ")", ":", "raise", "ValueError", "(", "\"V-symmetrization requires rank even and >= 2\"", ")", "v", "=", "self", ".", "voigt", "perms", "=", "list", "(", "itertools", ".", "permutations", "(", "range", "(", "len", "(", "v", ".", "shape", ")", ")", ")", ")", "new_v", "=", "sum", "(", "[", "np", ".", "transpose", "(", "v", ",", "ind", ")", "for", "ind", "in", "perms", "]", ")", "/", "len", "(", "perms", ")", "return", "self", ".", "__class__", ".", "from_voigt", "(", "new_v", ")" ]
Returns a "voigt"-symmetrized tensor, i. e. a voigt-notation tensor such that it is invariant wrt permutation of indices
[ "Returns", "a", "voigt", "-", "symmetrized", "tensor", "i", ".", "e", ".", "a", "voigt", "-", "notation", "tensor", "such", "that", "it", "is", "invariant", "wrt", "permutation", "of", "indices" ]
train
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/core/tensors.py#L315-L326
0.003745
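The heart of voigt_symmetrized above is averaging a tensor over every permutation of its axes. A minimal numpy sketch of that averaging step, independent of pymatgen's Tensor class:

import itertools
import numpy as np

def symmetrize_over_permutations(v):
    # Average v over every permutation of its axes; the result is
    # invariant under any reordering of the indices.
    perms = list(itertools.permutations(range(v.ndim)))
    return sum(np.transpose(v, ind) for ind in perms) / len(perms)

v = np.arange(9, dtype=float).reshape(3, 3)
sym = symmetrize_over_permutations(v)
print(np.allclose(sym, sym.T))  # True: the 2D case reduces to (v + v.T) / 2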
saltstack/salt
salt/modules/elasticsearch.py
snapshot_get
def snapshot_get(repository, snapshot, ignore_unavailable=False, hosts=None, profile=None): ''' .. versionadded:: 2017.7.0 Obtain snapshot residing in specified repository. repository Repository name snapshot Snapshot name, use _all to obtain all snapshots in specified repository ignore_unavailable Ignore unavailable snapshots CLI example:: salt myminion elasticsearch.snapshot_get testrepo testsnapshot ''' es = _get_instance(hosts, profile) try: return es.snapshot.get(repository=repository, snapshot=snapshot, ignore_unavailable=ignore_unavailable) except elasticsearch.TransportError as e: raise CommandExecutionError("Cannot obtain details of snapshot {0} in repository {1}, server returned code {2} with message {3}".format(snapshot, repository, e.status_code, e.error))
python
def snapshot_get(repository, snapshot, ignore_unavailable=False, hosts=None, profile=None): ''' .. versionadded:: 2017.7.0 Obtain snapshot residing in specified repository. repository Repository name snapshot Snapshot name, use _all to obtain all snapshots in specified repository ignore_unavailable Ignore unavailable snapshots CLI example:: salt myminion elasticsearch.snapshot_get testrepo testsnapshot ''' es = _get_instance(hosts, profile) try: return es.snapshot.get(repository=repository, snapshot=snapshot, ignore_unavailable=ignore_unavailable) except elasticsearch.TransportError as e: raise CommandExecutionError("Cannot obtain details of snapshot {0} in repository {1}, server returned code {2} with message {3}".format(snapshot, repository, e.status_code, e.error))
[ "def", "snapshot_get", "(", "repository", ",", "snapshot", ",", "ignore_unavailable", "=", "False", ",", "hosts", "=", "None", ",", "profile", "=", "None", ")", ":", "es", "=", "_get_instance", "(", "hosts", ",", "profile", ")", "try", ":", "return", "es", ".", "snapshot", ".", "get", "(", "repository", "=", "repository", ",", "snapshot", "=", "snapshot", ",", "ignore_unavailable", "=", "ignore_unavailable", ")", "except", "elasticsearch", ".", "TransportError", "as", "e", ":", "raise", "CommandExecutionError", "(", "\"Cannot obtain details of snapshot {0} in repository {1}, server returned code {2} with message {3}\"", ".", "format", "(", "snapshot", ",", "repository", ",", "e", ".", "status_code", ",", "e", ".", "error", ")", ")" ]
.. versionadded:: 2017.7.0 Obtain snapshot residing in specified repository. repository Repository name snapshot Snapshot name, use _all to obtain all snapshots in specified repository ignore_unavailable Ignore unavailable snapshots CLI example:: salt myminion elasticsearch.snapshot_get testrepo testsnapshot
[ "..", "versionadded", "::", "2017", ".", "7", ".", "0" ]
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/elasticsearch.py#L1113-L1135
0.004571
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_terminal.py
brocade_terminal.terminal_cfg_line_exec_timeout
def terminal_cfg_line_exec_timeout(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") terminal_cfg = ET.SubElement(config, "terminal-cfg", xmlns="urn:brocade.com:mgmt:brocade-terminal") line = ET.SubElement(terminal_cfg, "line") sessionid_key = ET.SubElement(line, "sessionid") sessionid_key.text = kwargs.pop('sessionid') exec_timeout = ET.SubElement(line, "exec-timeout") exec_timeout.text = kwargs.pop('exec_timeout') callback = kwargs.pop('callback', self._callback) return callback(config)
python
def terminal_cfg_line_exec_timeout(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") terminal_cfg = ET.SubElement(config, "terminal-cfg", xmlns="urn:brocade.com:mgmt:brocade-terminal") line = ET.SubElement(terminal_cfg, "line") sessionid_key = ET.SubElement(line, "sessionid") sessionid_key.text = kwargs.pop('sessionid') exec_timeout = ET.SubElement(line, "exec-timeout") exec_timeout.text = kwargs.pop('exec_timeout') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "terminal_cfg_line_exec_timeout", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "terminal_cfg", "=", "ET", ".", "SubElement", "(", "config", ",", "\"terminal-cfg\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-terminal\"", ")", "line", "=", "ET", ".", "SubElement", "(", "terminal_cfg", ",", "\"line\"", ")", "sessionid_key", "=", "ET", ".", "SubElement", "(", "line", ",", "\"sessionid\"", ")", "sessionid_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'sessionid'", ")", "exec_timeout", "=", "ET", ".", "SubElement", "(", "line", ",", "\"exec-timeout\"", ")", "exec_timeout", ".", "text", "=", "kwargs", ".", "pop", "(", "'exec_timeout'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
train
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_terminal.py#L24-L36
0.00495
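terminal_cfg_line_exec_timeout above assembles a NETCONF-style XML payload with ElementTree. A standalone sketch of the same element construction, with hypothetical sessionid and exec-timeout values and no device callback:

import xml.etree.ElementTree as ET

config = ET.Element("config")
terminal_cfg = ET.SubElement(config, "terminal-cfg",
                             xmlns="urn:brocade.com:mgmt:brocade-terminal")
line = ET.SubElement(terminal_cfg, "line")
ET.SubElement(line, "sessionid").text = "1"
ET.SubElement(line, "exec-timeout").text = "30"

# Serialise the document to inspect the payload that would be sent.
print(ET.tostring(config, encoding="unicode"))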
datacamp/protowhat
protowhat/checks/check_funcs.py
has_code
def has_code( state, text, incorrect_msg="Check the {ast_path}. The checker expected to find {text}.", fixed=False, ): """Test whether the student code contains text. Args: state: State instance describing student and solution code. Can be omitted if used with Ex(). text : text that student code must contain. Can be a regex pattern or a simple string. incorrect_msg: feedback message if text is not in student code. fixed: whether to match text exactly, rather than using regular expressions. Note: Functions like ``check_node`` focus on certain parts of code. Using these functions followed by ``has_code`` will only look in the code being focused on. :Example: If the student code is.. :: SELECT a FROM b WHERE id < 100 Then the first test below would (unfortunately) pass, but the second would fail..:: # contained in student code Ex().has_code(text="id < 10") # the $ means that you are matching the end of a line Ex().has_code(text="id < 10$") By setting ``fixed = True``, you can search for fixed strings:: # without fixed = True, '*' matches any character Ex().has_code(text="SELECT * FROM b") # passes Ex().has_code(text="SELECT \\\\* FROM b") # fails Ex().has_code(text="SELECT * FROM b", fixed=True) # fails You can check only the code corresponding to the WHERE clause, using :: where = Ex().check_node('SelectStmt', 0).check_edge('where_clause') where.has_code(text = "id < 10) """ stu_ast = state.student_ast stu_code = state.student_code # fallback on using complete student code if no ast ParseError = state.ast_dispatcher.ParseError def get_text(ast, code): if isinstance(ast, ParseError): return code try: return ast.get_text(code) except: return code stu_text = get_text(stu_ast, stu_code) _msg = incorrect_msg.format( ast_path=state.get_ast_path() or "highlighted code", text=text ) # either simple text matching or regex test res = text in stu_text if fixed else re.search(text, stu_text) if not res: state.report(Feedback(_msg)) return state
python
def has_code( state, text, incorrect_msg="Check the {ast_path}. The checker expected to find {text}.", fixed=False, ): """Test whether the student code contains text. Args: state: State instance describing student and solution code. Can be omitted if used with Ex(). text : text that student code must contain. Can be a regex pattern or a simple string. incorrect_msg: feedback message if text is not in student code. fixed: whether to match text exactly, rather than using regular expressions. Note: Functions like ``check_node`` focus on certain parts of code. Using these functions followed by ``has_code`` will only look in the code being focused on. :Example: If the student code is.. :: SELECT a FROM b WHERE id < 100 Then the first test below would (unfortunately) pass, but the second would fail..:: # contained in student code Ex().has_code(text="id < 10") # the $ means that you are matching the end of a line Ex().has_code(text="id < 10$") By setting ``fixed = True``, you can search for fixed strings:: # without fixed = True, '*' matches any character Ex().has_code(text="SELECT * FROM b") # passes Ex().has_code(text="SELECT \\\\* FROM b") # fails Ex().has_code(text="SELECT * FROM b", fixed=True) # fails You can check only the code corresponding to the WHERE clause, using :: where = Ex().check_node('SelectStmt', 0).check_edge('where_clause') where.has_code(text = "id < 10) """ stu_ast = state.student_ast stu_code = state.student_code # fallback on using complete student code if no ast ParseError = state.ast_dispatcher.ParseError def get_text(ast, code): if isinstance(ast, ParseError): return code try: return ast.get_text(code) except: return code stu_text = get_text(stu_ast, stu_code) _msg = incorrect_msg.format( ast_path=state.get_ast_path() or "highlighted code", text=text ) # either simple text matching or regex test res = text in stu_text if fixed else re.search(text, stu_text) if not res: state.report(Feedback(_msg)) return state
[ "def", "has_code", "(", "state", ",", "text", ",", "incorrect_msg", "=", "\"Check the {ast_path}. The checker expected to find {text}.\"", ",", "fixed", "=", "False", ",", ")", ":", "stu_ast", "=", "state", ".", "student_ast", "stu_code", "=", "state", ".", "student_code", "# fallback on using complete student code if no ast", "ParseError", "=", "state", ".", "ast_dispatcher", ".", "ParseError", "def", "get_text", "(", "ast", ",", "code", ")", ":", "if", "isinstance", "(", "ast", ",", "ParseError", ")", ":", "return", "code", "try", ":", "return", "ast", ".", "get_text", "(", "code", ")", "except", ":", "return", "code", "stu_text", "=", "get_text", "(", "stu_ast", ",", "stu_code", ")", "_msg", "=", "incorrect_msg", ".", "format", "(", "ast_path", "=", "state", ".", "get_ast_path", "(", ")", "or", "\"highlighted code\"", ",", "text", "=", "text", ")", "# either simple text matching or regex test", "res", "=", "text", "in", "stu_text", "if", "fixed", "else", "re", ".", "search", "(", "text", ",", "stu_text", ")", "if", "not", "res", ":", "state", ".", "report", "(", "Feedback", "(", "_msg", ")", ")", "return", "state" ]
Test whether the student code contains text. Args: state: State instance describing student and solution code. Can be omitted if used with Ex(). text : text that student code must contain. Can be a regex pattern or a simple string. incorrect_msg: feedback message if text is not in student code. fixed: whether to match text exactly, rather than using regular expressions. Note: Functions like ``check_node`` focus on certain parts of code. Using these functions followed by ``has_code`` will only look in the code being focused on. :Example: If the student code is.. :: SELECT a FROM b WHERE id < 100 Then the first test below would (unfortunately) pass, but the second would fail..:: # contained in student code Ex().has_code(text="id < 10") # the $ means that you are matching the end of a line Ex().has_code(text="id < 10$") By setting ``fixed = True``, you can search for fixed strings:: # without fixed = True, '*' matches any character Ex().has_code(text="SELECT * FROM b") # passes Ex().has_code(text="SELECT \\\\* FROM b") # fails Ex().has_code(text="SELECT * FROM b", fixed=True) # fails You can check only the code corresponding to the WHERE clause, using :: where = Ex().check_node('SelectStmt', 0).check_edge('where_clause') where.has_code(text = "id < 10)
[ "Test", "whether", "the", "student", "code", "contains", "text", "." ]
train
https://github.com/datacamp/protowhat/blob/a392b4e51e07a2e50e7b7f6ad918b3f5cbb63edc/protowhat/checks/check_funcs.py#L172-L243
0.002509
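The decision at the core of has_code above is one expression: literal containment when fixed=True, otherwise a regular-expression search. A self-contained sketch of just that check, using hypothetical student code:

import re

def matches(student_code, text, fixed=False):
    # fixed=True: literal substring containment; otherwise a regex search.
    return text in student_code if fixed else bool(re.search(text, student_code))

student_code = "SELECT a FROM b WHERE id < 100"
print(matches(student_code, "id < 10"))               # True: regex finds "id < 10" inside "id < 100"
print(matches(student_code, "id < 10$"))              # False: $ requires the match to end the string
print(matches(student_code, "id < 10", fixed=True))   # True: plain containment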
tjcsl/ion
intranet/apps/users/models.py
User.absence_count
def absence_count(self): """Return the user's absence count. If the user has no absences or is not a signup user, returns 0. """ # FIXME: remove recursive dep from ..eighth.models import EighthSignup return EighthSignup.objects.filter(user=self, was_absent=True, scheduled_activity__attendance_taken=True).count()
python
def absence_count(self): """Return the user's absence count. If the user has no absences or is not a signup user, returns 0. """ # FIXME: remove recursive dep from ..eighth.models import EighthSignup return EighthSignup.objects.filter(user=self, was_absent=True, scheduled_activity__attendance_taken=True).count()
[ "def", "absence_count", "(", "self", ")", ":", "# FIXME: remove recursive dep", "from", ".", ".", "eighth", ".", "models", "import", "EighthSignup", "return", "EighthSignup", ".", "objects", ".", "filter", "(", "user", "=", "self", ",", "was_absent", "=", "True", ",", "scheduled_activity__attendance_taken", "=", "True", ")", ".", "count", "(", ")" ]
Return the user's absence count. If the user has no absences or is not a signup user, returns 0.
[ "Return", "the", "user", "s", "absence", "count", "." ]
train
https://github.com/tjcsl/ion/blob/5d722b0725d572039bb0929fd5715a4070c82c72/intranet/apps/users/models.py#L735-L745
0.008065
vtkiorg/vtki
vtki/plotting.py
BasePlotter.add_actor
def add_actor(self, uinput, reset_camera=False, name=None, loc=None, culling=False): """ Adds an actor to render window. Creates an actor if input is a mapper. Parameters ---------- uinput : vtk.vtkMapper or vtk.vtkActor vtk mapper or vtk actor to be added. reset_camera : bool, optional Resets the camera when true. loc : int, tuple, or list Index of the renderer to add the actor to. For example, ``loc=2`` or ``loc=(1, 1)``. If None, selects the last active Renderer. culling : bool optional Does not render faces that should not be visible to the plotter. This can be helpful for dense surface meshes, especially when edges are visible, but can cause flat meshes to be partially displayed. Default False. Returns ------- actor : vtk.vtkActor The actor. actor_properties : vtk.Properties Actor properties. """ # add actor to the correct render window self._active_renderer_index = self.loc_to_index(loc) renderer = self.renderers[self._active_renderer_index] return renderer.add_actor(uinput, reset_camera, name, culling)
python
def add_actor(self, uinput, reset_camera=False, name=None, loc=None, culling=False): """ Adds an actor to render window. Creates an actor if input is a mapper. Parameters ---------- uinput : vtk.vtkMapper or vtk.vtkActor vtk mapper or vtk actor to be added. reset_camera : bool, optional Resets the camera when true. loc : int, tuple, or list Index of the renderer to add the actor to. For example, ``loc=2`` or ``loc=(1, 1)``. If None, selects the last active Renderer. culling : bool optional Does not render faces that should not be visible to the plotter. This can be helpful for dense surface meshes, especially when edges are visible, but can cause flat meshes to be partially displayed. Default False. Returns ------- actor : vtk.vtkActor The actor. actor_properties : vtk.Properties Actor properties. """ # add actor to the correct render window self._active_renderer_index = self.loc_to_index(loc) renderer = self.renderers[self._active_renderer_index] return renderer.add_actor(uinput, reset_camera, name, culling)
[ "def", "add_actor", "(", "self", ",", "uinput", ",", "reset_camera", "=", "False", ",", "name", "=", "None", ",", "loc", "=", "None", ",", "culling", "=", "False", ")", ":", "# add actor to the correct render window", "self", ".", "_active_renderer_index", "=", "self", ".", "loc_to_index", "(", "loc", ")", "renderer", "=", "self", ".", "renderers", "[", "self", ".", "_active_renderer_index", "]", "return", "renderer", ".", "add_actor", "(", "uinput", ",", "reset_camera", ",", "name", ",", "culling", ")" ]
Adds an actor to render window. Creates an actor if input is a mapper. Parameters ---------- uinput : vtk.vtkMapper or vtk.vtkActor vtk mapper or vtk actor to be added. reset_camera : bool, optional Resets the camera when true. loc : int, tuple, or list Index of the renderer to add the actor to. For example, ``loc=2`` or ``loc=(1, 1)``. If None, selects the last active Renderer. culling : bool optional Does not render faces that should not be visible to the plotter. This can be helpful for dense surface meshes, especially when edges are visible, but can cause flat meshes to be partially displayed. Default False. Returns ------- actor : vtk.vtkActor The actor. actor_properties : vtk.Properties Actor properties.
[ "Adds", "an", "actor", "to", "render", "window", ".", "Creates", "an", "actor", "if", "input", "is", "a", "mapper", "." ]
train
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/plotting.py#L1183-L1220
0.002256
tutorcruncher/django-bootstrap3-datetimepicker
bootstrap3_datetime/widgets.py
DateTimePicker._format_value
def _format_value(self, value): """This function name was changed in Django 1.10 and removed in 2.0.""" # Use renamed format_name() for Django versions >= 1.10. if hasattr(self, 'format_value'): return super(DateTimePicker, self).format_value(value) # Use old _format_name() for Django versions < 1.10. else: return super(DateTimePicker, self)._format_value(value)
python
def _format_value(self, value): """This function name was changed in Django 1.10 and removed in 2.0.""" # Use renamed format_name() for Django versions >= 1.10. if hasattr(self, 'format_value'): return super(DateTimePicker, self).format_value(value) # Use old _format_name() for Django versions < 1.10. else: return super(DateTimePicker, self)._format_value(value)
[ "def", "_format_value", "(", "self", ",", "value", ")", ":", "# Use renamed format_name() for Django versions >= 1.10.", "if", "hasattr", "(", "self", ",", "'format_value'", ")", ":", "return", "super", "(", "DateTimePicker", ",", "self", ")", ".", "format_value", "(", "value", ")", "# Use old _format_name() for Django versions < 1.10.", "else", ":", "return", "super", "(", "DateTimePicker", ",", "self", ")", ".", "_format_value", "(", "value", ")" ]
This function name was changed in Django 1.10 and removed in 2.0.
[ "This", "function", "name", "was", "changed", "in", "Django", "1", ".", "10", "and", "removed", "in", "2", ".", "0", "." ]
train
https://github.com/tutorcruncher/django-bootstrap3-datetimepicker/blob/21ee9245116e1a3424e8d9d751446b5651cb6441/bootstrap3_datetime/widgets.py#L84-L91
0.004673
cggh/scikit-allel
allel/model/ndarray.py
SortedIndex.intersect_range
def intersect_range(self, start=None, stop=None): """Intersect with range defined by `start` and `stop` values **inclusive**. Parameters ---------- start : int, optional Start value. stop : int, optional Stop value. Returns ------- idx : SortedIndex Examples -------- >>> import allel >>> idx = allel.SortedIndex([3, 6, 11, 20, 35]) >>> idx.intersect_range(4, 32) <SortedIndex shape=(3,) dtype=int64> [6, 11, 20] """ try: loc = self.locate_range(start=start, stop=stop) except KeyError: return self.values[0:0] else: return self[loc]
python
def intersect_range(self, start=None, stop=None): """Intersect with range defined by `start` and `stop` values **inclusive**. Parameters ---------- start : int, optional Start value. stop : int, optional Stop value. Returns ------- idx : SortedIndex Examples -------- >>> import allel >>> idx = allel.SortedIndex([3, 6, 11, 20, 35]) >>> idx.intersect_range(4, 32) <SortedIndex shape=(3,) dtype=int64> [6, 11, 20] """ try: loc = self.locate_range(start=start, stop=stop) except KeyError: return self.values[0:0] else: return self[loc]
[ "def", "intersect_range", "(", "self", ",", "start", "=", "None", ",", "stop", "=", "None", ")", ":", "try", ":", "loc", "=", "self", ".", "locate_range", "(", "start", "=", "start", ",", "stop", "=", "stop", ")", "except", "KeyError", ":", "return", "self", ".", "values", "[", "0", ":", "0", "]", "else", ":", "return", "self", "[", "loc", "]" ]
Intersect with range defined by `start` and `stop` values **inclusive**. Parameters ---------- start : int, optional Start value. stop : int, optional Stop value. Returns ------- idx : SortedIndex Examples -------- >>> import allel >>> idx = allel.SortedIndex([3, 6, 11, 20, 35]) >>> idx.intersect_range(4, 32) <SortedIndex shape=(3,) dtype=int64> [6, 11, 20]
[ "Intersect", "with", "range", "defined", "by", "start", "and", "stop", "values", "**", "inclusive", "**", "." ]
train
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L3632-L3663
0.002635
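intersect_range above delegates to locate_range and falls back to an empty slice on KeyError. A numpy-only sketch of the same inclusive-range lookup on a sorted array, using searchsorted; this illustrates the idea and is not scikit-allel's implementation:

import numpy as np

def intersect_range(values, start, stop):
    # values must be sorted ascending; start/stop are inclusive bounds.
    lo = np.searchsorted(values, start, side="left")
    hi = np.searchsorted(values, stop, side="right")
    return values[lo:hi]

idx = np.array([3, 6, 11, 20, 35])
print(intersect_range(idx, 4, 32))  # [ 6 11 20]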
ioos/compliance-checker
compliance_checker/cf/cf.py
CFBaseCheck._split_standard_name
def _split_standard_name(self, standard_name): ''' Returns a tuple of the standard_name and standard_name modifier Nones are used to represent the absence of a modifier or standard_name :rtype: tuple :return: 2-tuple of standard_name and modifier as strings ''' if isinstance(standard_name, basestring) and ' ' in standard_name: return standard_name.split(' ', 1) # if this isn't a string, then it doesn't make sense to split # -- treat value as standard name with no modifier else: return standard_name, None
python
def _split_standard_name(self, standard_name): ''' Returns a tuple of the standard_name and standard_name modifier Nones are used to represent the absence of a modifier or standard_name :rtype: tuple :return: 2-tuple of standard_name and modifier as strings ''' if isinstance(standard_name, basestring) and ' ' in standard_name: return standard_name.split(' ', 1) # if this isn't a string, then it doesn't make sense to split # -- treat value as standard name with no modifier else: return standard_name, None
[ "def", "_split_standard_name", "(", "self", ",", "standard_name", ")", ":", "if", "isinstance", "(", "standard_name", ",", "basestring", ")", "and", "' '", "in", "standard_name", ":", "return", "standard_name", ".", "split", "(", "' '", ",", "1", ")", "# if this isn't a string, then it doesn't make sense to split", "# -- treat value as standard name with no modifier", "else", ":", "return", "standard_name", ",", "None" ]
Returns a tuple of the standard_name and standard_name modifier Nones are used to represent the absence of a modifier or standard_name :rtype: tuple :return: 2-tuple of standard_name and modifier as strings
[ "Returns", "a", "tuple", "of", "the", "standard_name", "and", "standard_name", "modifier" ]
train
https://github.com/ioos/compliance-checker/blob/ee89c27b0daade58812489a2da3aa3b6859eafd9/compliance_checker/cf/cf.py#L994-L1009
0.003247
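A small Python 3 adaptation of the split behaviour described by _split_standard_name above (the original tests against basestring): a standard name with a modifier splits on the first space; anything else comes back with a None modifier.

def split_standard_name(standard_name):
    # Split "name modifier" into (name, modifier); otherwise (value, None).
    if isinstance(standard_name, str) and ' ' in standard_name:
        return standard_name.split(' ', 1)
    return standard_name, None

print(split_standard_name("air_temperature standard_error"))
# ['air_temperature', 'standard_error']
print(split_standard_name("air_temperature"))
# ('air_temperature', None)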
eyurtsev/FlowCytometryTools
FlowCytometryTools/gui/wx_backend/gui.py
GUIEmbedded.update_widget_channels
def update_widget_channels(self): """ Parameters ------------- axis : 'x' or 'y' event : pick of list text """ sel1 = self.x_axis_list.GetSelection() sel2 = self.y_axis_list.GetSelection() if sel1 >= 0 and sel2 >= 0: channel_1 = self.x_axis_list.GetString(sel1) channel_2 = self.y_axis_list.GetString(sel2) self.fcgatemanager.set_axes((channel_1, channel_2), self.ax)
python
def update_widget_channels(self): """ Parameters ------------- axis : 'x' or 'y' event : pick of list text """ sel1 = self.x_axis_list.GetSelection() sel2 = self.y_axis_list.GetSelection() if sel1 >= 0 and sel2 >= 0: channel_1 = self.x_axis_list.GetString(sel1) channel_2 = self.y_axis_list.GetString(sel2) self.fcgatemanager.set_axes((channel_1, channel_2), self.ax)
[ "def", "update_widget_channels", "(", "self", ")", ":", "sel1", "=", "self", ".", "x_axis_list", ".", "GetSelection", "(", ")", "sel2", "=", "self", ".", "y_axis_list", ".", "GetSelection", "(", ")", "if", "sel1", ">=", "0", "and", "sel2", ">=", "0", ":", "channel_1", "=", "self", ".", "x_axis_list", ".", "GetString", "(", "sel1", ")", "channel_2", "=", "self", ".", "y_axis_list", ".", "GetString", "(", "sel2", ")", "self", ".", "fcgatemanager", ".", "set_axes", "(", "(", "channel_1", ",", "channel_2", ")", ",", "self", ".", "ax", ")" ]
Parameters ------------- axis : 'x' or 'y' event : pick of list text
[ "Parameters", "-------------", "axis", ":", "x", "or", "y", "event", ":", "pick", "of", "list", "text" ]
train
https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/gui/wx_backend/gui.py#L70-L83
0.004202
monarch-initiative/dipper
dipper/sources/UDP.py
UDP._parse_rs_map_file
def _parse_rs_map_file(rsfile): """ Parses rsID mapping file from dbSNP Outputs dict where keys are coordinates in the format {chromsome}-{position} { chr1-1234: [ { 'type': 'snp' 'rs_id': 'rs1234' 'alleles': 'A/G/T' } ] } :param file: file path :param limit: limit (int, optional) limit the number of rows processed :return: dict """ rs_map = {} col = ['chromosome', 'position', 'rs_id', 'var_type', 'alleles'] if os.path.exists(os.path.join(os.path.dirname(__file__), rsfile)): with open(os.path.join(os.path.dirname(__file__), rsfile)) as tsvfile: reader = csv.reader(tsvfile, delimiter="\t") for row in reader: chromosome = row[col.index('chromosome')] position = row[col.index('position')] # rs_id, var_type, alleles) = row map_key = "chr{0}-{1}".format(chromosome, position) rs_info = { 'type': row[col.index('var_type')], 'rs_id': row[col.index('rs_id')], 'alleles': row[col.index('alleles')] } if map_key in rs_map: rs_map[map_key].append(rs_info) else: rs_map[map_key] = [rs_info] return rs_map
python
def _parse_rs_map_file(rsfile): """ Parses rsID mapping file from dbSNP Outputs dict where keys are coordinates in the format {chromsome}-{position} { chr1-1234: [ { 'type': 'snp' 'rs_id': 'rs1234' 'alleles': 'A/G/T' } ] } :param file: file path :param limit: limit (int, optional) limit the number of rows processed :return: dict """ rs_map = {} col = ['chromosome', 'position', 'rs_id', 'var_type', 'alleles'] if os.path.exists(os.path.join(os.path.dirname(__file__), rsfile)): with open(os.path.join(os.path.dirname(__file__), rsfile)) as tsvfile: reader = csv.reader(tsvfile, delimiter="\t") for row in reader: chromosome = row[col.index('chromosome')] position = row[col.index('position')] # rs_id, var_type, alleles) = row map_key = "chr{0}-{1}".format(chromosome, position) rs_info = { 'type': row[col.index('var_type')], 'rs_id': row[col.index('rs_id')], 'alleles': row[col.index('alleles')] } if map_key in rs_map: rs_map[map_key].append(rs_info) else: rs_map[map_key] = [rs_info] return rs_map
[ "def", "_parse_rs_map_file", "(", "rsfile", ")", ":", "rs_map", "=", "{", "}", "col", "=", "[", "'chromosome'", ",", "'position'", ",", "'rs_id'", ",", "'var_type'", ",", "'alleles'", "]", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "rsfile", ")", ")", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "rsfile", ")", ")", "as", "tsvfile", ":", "reader", "=", "csv", ".", "reader", "(", "tsvfile", ",", "delimiter", "=", "\"\\t\"", ")", "for", "row", "in", "reader", ":", "chromosome", "=", "row", "[", "col", ".", "index", "(", "'chromosome'", ")", "]", "position", "=", "row", "[", "col", ".", "index", "(", "'position'", ")", "]", "# rs_id, var_type, alleles) = row", "map_key", "=", "\"chr{0}-{1}\"", ".", "format", "(", "chromosome", ",", "position", ")", "rs_info", "=", "{", "'type'", ":", "row", "[", "col", ".", "index", "(", "'var_type'", ")", "]", ",", "'rs_id'", ":", "row", "[", "col", ".", "index", "(", "'rs_id'", ")", "]", ",", "'alleles'", ":", "row", "[", "col", ".", "index", "(", "'alleles'", ")", "]", "}", "if", "map_key", "in", "rs_map", ":", "rs_map", "[", "map_key", "]", ".", "append", "(", "rs_info", ")", "else", ":", "rs_map", "[", "map_key", "]", "=", "[", "rs_info", "]", "return", "rs_map" ]
Parses rsID mapping file from dbSNP Outputs dict where keys are coordinates in the format {chromsome}-{position} { chr1-1234: [ { 'type': 'snp' 'rs_id': 'rs1234' 'alleles': 'A/G/T' } ] } :param file: file path :param limit: limit (int, optional) limit the number of rows processed :return: dict
[ "Parses", "rsID", "mapping", "file", "from", "dbSNP", "Outputs", "dict", "where", "keys", "are", "coordinates", "in", "the", "format", "{", "chromsome", "}", "-", "{", "position", "}" ]
train
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/UDP.py#L642-L681
0.001927
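_parse_rs_map_file above keys each record by "chr{chromosome}-{position}" and appends a dict of type/rs_id/alleles per rsID. A self-contained sketch of that aggregation, reading hypothetical tab-separated rows from an in-memory string instead of a file:

import csv
import io

# Hypothetical rows: chromosome, position, rs_id, var_type, alleles
tsv = "1\t1234\trs1234\tsnp\tA/G/T\n1\t1234\trs9999\tsnp\tC/T\n2\t42\trs42\tindel\t-/AT\n"

rs_map = {}
for chromosome, position, rs_id, var_type, alleles in csv.reader(io.StringIO(tsv), delimiter="\t"):
    map_key = "chr{0}-{1}".format(chromosome, position)
    rs_info = {'type': var_type, 'rs_id': rs_id, 'alleles': alleles}
    rs_map.setdefault(map_key, []).append(rs_info)

print(sorted(rs_map))            # ['chr1-1234', 'chr2-42']
print(len(rs_map['chr1-1234']))  # 2: two rsIDs share the same coordinate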
jaredLunde/redis_structures
redis_structures/__init__.py
RedisSet.diffstore
def diffstore(self, destination, *others): """ The same as :meth:difference, but stores the resulting set @destination @destination: #str keyname or :class:RedisSet @others: one or several #str keynames or :class:RedisSet objects -> #int number of members in resulting set """ others = self._typesafe_others(others) destination = self._typesafe(destination) return self._client.sdiffstore(destination, self.key_prefix, *others)
python
def diffstore(self, destination, *others): """ The same as :meth:difference, but stores the resulting set @destination @destination: #str keyname or :class:RedisSet @others: one or several #str keynames or :class:RedisSet objects -> #int number of members in resulting set """ others = self._typesafe_others(others) destination = self._typesafe(destination) return self._client.sdiffstore(destination, self.key_prefix, *others)
[ "def", "diffstore", "(", "self", ",", "destination", ",", "*", "others", ")", ":", "others", "=", "self", ".", "_typesafe_others", "(", "others", ")", "destination", "=", "self", ".", "_typesafe", "(", "destination", ")", "return", "self", ".", "_client", ".", "sdiffstore", "(", "destination", ",", "self", ".", "key_prefix", ",", "*", "others", ")" ]
The same as :meth:difference, but stores the resulting set @destination @destination: #str keyname or :class:RedisSet @others: one or several #str keynames or :class:RedisSet objects -> #int number of members in resulting set
[ "The", "same", "as", ":", "meth", ":", "difference", "but", "stores", "the", "resulting", "set", "@destination" ]
train
https://github.com/jaredLunde/redis_structures/blob/b9cce5f5c85db5e12c292633ff8d04e3ae053294/redis_structures/__init__.py#L1801-L1812
0.003868
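diffstore above wraps Redis SDIFFSTORE: store the set difference of this key against the others under destination and return the member count. Without a Redis server, the semantics can be sketched with plain Python sets (the key names below are hypothetical):

def diffstore(store, destination, source, *others):
    # store: dict of name -> set, standing in for Redis keys.
    result = store[source].difference(*(store[name] for name in others))
    store[destination] = result
    return len(result)

store = {
    'tags:post1': {'redis', 'python', 'cache'},
    'tags:post2': {'python'},
}
count = diffstore(store, 'tags:diff', 'tags:post1', 'tags:post2')
print(count, sorted(store['tags:diff']))  # 2 ['cache', 'redis']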
graphql-python/graphql-core-next
graphql/utilities/separate_operations.py
collect_transitive_dependencies
def collect_transitive_dependencies( collected: Set[str], dep_graph: DepGraph, from_name: str ) -> None: """Collect transitive dependencies. From a dependency graph, collects a list of transitive dependencies by recursing through a dependency graph. """ immediate_deps = dep_graph[from_name] for to_name in immediate_deps: if to_name not in collected: collected.add(to_name) collect_transitive_dependencies(collected, dep_graph, to_name)
python
def collect_transitive_dependencies( collected: Set[str], dep_graph: DepGraph, from_name: str ) -> None: """Collect transitive dependencies. From a dependency graph, collects a list of transitive dependencies by recursing through a dependency graph. """ immediate_deps = dep_graph[from_name] for to_name in immediate_deps: if to_name not in collected: collected.add(to_name) collect_transitive_dependencies(collected, dep_graph, to_name)
[ "def", "collect_transitive_dependencies", "(", "collected", ":", "Set", "[", "str", "]", ",", "dep_graph", ":", "DepGraph", ",", "from_name", ":", "str", ")", "->", "None", ":", "immediate_deps", "=", "dep_graph", "[", "from_name", "]", "for", "to_name", "in", "immediate_deps", ":", "if", "to_name", "not", "in", "collected", ":", "collected", ".", "add", "(", "to_name", ")", "collect_transitive_dependencies", "(", "collected", ",", "dep_graph", ",", "to_name", ")" ]
Collect transitive dependencies. From a dependency graph, collects a list of transitive dependencies by recursing through a dependency graph.
[ "Collect", "transitive", "dependencies", "." ]
train
https://github.com/graphql-python/graphql-core-next/blob/073dce3f002f897d40f9348ffd8f107815160540/graphql/utilities/separate_operations.py#L87-L99
0.004016
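collect_transitive_dependencies above walks a dependency graph depth-first, marking each unseen name before recursing into it. A runnable sketch with a small hypothetical fragment graph:

from typing import Dict, Set

DepGraph = Dict[str, Set[str]]

def collect_transitive_dependencies(collected: Set[str], dep_graph: DepGraph, from_name: str) -> None:
    # Depth-first walk: add each dependency before following it,
    # so shared dependencies are visited only once.
    for to_name in dep_graph[from_name]:
        if to_name not in collected:
            collected.add(to_name)
            collect_transitive_dependencies(collected, dep_graph, to_name)

dep_graph = {
    "GetUser": {"UserFields"},
    "UserFields": {"AddressFields"},
    "AddressFields": set(),
}
collected: Set[str] = set()
collect_transitive_dependencies(collected, dep_graph, "GetUser")
print(sorted(collected))  # ['AddressFields', 'UserFields']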