Dataset schema (one record per function; lengths/types as reported by the dataset viewer):

    repository_name              string, 7-54 chars
    func_path_in_repository      string, 4-175 chars
    func_name                    string, 1-129 chars
    whole_func_string            string, 91-50.9k chars
    language                     string, 1 class
    func_code_string             string, 91-50.9k chars (verbatim duplicate of whole_func_string)
    func_code_tokens             sequence (tokenized duplicate of the code)
    func_documentation_string    string, 1-31.6k chars (docstring, contained in the code)
    func_documentation_tokens    sequence (tokenized duplicate of the docstring)
    split_name                   string, 1 class
    func_code_url                string, 89-268 chars
    score                        float64, 0-0.09

Repository: dslackw/slpkg
File: slpkg/messages.py
Function: Msg.build_FAILED
Language: python

def build_FAILED(self, prgnam):
    """Print error message if build failed
    """
    self.template(78)
    print("| Some error on the package {0} [ {1}FAILED{2} ]".format(
        prgnam, self.meta.color["RED"], self.meta.color["ENDC"]))
    self.template(78)
    print("| See the log file in '{0}/var/log/slpkg/sbo/build_logs{1}' "
          "directory or read the README file".format(
              self.meta.color["CYAN"], self.meta.color["ENDC"]))
    self.template(78)
    print("")
Split: train
Source: https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/messages.py#L52-L63
Score: 0.003752

Repository: LEMS/pylems
File: lems/parser/LEMS.py
Function: LEMSFileParser.parse_on_condition
Language: python

def parse_on_condition(self, node):
    """
    Parses <OnCondition>

    @param node: Node containing the <OnCondition> element
    @type node: xml.etree.Element
    """
    try:
        test = node.lattrib['test']
    except:
        self.raise_error('<OnCondition> must specify a test.')

    event_handler = OnCondition(test)

    self.current_regime.add_event_handler(event_handler)

    self.current_event_handler = event_handler
    self.process_nested_tags(node)
    self.current_event_handler = None
Split: train
Source: https://github.com/LEMS/pylems/blob/4eeb719d2f23650fe16c38626663b69b5c83818b/lems/parser/LEMS.py#L1110-L1129
Score: 0.006932
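
One detail worth noting in the function above: the bare `except:` around the attribute lookup swallows every exception, not just a missing key. A minimal sketch of a narrower variant (hypothetical helper, not part of pylems):

def require_attr(lattrib, name, raise_error):
    # Catch only the missing-key case so unrelated errors still propagate,
    # instead of the bare `except:` used in parse_on_condition.
    try:
        return lattrib[name]
    except KeyError:
        raise_error('<OnCondition> must specify a {0}.'.format(name))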

Repository: ibis-project/ibis
File: ibis/sql/alchemy.py
Function: AlchemyClient.list_tables
Language: python

def list_tables(self, like=None, database=None, schema=None):
    """
    List tables/views in the current (or indicated) database.

    Parameters
    ----------
    like : string, default None
        Checks for this string contained in name
    database : string, default None
        If not passed, uses the current/default database

    Returns
    -------
    tables : list of strings
    """
    inspector = self.inspector
    names = inspector.get_table_names(schema=schema)
    names.extend(inspector.get_view_names(schema=schema))
    if like is not None:
        names = [x for x in names if like in x]
    return sorted(names)
Split: train
Source: https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/sql/alchemy.py#L996-L1016
Score: 0.002833
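
Note that the `like` argument above is a plain substring match, not a SQL LIKE pattern. A quick illustration of the filtering step in isolation:

names = ['orders', 'orders_staging', 'users']
like = 'orders'
print(sorted(x for x in names if like in x))  # ['orders', 'orders_staging']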

Repository: coreGreenberet/homematicip-rest-api
File: homematicip/home.py
Function: Home.set_zones_device_assignment
Language: python

def set_zones_device_assignment(self, internal_devices, external_devices) -> dict:
    """ sets the devices for the security zones
    Args:
        internal_devices(List[Device]): the devices which should be used for the internal zone
        external_devices(List[Device]): the devices which should be used for the external(hull) zone

    Returns:
        the result of _restCall
    """
    internal = [x.id for x in internal_devices]
    external = [x.id for x in external_devices]
    data = {"zonesDeviceAssignment": {"INTERNAL": internal, "EXTERNAL": external}}
    return self._restCall(
        "home/security/setZonesDeviceAssignment", body=json.dumps(data)
    )
Split: train
Source: https://github.com/coreGreenberet/homematicip-rest-api/blob/d4c8df53281577e01709f75cacb78b1a5a1d00db/homematicip/home.py#L633-L647
Score: 0.009346

Repository: CloudGenix/sdk-python
File: cloudgenix/__init__.py
Function: API.notify_for_new_version
Language: python

def notify_for_new_version(self):
    """
    Check for a new version of the SDK on API constructor instantiation.
    If new version found, print Notification to STDERR.

    On failure of this check, fail silently.

    **Returns:** No item returned, directly prints notification to `sys.stderr`.
    """
    # broad exception clause, if this fails for any reason just return.
    try:
        recommend_update = False
        update_check_resp = requests.get(self.update_info_url, timeout=3)
        web_version = update_check_resp.json()["info"]["version"]
        api_logger.debug("RETRIEVED_VERSION: %s", web_version)

        available_version = SDK_BUILD_REGEX.search(web_version).groupdict()
        current_version = SDK_BUILD_REGEX.search(self.version).groupdict()

        available_major = available_version.get('major')
        available_minor = available_version.get('minor')
        available_patch = available_version.get('patch')
        available_build = available_version.get('build')
        current_major = current_version.get('major')
        current_minor = current_version.get('minor')
        current_patch = current_version.get('patch')
        current_build = current_version.get('build')

        api_logger.debug("AVAILABLE_VERSION: %s", available_version)
        api_logger.debug("CURRENT_VERSION: %s", current_version)

        # check for major/minor version differences, do not alert for build differences.
        if available_major > current_major:
            recommend_update = True
        elif available_major >= current_major and available_minor > current_minor:
            recommend_update = True
        elif available_major >= current_major and available_minor >= current_minor and \
                available_patch > current_patch:
            recommend_update = True

        api_logger.debug("NEED_UPDATE: %s", recommend_update)

        # notify.
        if recommend_update:
            sys.stderr.write("WARNING: CloudGenix Python SDK upgrade available. SDKs are typically deprecated 6 "
                             "months after release of a new version.\n"
                             "\tLatest Version: {0}\n"
                             "\tCurrent Version: {1}\n"
                             "\tFor more info, see 'https://github.com/cloudgenix/sdk-python'. Additionally, this "
                             "message can be suppressed by instantiating the API with API(update_check=False).\n\n"
                             "".format(web_version, self.version))

        return

    except Exception:
        # just return and continue.
        return
Split: train
Source: https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L446-L503
Score: 0.003577
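
One caveat about the comparison logic above: `groupdict()` yields strings, so `available_major > current_major` compares lexicographically (e.g. '10' < '9'). A minimal numeric-comparison sketch under that assumption (hypothetical helper, not part of the SDK):

def needs_update(available, current):
    # Compare version dicts numerically on (major, minor, patch).
    def key(version):
        return tuple(int(version.get(part) or 0)
                     for part in ('major', 'minor', 'patch'))
    return key(available) > key(current)

print(needs_update({'major': '10', 'minor': '0', 'patch': '0'},
                   {'major': '9', 'minor': '9', 'patch': '9'}))  # True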

Repository: ajenhl/tacl
File: tacl/__main__.py
Function: generate_catalogue_subparser
Language: python

def generate_catalogue_subparser(subparsers):
    """Adds a sub-command parser to `subparsers` to generate and save
    a catalogue file."""
    parser = subparsers.add_parser(
        'catalogue', description=constants.CATALOGUE_DESCRIPTION,
        epilog=constants.CATALOGUE_EPILOG, formatter_class=ParagraphFormatter,
        help=constants.CATALOGUE_HELP)
    utils.add_common_arguments(parser)
    parser.set_defaults(func=generate_catalogue)
    parser.add_argument('corpus', help=constants.DB_CORPUS_HELP,
                        metavar='CORPUS')
    utils.add_query_arguments(parser)
    parser.add_argument('-l', '--label', default='',
                        help=constants.CATALOGUE_LABEL_HELP)
Split: train
Source: https://github.com/ajenhl/tacl/blob/b8a343248e77f1c07a5a4ac133a9ad6e0b4781c2/tacl/__main__.py#L123-L136
Score: 0.001414
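
The registration pattern above, reduced to a self-contained sketch with the tacl constants and helpers replaced by inline placeholders:

import argparse

root = argparse.ArgumentParser(prog='tacl')
subparsers = root.add_subparsers()
parser = subparsers.add_parser('catalogue', help='Generate a catalogue file.')
parser.set_defaults(func=lambda args: print('would generate', args.corpus))
parser.add_argument('corpus', metavar='CORPUS')
parser.add_argument('-l', '--label', default='')

args = root.parse_args(['catalogue', 'my-corpus', '-l', 'base'])
args.func(args)  # dispatches to the handler bound via set_defaults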

Repository: cackharot/suds-py3
File: suds/bindings/binding.py
Function: Binding.detect_fault
Language: python

def detect_fault(self, body):
    """
    Detect I{hidden} soapenv:Fault element in the soap body.

    @param body: The soap envelope body.
    @type body: L{Element}
    @raise WebFault: When found.
    """
    fault = body.getChild('Fault', envns)
    if fault is None:
        return
    unmarshaller = self.unmarshaller(False)
    p = unmarshaller.process(fault)
    if self.options().faults:
        raise WebFault(p, fault)
    return self
Split: train
Source: https://github.com/cackharot/suds-py3/blob/7387ec7806e9be29aad0a711bea5cb3c9396469c/suds/bindings/binding.py#L170-L184
Score: 0.003992

Repository: thombashi/SimpleSQLite
File: simplesqlite/core.py
Function: SimpleSQLite.has_attr
Language: python

def has_attr(self, table_name, attr_name):
    """
    :param str table_name: Table name that the attribute exists.
    :param str attr_name: Attribute name to be tested.
    :return: |True| if the table has the attribute.
    :rtype: bool
    :raises simplesqlite.TableNotFoundError:
        |raises_verify_table_existence|

    :Sample Code:
        .. code:: python

            import simplesqlite

            table_name = "sample_table"
            con = simplesqlite.SimpleSQLite("sample.sqlite", "w")
            con.create_table_from_data_matrix(
                table_name,
                ["attr_a", "attr_b"],
                [[1, "a"], [2, "b"]])

            print(con.has_attr(table_name, "attr_a"))
            print(con.has_attr(table_name, "not_existing"))

            try:
                print(con.has_attr("not_existing", "attr_a"))
            except simplesqlite.TableNotFoundError as e:
                print(e)
    :Output:
        .. parsed-literal::

            True
            False
            'not_existing' table not found in /tmp/sample.sqlite
    """
    self.verify_table_existence(table_name)

    if typepy.is_null_string(attr_name):
        return False

    return attr_name in self.fetch_attr_names(table_name)
Split: train
Source: https://github.com/thombashi/SimpleSQLite/blob/b16f212132b9b98773e68bf7395abc2f60f56fe5/simplesqlite/core.py#L955-L995
Score: 0.001449

Repository: lrq3000/pyFileFixity
File: pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py
Function: MainFrame.SetPercentageView
Language: python

def SetPercentageView(self, percentageView):
    """Set whether to display percentage or absolute values"""
    self.percentageView = percentageView
    self.percentageMenuItem.Check(self.percentageView)
    self.percentageViewTool.SetValue(self.percentageView)
    total = self.adapter.value(self.loader.get_root(self.viewType))
    for control in self.ProfileListControls:
        control.SetPercentage(self.percentageView, total)
    self.adapter.SetPercentage(self.percentageView, total)
Split: train
Source: https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py#L532-L540
Score: 0.011385

Repository: inveniosoftware-attic/invenio-comments
File: invenio_comments/api.py
Function: get_reply_order_cache_data
Language: python

def get_reply_order_cache_data(comid):
    """
    Prepare a representation of the comment ID given as parameter so
    that it is suitable for byte ordering in MySQL.
    """
    return "%s%s%s%s" % (chr((comid >> 24) % 256),
                         chr((comid >> 16) % 256),
                         chr((comid >> 8) % 256),
                         chr(comid % 256))
Split: train
Source: https://github.com/inveniosoftware-attic/invenio-comments/blob/62bb6e07c146baf75bf8de80b5896ab2a01a8423/invenio_comments/api.py#L945-L951
Score: 0.003125
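
The `chr()` arithmetic above emits the comment ID as four bytes, most significant first, so byte-wise ordering of the encoded strings matches numeric ordering of the IDs. A rough modern equivalent using `struct` (a sketch; the original is Python 2 string-based):

import struct

def pack_comment_id(comid):
    # ">I" = big-endian unsigned 32-bit integer, i.e. the same four bytes
    # that chr((comid >> 24) % 256) ... chr(comid % 256) produce above.
    return struct.pack('>I', comid & 0xFFFFFFFF)

print(pack_comment_id(258))  # b'\x00\x00\x01\x02'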

Repository: bast/flanders
File: cmake/update.py
Function: main
Language: python

def main(argv):
    """
    Main function.
    """
    if len(argv) != 2:
        sys.stderr.write("\nYou can update a project in two steps.\n\n")
        sys.stderr.write("Step 1: Update or create infrastructure files\n")
        sys.stderr.write(" which will be needed to configure and build the project:\n")
        sys.stderr.write(" $ {0} --self\n\n".format(argv[0]))
        sys.stderr.write("Step 2: Create CMakeLists.txt and setup script in PROJECT_ROOT:\n")
        sys.stderr.write(" $ {0} <PROJECT_ROOT>\n".format(argv[0]))
        sys.stderr.write(" example:\n")
        sys.stderr.write(" $ {0} ..\n".format(argv[0]))
        sys.exit(-1)

    if argv[1] in ['-h', '--help']:
        print('Usage:')
        for t, h in [('python update.py --self',
                      'Update this script and fetch or update infrastructure files under autocmake/.'),
                     ('python update.py <builddir>',
                      '(Re)generate CMakeLists.txt and setup script and fetch or update CMake modules.'),
                     ('python update.py (-h | --help)',
                      'Show this help text.')]:
            print(' {0:30} {1}'.format(t, h))
        sys.exit(0)

    if argv[1] == '--self':
        # update self
        if not os.path.isfile('autocmake.yml'):
            print('- fetching example autocmake.yml')
            fetch_url(
                src='{0}example/autocmake.yml'.format(AUTOCMAKE_GITHUB_URL),
                dst='autocmake.yml'
            )
        if not os.path.isfile('.gitignore'):
            print('- creating .gitignore')
            with open('.gitignore', 'w') as f:
                f.write('*.pyc\n')
        for f in ['autocmake/configure.py',
                  'autocmake/__init__.py',
                  'autocmake/external/docopt.py',
                  'autocmake/external/__init__.py',
                  'autocmake/generate.py',
                  'autocmake/extract.py',
                  'autocmake/interpolate.py',
                  'autocmake/parse_rst.py',
                  'autocmake/parse_yaml.py',
                  'update.py']:
            print('- fetching {0}'.format(f))
            fetch_url(
                src='{0}{1}'.format(AUTOCMAKE_GITHUB_URL, f),
                dst='{0}'.format(f)
            )
        # finally create a README.md with licensing information
        with open('README.md', 'w') as f:
            print('- generating licensing information')
            f.write(licensing_info())
        sys.exit(0)

    process_yaml(argv)
Split: train
Source: https://github.com/bast/flanders/blob/792f9eed8511cb553e67a25b6c5ce60fd6ae97bc/cmake/update.py#L216-L276
Score: 0.001946

Repository: Gandi/gandi.cli
File: gandi/cli/modules/metric.py
Function: Metric.query
Language: python

def query(cls, resources, time_range, query, resource_type, sampler):
    """Query statistics for given resources."""
    if not isinstance(resources, (list, tuple)):
        resources = [resources]

    now = time.time()
    start_utc = datetime.utcfromtimestamp(now - time_range)
    end_utc = datetime.utcfromtimestamp(now)
    date_format = '%Y-%m-%d %H:%M:%S'
    start = start_utc.strftime(date_format)
    end = end_utc.strftime(date_format)
    query = {'start': start,
             'end': end,
             'query': query,
             'resource_id': resources,
             'resource_type': resource_type,
             'sampler': sampler}
    return cls.call('hosting.metric.query', query)
Split: train
Source: https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/modules/metric.py#L16-L33
Score: 0.002621
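
To make the window arithmetic above concrete, here is the same start/end computation run standalone for a one-hour range (`time_range` is given in seconds):

import time
from datetime import datetime

time_range = 3600  # one hour
now = time.time()
date_format = '%Y-%m-%d %H:%M:%S'
start = datetime.utcfromtimestamp(now - time_range).strftime(date_format)
end = datetime.utcfromtimestamp(now).strftime(date_format)
print(start, '->', end)  # two UTC timestamps exactly one hour apart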

Repository: maas/python-libmaas
File: maas/client/flesh/machines.py
Function: MachineSSHMixin._async_get_sshable_ips
Language: python

async def _async_get_sshable_ips(self, ip_addresses):
    """Return list of all IP address that could be pinged."""

    async def _async_ping(ip_address):
        try:
            reader, writer = await asyncio.wait_for(
                asyncio.open_connection(ip_address, 22),
                timeout=5)
        except (OSError, TimeoutError):
            return None
        try:
            line = await reader.readline()
        finally:
            writer.close()
        if line.startswith(b'SSH-'):
            return ip_address

    ssh_ips = await asyncio.gather(*[
        _async_ping(ip_address)
        for ip_address in ip_addresses
    ])
    return [
        ip_address
        for ip_address in ssh_ips
        if ip_address is not None
    ]
Split: train
Source: https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/flesh/machines.py#L436-L460
Score: 0.002407
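
The per-address probe above fans out over all candidates with `asyncio.gather` and keeps only the addresses that answered with an SSH banner. A self-contained sketch of the same banner check (names are illustrative):

import asyncio

async def ssh_banner_present(ip_address, port=22, timeout=5):
    # Open a TCP connection, read one line, and look for the "SSH-"
    # protocol banner, mirroring _async_ping above.
    try:
        reader, writer = await asyncio.wait_for(
            asyncio.open_connection(ip_address, port), timeout=timeout)
    except (OSError, asyncio.TimeoutError):
        return False
    try:
        line = await reader.readline()
    finally:
        writer.close()
    return line.startswith(b'SSH-')

# asyncio.run(ssh_banner_present('192.0.2.10'))  # True only if an sshd answers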

Repository: justquick/django-activity-stream
File: actstream/managers.py
Function: ActionManager.model_actions
Language: python

def model_actions(self, model, **kwargs):
    """
    Stream of most recent actions by any particular model
    """
    check(model)
    ctype = ContentType.objects.get_for_model(model)
    return self.public(
        (Q(target_content_type=ctype) |
         Q(action_object_content_type=ctype) |
         Q(actor_content_type=ctype)),
        **kwargs
    )
Split: train
Source: https://github.com/justquick/django-activity-stream/blob/a1e06f2e6429cc5fc321e7801440dd7c5b9d5a35/actstream/managers.py#L51-L62
Score: 0.004975
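
The OR-ed filter above in isolation: three `Q` nodes combined into one WHERE clause, matching an action in which the content type plays any of the three roles (requires Django on the path; the primary-key value is illustrative):

from django.db.models import Q

ctype_pk = 1  # illustrative content-type primary key
ctype_filter = (Q(target_content_type=ctype_pk) |
                Q(action_object_content_type=ctype_pk) |
                Q(actor_content_type=ctype_pk))
print(ctype_filter)  # (OR: ('target_content_type', 1), ..., ('actor_content_type', 1))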

Repository: tanghaibao/jcvi
File: jcvi/projects/str.py
Function: expand_alleles
Language: python

def expand_alleles(p, tolerance=0):
    """
    Returns expanded allele set given the tolerance.
    """
    _p = set()
    for x in p:
        _p |= set(range(x - tolerance, x + tolerance + 1))
    return _p
Split: train
Source: https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/projects/str.py#L127-L134
Score: 0.004808
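
A quick worked example of the expansion above, assuming `expand_alleles` is in scope: each allele grows into a window of plus-or-minus `tolerance` values, and the windows are unioned.

print(expand_alleles({10, 14}, tolerance=0))  # {10, 14}
print(expand_alleles({10, 14}, tolerance=1))  # {9, 10, 11, 13, 14, 15}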

Repository: openvax/pyensembl
File: pyensembl/genome.py
Function: Genome.gene_by_id
Language: python

def gene_by_id(self, gene_id):
    """
    Construct a Gene object for the given gene ID.
    """
    if gene_id not in self._genes:
        field_names = [
            "seqname",
            "start",
            "end",
            "strand",
        ]
        optional_field_names = [
            "gene_name",
            "gene_biotype",
        ]
        # Do not look for gene_name and gene_biotype if they are
        # not in the database.
        field_names.extend([
            name for name in optional_field_names
            if self.db.column_exists("gene", name)
        ])
        result = self.db.query_one(
            field_names,
            filter_column="gene_id",
            filter_value=gene_id,
            feature="gene")
        if not result:
            raise ValueError("Gene not found: %s" % (gene_id,))

        gene_name, gene_biotype = None, None
        assert len(result) >= 4 and len(result) <= 6, \
            "Result is not the expected length: %d" % len(result)
        contig, start, end, strand = result[:4]
        if len(result) == 5:
            if "gene_name" in field_names:
                gene_name = result[4]
            else:
                gene_biotype = result[4]
        elif len(result) == 6:
            gene_name, gene_biotype = result[4:]

        self._genes[gene_id] = Gene(
            gene_id=gene_id,
            gene_name=gene_name,
            contig=contig,
            start=start,
            end=end,
            strand=strand,
            biotype=gene_biotype,
            genome=self)
    return self._genes[gene_id]
Split: train
Source: https://github.com/openvax/pyensembl/blob/4b995fb72e848206d6fbf11950cf30964cd9b3aa/pyensembl/genome.py#L660-L711
Score: 0.001124

Repository: inveniosoftware/invenio-previewer
File: invenio_previewer/extensions/zip.py
Function: make_tree
Language: python

def make_tree(file):
    """Create tree structure from ZIP archive."""
    max_files_count = current_app.config.get('PREVIEWER_ZIP_MAX_FILES', 1000)
    tree = {'type': 'folder', 'id': -1, 'children': {}}
    try:
        with file.open() as fp:
            zf = zipfile.ZipFile(fp)
            # Detect filenames encoding.
            sample = ' '.join(zf.namelist()[:max_files_count])
            if not isinstance(sample, binary_type):
                sample = sample.encode('utf-16be')
            encoding = chardet.detect(sample).get('encoding', 'utf-8')
            for i, info in enumerate(zf.infolist()):
                if i > max_files_count:
                    raise BufferError('Too many files inside the ZIP file.')
                comps = info.filename.split(os.sep)
                node = tree
                for c in comps:
                    if not isinstance(c, text_type):
                        c = c.decode(encoding)
                    if c not in node['children']:
                        if c == '':
                            node['type'] = 'folder'
                            continue
                        node['children'][c] = {
                            'name': c,
                            'type': 'item',
                            'id': 'item{0}'.format(i),
                            'children': {}
                        }
                    node = node['children'][c]
                node['size'] = info.file_size
    except BufferError:
        return tree, True, None
    except (zipfile.LargeZipFile):
        return tree, False, 'Zipfile is too large to be previewed.'
    except Exception as e:
        current_app.logger.warning(str(e), exc_info=True)
        return tree, False, 'Zipfile is not previewable.'
    return tree, False, None
Split: train
Source: https://github.com/inveniosoftware/invenio-previewer/blob/558fd22e0f29cc8cd7a6999abd4febcf6b248c49/invenio_previewer/extensions/zip.py#L26-L67
Score: 0.000556
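
The insertion loop above, reduced to a standalone sketch without the encoding detection and size limits (an empty trailing path component marks a directory, as in the original):

import os

def paths_to_tree(paths):
    tree = {'type': 'folder', 'children': {}}
    for i, path in enumerate(paths):
        node = tree
        for comp in path.split(os.sep):
            if comp == '':
                node['type'] = 'folder'  # trailing separator -> directory
                continue
            node = node['children'].setdefault(
                comp, {'name': comp, 'type': 'item',
                       'id': 'item{0}'.format(i), 'children': {}})
    return tree

# On POSIX: paths_to_tree(['a/b.txt', 'a/c/']) nests 'b.txt' and folder 'c' under 'a'.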

Repository: PMEAL/OpenPNM
File: openpnm/algorithms/MixedInvasionPercolation.py
Function: MixedInvasionPercolation._check_coop
Language: python

def _check_coop(self, pore, queue):
    r"""
    Method run in loop after every pore invasion. All connecting throats
    are now given access to the invading phase. Two throats with access to
    the invading phase can cooperatively fill any pores that they are both
    connected to, common pores.

    The invasion of theses throats connected to the common pore is handled
    elsewhere.
    """
    net = self.project.network
    t_inv = 'throat.invasion_sequence'
    p_inv = 'pore.invasion_sequence'
    for throat in net.find_neighbor_throats(pores=pore):
        # A pore has just been invaded, all it's throats now have
        # An interface residing inside them
        if self[t_inv][throat] == -1:
            # If the throat is not the invading throat that gave access
            # to this pore, get the pores that this throat connects with
            a = set(net['throat.conns'][throat])
            # Get a list of pre-calculated coop filling pressures for all
            # Throats this throat can coop fill with
            ts_Pc = self.tt_Pc.data[throat]
            # Network indices of throats that can act as filling pairs
            ts = self.tt_Pc.rows[throat]
            # If there are any potential coop filling throats
            if np.any(~np.isnan(ts_Pc)):
                ts_Pc = np.asarray(ts_Pc)
                ts = np.asarray(ts)
                ts = ts[~np.isnan(ts_Pc)]
                ts_Pc = ts_Pc[~np.isnan(ts_Pc)]
                # For each throat find the common pore and the uncommon
                # pores
                for i, t in enumerate(ts):
                    # Find common pore (cP) and uncommon pores (uPs)
                    b = set(net['throat.conns'][t])
                    cP = list(a.intersection(b))
                    uPs = list(a.symmetric_difference(b))
                    # If the common pore is not invaded but the others are
                    # The potential coop filling event can now happen
                    # Add the coop pressure to the queue
                    if ((np.all(self[p_inv][uPs] > -1)) and
                            (self[p_inv][cP] == -1)):
                        # Coop pore filling fills the common pore
                        # The throats that gave access are not invaded now
                        # However, isolated throats between invaded pores
                        # Are taken care of elsewhere...
                        hq.heappush(queue, [ts_Pc[i], list(cP), 'pore'])
def _check_coop(self, pore, queue): r""" Method run in loop after every pore invasion. All connecting throats are now given access to the invading phase. Two throats with access to the invading phase can cooperatively fill any pores that they are both connected to, common pores. The invasion of theses throats connected to the common pore is handled elsewhere. """ net = self.project.network t_inv = 'throat.invasion_sequence' p_inv = 'pore.invasion_sequence' for throat in net.find_neighbor_throats(pores=pore): # A pore has just been invaded, all it's throats now have # An interface residing inside them if self[t_inv][throat] == -1: # If the throat is not the invading throat that gave access # to this pore, get the pores that this throat connects with a = set(net['throat.conns'][throat]) # Get a list of pre-calculated coop filling pressures for all # Throats this throat can coop fill with ts_Pc = self.tt_Pc.data[throat] # Network indices of throats that can act as filling pairs ts = self.tt_Pc.rows[throat] # If there are any potential coop filling throats if np.any(~np.isnan(ts_Pc)): ts_Pc = np.asarray(ts_Pc) ts = np.asarray(ts) ts = ts[~np.isnan(ts_Pc)] ts_Pc = ts_Pc[~np.isnan(ts_Pc)] # For each throat find the common pore and the uncommon # pores for i, t in enumerate(ts): # Find common pore (cP) and uncommon pores (uPs) b = set(net['throat.conns'][t]) cP = list(a.intersection(b)) uPs = list(a.symmetric_difference(b)) # If the common pore is not invaded but the others are # The potential coop filling event can now happen # Add the coop pressure to the queue if ((np.all(self[p_inv][uPs] > -1)) and (self[p_inv][cP] == -1)): # Coop pore filling fills the common pore # The throats that gave access are not invaded now # However, isolated throats between invaded pores # Are taken care of elsewhere... hq.heappush(queue, [ts_Pc[i], list(cP), 'pore'])
[ "def", "_check_coop", "(", "self", ",", "pore", ",", "queue", ")", ":", "net", "=", "self", ".", "project", ".", "network", "t_inv", "=", "'throat.invasion_sequence'", "p_inv", "=", "'pore.invasion_sequence'", "for", "throat", "in", "net", ".", "find_neighbor_throats", "(", "pores", "=", "pore", ")", ":", "# A pore has just been invaded, all it's throats now have", "# An interface residing inside them", "if", "self", "[", "t_inv", "]", "[", "throat", "]", "==", "-", "1", ":", "# If the throat is not the invading throat that gave access", "# to this pore, get the pores that this throat connects with", "a", "=", "set", "(", "net", "[", "'throat.conns'", "]", "[", "throat", "]", ")", "# Get a list of pre-calculated coop filling pressures for all", "# Throats this throat can coop fill with", "ts_Pc", "=", "self", ".", "tt_Pc", ".", "data", "[", "throat", "]", "# Network indices of throats that can act as filling pairs", "ts", "=", "self", ".", "tt_Pc", ".", "rows", "[", "throat", "]", "# If there are any potential coop filling throats", "if", "np", ".", "any", "(", "~", "np", ".", "isnan", "(", "ts_Pc", ")", ")", ":", "ts_Pc", "=", "np", ".", "asarray", "(", "ts_Pc", ")", "ts", "=", "np", ".", "asarray", "(", "ts", ")", "ts", "=", "ts", "[", "~", "np", ".", "isnan", "(", "ts_Pc", ")", "]", "ts_Pc", "=", "ts_Pc", "[", "~", "np", ".", "isnan", "(", "ts_Pc", ")", "]", "# For each throat find the common pore and the uncommon", "# pores", "for", "i", ",", "t", "in", "enumerate", "(", "ts", ")", ":", "# Find common pore (cP) and uncommon pores (uPs)", "b", "=", "set", "(", "net", "[", "'throat.conns'", "]", "[", "t", "]", ")", "cP", "=", "list", "(", "a", ".", "intersection", "(", "b", ")", ")", "uPs", "=", "list", "(", "a", ".", "symmetric_difference", "(", "b", ")", ")", "# If the common pore is not invaded but the others are", "# The potential coop filling event can now happen", "# Add the coop pressure to the queue", "if", "(", "(", "np", ".", "all", "(", "self", "[", "p_inv", "]", "[", "uPs", "]", ">", "-", "1", ")", ")", "and", "(", "self", "[", "p_inv", "]", "[", "cP", "]", "==", "-", "1", ")", ")", ":", "# Coop pore filling fills the common pore", "# The throats that gave access are not invaded now", "# However, isolated throats between invaded pores", "# Are taken care of elsewhere...", "hq", ".", "heappush", "(", "queue", ",", "[", "ts_Pc", "[", "i", "]", ",", "list", "(", "cP", ")", ",", "'pore'", "]", ")" ]
r""" Method run in loop after every pore invasion. All connecting throats are now given access to the invading phase. Two throats with access to the invading phase can cooperatively fill any pores that they are both connected to, common pores. The invasion of theses throats connected to the common pore is handled elsewhere.
[ "r", "Method", "run", "in", "loop", "after", "every", "pore", "invasion", ".", "All", "connecting", "throats", "are", "now", "given", "access", "to", "the", "invading", "phase", ".", "Two", "throats", "with", "access", "to", "the", "invading", "phase", "can", "cooperatively", "fill", "any", "pores", "that", "they", "are", "both", "connected", "to", "common", "pores", ".", "The", "invasion", "of", "theses", "throats", "connected", "to", "the", "common", "pore", "is", "handled", "elsewhere", "." ]
train
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/algorithms/MixedInvasionPercolation.py#L1072-L1118
0.000751
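The record above relies on heapq ordering: candidate filling events are pushed as [pressure, element indices, element type] lists, so the event with the lowest capillary pressure is always popped first. A minimal standalone sketch of that queue discipline, with made-up pressures and indices rather than OpenPNM output:

import heapq as hq

queue = []
hq.heappush(queue, [2000.0, [3], 'pore'])    # coop-filling candidate
hq.heappush(queue, [1500.0, [7], 'pore'])    # lowest pressure, popped first
hq.heappush(queue, [1800.0, [2], 'throat'])

while queue:
    pressure, elements, kind = hq.heappop(queue)
    print(pressure, elements, kind)          # 1500.0 ..., then 1800.0 ..., then 2000.0 ...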
priestc/giotto
giotto/views/__init__.py
lazy_jinja_template
def lazy_jinja_template(template_name, name='data', mimetype='text/html'):
    """
    Jinja template renderer that does not render the template at all.
    Instead it returns the context and template object blended together.
    Make sure to add ``giotto.middleware.RenderLazytemplate`` to the output
    middleware stream of any program that uses this renderer.
    """
    def lazy_jinja_renderer(result, errors):
        template = get_jinja_template(template_name)
        context = {name: result or Mock(), 'errors': errors}
        data = ('jinja2', template, context)
        return {'body': data, 'mimetype': mimetype}
    return lazy_jinja_renderer
python
def lazy_jinja_template(template_name, name='data', mimetype='text/html'):
    """
    Jinja template renderer that does not render the template at all.
    Instead it returns the context and template object blended together.
    Make sure to add ``giotto.middleware.RenderLazytemplate`` to the output
    middleware stream of any program that uses this renderer.
    """
    def lazy_jinja_renderer(result, errors):
        template = get_jinja_template(template_name)
        context = {name: result or Mock(), 'errors': errors}
        data = ('jinja2', template, context)
        return {'body': data, 'mimetype': mimetype}
    return lazy_jinja_renderer
[ "def", "lazy_jinja_template", "(", "template_name", ",", "name", "=", "'data'", ",", "mimetype", "=", "'text/html'", ")", ":", "def", "lazy_jinja_renderer", "(", "result", ",", "errors", ")", ":", "template", "=", "get_jinja_template", "(", "template_name", ")", "context", "=", "{", "name", ":", "result", "or", "Mock", "(", ")", ",", "'errors'", ":", "errors", "}", "data", "=", "(", "'jinja2'", ",", "template", ",", "context", ")", "return", "{", "'body'", ":", "data", ",", "'mimetype'", ":", "mimetype", "}", "return", "lazy_jinja_renderer" ]
Jinja template renderer that does not render the template at all. Instead it returns the context and template object blended together. Make sure to add ``giotto.middleware.RenderLazytemplate`` to the output middleware stream of any program that uses this renderer.
[ "Jinja", "template", "renderer", "that", "does", "not", "render", "the", "template", "at", "all", ".", "Instead", "of", "returns", "the", "context", "and", "template", "object", "blended", "together", ".", "Make", "sure", "to", "add", "giotto", ".", "middleware", ".", "RenderLazytemplate", "to", "the", "output", "middleware", "stread", "of", "any", "program", "that", "uses", "this", "renderer", "." ]
train
https://github.com/priestc/giotto/blob/d4c26380caefa7745bb27135e315de830f7254d3/giotto/views/__init__.py#L249-L261
0.00152
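The renderer above defers template evaluation by returning a (template, context) pair instead of a rendered string. A self-contained sketch of the same idea using plain jinja2; the middleware step that Giotto would perform later is simulated by calling render() by hand, so this is an illustration, not Giotto's API:

from jinja2 import Template

def lazy_template(source, name='data'):
    def renderer(result, errors):
        body = ('jinja2', Template(source), {name: result, 'errors': errors})
        return {'body': body, 'mimetype': 'text/html'}
    return renderer

out = lazy_template('<p>{{ data }}</p>')('hello', None)
kind, template, context = out['body']
print(template.render(**context))   # '<p>hello</p>', rendered "later"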
Dallinger/Dallinger
dallinger/recruiters.py
MultiRecruiter.open_recruitment
def open_recruitment(self, n=1):
        """Return initial experiment URL list.
        """
        logger.info("Multi recruitment running for {} participants".format(n))
        recruitments = []
        messages = {}
        remaining = n
        for recruiter, count in self.recruiters(n):
            if not count:
                break
            if recruiter.nickname in messages:
                result = recruiter.recruit(count)
                recruitments.extend(result)
            else:
                result = recruiter.open_recruitment(count)
                recruitments.extend(result["items"])
                messages[recruiter.nickname] = result["message"]
            remaining -= count
            if remaining <= 0:
                break

        logger.info(
            (
                "Multi-recruited {} out of {} participants, "
                "using {} recruiters."
            ).format(n - remaining, n, len(messages))
        )

        return {"items": recruitments, "message": "\n".join(messages.values())}
python
def open_recruitment(self, n=1):
        """Return initial experiment URL list.
        """
        logger.info("Multi recruitment running for {} participants".format(n))
        recruitments = []
        messages = {}
        remaining = n
        for recruiter, count in self.recruiters(n):
            if not count:
                break
            if recruiter.nickname in messages:
                result = recruiter.recruit(count)
                recruitments.extend(result)
            else:
                result = recruiter.open_recruitment(count)
                recruitments.extend(result["items"])
                messages[recruiter.nickname] = result["message"]
            remaining -= count
            if remaining <= 0:
                break

        logger.info(
            (
                "Multi-recruited {} out of {} participants, "
                "using {} recruiters."
            ).format(n - remaining, n, len(messages))
        )

        return {"items": recruitments, "message": "\n".join(messages.values())}
[ "def", "open_recruitment", "(", "self", ",", "n", "=", "1", ")", ":", "logger", ".", "info", "(", "\"Multi recruitment running for {} participants\"", ".", "format", "(", "n", ")", ")", "recruitments", "=", "[", "]", "messages", "=", "{", "}", "remaining", "=", "n", "for", "recruiter", ",", "count", "in", "self", ".", "recruiters", "(", "n", ")", ":", "if", "not", "count", ":", "break", "if", "recruiter", ".", "nickname", "in", "messages", ":", "result", "=", "recruiter", ".", "recruit", "(", "count", ")", "recruitments", ".", "extend", "(", "result", ")", "else", ":", "result", "=", "recruiter", ".", "open_recruitment", "(", "count", ")", "recruitments", ".", "extend", "(", "result", "[", "\"items\"", "]", ")", "messages", "[", "recruiter", ".", "nickname", "]", "=", "result", "[", "\"message\"", "]", "remaining", "-=", "count", "if", "remaining", "<=", "0", ":", "break", "logger", ".", "info", "(", "(", "\"Multi-recruited {} out of {} participants, \"", "\"using {} recruiters.\"", ")", ".", "format", "(", "n", "-", "remaining", ",", "n", ",", "len", "(", "messages", ")", ")", ")", "return", "{", "\"items\"", ":", "recruitments", ",", "\"message\"", ":", "\"\\n\"", ".", "join", "(", "messages", ".", "values", "(", ")", ")", "}" ]
Return initial experiment URL list.
[ "Return", "initial", "experiment", "URL", "list", "." ]
train
https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/dallinger/recruiters.py#L885-L913
0.002921
pipermerriam/flex
flex/validation/schema.py
construct_schema_validators
def construct_schema_validators(schema, context):
    """
    Given a schema object, construct a dictionary of validators needed to
    validate a response matching the given schema.

    Special Cases:
        - $ref:
            These validators need to be lazily evaluated so that circular
            validation dependencies do not result in an infinitely deep
            validation chain.
        - properties:
            These validators are meant to apply to properties of the object
            being validated rather than the object itself.  In this case, we
            need to recurse back into this function to generate a dictionary
            of validators for the property.
    """
    validators = ValidationDict()

    if '$ref' in schema:
        validators.add_validator(
            '$ref', SchemaReferenceValidator(schema['$ref'], context),
        )
    if 'properties' in schema:
        for property_, property_schema in schema['properties'].items():
            property_validator = generate_object_validator(
                schema=property_schema,
                context=context,
            )
            validators.add_property_validator(property_, property_validator)
    if schema.get('additionalProperties') is False:
        validators.add_validator(
            'additionalProperties',
            generate_additional_properties_validator(context=context, **schema),
        )
    assert 'context' not in schema
    for key in schema:
        if key in validator_mapping:
            validators.add_validator(key, validator_mapping[key](context=context, **schema))
    return validators
python
def construct_schema_validators(schema, context):
    """
    Given a schema object, construct a dictionary of validators needed to
    validate a response matching the given schema.

    Special Cases:
        - $ref:
            These validators need to be lazily evaluated so that circular
            validation dependencies do not result in an infinitely deep
            validation chain.
        - properties:
            These validators are meant to apply to properties of the object
            being validated rather than the object itself.  In this case, we
            need to recurse back into this function to generate a dictionary
            of validators for the property.
    """
    validators = ValidationDict()

    if '$ref' in schema:
        validators.add_validator(
            '$ref', SchemaReferenceValidator(schema['$ref'], context),
        )
    if 'properties' in schema:
        for property_, property_schema in schema['properties'].items():
            property_validator = generate_object_validator(
                schema=property_schema,
                context=context,
            )
            validators.add_property_validator(property_, property_validator)
    if schema.get('additionalProperties') is False:
        validators.add_validator(
            'additionalProperties',
            generate_additional_properties_validator(context=context, **schema),
        )
    assert 'context' not in schema
    for key in schema:
        if key in validator_mapping:
            validators.add_validator(key, validator_mapping[key](context=context, **schema))
    return validators
[ "def", "construct_schema_validators", "(", "schema", ",", "context", ")", ":", "validators", "=", "ValidationDict", "(", ")", "if", "'$ref'", "in", "schema", ":", "validators", ".", "add_validator", "(", "'$ref'", ",", "SchemaReferenceValidator", "(", "schema", "[", "'$ref'", "]", ",", "context", ")", ",", ")", "if", "'properties'", "in", "schema", ":", "for", "property_", ",", "property_schema", "in", "schema", "[", "'properties'", "]", ".", "items", "(", ")", ":", "property_validator", "=", "generate_object_validator", "(", "schema", "=", "property_schema", ",", "context", "=", "context", ",", ")", "validators", ".", "add_property_validator", "(", "property_", ",", "property_validator", ")", "if", "schema", ".", "get", "(", "'additionalProperties'", ")", "is", "False", ":", "validators", ".", "add_validator", "(", "'additionalProperties'", ",", "generate_additional_properties_validator", "(", "context", "=", "context", ",", "*", "*", "schema", ")", ",", ")", "assert", "'context'", "not", "in", "schema", "for", "key", "in", "schema", ":", "if", "key", "in", "validator_mapping", ":", "validators", ".", "add_validator", "(", "key", ",", "validator_mapping", "[", "key", "]", "(", "context", "=", "context", ",", "*", "*", "schema", ")", ")", "return", "validators" ]
Given a schema object, construct a dictionary of validators needed to
validate a response matching the given schema.

Special Cases:
    - $ref:
        These validators need to be lazily evaluated so that circular
        validation dependencies do not result in an infinitely deep
        validation chain.
    - properties:
        These validators are meant to apply to properties of the object
        being validated rather than the object itself.  In this case, we
        need to recurse back into this function to generate a dictionary
        of validators for the property.
[ "Given", "a", "schema", "object", "construct", "a", "dictionary", "of", "validators", "needed", "to", "validate", "a", "response", "matching", "the", "given", "schema", "." ]
train
https://github.com/pipermerriam/flex/blob/233f8149fb851a6255753bcec948cb6fefb2723b/flex/validation/schema.py#L199-L236
0.001852
acutesoftware/AIKIF
aikif/toolbox/cls_grid.py
Grid.eight_neighbors
def eight_neighbors(self, row, col):
        """
        Returns horiz/vert neighbors of cell (row, col)
        as well as diagonal neighbors
        """
        ans = []
        if row > 0:
            ans.append((row - 1, col))
        if row < self.grid_height - 1:
            ans.append((row + 1, col))
        if col > 0:
            ans.append((row, col - 1))
        if col < self.grid_width - 1:
            ans.append((row, col + 1))
        if (row > 0) and (col > 0):
            ans.append((row - 1, col - 1))
        if (row > 0) and (col < self.grid_width - 1):
            ans.append((row - 1, col + 1))
        if (row < self.grid_height - 1) and (col > 0):
            ans.append((row + 1, col - 1))
        if (row < self.grid_height - 1) and (col < self.grid_width - 1):
            ans.append((row + 1, col + 1))
        return ans
python
def eight_neighbors(self, row, col):
        """
        Returns horiz/vert neighbors of cell (row, col)
        as well as diagonal neighbors
        """
        ans = []
        if row > 0:
            ans.append((row - 1, col))
        if row < self.grid_height - 1:
            ans.append((row + 1, col))
        if col > 0:
            ans.append((row, col - 1))
        if col < self.grid_width - 1:
            ans.append((row, col + 1))
        if (row > 0) and (col > 0):
            ans.append((row - 1, col - 1))
        if (row > 0) and (col < self.grid_width - 1):
            ans.append((row - 1, col + 1))
        if (row < self.grid_height - 1) and (col > 0):
            ans.append((row + 1, col - 1))
        if (row < self.grid_height - 1) and (col < self.grid_width - 1):
            ans.append((row + 1, col + 1))
        return ans
[ "def", "eight_neighbors", "(", "self", ",", "row", ",", "col", ")", ":", "ans", "=", "[", "]", "if", "row", ">", "0", ":", "ans", ".", "append", "(", "(", "row", "-", "1", ",", "col", ")", ")", "if", "row", "<", "self", ".", "grid_height", "-", "1", ":", "ans", ".", "append", "(", "(", "row", "+", "1", ",", "col", ")", ")", "if", "col", ">", "0", ":", "ans", ".", "append", "(", "(", "row", ",", "col", "-", "1", ")", ")", "if", "col", "<", "self", ".", "grid_width", "-", "1", ":", "ans", ".", "append", "(", "(", "row", ",", "col", "+", "1", ")", ")", "if", "(", "row", ">", "0", ")", "and", "(", "col", ">", "0", ")", ":", "ans", ".", "append", "(", "(", "row", "-", "1", ",", "col", "-", "1", ")", ")", "if", "(", "row", ">", "0", ")", "and", "(", "col", "<", "self", ".", "grid_width", "-", "1", ")", ":", "ans", ".", "append", "(", "(", "row", "-", "1", ",", "col", "+", "1", ")", ")", "if", "(", "row", "<", "self", ".", "grid_height", "-", "1", ")", "and", "(", "col", ">", "0", ")", ":", "ans", ".", "append", "(", "(", "row", "+", "1", ",", "col", "-", "1", ")", ")", "if", "(", "row", "<", "self", ".", "grid_height", "-", "1", ")", "and", "(", "col", "<", "self", ".", "grid_width", "-", "1", ")", ":", "ans", ".", "append", "(", "(", "row", "+", "1", ",", "col", "+", "1", ")", ")", "return", "ans" ]
Returns horiz/vert neighbors of cell (row, col) as well as diagonal neighbors
[ "Returns", "horiz", "/", "vert", "neighbors", "of", "cell", "(", "row", "col", ")", "as", "well", "as", "diagonal", "neighbors" ]
train
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/cls_grid.py#L258-L280
0.002345
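Because the neighbor tests above are pure arithmetic, the same logic can be checked standalone; this delta-based rewrite (same append order, made self-free only for illustration) makes the boundary clipping easy to verify by hand:

def eight_neighbors(row, col, height, width):
    deltas = [(-1, 0), (1, 0), (0, -1), (0, 1),
              (-1, -1), (-1, 1), (1, -1), (1, 1)]
    return [(row + dr, col + dc) for dr, dc in deltas
            if 0 <= row + dr < height and 0 <= col + dc < width]

print(eight_neighbors(0, 0, 3, 3))        # corner -> [(1, 0), (0, 1), (1, 1)]
print(len(eight_neighbors(1, 1, 3, 3)))   # interior cell -> 8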
photo/openphoto-python
trovebox/api/api_action.py
ApiAction.delete
def delete(self, action, **kwds):
        """
        Endpoint: /action/<id>/delete.json

        Deletes an action.
        Returns True if successful.
        Raises a TroveboxError if not.
        """
        return self._client.post("/action/%s/delete.json" %
                                 self._extract_id(action),
                                 **kwds)["result"]
python
def delete(self, action, **kwds):
        """
        Endpoint: /action/<id>/delete.json

        Deletes an action.
        Returns True if successful.
        Raises a TroveboxError if not.
        """
        return self._client.post("/action/%s/delete.json" %
                                 self._extract_id(action),
                                 **kwds)["result"]
[ "def", "delete", "(", "self", ",", "action", ",", "*", "*", "kwds", ")", ":", "return", "self", ".", "_client", ".", "post", "(", "\"/action/%s/delete.json\"", "%", "self", ".", "_extract_id", "(", "action", ")", ",", "*", "*", "kwds", ")", "[", "\"result\"", "]" ]
Endpoint: /action/<id>/delete.json

Deletes an action.
Returns True if successful.
Raises a TroveboxError if not.
[ "Endpoint", ":", "/", "action", "/", "<id", ">", "/", "delete", ".", "json" ]
train
https://github.com/photo/openphoto-python/blob/209a1da27c8d8c88dbcf4ea6c6f57031ea1bc44b/trovebox/api/api_action.py#L33-L43
0.005362
Spirent/py-stcrestclient
stcrestclient/stchttp.py
StcHttp.download
def download(self, file_name, save_as=None):
        """Download the specified file from the server.

        Arguments:
        file_name -- Name of file resource to save.
        save_as   -- Optional path name to write file to. If not specified,
                     then file named by the last part of the resource path
                     is downloaded to current directory.

        Return: (save_path, bytes)
        save_path -- Path where downloaded file was saved.
        bytes     -- Bytes downloaded.

        """
        self._check_session()
        try:
            if save_as:
                save_as = os.path.normpath(save_as)
                save_dir = os.path.dirname(save_as)
                if save_dir:
                    if not os.path.exists(save_dir):
                        os.makedirs(save_dir)
                    elif not os.path.isdir(save_dir):
                        raise RuntimeError(save_dir + " is not a directory")
            status, save_path, bytes = self._rest.download_file(
                'files', file_name, save_as, 'application/octet-stream')
        except resthttp.RestHttpError as e:
            raise RuntimeError('failed to download "%s": %s' % (file_name, e))
        return save_path, bytes
python
def download(self, file_name, save_as=None):
        """Download the specified file from the server.

        Arguments:
        file_name -- Name of file resource to save.
        save_as   -- Optional path name to write file to. If not specified,
                     then file named by the last part of the resource path
                     is downloaded to current directory.

        Return: (save_path, bytes)
        save_path -- Path where downloaded file was saved.
        bytes     -- Bytes downloaded.

        """
        self._check_session()
        try:
            if save_as:
                save_as = os.path.normpath(save_as)
                save_dir = os.path.dirname(save_as)
                if save_dir:
                    if not os.path.exists(save_dir):
                        os.makedirs(save_dir)
                    elif not os.path.isdir(save_dir):
                        raise RuntimeError(save_dir + " is not a directory")
            status, save_path, bytes = self._rest.download_file(
                'files', file_name, save_as, 'application/octet-stream')
        except resthttp.RestHttpError as e:
            raise RuntimeError('failed to download "%s": %s' % (file_name, e))
        return save_path, bytes
[ "def", "download", "(", "self", ",", "file_name", ",", "save_as", "=", "None", ")", ":", "self", ".", "_check_session", "(", ")", "try", ":", "if", "save_as", ":", "save_as", "=", "os", ".", "path", ".", "normpath", "(", "save_as", ")", "save_dir", "=", "os", ".", "path", ".", "dirname", "(", "save_as", ")", "if", "save_dir", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "save_dir", ")", ":", "os", ".", "makedirs", "(", "save_dir", ")", "elif", "not", "os", ".", "path", ".", "isdir", "(", "save_dir", ")", ":", "raise", "RuntimeError", "(", "save_dir", "+", "\" is not a directory\"", ")", "status", ",", "save_path", ",", "bytes", "=", "self", ".", "_rest", ".", "download_file", "(", "'files'", ",", "file_name", ",", "save_as", ",", "'application/octet-stream'", ")", "except", "resthttp", ".", "RestHttpError", "as", "e", ":", "raise", "RuntimeError", "(", "'failed to download \"%s\": %s'", "%", "(", "file_name", ",", "e", ")", ")", "return", "save_path", ",", "bytes" ]
Download the specified file from the server.

Arguments:
file_name -- Name of file resource to save.
save_as   -- Optional path name to write file to. If not specified,
             then file named by the last part of the resource path is
             downloaded to current directory.

Return: (save_path, bytes)
save_path -- Path where downloaded file was saved.
bytes     -- Bytes downloaded.
[ "Download", "the", "specified", "file", "from", "the", "server", "." ]
train
https://github.com/Spirent/py-stcrestclient/blob/80ee82bddf2fb2808f3da8ff2c80b7d588e165e8/stcrestclient/stchttp.py#L599-L627
0.001599
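The save_as handling above boils down to normalizing the path and creating its parent directory on demand; the same steps in isolation, using a temp directory so the sketch has no lasting side effects:

import os
import tempfile

save_as = os.path.normpath(os.path.join(tempfile.mkdtemp(), 'reports', 'out.bin'))
save_dir = os.path.dirname(save_as)
if save_dir and not os.path.isdir(save_dir):
    os.makedirs(save_dir)        # newer code could pass exist_ok=True instead
print(os.path.isdir(save_dir))   # True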
CenturyLinkCloud/clc-python-sdk
src/clc/APIv1/server.py
Server.GetAllServers
def GetAllServers(alias=None, name_groups=False):
        """Gets a deep list of all Servers in all groups and datacenters.

        :param alias: short code for a particular account.  If None, will
                      use account's default alias
        """
        if alias is None:
            alias = clc.v1.Account.GetAlias()
        servers = []
        clc.v1.Account.GetLocations()
        for location in clc.LOCATIONS:
            try:
                r = clc.v1.API.Call('post', 'Server/GetAllServers',
                                    {'AccountAlias': alias, 'Location': location},
                                    hide_errors=[5, ])
                if name_groups:
                    r['Servers'] = clc.v1.Group.NameGroups(r['Servers'],
                                                           'HardwareGroupUUID')
                if int(r['StatusCode']) == 0:
                    servers += r['Servers']
            except:
                pass
        return(servers)
python
def GetAllServers(alias=None, name_groups=False):
        """Gets a deep list of all Servers in all groups and datacenters.

        :param alias: short code for a particular account.  If None, will
                      use account's default alias
        """
        if alias is None:
            alias = clc.v1.Account.GetAlias()
        servers = []
        clc.v1.Account.GetLocations()
        for location in clc.LOCATIONS:
            try:
                r = clc.v1.API.Call('post', 'Server/GetAllServers',
                                    {'AccountAlias': alias, 'Location': location},
                                    hide_errors=[5, ])
                if name_groups:
                    r['Servers'] = clc.v1.Group.NameGroups(r['Servers'],
                                                           'HardwareGroupUUID')
                if int(r['StatusCode']) == 0:
                    servers += r['Servers']
            except:
                pass
        return(servers)
[ "def", "GetAllServers", "(", "alias", "=", "None", ",", "name_groups", "=", "False", ")", ":", "if", "alias", "is", "None", ":", "alias", "=", "clc", ".", "v1", ".", "Account", ".", "GetAlias", "(", ")", "servers", "=", "[", "]", "clc", ".", "v1", ".", "Account", ".", "GetLocations", "(", ")", "for", "location", "in", "clc", ".", "LOCATIONS", ":", "try", ":", "r", "=", "clc", ".", "v1", ".", "API", ".", "Call", "(", "'post'", ",", "'Server/GetAllServers'", ",", "{", "'AccountAlias'", ":", "alias", ",", "'Location'", ":", "location", "}", ",", "hide_errors", "=", "[", "5", ",", "]", ")", "if", "name_groups", ":", "r", "[", "'Servers'", "]", "=", "clc", ".", "v1", ".", "Group", ".", "NameGroups", "(", "r", "[", "'Servers'", "]", ",", "'HardwareGroupUUID'", ")", "if", "int", "(", "r", "[", "'StatusCode'", "]", ")", "==", "0", ":", "servers", "+=", "r", "[", "'Servers'", "]", "except", ":", "pass", "return", "(", "servers", ")" ]
Gets a deep list of all Servers in all groups and datacenters.

:param alias: short code for a particular account.  If None, will use
              account's default alias
[ "Gets", "a", "deep", "list", "of", "all", "Servers", "in", "all", "groups", "and", "datacenters", "." ]
train
https://github.com/CenturyLinkCloud/clc-python-sdk/blob/f4dba40c627cb08dd4b7d0d277e8d67578010b05/src/clc/APIv1/server.py#L61-L76
0.04451
rosenbrockc/fortpy
fortpy/scripts/analyze.py
FortpyShell.do_rmpostfix
def do_rmpostfix(self, arg):
        """Removes a postfix function from a variable. See 'postfix'."""
        altered = False
        if arg in self.curargs["functions"]:
            del self.curargs["functions"][arg]
            altered = True
        elif arg == "*":
            for varname in list(self.curargs["functions"].keys()):
                del self.curargs["functions"][varname]
                altered = True
        if altered:
            self.do_postfix("list")
python
def do_rmpostfix(self, arg):
        """Removes a postfix function from a variable. See 'postfix'."""
        altered = False
        if arg in self.curargs["functions"]:
            del self.curargs["functions"][arg]
            altered = True
        elif arg == "*":
            for varname in list(self.curargs["functions"].keys()):
                del self.curargs["functions"][varname]
                altered = True
        if altered:
            self.do_postfix("list")
[ "def", "do_rmpostfix", "(", "self", ",", "arg", ")", ":", "altered", "=", "False", "if", "arg", "in", "self", ".", "curargs", "[", "\"functions\"", "]", ":", "del", "self", ".", "curargs", "[", "\"functions\"", "]", "[", "arg", "]", "altered", "=", "True", "elif", "arg", "==", "\"*\"", ":", "for", "varname", "in", "list", "(", "self", ".", "curargs", "[", "\"functions\"", "]", ".", "keys", "(", ")", ")", ":", "del", "self", ".", "curargs", "[", "\"functions\"", "]", "[", "varname", "]", "altered", "=", "True", "if", "altered", ":", "self", ".", "do_postfix", "(", "\"list\"", ")" ]
Removes a postfix function from a variable. See 'postfix'.
[ "Removes", "a", "postfix", "function", "from", "a", "variable", ".", "See", "postfix", "." ]
train
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/scripts/analyze.py#L765-L776
0.004219
cydrobolt/pifx
pifx/core.py
PIFX.cycle_lights
def cycle_lights(self, states, defaults, direction='forward', selector='all'):
        """Cycle through list of effects.

        Provide array states as a list of dictionaries with set_state
        arguments. See http://api.developer.lifx.com/docs/cycle

        selector: String
            The selector to limit which lights will run the effect.
            default: all

        states: required List of Dicts
            List of arguments, named as per set_state. Must have 2 to 5
            entries.

        defaults: Object
            Default values to use when not specified in each states[] object.
            Argument names as per set_state.

        direction: String
            Direction in which to cycle through the list. Can be forward or
            backward
            default: forward
        """
        argument_tuples = [
            ("states", states),
            ("defaults", defaults),
            ("direction", direction)
        ]

        return self.client.perform_request(
            method='post', endpoint='lights/{}/cycle',
            endpoint_args=[selector], argument_tuples=argument_tuples,
            json_body=True)
python
def cycle_lights(self, states, defaults, direction='forward', selector='all'):
        """Cycle through list of effects.

        Provide array states as a list of dictionaries with set_state
        arguments. See http://api.developer.lifx.com/docs/cycle

        selector: String
            The selector to limit which lights will run the effect.
            default: all

        states: required List of Dicts
            List of arguments, named as per set_state. Must have 2 to 5
            entries.

        defaults: Object
            Default values to use when not specified in each states[] object.
            Argument names as per set_state.

        direction: String
            Direction in which to cycle through the list. Can be forward or
            backward
            default: forward
        """
        argument_tuples = [
            ("states", states),
            ("defaults", defaults),
            ("direction", direction)
        ]

        return self.client.perform_request(
            method='post', endpoint='lights/{}/cycle',
            endpoint_args=[selector], argument_tuples=argument_tuples,
            json_body=True)
[ "def", "cycle_lights", "(", "self", ",", "states", ",", "defaults", ",", "direction", "=", "'forward'", ",", "selector", "=", "'all'", ")", ":", "argument_tuples", "=", "[", "(", "\"states\"", ",", "states", ")", ",", "(", "\"defaults\"", ",", "defaults", ")", ",", "(", "\"direction\"", ",", "direction", ")", "]", "return", "self", ".", "client", ".", "perform_request", "(", "method", "=", "'post'", ",", "endpoint", "=", "'lights/{}/cycle'", ",", "endpoint_args", "=", "[", "selector", "]", ",", "argument_tuples", "=", "argument_tuples", ",", "json_body", "=", "True", ")" ]
Cycle through list of effects.

Provide array states as a list of dictionaries with set_state arguments.
See http://api.developer.lifx.com/docs/cycle

selector: String
    The selector to limit which lights will run the effect.
    default: all

states: required List of Dicts
    List of arguments, named as per set_state. Must have 2 to 5 entries.

defaults: Object
    Default values to use when not specified in each states[] object.
    Argument names as per set_state.

direction: String
    Direction in which to cycle through the list. Can be forward or
    backward
    default: forward
[ "Cycle", "through", "list", "of", "effects", "." ]
train
https://github.com/cydrobolt/pifx/blob/c9de9c2695c3e6e72de4aa0de47b78fc13c457c3/pifx/core.py#L235-L266
0.006162
pandas-dev/pandas
pandas/core/panel.py
Panel.count
def count(self, axis='major'):
        """
        Return number of observations over requested axis.

        Parameters
        ----------
        axis : {'items', 'major', 'minor'} or {0, 1, 2}

        Returns
        -------
        count : DataFrame
        """
        i = self._get_axis_number(axis)

        values = self.values
        mask = np.isfinite(values)
        result = mask.sum(axis=i, dtype='int64')

        return self._wrap_result(result, axis)
python
def count(self, axis='major'):
        """
        Return number of observations over requested axis.

        Parameters
        ----------
        axis : {'items', 'major', 'minor'} or {0, 1, 2}

        Returns
        -------
        count : DataFrame
        """
        i = self._get_axis_number(axis)

        values = self.values
        mask = np.isfinite(values)
        result = mask.sum(axis=i, dtype='int64')

        return self._wrap_result(result, axis)
[ "def", "count", "(", "self", ",", "axis", "=", "'major'", ")", ":", "i", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "values", "=", "self", ".", "values", "mask", "=", "np", ".", "isfinite", "(", "values", ")", "result", "=", "mask", ".", "sum", "(", "axis", "=", "i", ",", "dtype", "=", "'int64'", ")", "return", "self", ".", "_wrap_result", "(", "result", ",", "axis", ")" ]
Return number of observations over requested axis.

Parameters
----------
axis : {'items', 'major', 'minor'} or {0, 1, 2}

Returns
-------
count : DataFrame
[ "Return", "number", "of", "observations", "over", "requested", "axis", "." ]
train
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/panel.py#L1288-L1306
0.004264
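The counting step above reduces to a finite-value mask summed along an axis; the same operation on a small ndarray (Panel itself was later deprecated, but the mask logic is plain NumPy):

import numpy as np

values = np.array([[1.0, np.nan, 3.0],
                   [np.inf, 5.0, 6.0]])
mask = np.isfinite(values)
print(mask.sum(axis=0, dtype='int64'))   # [1 1 2]
print(mask.sum(axis=1, dtype='int64'))   # [2 2]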
bhmm/bhmm
bhmm/output_models/gaussian.py
GaussianOutputModel.generate_observation_from_state
def generate_observation_from_state(self, state_index):
        """
        Generate a single synthetic observation from a given state.

        Parameters
        ----------
        state_index : int
            Index of the state from which observations are to be generated.

        Returns
        -------
        observation : float
            A single observation from the given state.

        Examples
        --------

        Generate an observation model.

        >>> output_model = GaussianOutputModel(nstates=2, means=[0, 1], sigmas=[1, 2])

        Generate sample from a state.

        >>> observation = output_model.generate_observation_from_state(0)

        """
        observation = self.sigmas[state_index] * np.random.randn() + self.means[state_index]
        return observation
python
def generate_observation_from_state(self, state_index):
        """
        Generate a single synthetic observation from a given state.

        Parameters
        ----------
        state_index : int
            Index of the state from which observations are to be generated.

        Returns
        -------
        observation : float
            A single observation from the given state.

        Examples
        --------

        Generate an observation model.

        >>> output_model = GaussianOutputModel(nstates=2, means=[0, 1], sigmas=[1, 2])

        Generate sample from a state.

        >>> observation = output_model.generate_observation_from_state(0)

        """
        observation = self.sigmas[state_index] * np.random.randn() + self.means[state_index]
        return observation
[ "def", "generate_observation_from_state", "(", "self", ",", "state_index", ")", ":", "observation", "=", "self", ".", "sigmas", "[", "state_index", "]", "*", "np", ".", "random", ".", "randn", "(", ")", "+", "self", ".", "means", "[", "state_index", "]", "return", "observation" ]
Generate a single synthetic observation from a given state.

Parameters
----------
state_index : int
    Index of the state from which observations are to be generated.

Returns
-------
observation : float
    A single observation from the given state.

Examples
--------

Generate an observation model.

>>> output_model = GaussianOutputModel(nstates=2, means=[0, 1], sigmas=[1, 2])

Generate sample from a state.

>>> observation = output_model.generate_observation_from_state(0)
[ "Generate", "a", "single", "synthetic", "observation", "data", "from", "a", "given", "state", "." ]
train
https://github.com/bhmm/bhmm/blob/9804d18c2ddb684fb4d90b544cc209617a89ca9a/bhmm/output_models/gaussian.py#L322-L349
0.004957
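The sampling line above is an affine transform of a standard normal draw, so drawing many samples recovers the state's mean and sigma; a quick numerical check:

import numpy as np

means, sigmas = [0.0, 1.0], [1.0, 2.0]
state = 1
samples = sigmas[state] * np.random.randn(100000) + means[state]
print(round(samples.mean(), 1), round(samples.std(), 1))   # ~1.0 ~2.0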
pytroll/satpy
satpy/readers/ahi_hsd.py
AHIHSDFileHandler.scheduled_time
def scheduled_time(self):
        """Time this band was scheduled to be recorded."""
        timeline = "{:04d}".format(self.basic_info['observation_timeline'][0])
        return self.start_time.replace(hour=int(timeline[:2]),
                                       minute=int(timeline[2:4]),
                                       second=0, microsecond=0)
python
def scheduled_time(self):
        """Time this band was scheduled to be recorded."""
        timeline = "{:04d}".format(self.basic_info['observation_timeline'][0])
        return self.start_time.replace(hour=int(timeline[:2]),
                                       minute=int(timeline[2:4]),
                                       second=0, microsecond=0)
[ "def", "scheduled_time", "(", "self", ")", ":", "timeline", "=", "\"{:04d}\"", ".", "format", "(", "self", ".", "basic_info", "[", "'observation_timeline'", "]", "[", "0", "]", ")", "return", "self", ".", "start_time", ".", "replace", "(", "hour", "=", "int", "(", "timeline", "[", ":", "2", "]", ")", ",", "minute", "=", "int", "(", "timeline", "[", "2", ":", "4", "]", ")", ",", "second", "=", "0", ",", "microsecond", "=", "0", ")" ]
Time this band was scheduled to be recorded.
[ "Time", "this", "band", "was", "scheduled", "to", "be", "recorded", "." ]
train
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/readers/ahi_hsd.py#L310-L313
0.010791
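The timeline field above is an HHMM integer that must be zero-padded before slicing; worked through with a made-up value of 350 (i.e. 03:50):

from datetime import datetime

start_time = datetime(2018, 10, 22, 3, 52, 17)
timeline = "{:04d}".format(350)
scheduled = start_time.replace(hour=int(timeline[:2]), minute=int(timeline[2:4]),
                               second=0, microsecond=0)
print(scheduled)   # 2018-10-22 03:50:00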
invoice-x/invoice2data
src/invoice2data/output/to_json.py
write_to_file
def write_to_file(data, path):
    """Export extracted fields to json

    Appends .json to path if missing and generates json file in specified
    directory, if not then in root

    Parameters
    ----------
    data : dict
        Dictionary of extracted fields
    path : str
        directory to save generated json file

    Notes
    -----
    Give the output file name in the path parameter.

    Examples
    --------
    >>> from invoice2data.output import to_json
    >>> to_json.write_to_file(data, "/exported_json/invoice.json")
    >>> to_json.write_to_file(data, "invoice.json")

    """
    if path.endswith('.json'):
        filename = path
    else:
        filename = path + '.json'

    with codecs.open(filename, "w", encoding='utf-8') as json_file:
        for line in data:
            line['date'] = line['date'].strftime('%d/%m/%Y')
        json.dump(
            data, json_file, indent=4, sort_keys=True, default=myconverter,
            ensure_ascii=False
        )
python
def write_to_file(data, path):
    """Export extracted fields to json

    Appends .json to path if missing and generates json file in specified
    directory, if not then in root

    Parameters
    ----------
    data : dict
        Dictionary of extracted fields
    path : str
        directory to save generated json file

    Notes
    -----
    Give the output file name in the path parameter.

    Examples
    --------
    >>> from invoice2data.output import to_json
    >>> to_json.write_to_file(data, "/exported_json/invoice.json")
    >>> to_json.write_to_file(data, "invoice.json")

    """
    if path.endswith('.json'):
        filename = path
    else:
        filename = path + '.json'

    with codecs.open(filename, "w", encoding='utf-8') as json_file:
        for line in data:
            line['date'] = line['date'].strftime('%d/%m/%Y')
        json.dump(
            data, json_file, indent=4, sort_keys=True, default=myconverter,
            ensure_ascii=False
        )
[ "def", "write_to_file", "(", "data", ",", "path", ")", ":", "if", "path", ".", "endswith", "(", "'.json'", ")", ":", "filename", "=", "path", "else", ":", "filename", "=", "path", "+", "'.json'", "with", "codecs", ".", "open", "(", "filename", ",", "\"w\"", ",", "encoding", "=", "'utf-8'", ")", "as", "json_file", ":", "for", "line", "in", "data", ":", "line", "[", "'date'", "]", "=", "line", "[", "'date'", "]", ".", "strftime", "(", "'%d/%m/%Y'", ")", "print", "(", "type", "(", "json", ")", ")", "print", "(", "json", ")", "json", ".", "dump", "(", "data", ",", "json_file", ",", "indent", "=", "4", ",", "sort_keys", "=", "True", ",", "default", "=", "myconverter", ",", "ensure_ascii", "=", "False", ")" ]
Export extracted fields to json

Appends .json to path if missing and generates json file in specified
directory, if not then in root

Parameters
----------
data : dict
    Dictionary of extracted fields
path : str
    directory to save generated json file

Notes
-----
Give the output file name in the path parameter.

Examples
--------
>>> from invoice2data.output import to_json
>>> to_json.write_to_file(data, "/exported_json/invoice.json")
>>> to_json.write_to_file(data, "invoice.json")
[ "Export", "extracted", "fields", "to", "json" ]
train
https://github.com/invoice-x/invoice2data/blob/d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20/src/invoice2data/output/to_json.py#L12-L47
0.002896
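json.dump(..., default=...) is what lets non-JSON types such as dates through; the myconverter referenced above is defined elsewhere in invoice2data, so this stand-in is only a plausible sketch of it:

import datetime
import json

def myconverter(obj):
    if isinstance(obj, (datetime.date, datetime.datetime)):
        return obj.isoformat()
    raise TypeError("not serializable: %r" % (obj,))

print(json.dumps({'issued': datetime.date(2019, 1, 31)}, default=myconverter))
# {"issued": "2019-01-31"}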
manns/pyspread
pyspread/src/gui/_chart_dialog.py
AllSeriesPanel.OnSeriesChanged
def OnSeriesChanged(self, event):
        """FlatNotebook change event handler"""

        selection = event.GetSelection()

        if not self.updating and \
           selection == self.series_notebook.GetPageCount() - 1:
            # Add new series
            new_panel = SeriesPanel(self, {"type": "plot"})
            self.series_notebook.InsertPage(selection, new_panel, _("Series"))

        event.Skip()
python
def OnSeriesChanged(self, event):
        """FlatNotebook change event handler"""

        selection = event.GetSelection()

        if not self.updating and \
           selection == self.series_notebook.GetPageCount() - 1:
            # Add new series
            new_panel = SeriesPanel(self, {"type": "plot"})
            self.series_notebook.InsertPage(selection, new_panel, _("Series"))

        event.Skip()
[ "def", "OnSeriesChanged", "(", "self", ",", "event", ")", ":", "selection", "=", "event", ".", "GetSelection", "(", ")", "if", "not", "self", ".", "updating", "and", "selection", "==", "self", ".", "series_notebook", ".", "GetPageCount", "(", ")", "-", "1", ":", "# Add new series", "new_panel", "=", "SeriesPanel", "(", "self", ",", "{", "\"type\"", ":", "\"plot\"", "}", ")", "self", ".", "series_notebook", ".", "InsertPage", "(", "selection", ",", "new_panel", ",", "_", "(", "\"Series\"", ")", ")", "event", ".", "Skip", "(", ")" ]
FlatNotebook change event handler
[ "FlatNotebook", "change", "event", "handler" ]
train
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_chart_dialog.py#L1528-L1539
0.004831
spyder-ide/spyder
spyder/plugins/explorer/widgets.py
DirView.set_name_filters
def set_name_filters(self, name_filters):
        """Set name filters"""
        self.name_filters = name_filters
        self.fsmodel.setNameFilters(name_filters)
python
def set_name_filters(self, name_filters):
        """Set name filters"""
        self.name_filters = name_filters
        self.fsmodel.setNameFilters(name_filters)
[ "def", "set_name_filters", "(", "self", ",", "name_filters", ")", ":", "self", ".", "name_filters", "=", "name_filters", "self", ".", "fsmodel", ".", "setNameFilters", "(", "name_filters", ")" ]
Set name filters
[ "Set", "name", "filters" ]
train
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/explorer/widgets.py#L197-L200
0.012048
noxdafox/vminspect
vminspect/timeline.py
FSTimeline._visit_filesystem
def _visit_filesystem(self):
        """Walks through the filesystem content."""
        self.logger.debug("Parsing File System content.")

        root_partition = self._filesystem.inspect_get_roots()[0]

        yield from self._root_dirent()

        for entry in self._filesystem.filesystem_walk(root_partition):
            yield Dirent(
                entry['tsk_inode'],
                self._filesystem.path('/' + entry['tsk_name']),
                entry['tsk_size'],
                entry['tsk_type'],
                True if entry['tsk_flags'] & TSK_ALLOC else False,
                timestamp(entry['tsk_atime_sec'], entry['tsk_atime_nsec']),
                timestamp(entry['tsk_mtime_sec'], entry['tsk_mtime_nsec']),
                timestamp(entry['tsk_ctime_sec'], entry['tsk_ctime_nsec']),
                timestamp(entry['tsk_crtime_sec'], entry['tsk_crtime_nsec']))
python
def _visit_filesystem(self):
        """Walks through the filesystem content."""
        self.logger.debug("Parsing File System content.")

        root_partition = self._filesystem.inspect_get_roots()[0]

        yield from self._root_dirent()

        for entry in self._filesystem.filesystem_walk(root_partition):
            yield Dirent(
                entry['tsk_inode'],
                self._filesystem.path('/' + entry['tsk_name']),
                entry['tsk_size'],
                entry['tsk_type'],
                True if entry['tsk_flags'] & TSK_ALLOC else False,
                timestamp(entry['tsk_atime_sec'], entry['tsk_atime_nsec']),
                timestamp(entry['tsk_mtime_sec'], entry['tsk_mtime_nsec']),
                timestamp(entry['tsk_ctime_sec'], entry['tsk_ctime_nsec']),
                timestamp(entry['tsk_crtime_sec'], entry['tsk_crtime_nsec']))
[ "def", "_visit_filesystem", "(", "self", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"Parsing File System content.\"", ")", "root_partition", "=", "self", ".", "_filesystem", ".", "inspect_get_roots", "(", ")", "[", "0", "]", "yield", "from", "self", ".", "_root_dirent", "(", ")", "for", "entry", "in", "self", ".", "_filesystem", ".", "filesystem_walk", "(", "root_partition", ")", ":", "yield", "Dirent", "(", "entry", "[", "'tsk_inode'", "]", ",", "self", ".", "_filesystem", ".", "path", "(", "'/'", "+", "entry", "[", "'tsk_name'", "]", ")", ",", "entry", "[", "'tsk_size'", "]", ",", "entry", "[", "'tsk_type'", "]", ",", "True", "if", "entry", "[", "'tsk_flags'", "]", "&", "TSK_ALLOC", "else", "False", ",", "timestamp", "(", "entry", "[", "'tsk_atime_sec'", "]", ",", "entry", "[", "'tsk_atime_nsec'", "]", ")", ",", "timestamp", "(", "entry", "[", "'tsk_mtime_sec'", "]", ",", "entry", "[", "'tsk_mtime_nsec'", "]", ")", ",", "timestamp", "(", "entry", "[", "'tsk_ctime_sec'", "]", ",", "entry", "[", "'tsk_ctime_nsec'", "]", ")", ",", "timestamp", "(", "entry", "[", "'tsk_crtime_sec'", "]", ",", "entry", "[", "'tsk_crtime_nsec'", "]", ")", ")" ]
Walks through the filesystem content.
[ "Walks", "through", "the", "filesystem", "content", "." ]
train
https://github.com/noxdafox/vminspect/blob/e685282564877e2d1950f1e09b292f4f4db1dbcd/vminspect/timeline.py#L97-L114
0.002301
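The timestamp() helper used above is defined elsewhere in vminspect, so its real body is not shown in this record; a plausible reading, assumed here purely for illustration, is "seconds plus nanoseconds to an aware datetime":

from datetime import datetime, timezone

def timestamp(sec, nsec):
    return datetime.fromtimestamp(sec + nsec * 1e-9, tz=timezone.utc)

print(timestamp(1514764800, 500000000))   # 2018-01-01 00:00:00.500000+00:00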
caseyjlaw/rtpipe
rtpipe/RT.py
calc_nfalse
def calc_nfalse(d):
    """ Calculate the number of thermal-noise false positives per segment.
    """

    dtfactor = n.sum([1./i for i in d['dtarr']])    # assumes dedisperse-all algorithm
    ntrials = d['readints'] * dtfactor * len(d['dmarr']) * d['npixx'] * d['npixy']
    qfrac = 1 - (erf(d['sigma_image1']/n.sqrt(2)) + 1)/2.
    nfalse = int(qfrac*ntrials)
    return nfalse
python
def calc_nfalse(d):
    """ Calculate the number of thermal-noise false positives per segment.
    """

    dtfactor = n.sum([1./i for i in d['dtarr']])    # assumes dedisperse-all algorithm
    ntrials = d['readints'] * dtfactor * len(d['dmarr']) * d['npixx'] * d['npixy']
    qfrac = 1 - (erf(d['sigma_image1']/n.sqrt(2)) + 1)/2.
    nfalse = int(qfrac*ntrials)
    return nfalse
[ "def", "calc_nfalse", "(", "d", ")", ":", "dtfactor", "=", "n", ".", "sum", "(", "[", "1.", "/", "i", "for", "i", "in", "d", "[", "'dtarr'", "]", "]", ")", "# assumes dedisperse-all algorithm", "ntrials", "=", "d", "[", "'readints'", "]", "*", "dtfactor", "*", "len", "(", "d", "[", "'dmarr'", "]", ")", "*", "d", "[", "'npixx'", "]", "*", "d", "[", "'npixy'", "]", "qfrac", "=", "1", "-", "(", "erf", "(", "d", "[", "'sigma_image1'", "]", "/", "n", ".", "sqrt", "(", "2", ")", ")", "+", "1", ")", "/", "2.", "nfalse", "=", "int", "(", "qfrac", "*", "ntrials", ")", "return", "nfalse" ]
Calculate the number of thermal-noise false positives per segment.
[ "Calculate", "the", "number", "of", "thermal", "-", "noise", "false", "positives", "per", "segment", "." ]
train
https://github.com/caseyjlaw/rtpipe/blob/ac33e4332cf215091a63afbb3137850876d73ec0/rtpipe/RT.py#L961-L969
0.007874
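The estimate above is the one-sided Gaussian tail probability beyond the imaging threshold multiplied by the number of independent trials; worked through with made-up survey numbers (scipy provides erf):

import numpy as np
from scipy.special import erf

sigma, readints, ndm, npixx, npixy = 6.0, 512, 100, 1024, 1024
dtfactor = np.sum([1./i for i in [1, 2, 4, 8]])    # dedisperse-all factor
ntrials = readints * dtfactor * ndm * npixx * npixy
qfrac = 1 - (erf(sigma / np.sqrt(2)) + 1) / 2.
print(int(qfrac * ntrials))   # roughly 1e2 false positives at 6 sigma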
teepark/junction
junction/futures.py
Future.on_finish
def on_finish(self, func):
        '''Assign a callback function to be run when successfully complete

        :param function func:
            A callback to run when complete. It will be given one argument
            (the value that has arrived), and its return value is ignored.
        '''
        if self._done.is_set():
            if self._failure is None:
                backend.schedule(func, args=(self._value,))
        else:
            self._cbacks.append(func)
python
def on_finish(self, func):
        '''Assign a callback function to be run when successfully complete

        :param function func:
            A callback to run when complete. It will be given one argument
            (the value that has arrived), and its return value is ignored.
        '''
        if self._done.is_set():
            if self._failure is None:
                backend.schedule(func, args=(self._value,))
        else:
            self._cbacks.append(func)
[ "def", "on_finish", "(", "self", ",", "func", ")", ":", "if", "self", ".", "_done", ".", "is_set", "(", ")", ":", "if", "self", ".", "_failure", "is", "None", ":", "backend", ".", "schedule", "(", "func", ",", "args", "=", "(", "self", ".", "_value", ",", ")", ")", "else", ":", "self", ".", "_cbacks", ".", "append", "(", "func", ")" ]
Assign a callback function to be run when successfully complete

:param function func:
    A callback to run when complete. It will be given one argument (the
    value that has arrived), and its return value is ignored.
[ "Assign", "a", "callback", "function", "to", "be", "run", "when", "successfully", "complete" ]
train
https://github.com/teepark/junction/blob/481d135d9e53acb55c72686e2eb4483432f35fa6/junction/futures.py#L119-L130
0.004193
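A minimal sketch of the run-now-or-queue pattern above, with direct calls standing in for junction's scheduler backend:

import threading

class TinyFuture:
    def __init__(self):
        self._done = threading.Event()
        self._failure = None
        self._value = None
        self._cbacks = []

    def on_finish(self, func):
        if self._done.is_set():
            if self._failure is None:
                func(self._value)         # already complete: run immediately
        else:
            self._cbacks.append(func)     # still pending: run on completion

    def complete(self, value):
        self._value = value
        self._done.set()
        for func in self._cbacks:
            func(value)

f = TinyFuture()
f.on_finish(print)   # queued while pending
f.complete(42)       # prints 42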
mikeboers/Flask-ACL
flask_acl/extension.py
ACLManager.route_acl
def route_acl(self, *acl, **options):
        """Decorator to attach an ACL to a route.

        E.g::

            @app.route('/url/to/view')
            @authz.route_acl('''
                ALLOW WHEEL ALL
                DENY  ANY   ALL
            ''')
            def my_admin_function():
                pass

        """
        def _route_acl(func):
            func.__acl__ = acl

            @functools.wraps(func)
            def wrapped(*args, **kwargs):
                permission = 'http.' + request.method.lower()
                local_opts = options.copy()
                local_opts.setdefault('default',
                                      current_app.config['ACL_ROUTE_DEFAULT_STATE'])
                self.assert_can(permission, func, **local_opts)
                return func(*args, **kwargs)

            return wrapped
        return _route_acl
python
def route_acl(self, *acl, **options):
        """Decorator to attach an ACL to a route.

        E.g::

            @app.route('/url/to/view')
            @authz.route_acl('''
                ALLOW WHEEL ALL
                DENY  ANY   ALL
            ''')
            def my_admin_function():
                pass

        """
        def _route_acl(func):
            func.__acl__ = acl

            @functools.wraps(func)
            def wrapped(*args, **kwargs):
                permission = 'http.' + request.method.lower()
                local_opts = options.copy()
                local_opts.setdefault('default',
                                      current_app.config['ACL_ROUTE_DEFAULT_STATE'])
                self.assert_can(permission, func, **local_opts)
                return func(*args, **kwargs)

            return wrapped
        return _route_acl
[ "def", "route_acl", "(", "self", ",", "*", "acl", ",", "*", "*", "options", ")", ":", "def", "_route_acl", "(", "func", ")", ":", "func", ".", "__acl__", "=", "acl", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapped", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "permission", "=", "'http.'", "+", "request", ".", "method", ".", "lower", "(", ")", "local_opts", "=", "options", ".", "copy", "(", ")", "local_opts", ".", "setdefault", "(", "'default'", ",", "current_app", ".", "config", "[", "'ACL_ROUTE_DEFAULT_STATE'", "]", ")", "self", ".", "assert_can", "(", "permission", ",", "func", ",", "*", "*", "local_opts", ")", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapped", "return", "_route_acl" ]
Decorator to attach an ACL to a route.

E.g::

    @app.route('/url/to/view')
    @authz.route_acl('''
        ALLOW WHEEL ALL
        DENY  ANY   ALL
    ''')
    def my_admin_function():
        pass
[ "Decorator", "to", "attach", "an", "ACL", "to", "a", "route", "." ]
train
https://github.com/mikeboers/Flask-ACL/blob/7339b89f96ad8686d1526e25c138244ad912e12d/flask_acl/extension.py#L116-L144
0.004756
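The decorator shape above works outside Flask too: metadata is attached to the inner function and functools.wraps copies it (along with the name) onto the wrapper. A stripped-down sketch without the permission check:

import functools

def with_acl(*acl):
    def decorator(func):
        func.__acl__ = acl

        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            # a real implementation would check permissions here
            return func(*args, **kwargs)
        return wrapped
    return decorator

@with_acl('ALLOW WHEEL ALL', 'DENY ANY ALL')
def admin_view():
    return 'ok'

print(admin_view(), admin_view.__name__, admin_view.__acl__)
# ok admin_view ('ALLOW WHEEL ALL', 'DENY ANY ALL')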
pazz/alot
alot/buffers/thread.py
ThreadBuffer.focus_first_reply
def focus_first_reply(self):
        """move focus to first reply to currently focussed message"""
        mid = self.get_selected_mid()
        newpos = self._tree.first_child_position(mid)
        if newpos is not None:
            newpos = self._sanitize_position((newpos,))
            self.body.set_focus(newpos)
python
def focus_first_reply(self):
        """move focus to first reply to currently focussed message"""
        mid = self.get_selected_mid()
        newpos = self._tree.first_child_position(mid)
        if newpos is not None:
            newpos = self._sanitize_position((newpos,))
            self.body.set_focus(newpos)
[ "def", "focus_first_reply", "(", "self", ")", ":", "mid", "=", "self", ".", "get_selected_mid", "(", ")", "newpos", "=", "self", ".", "_tree", ".", "first_child_position", "(", "mid", ")", "if", "newpos", "is", "not", "None", ":", "newpos", "=", "self", ".", "_sanitize_position", "(", "(", "newpos", ",", ")", ")", "self", ".", "body", ".", "set_focus", "(", "newpos", ")" ]
move focus to first reply to currently focussed message
[ "move", "focus", "to", "first", "reply", "to", "currently", "focussed", "message" ]
train
https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/buffers/thread.py#L207-L213
0.006309
PyCQA/astroid
astroid/builder.py
_extract_expressions
def _extract_expressions(node):
    """Find expressions in a call to _TRANSIENT_FUNCTION and extract them.

    The function walks the AST recursively to search for expressions that
    are wrapped into a call to _TRANSIENT_FUNCTION. If it finds such an
    expression, it completely removes the function call node from the tree,
    replacing it by the wrapped expression inside the parent.

    :param node: An astroid node.
    :type node: astroid.bases.NodeNG
    :yields: The sequence of wrapped expressions found on the modified tree.
    """
    if (
        isinstance(node, nodes.Call)
        and isinstance(node.func, nodes.Name)
        and node.func.name == _TRANSIENT_FUNCTION
    ):
        real_expr = node.args[0]
        real_expr.parent = node.parent
        # Search for node in all _astng_fields (the fields checked when
        # get_children is called) of its parent. Some of those fields may
        # be lists or tuples, in which case the elements need to be checked.
        # When we find it, replace it by real_expr, so that the AST looks
        # like no call to _TRANSIENT_FUNCTION ever took place.
        for name in node.parent._astroid_fields:
            child = getattr(node.parent, name)
            if isinstance(child, (list, tuple)):
                for idx, compound_child in enumerate(child):
                    if compound_child is node:
                        child[idx] = real_expr
            elif child is node:
                setattr(node.parent, name, real_expr)
        yield real_expr
    else:
        for child in node.get_children():
            yield from _extract_expressions(child)
python
def _extract_expressions(node):
    """Find expressions in a call to _TRANSIENT_FUNCTION and extract them.

    The function walks the AST recursively to search for expressions that
    are wrapped into a call to _TRANSIENT_FUNCTION. If it finds such an
    expression, it completely removes the function call node from the tree,
    replacing it by the wrapped expression inside the parent.

    :param node: An astroid node.
    :type node: astroid.bases.NodeNG
    :yields: The sequence of wrapped expressions found on the modified tree.
    """
    if (
        isinstance(node, nodes.Call)
        and isinstance(node.func, nodes.Name)
        and node.func.name == _TRANSIENT_FUNCTION
    ):
        real_expr = node.args[0]
        real_expr.parent = node.parent
        # Search for node in all _astng_fields (the fields checked when
        # get_children is called) of its parent. Some of those fields may
        # be lists or tuples, in which case the elements need to be checked.
        # When we find it, replace it by real_expr, so that the AST looks
        # like no call to _TRANSIENT_FUNCTION ever took place.
        for name in node.parent._astroid_fields:
            child = getattr(node.parent, name)
            if isinstance(child, (list, tuple)):
                for idx, compound_child in enumerate(child):
                    if compound_child is node:
                        child[idx] = real_expr
            elif child is node:
                setattr(node.parent, name, real_expr)
        yield real_expr
    else:
        for child in node.get_children():
            yield from _extract_expressions(child)
[ "def", "_extract_expressions", "(", "node", ")", ":", "if", "(", "isinstance", "(", "node", ",", "nodes", ".", "Call", ")", "and", "isinstance", "(", "node", ".", "func", ",", "nodes", ".", "Name", ")", "and", "node", ".", "func", ".", "name", "==", "_TRANSIENT_FUNCTION", ")", ":", "real_expr", "=", "node", ".", "args", "[", "0", "]", "real_expr", ".", "parent", "=", "node", ".", "parent", "# Search for node in all _astng_fields (the fields checked when", "# get_children is called) of its parent. Some of those fields may", "# be lists or tuples, in which case the elements need to be checked.", "# When we find it, replace it by real_expr, so that the AST looks", "# like no call to _TRANSIENT_FUNCTION ever took place.", "for", "name", "in", "node", ".", "parent", ".", "_astroid_fields", ":", "child", "=", "getattr", "(", "node", ".", "parent", ",", "name", ")", "if", "isinstance", "(", "child", ",", "(", "list", ",", "tuple", ")", ")", ":", "for", "idx", ",", "compound_child", "in", "enumerate", "(", "child", ")", ":", "if", "compound_child", "is", "node", ":", "child", "[", "idx", "]", "=", "real_expr", "elif", "child", "is", "node", ":", "setattr", "(", "node", ".", "parent", ",", "name", ",", "real_expr", ")", "yield", "real_expr", "else", ":", "for", "child", "in", "node", ".", "get_children", "(", ")", ":", "yield", "from", "_extract_expressions", "(", "child", ")" ]
Find expressions in a call to _TRANSIENT_FUNCTION and extract them.

The function walks the AST recursively to search for expressions that
are wrapped into a call to _TRANSIENT_FUNCTION. If it finds such an
expression, it completely removes the function call node from the tree,
replacing it by the wrapped expression inside the parent.

:param node: An astroid node.
:type node: astroid.bases.NodeNG
:yields: The sequence of wrapped expressions found on the modified tree.
[ "Find", "expressions", "in", "a", "call", "to", "_TRANSIENT_FUNCTION", "and", "extract", "them", "." ]
train
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/builder.py#L278-L314
0.000601
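A stdlib-ast analog of the extraction idea above: locate calls to a marker function and pull out their single argument. The sketch uses '__' as the marker name, needs no astroid, and omits the in-place parent rewriting:

import ast

MARKER = '__'

def extract_marked(tree):
    for node in ast.walk(tree):
        if (isinstance(node, ast.Call)
                and isinstance(node.func, ast.Name)
                and node.func.id == MARKER):
            yield node.args[0]

tree = ast.parse("x = __(1 + 2)\ny = 3")
print([ast.dump(n) for n in extract_marked(tree)])   # the BinOp for 1 + 2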
bpython/curtsies
curtsies/formatstring.py
linesplit
def linesplit(string, columns):
    # type: (Union[Text, FmtStr], int) -> List[FmtStr]
    """Returns a list of lines, split on the last possible space of each line.

    Split spaces will be removed. Whitespaces will be normalized to one space.
    Spaces will be the color of the first whitespace character of the
    normalized whitespace.

    If a word extends beyond the line, wrap it anyway.

    >>> linesplit(fmtstr(" home is where the heart-eating mummy is", 'blue'), 10)
    [blue('home')+blue(' ')+blue('is'), blue('where')+blue(' ')+blue('the'), blue('heart-eati'), blue('ng')+blue(' ')+blue('mummy'), blue('is')]
    """
    if not isinstance(string, FmtStr):
        string = fmtstr(string)

    string_s = string.s
    matches = list(re.finditer(r'\s+', string_s))
    spaces = [string[m.start():m.end()] for m in matches
              if m.start() != 0 and m.end() != len(string_s)]
    words = [string[start:end] for start, end in zip(
        [0] + [m.end() for m in matches],
        [m.start() for m in matches] + [len(string_s)]) if start != end]

    word_to_lines = lambda word: [word[columns*i:columns*(i+1)]
                                  for i in range((len(word) - 1) // columns + 1)]

    lines = word_to_lines(words[0])
    for word, space in zip(words[1:], spaces):
        if len(lines[-1]) + len(word) < columns:
            lines[-1] += fmtstr(' ', **space.shared_atts)
            lines[-1] += word
        else:
            lines.extend(word_to_lines(word))
    return lines
python
def linesplit(string, columns):
    # type: (Union[Text, FmtStr], int) -> List[FmtStr]
    """Returns a list of lines, split on the last possible space of each line.

    Split spaces will be removed. Whitespaces will be normalized to one space.
    Spaces will be the color of the first whitespace character of the
    normalized whitespace.

    If a word extends beyond the line, wrap it anyway.

    >>> linesplit(fmtstr(" home is where the heart-eating mummy is", 'blue'), 10)
    [blue('home')+blue(' ')+blue('is'), blue('where')+blue(' ')+blue('the'), blue('heart-eati'), blue('ng')+blue(' ')+blue('mummy'), blue('is')]
    """
    if not isinstance(string, FmtStr):
        string = fmtstr(string)

    string_s = string.s
    matches = list(re.finditer(r'\s+', string_s))
    spaces = [string[m.start():m.end()] for m in matches
              if m.start() != 0 and m.end() != len(string_s)]
    words = [string[start:end] for start, end in zip(
        [0] + [m.end() for m in matches],
        [m.start() for m in matches] + [len(string_s)]) if start != end]

    word_to_lines = lambda word: [word[columns*i:columns*(i+1)]
                                  for i in range((len(word) - 1) // columns + 1)]

    lines = word_to_lines(words[0])
    for word, space in zip(words[1:], spaces):
        if len(lines[-1]) + len(word) < columns:
            lines[-1] += fmtstr(' ', **space.shared_atts)
            lines[-1] += word
        else:
            lines.extend(word_to_lines(word))
    return lines
[ "def", "linesplit", "(", "string", ",", "columns", ")", ":", "# type: (Union[Text, FmtStr], int) -> List[FmtStr]", "if", "not", "isinstance", "(", "string", ",", "FmtStr", ")", ":", "string", "=", "fmtstr", "(", "string", ")", "string_s", "=", "string", ".", "s", "matches", "=", "list", "(", "re", ".", "finditer", "(", "r'\\s+'", ",", "string_s", ")", ")", "spaces", "=", "[", "string", "[", "m", ".", "start", "(", ")", ":", "m", ".", "end", "(", ")", "]", "for", "m", "in", "matches", "if", "m", ".", "start", "(", ")", "!=", "0", "and", "m", ".", "end", "(", ")", "!=", "len", "(", "string_s", ")", "]", "words", "=", "[", "string", "[", "start", ":", "end", "]", "for", "start", ",", "end", "in", "zip", "(", "[", "0", "]", "+", "[", "m", ".", "end", "(", ")", "for", "m", "in", "matches", "]", ",", "[", "m", ".", "start", "(", ")", "for", "m", "in", "matches", "]", "+", "[", "len", "(", "string_s", ")", "]", ")", "if", "start", "!=", "end", "]", "word_to_lines", "=", "lambda", "word", ":", "[", "word", "[", "columns", "*", "i", ":", "columns", "*", "(", "i", "+", "1", ")", "]", "for", "i", "in", "range", "(", "(", "len", "(", "word", ")", "-", "1", ")", "//", "columns", "+", "1", ")", "]", "lines", "=", "word_to_lines", "(", "words", "[", "0", "]", ")", "for", "word", ",", "space", "in", "zip", "(", "words", "[", "1", ":", "]", ",", "spaces", ")", ":", "if", "len", "(", "lines", "[", "-", "1", "]", ")", "+", "len", "(", "word", ")", "<", "columns", ":", "lines", "[", "-", "1", "]", "+=", "fmtstr", "(", "' '", ",", "*", "*", "space", ".", "shared_atts", ")", "lines", "[", "-", "1", "]", "+=", "word", "else", ":", "lines", ".", "extend", "(", "word_to_lines", "(", "word", ")", ")", "return", "lines" ]
Returns a list of lines, split on the last possible space of each line.

Split spaces will be removed. Whitespaces will be normalized to one space.
Spaces will be the color of the first whitespace character of the
normalized whitespace.

If a word extends beyond the line, wrap it anyway.

>>> linesplit(fmtstr(" home is where the heart-eating mummy is", 'blue'), 10)
[blue('home')+blue(' ')+blue('is'), blue('where')+blue(' ')+blue('the'), blue('heart-eati'), blue('ng')+blue(' ')+blue('mummy'), blue('is')]
[ "Returns", "a", "list", "of", "lines", "split", "on", "the", "last", "possible", "space", "of", "each", "line", "." ]
train
https://github.com/bpython/curtsies/blob/223e42b97fbf6c86b479ed4f0963a067333c5a63/curtsies/formatstring.py#L674-L705
0.004068
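The linesplit record above implements greedy word wrapping with a forced hard break for words longer than the column width. Below is a minimal plain-string sketch of the same algorithm, dropping the FmtStr color handling; the function name and the sample input are taken from the record's doctest, everything else is a stdlib-only reimplementation.

import re

def plain_linesplit(s, columns):
    # Greedy wrap: normalize whitespace, pack words onto lines, and
    # hard-break any word that is longer than the column width.
    words = [w for w in re.split(r'\s+', s) if w]
    chunks = lambda w: [w[i:i + columns] for i in range(0, len(w), columns)]
    lines = chunks(words[0]) if words else ['']
    for word in words[1:]:
        if len(lines[-1]) + 1 + len(word) <= columns:
            lines[-1] += ' ' + word
        else:
            lines.extend(chunks(word))
    return lines

print(plain_linesplit(' home is where the heart-eating mummy is', 10))
# -> ['home is', 'where the', 'heart-eati', 'ng mummy', 'is']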
markovmodel/msmtools
msmtools/analysis/sparse/decomposition.py
timescales
def timescales(T, tau=1, k=None, ncv=None, reversible=False, mu=None): r"""Compute implied time scales of given transition matrix Parameters ---------- T : transition matrix tau : lag time k : int (optional) Compute the first k implied time scales. ncv : int (optional) The number of Lanczos vectors generated, `ncv` must be greater than k; it is recommended that ncv > 2*k reversible : bool, optional Indicate that transition matrix is reversible mu : (M,) ndarray, optional Stationary distribution of T Returns ------- ts : ndarray The implied time scales of the transition matrix. """ if k is None: raise ValueError("Number of time scales required for decomposition of sparse matrix") values = eigenvalues(T, k=k, ncv=ncv, reversible=reversible) """Check for dominant eigenvalues with large imaginary part""" if not np.allclose(values.imag, 0.0): warnings.warn('Using eigenvalues with non-zero imaginary part ' 'for implied time scale computation', ImaginaryEigenValueWarning) """Check for multiple eigenvalues of magnitude one""" ind_abs_one = np.isclose(np.abs(values), 1.0) if sum(ind_abs_one) > 1: warnings.warn('Multiple eigenvalues with magnitude one.', SpectralWarning) """Compute implied time scales""" ts = np.zeros(len(values)) """Eigenvalues of magnitude one imply infinite rate""" ts[ind_abs_one] = np.inf """All other eigenvalues give rise to finite rates""" ts[np.logical_not(ind_abs_one)] = \ -1.0 * tau / np.log(np.abs(values[np.logical_not(ind_abs_one)])) return ts
python
def timescales(T, tau=1, k=None, ncv=None, reversible=False, mu=None): r"""Compute implied time scales of given transition matrix Parameters ---------- T : transition matrix tau : lag time k : int (optional) Compute the first k implied time scales. ncv : int (optional) The number of Lanczos vectors generated, `ncv` must be greater than k; it is recommended that ncv > 2*k reversible : bool, optional Indicate that transition matrix is reversible mu : (M,) ndarray, optional Stationary distribution of T Returns ------- ts : ndarray The implied time scales of the transition matrix. """ if k is None: raise ValueError("Number of time scales required for decomposition of sparse matrix") values = eigenvalues(T, k=k, ncv=ncv, reversible=reversible) """Check for dominant eigenvalues with large imaginary part""" if not np.allclose(values.imag, 0.0): warnings.warn('Using eigenvalues with non-zero imaginary part ' 'for implied time scale computation', ImaginaryEigenValueWarning) """Check for multiple eigenvalues of magnitude one""" ind_abs_one = np.isclose(np.abs(values), 1.0) if sum(ind_abs_one) > 1: warnings.warn('Multiple eigenvalues with magnitude one.', SpectralWarning) """Compute implied time scales""" ts = np.zeros(len(values)) """Eigenvalues of magnitude one imply infinite rate""" ts[ind_abs_one] = np.inf """All other eigenvalues give rise to finite rates""" ts[np.logical_not(ind_abs_one)] = \ -1.0 * tau / np.log(np.abs(values[np.logical_not(ind_abs_one)])) return ts
[ "def", "timescales", "(", "T", ",", "tau", "=", "1", ",", "k", "=", "None", ",", "ncv", "=", "None", ",", "reversible", "=", "False", ",", "mu", "=", "None", ")", ":", "if", "k", "is", "None", ":", "raise", "ValueError", "(", "\"Number of time scales required for decomposition of sparse matrix\"", ")", "values", "=", "eigenvalues", "(", "T", ",", "k", "=", "k", ",", "ncv", "=", "ncv", ",", "reversible", "=", "reversible", ")", "\"\"\"Check for dominant eigenvalues with large imaginary part\"\"\"", "if", "not", "np", ".", "allclose", "(", "values", ".", "imag", ",", "0.0", ")", ":", "warnings", ".", "warn", "(", "'Using eigenvalues with non-zero imaginary part '", "'for implied time scale computation'", ",", "ImaginaryEigenValueWarning", ")", "\"\"\"Check for multiple eigenvalues of magnitude one\"\"\"", "ind_abs_one", "=", "np", ".", "isclose", "(", "np", ".", "abs", "(", "values", ")", ",", "1.0", ")", "if", "sum", "(", "ind_abs_one", ")", ">", "1", ":", "warnings", ".", "warn", "(", "'Multiple eigenvalues with magnitude one.'", ",", "SpectralWarning", ")", "\"\"\"Compute implied time scales\"\"\"", "ts", "=", "np", ".", "zeros", "(", "len", "(", "values", ")", ")", "\"\"\"Eigenvalues of magnitude one imply infinite rate\"\"\"", "ts", "[", "ind_abs_one", "]", "=", "np", ".", "inf", "\"\"\"All other eigenvalues give rise to finite rates\"\"\"", "ts", "[", "np", ".", "logical_not", "(", "ind_abs_one", ")", "]", "=", "-", "1.0", "*", "tau", "/", "np", ".", "log", "(", "np", ".", "abs", "(", "values", "[", "np", ".", "logical_not", "(", "ind_abs_one", ")", "]", ")", ")", "return", "ts" ]
r"""Compute implied time scales of given transition matrix Parameters ---------- T : transition matrix tau : lag time k : int (optional) Compute the first k implied time scales. ncv : int (optional) The number of Lanczos vectors generated, `ncv` must be greater than k; it is recommended that ncv > 2*k reversible : bool, optional Indicate that transition matrix is reversible mu : (M,) ndarray, optional Stationary distribution of T Returns ------- ts : ndarray The implied time scales of the transition matrix.
[ "r", "Compute", "implied", "time", "scales", "of", "given", "transition", "matrix" ]
train
https://github.com/markovmodel/msmtools/blob/54dc76dd2113a0e8f3d15d5316abab41402941be/msmtools/analysis/sparse/decomposition.py#L470-L515
0.00235
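The core of the timescales record is the relation t_i = -tau / ln|lambda_i|, with eigenvalues of magnitude one mapped to an infinite timescale. A self-contained numpy sketch of just that final step (the eigenvalue computation itself is assumed to have happened elsewhere):

import numpy as np

def implied_timescales(eigenvalues, tau=1):
    # t_i = -tau / ln|lambda_i|; |lambda| == 1 implies an infinite timescale.
    values = np.asarray(eigenvalues, dtype=float)
    ts = np.full(values.shape, np.inf)
    finite = ~np.isclose(np.abs(values), 1.0)
    ts[finite] = -tau / np.log(np.abs(values[finite]))
    return ts

print(implied_timescales([1.0, 0.9, 0.5], tau=10))
# -> [inf, 94.91..., 14.42...]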
regebro/hovercraft
hovercraft/generate.py
generate
def generate(args): """Generates the presentation and returns a list of files used""" source_files = {args.presentation} # Parse the template info template_info = Template(args.template) if args.css: presentation_dir = os.path.split(args.presentation)[0] target_path = os.path.relpath(args.css, presentation_dir) template_info.add_resource(args.css, CSS_RESOURCE, target=target_path, extra_info='all') source_files.add(args.css) if args.js: presentation_dir = os.path.split(args.presentation)[0] target_path = os.path.relpath(args.js, presentation_dir) template_info.add_resource(args.js, JS_RESOURCE, target=target_path, extra_info=JS_POSITION_BODY) source_files.add(args.js) # Make the resulting HTML htmldata, dependencies = rst2html(args.presentation, template_info, args.auto_console, args.skip_help, args.skip_notes, args.mathjax, args.slide_numbers) source_files.update(dependencies) # Write the HTML out if not os.path.exists(args.targetdir): os.makedirs(args.targetdir) with open(os.path.join(args.targetdir, 'index.html'), 'wb') as outfile: outfile.write(htmldata) # Copy supporting files source_files.update(template_info.copy_resources(args.targetdir)) # Copy images from the source: sourcedir = os.path.split(os.path.abspath(args.presentation))[0] tree = html.fromstring(htmldata) for image in tree.iterdescendants('img'): filename = image.attrib['src'] source_files.add(copy_resource(filename, sourcedir, args.targetdir)) RE_CSS_URL = re.compile(br"""url\(['"]?(.*?)['"]?[\)\?\#]""") # Copy any files referenced by url() in the css-files: for resource in template_info.resources: if resource.resource_type != CSS_RESOURCE: continue # path in CSS is relative to CSS file; construct source/dest accordingly css_base = template_info.template_root if resource.is_in_template else sourcedir css_sourcedir = os.path.dirname(os.path.join(css_base, resource.filepath)) css_targetdir = os.path.dirname(os.path.join(args.targetdir, resource.final_path())) uris = RE_CSS_URL.findall(template_info.read_data(resource)) uris = [uri.decode() for uri in uris] if resource.is_in_template and template_info.builtin_template: for filename in uris: template_info.add_resource(filename, OTHER_RESOURCE, target=css_targetdir, is_in_template=True) else: for filename in uris: source_files.add(copy_resource(filename, css_sourcedir, css_targetdir)) # All done! return {os.path.abspath(f) for f in source_files if f}
python
def generate(args): """Generates the presentation and returns a list of files used""" source_files = {args.presentation} # Parse the template info template_info = Template(args.template) if args.css: presentation_dir = os.path.split(args.presentation)[0] target_path = os.path.relpath(args.css, presentation_dir) template_info.add_resource(args.css, CSS_RESOURCE, target=target_path, extra_info='all') source_files.add(args.css) if args.js: presentation_dir = os.path.split(args.presentation)[0] target_path = os.path.relpath(args.js, presentation_dir) template_info.add_resource(args.js, JS_RESOURCE, target=target_path, extra_info=JS_POSITION_BODY) source_files.add(args.js) # Make the resulting HTML htmldata, dependencies = rst2html(args.presentation, template_info, args.auto_console, args.skip_help, args.skip_notes, args.mathjax, args.slide_numbers) source_files.update(dependencies) # Write the HTML out if not os.path.exists(args.targetdir): os.makedirs(args.targetdir) with open(os.path.join(args.targetdir, 'index.html'), 'wb') as outfile: outfile.write(htmldata) # Copy supporting files source_files.update(template_info.copy_resources(args.targetdir)) # Copy images from the source: sourcedir = os.path.split(os.path.abspath(args.presentation))[0] tree = html.fromstring(htmldata) for image in tree.iterdescendants('img'): filename = image.attrib['src'] source_files.add(copy_resource(filename, sourcedir, args.targetdir)) RE_CSS_URL = re.compile(br"""url\(['"]?(.*?)['"]?[\)\?\#]""") # Copy any files referenced by url() in the css-files: for resource in template_info.resources: if resource.resource_type != CSS_RESOURCE: continue # path in CSS is relative to CSS file; construct source/dest accordingly css_base = template_info.template_root if resource.is_in_template else sourcedir css_sourcedir = os.path.dirname(os.path.join(css_base, resource.filepath)) css_targetdir = os.path.dirname(os.path.join(args.targetdir, resource.final_path())) uris = RE_CSS_URL.findall(template_info.read_data(resource)) uris = [uri.decode() for uri in uris] if resource.is_in_template and template_info.builtin_template: for filename in uris: template_info.add_resource(filename, OTHER_RESOURCE, target=css_targetdir, is_in_template=True) else: for filename in uris: source_files.add(copy_resource(filename, css_sourcedir, css_targetdir)) # All done! return {os.path.abspath(f) for f in source_files if f}
[ "def", "generate", "(", "args", ")", ":", "source_files", "=", "{", "args", ".", "presentation", "}", "# Parse the template info", "template_info", "=", "Template", "(", "args", ".", "template", ")", "if", "args", ".", "css", ":", "presentation_dir", "=", "os", ".", "path", ".", "split", "(", "args", ".", "presentation", ")", "[", "0", "]", "target_path", "=", "os", ".", "path", ".", "relpath", "(", "args", ".", "css", ",", "presentation_dir", ")", "template_info", ".", "add_resource", "(", "args", ".", "css", ",", "CSS_RESOURCE", ",", "target", "=", "target_path", ",", "extra_info", "=", "'all'", ")", "source_files", ".", "add", "(", "args", ".", "css", ")", "if", "args", ".", "js", ":", "presentation_dir", "=", "os", ".", "path", ".", "split", "(", "args", ".", "presentation", ")", "[", "0", "]", "target_path", "=", "os", ".", "path", ".", "relpath", "(", "args", ".", "js", ",", "presentation_dir", ")", "template_info", ".", "add_resource", "(", "args", ".", "js", ",", "JS_RESOURCE", ",", "target", "=", "target_path", ",", "extra_info", "=", "JS_POSITION_BODY", ")", "source_files", ".", "add", "(", "args", ".", "js", ")", "# Make the resulting HTML", "htmldata", ",", "dependencies", "=", "rst2html", "(", "args", ".", "presentation", ",", "template_info", ",", "args", ".", "auto_console", ",", "args", ".", "skip_help", ",", "args", ".", "skip_notes", ",", "args", ".", "mathjax", ",", "args", ".", "slide_numbers", ")", "source_files", ".", "update", "(", "dependencies", ")", "# Write the HTML out", "if", "not", "os", ".", "path", ".", "exists", "(", "args", ".", "targetdir", ")", ":", "os", ".", "makedirs", "(", "args", ".", "targetdir", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "args", ".", "targetdir", ",", "'index.html'", ")", ",", "'wb'", ")", "as", "outfile", ":", "outfile", ".", "write", "(", "htmldata", ")", "# Copy supporting files", "source_files", ".", "update", "(", "template_info", ".", "copy_resources", "(", "args", ".", "targetdir", ")", ")", "# Copy images from the source:", "sourcedir", "=", "os", ".", "path", ".", "split", "(", "os", ".", "path", ".", "abspath", "(", "args", ".", "presentation", ")", ")", "[", "0", "]", "tree", "=", "html", ".", "fromstring", "(", "htmldata", ")", "for", "image", "in", "tree", ".", "iterdescendants", "(", "'img'", ")", ":", "filename", "=", "image", ".", "attrib", "[", "'src'", "]", "source_files", ".", "add", "(", "copy_resource", "(", "filename", ",", "sourcedir", ",", "args", ".", "targetdir", ")", ")", "RE_CSS_URL", "=", "re", ".", "compile", "(", "br\"\"\"url\\(['\"]?(.*?)['\"]?[\\)\\?\\#]\"\"\"", ")", "# Copy any files referenced by url() in the css-files:", "for", "resource", "in", "template_info", ".", "resources", ":", "if", "resource", ".", "resource_type", "!=", "CSS_RESOURCE", ":", "continue", "# path in CSS is relative to CSS file; construct source/dest accordingly", "css_base", "=", "template_info", ".", "template_root", "if", "resource", ".", "is_in_template", "else", "sourcedir", "css_sourcedir", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "join", "(", "css_base", ",", "resource", ".", "filepath", ")", ")", "css_targetdir", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "join", "(", "args", ".", "targetdir", ",", "resource", ".", "final_path", "(", ")", ")", ")", "uris", "=", "RE_CSS_URL", ".", "findall", "(", "template_info", ".", "read_data", "(", "resource", ")", ")", "uris", "=", "[", "uri", ".", "decode", "(", ")", "for", "uri", "in", "uris", "]", 
"if", "resource", ".", "is_in_template", "and", "template_info", ".", "builtin_template", ":", "for", "filename", "in", "uris", ":", "template_info", ".", "add_resource", "(", "filename", ",", "OTHER_RESOURCE", ",", "target", "=", "css_targetdir", ",", "is_in_template", "=", "True", ")", "else", ":", "for", "filename", "in", "uris", ":", "source_files", ".", "add", "(", "copy_resource", "(", "filename", ",", "css_sourcedir", ",", "css_targetdir", ")", ")", "# All done!", "return", "{", "os", ".", "path", ".", "abspath", "(", "f", ")", "for", "f", "in", "source_files", "if", "f", "}" ]
Generates the presentation and returns a list of files used
[ "Generates", "the", "presentation", "and", "returns", "a", "list", "of", "files", "used" ]
train
https://github.com/regebro/hovercraft/blob/d9f63bfdfe1519c4d7a81697ee066e49dc26a30b/hovercraft/generate.py#L144-L207
0.003102
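The generate record pulls url(...) references out of CSS files with the RE_CSS_URL pattern that appears verbatim in its code. A quick standalone check of what that exact pattern extracts; the sample CSS is made up for illustration:

import re

RE_CSS_URL = re.compile(br"""url\(['"]?(.*?)['"]?[\)\?\#]""")

css = b"body { background: url('img/bg.png'); cursor: url(point.cur#hot); }"
print([m.decode() for m in RE_CSS_URL.findall(css)])
# -> ['img/bg.png', 'point.cur']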
earwig/mwparserfromhell
mwparserfromhell/parser/tokenizer.py
Tokenizer._handle_hr
def _handle_hr(self): """Handle a wiki-style horizontal rule (``----``) in the string.""" length = 4 self._head += 3 while self._read(1) == "-": length += 1 self._head += 1 self._emit(tokens.TagOpenOpen(wiki_markup="-" * length)) self._emit_text("hr") self._emit(tokens.TagCloseSelfclose())
python
def _handle_hr(self): """Handle a wiki-style horizontal rule (``----``) in the string.""" length = 4 self._head += 3 while self._read(1) == "-": length += 1 self._head += 1 self._emit(tokens.TagOpenOpen(wiki_markup="-" * length)) self._emit_text("hr") self._emit(tokens.TagCloseSelfclose())
[ "def", "_handle_hr", "(", "self", ")", ":", "length", "=", "4", "self", ".", "_head", "+=", "3", "while", "self", ".", "_read", "(", "1", ")", "==", "\"-\"", ":", "length", "+=", "1", "self", ".", "_head", "+=", "1", "self", ".", "_emit", "(", "tokens", ".", "TagOpenOpen", "(", "wiki_markup", "=", "\"-\"", "*", "length", ")", ")", "self", ".", "_emit_text", "(", "\"hr\"", ")", "self", ".", "_emit", "(", "tokens", ".", "TagCloseSelfclose", "(", ")", ")" ]
Handle a wiki-style horizontal rule (``----``) in the string.
[ "Handle", "a", "wiki", "-", "style", "horizontal", "rule", "(", "----", ")", "in", "the", "string", "." ]
train
https://github.com/earwig/mwparserfromhell/blob/98dc30902d35c714a70aca8e6616f49d71cb24cc/mwparserfromhell/parser/tokenizer.py#L1051-L1060
0.005405
aetros/aetros-cli
aetros/backend.py
JobBackend.get_parameter
def get_parameter(self, path, default=None, return_group=False): """ Reads hyperparameter from job configuration. If nothing found use given default. :param path: str :param default: * :param return_group: If true and path is a choice_group, we return the dict instead of the group name. :return: * """ value = read_parameter_by_path(self.job['config']['parameters'], path, return_group) if value is None: return default return value
python
def get_parameter(self, path, default=None, return_group=False): """ Reads hyperparameter from job configuration. If nothing found use given default. :param path: str :param default: * :param return_group: If true and path is a choice_group, we return the dict instead of the group name. :return: * """ value = read_parameter_by_path(self.job['config']['parameters'], path, return_group) if value is None: return default return value
[ "def", "get_parameter", "(", "self", ",", "path", ",", "default", "=", "None", ",", "return_group", "=", "False", ")", ":", "value", "=", "read_parameter_by_path", "(", "self", ".", "job", "[", "'config'", "]", "[", "'parameters'", "]", ",", "path", ",", "return_group", ")", "if", "value", "is", "None", ":", "return", "default", "return", "value" ]
Reads hyperparameter from job configuration. If nothing found use given default. :param path: str :param default: * :param return_group: If true and path is a choice_group, we return the dict instead of the group name. :return: *
[ "Reads", "hyperparameter", "from", "job", "configuration", ".", "If", "nothing", "found", "use", "given", "default", "." ]
train
https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/backend.py#L1280-L1294
0.011342
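get_parameter above delegates to read_parameter_by_path and falls back to a default when the lookup yields None. A hypothetical stand-in showing the shape of such a path lookup; the dotted-path convention here is an assumption, and the real aetros helper may behave differently:

def read_by_path(params, path, default=None):
    # Walk a nested dict along a dotted path; fall back to the default
    # whenever a key is missing. (Hypothetical helper, not the aetros one.)
    node = params
    for key in path.split('.'):
        if not isinstance(node, dict) or key not in node:
            return default
        node = node[key]
    return node

config = {'optimizer': {'lr': 0.01, 'name': 'adam'}}
print(read_by_path(config, 'optimizer.lr', 0.1))   # -> 0.01
print(read_by_path(config, 'optimizer.decay', 0))  # -> 0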
PmagPy/PmagPy
pmagpy/nlt.py
compare
def compare(a, b):
    """
    Compare items in 2 arrays. Returns sum(abs(a[i]-b[i]))
    """
    s=0
    for i in range(len(a)):
        s=s+abs(a[i]-b[i])
    return s
python
def compare(a, b):
    """
    Compare items in 2 arrays. Returns sum(abs(a[i]-b[i]))
    """
    s=0
    for i in range(len(a)):
        s=s+abs(a[i]-b[i])
    return s
[ "def", "compare", "(", "a", ",", "b", ")", ":", "s", "=", "0", "for", "i", "in", "range", "(", "len", "(", "a", ")", ")", ":", "s", "=", "s", "+", "abs", "(", "a", "[", "i", "]", "-", "b", "[", "i", "]", ")", "return", "s" ]
Compare items in 2 arrays. Returns sum(abs(a[i]-b[i]))
[ "Compare", "items", "in", "2", "arrays", ".", "Returns", "sum", "(", "abs", "(", "a", "(", "i", ")", "-", "b", "(", "i", ")))" ]
train
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/nlt.py#L34-L41
0.017647
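The compare record computes the L1 (Manhattan) distance between two equal-length sequences. The same computation as an idiomatic one-liner, with a worked value:

def l1_distance(a, b):
    # Sum of absolute element-wise differences.
    return sum(abs(x - y) for x, y in zip(a, b))

print(l1_distance([1.0, 2.0, 3.0], [1.5, 2.0, 2.0]))  # -> 0.5 + 0.0 + 1.0 = 1.5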
saltstack/salt
salt/modules/keystoneng.py
role_update
def role_update(auth=None, **kwargs): ''' Update a role CLI Example: .. code-block:: bash salt '*' keystoneng.role_update name=role1 new_name=newrole salt '*' keystoneng.role_update name=1eb6edd5525e4ac39af571adee673559 new_name=newrole ''' cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) if 'new_name' in kwargs: kwargs['name'] = kwargs.pop('new_name') return cloud.update_role(**kwargs)
python
def role_update(auth=None, **kwargs): ''' Update a role CLI Example: .. code-block:: bash salt '*' keystoneng.role_update name=role1 new_name=newrole salt '*' keystoneng.role_update name=1eb6edd5525e4ac39af571adee673559 new_name=newrole ''' cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) if 'new_name' in kwargs: kwargs['name'] = kwargs.pop('new_name') return cloud.update_role(**kwargs)
[ "def", "role_update", "(", "auth", "=", "None", ",", "*", "*", "kwargs", ")", ":", "cloud", "=", "get_operator_cloud", "(", "auth", ")", "kwargs", "=", "_clean_kwargs", "(", "*", "*", "kwargs", ")", "if", "'new_name'", "in", "kwargs", ":", "kwargs", "[", "'name'", "]", "=", "kwargs", ".", "pop", "(", "'new_name'", ")", "return", "cloud", ".", "update_role", "(", "*", "*", "kwargs", ")" ]
Update a role CLI Example: .. code-block:: bash salt '*' keystoneng.role_update name=role1 new_name=newrole salt '*' keystoneng.role_update name=1eb6edd5525e4ac39af571adee673559 new_name=newrole
[ "Update", "a", "role" ]
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/keystoneng.py#L462-L477
0.004264
marcinmiklitz/pywindow
pywindow/molecular.py
Molecule.calculate_pore_diameter
def calculate_pore_diameter(self): """ Return the intrinsic pore diameter. Returns ------- :class:`float` The intrinsic pore diameter. """ self.pore_diameter, self.pore_closest_atom = pore_diameter( self.elements, self.coordinates) self.properties['pore_diameter'] = { 'diameter': self.pore_diameter, 'atom': int(self.pore_closest_atom), } return self.pore_diameter
python
def calculate_pore_diameter(self): """ Return the intrinsic pore diameter. Returns ------- :class:`float` The intrinsic pore diameter. """ self.pore_diameter, self.pore_closest_atom = pore_diameter( self.elements, self.coordinates) self.properties['pore_diameter'] = { 'diameter': self.pore_diameter, 'atom': int(self.pore_closest_atom), } return self.pore_diameter
[ "def", "calculate_pore_diameter", "(", "self", ")", ":", "self", ".", "pore_diameter", ",", "self", ".", "pore_closest_atom", "=", "pore_diameter", "(", "self", ".", "elements", ",", "self", ".", "coordinates", ")", "self", ".", "properties", "[", "'pore_diameter'", "]", "=", "{", "'diameter'", ":", "self", ".", "pore_diameter", ",", "'atom'", ":", "int", "(", "self", ".", "pore_closest_atom", ")", ",", "}", "return", "self", ".", "pore_diameter" ]
Return the intrinsic pore diameter. Returns ------- :class:`float` The intrinsic pore diameter.
[ "Return", "the", "intrinsic", "pore", "diameter", "." ]
train
https://github.com/marcinmiklitz/pywindow/blob/e5264812157224f22a691741ca2e0aefdc9bd2eb/pywindow/molecular.py#L464-L480
0.00404
chemlab/chemlab
chemlab/graphics/shaders.py
compileShader
def compileShader( source, shaderType ): """Compile shader source of given type source -- GLSL source-code for the shader shaderType -- GLenum GL_VERTEX_SHADER, GL_FRAGMENT_SHADER, etc, returns GLuint compiled shader reference raises RuntimeError when a compilation failure occurs """ if isinstance(source, str): source = [source] elif isinstance(source, bytes): source = [source.decode('utf-8')] shader = glCreateShader(shaderType) glShaderSource(shader, source) glCompileShader(shader) result = glGetShaderiv(shader, GL_COMPILE_STATUS) if not(result): # TODO: this will be wrong if the user has # disabled traditional unpacking array support. raise RuntimeError( """Shader compile failure (%s): %s"""%( result, glGetShaderInfoLog( shader ), ), source, shaderType, ) return shader
python
def compileShader( source, shaderType ): """Compile shader source of given type source -- GLSL source-code for the shader shaderType -- GLenum GL_VERTEX_SHADER, GL_FRAGMENT_SHADER, etc, returns GLuint compiled shader reference raises RuntimeError when a compilation failure occurs """ if isinstance(source, str): source = [source] elif isinstance(source, bytes): source = [source.decode('utf-8')] shader = glCreateShader(shaderType) glShaderSource(shader, source) glCompileShader(shader) result = glGetShaderiv(shader, GL_COMPILE_STATUS) if not(result): # TODO: this will be wrong if the user has # disabled traditional unpacking array support. raise RuntimeError( """Shader compile failure (%s): %s"""%( result, glGetShaderInfoLog( shader ), ), source, shaderType, ) return shader
[ "def", "compileShader", "(", "source", ",", "shaderType", ")", ":", "if", "isinstance", "(", "source", ",", "str", ")", ":", "source", "=", "[", "source", "]", "elif", "isinstance", "(", "source", ",", "bytes", ")", ":", "source", "=", "[", "source", ".", "decode", "(", "'utf-8'", ")", "]", "shader", "=", "glCreateShader", "(", "shaderType", ")", "glShaderSource", "(", "shader", ",", "source", ")", "glCompileShader", "(", "shader", ")", "result", "=", "glGetShaderiv", "(", "shader", ",", "GL_COMPILE_STATUS", ")", "if", "not", "(", "result", ")", ":", "# TODO: this will be wrong if the user has ", "# disabled traditional unpacking array support.", "raise", "RuntimeError", "(", "\"\"\"Shader compile failure (%s): %s\"\"\"", "%", "(", "result", ",", "glGetShaderInfoLog", "(", "shader", ")", ",", ")", ",", "source", ",", "shaderType", ",", ")", "return", "shader" ]
Compile shader source of given type source -- GLSL source-code for the shader shaderType -- GLenum GL_VERTEX_SHADER, GL_FRAGMENT_SHADER, etc, returns GLuint compiled shader reference raises RuntimeError when a compilation failure occurs
[ "Compile", "shader", "source", "of", "given", "type", "source", "--", "GLSL", "source", "-", "code", "for", "the", "shader", "shaderType", "--", "GLenum", "GL_VERTEX_SHADER", "GL_FRAGMENT_SHADER", "etc", "returns", "GLuint", "compiled", "shader", "reference", "raises", "RuntimeError", "when", "a", "compilation", "failure", "occurs" ]
train
https://github.com/chemlab/chemlab/blob/c8730966316d101e24f39ac3b96b51282aba0abe/chemlab/graphics/shaders.py#L26-L56
0.01217
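A usage sketch for the compileShader record. It assumes PyOpenGL is installed and, crucially, that a current OpenGL context already exists (glCreateShader fails without one), so this is illustrative rather than standalone-runnable:

from OpenGL.GL import GL_VERTEX_SHADER  # requires PyOpenGL

VERTEX_SRC = """
#version 120
void main() { gl_Position = gl_Vertex; }
"""

# With a current GL context (e.g. created via GLUT, Qt, or pyglet):
shader = compileShader(VERTEX_SRC, GL_VERTEX_SHADER)  # -> GLuint handle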
hotdoc/hotdoc
hotdoc/utils/utils.py
get_extension_classes
def get_extension_classes(sort, extra_extension_paths=None): """ Banana banana """ all_classes = {} deps_map = {} for entry_point in pkg_resources.iter_entry_points( group='hotdoc.extensions', name='get_extension_classes'): if entry_point.module_name == 'hotdoc_c_extension.extensions': continue try: activation_function = entry_point.load() classes = activation_function() # pylint: disable=broad-except except Exception as exc: info("Failed to load %s" % entry_point.module_name, exc) debug(traceback.format_exc()) continue for klass in classes: all_classes[klass.extension_name] = klass if extra_extension_paths: for klass in __get_extra_extension_classes(extra_extension_paths): all_classes[klass.extension_name] = klass klass_list = list(all_classes.values()) if not sort: return klass_list for i, klass in enumerate(klass_list): deps = klass.get_dependencies() topodeps = set() for dep in deps: if dep.dependency_name not in all_classes: if dep.optional: continue else: error("setup-issue", "Missing dependency %s for %s" % (dep.dependency_name, klass.extension_name)) if dep.is_upstream: topodeps.add( klass_list.index(all_classes[dep.dependency_name])) deps_map[i] = topodeps sorted_class_indices = toposort_flatten(deps_map) sorted_classes = [klass_list[i] for i in sorted_class_indices] return sorted_classes
python
def get_extension_classes(sort, extra_extension_paths=None): """ Banana banana """ all_classes = {} deps_map = {} for entry_point in pkg_resources.iter_entry_points( group='hotdoc.extensions', name='get_extension_classes'): if entry_point.module_name == 'hotdoc_c_extension.extensions': continue try: activation_function = entry_point.load() classes = activation_function() # pylint: disable=broad-except except Exception as exc: info("Failed to load %s" % entry_point.module_name, exc) debug(traceback.format_exc()) continue for klass in classes: all_classes[klass.extension_name] = klass if extra_extension_paths: for klass in __get_extra_extension_classes(extra_extension_paths): all_classes[klass.extension_name] = klass klass_list = list(all_classes.values()) if not sort: return klass_list for i, klass in enumerate(klass_list): deps = klass.get_dependencies() topodeps = set() for dep in deps: if dep.dependency_name not in all_classes: if dep.optional: continue else: error("setup-issue", "Missing dependency %s for %s" % (dep.dependency_name, klass.extension_name)) if dep.is_upstream: topodeps.add( klass_list.index(all_classes[dep.dependency_name])) deps_map[i] = topodeps sorted_class_indices = toposort_flatten(deps_map) sorted_classes = [klass_list[i] for i in sorted_class_indices] return sorted_classes
[ "def", "get_extension_classes", "(", "sort", ",", "extra_extension_paths", "=", "None", ")", ":", "all_classes", "=", "{", "}", "deps_map", "=", "{", "}", "for", "entry_point", "in", "pkg_resources", ".", "iter_entry_points", "(", "group", "=", "'hotdoc.extensions'", ",", "name", "=", "'get_extension_classes'", ")", ":", "if", "entry_point", ".", "module_name", "==", "'hotdoc_c_extension.extensions'", ":", "continue", "try", ":", "activation_function", "=", "entry_point", ".", "load", "(", ")", "classes", "=", "activation_function", "(", ")", "# pylint: disable=broad-except", "except", "Exception", "as", "exc", ":", "info", "(", "\"Failed to load %s\"", "%", "entry_point", ".", "module_name", ",", "exc", ")", "debug", "(", "traceback", ".", "format_exc", "(", ")", ")", "continue", "for", "klass", "in", "classes", ":", "all_classes", "[", "klass", ".", "extension_name", "]", "=", "klass", "if", "extra_extension_paths", ":", "for", "klass", "in", "__get_extra_extension_classes", "(", "extra_extension_paths", ")", ":", "all_classes", "[", "klass", ".", "extension_name", "]", "=", "klass", "klass_list", "=", "list", "(", "all_classes", ".", "values", "(", ")", ")", "if", "not", "sort", ":", "return", "klass_list", "for", "i", ",", "klass", "in", "enumerate", "(", "klass_list", ")", ":", "deps", "=", "klass", ".", "get_dependencies", "(", ")", "topodeps", "=", "set", "(", ")", "for", "dep", "in", "deps", ":", "if", "dep", ".", "dependency_name", "not", "in", "all_classes", ":", "if", "dep", ".", "optional", ":", "continue", "else", ":", "error", "(", "\"setup-issue\"", ",", "\"Missing dependency %s for %s\"", "%", "(", "dep", ".", "dependency_name", ",", "klass", ".", "extension_name", ")", ")", "if", "dep", ".", "is_upstream", ":", "topodeps", ".", "add", "(", "klass_list", ".", "index", "(", "all_classes", "[", "dep", ".", "dependency_name", "]", ")", ")", "deps_map", "[", "i", "]", "=", "topodeps", "sorted_class_indices", "=", "toposort_flatten", "(", "deps_map", ")", "sorted_classes", "=", "[", "klass_list", "[", "i", "]", "for", "i", "in", "sorted_class_indices", "]", "return", "sorted_classes" ]
Banana banana
[ "Banana", "banana" ]
train
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/utils/utils.py#L159-L209
0.000569
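The ordering step in get_extension_classes builds a dependency map of list indices and hands it to toposort_flatten. A minimal demonstration of that call, assuming the third-party toposort package is installed:

from toposort import toposort_flatten

# Keys are extension indices; each maps to the set of upstream
# extensions that must be loaded first.
deps_map = {0: set(), 1: {0}, 2: {0, 1}}
print(toposort_flatten(deps_map))  # -> [0, 1, 2]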
JawboneHealth/jhhalchemy
jhhalchemy/migrate.py
upgrade
def upgrade(dbname, connect_str, alembic_conf): """ Get the database's upgrade lock and run alembic. :param dbname: Name of the database to upgrade/create :param connect_str: Connection string to the database (usually Flask's SQLALCHEMY_DATABASE_URI) :param alembic_conf: location of alembic.ini """ # # The db has to exist before we can get the lock. On the off-chance that another process creates the db between # checking if it exists and running the create, ignore the exception. # if not sqlalchemy_utils.database_exists(connect_str): logger.info('Creating {}'.format(dbname)) try: sqlalchemy_utils.create_database(connect_str) except sqlalchemy.exc.ProgrammingError as exc: if not sqlalchemy_utils.database_exists(connect_str): logger.error('Could not create {}'.format(dbname)) raise exc with get_upgrade_lock(dbname, connect_str): alembic_config = alembic.config.Config( alembic_conf, attributes={'configure_logger': False}) logger.info('Upgrading {} to head'.format(dbname)) alembic.command.upgrade(alembic_config, 'head')
python
def upgrade(dbname, connect_str, alembic_conf): """ Get the database's upgrade lock and run alembic. :param dbname: Name of the database to upgrade/create :param connect_str: Connection string to the database (usually Flask's SQLALCHEMY_DATABASE_URI) :param alembic_conf: location of alembic.ini """ # # The db has to exist before we can get the lock. On the off-chance that another process creates the db between # checking if it exists and running the create, ignore the exception. # if not sqlalchemy_utils.database_exists(connect_str): logger.info('Creating {}'.format(dbname)) try: sqlalchemy_utils.create_database(connect_str) except sqlalchemy.exc.ProgrammingError as exc: if not sqlalchemy_utils.database_exists(connect_str): logger.error('Could not create {}'.format(dbname)) raise exc with get_upgrade_lock(dbname, connect_str): alembic_config = alembic.config.Config( alembic_conf, attributes={'configure_logger': False}) logger.info('Upgrading {} to head'.format(dbname)) alembic.command.upgrade(alembic_config, 'head')
[ "def", "upgrade", "(", "dbname", ",", "connect_str", ",", "alembic_conf", ")", ":", "#", "# The db has to exist before we can get the lock. On the off-chance that another process creates the db between", "# checking if it exists and running the create, ignore the exception.", "#", "if", "not", "sqlalchemy_utils", ".", "database_exists", "(", "connect_str", ")", ":", "logger", ".", "info", "(", "'Creating {}'", ".", "format", "(", "dbname", ")", ")", "try", ":", "sqlalchemy_utils", ".", "create_database", "(", "connect_str", ")", "except", "sqlalchemy", ".", "exc", ".", "ProgrammingError", "as", "exc", ":", "if", "not", "sqlalchemy_utils", ".", "database_exists", "(", "connect_str", ")", ":", "logger", ".", "error", "(", "'Could not create {}'", ".", "format", "(", "dbname", ")", ")", "raise", "exc", "with", "get_upgrade_lock", "(", "dbname", ",", "connect_str", ")", ":", "alembic_config", "=", "alembic", ".", "config", ".", "Config", "(", "alembic_conf", ",", "attributes", "=", "{", "'configure_logger'", ":", "False", "}", ")", "logger", ".", "info", "(", "'Upgrading {} to head'", ".", "format", "(", "dbname", ")", ")", "alembic", ".", "command", ".", "upgrade", "(", "alembic_config", ",", "'head'", ")" ]
Get the database's upgrade lock and run alembic. :param dbname: Name of the database to upgrade/create :param connect_str: Connection string to the database (usually Flask's SQLALCHEMY_DATABASE_URI) :param alembic_conf: location of alembic.ini
[ "Get", "the", "database", "s", "upgrade", "lock", "and", "run", "alembic", "." ]
train
https://github.com/JawboneHealth/jhhalchemy/blob/ca0011d644e404561a142c9d7f0a8a569f1f4f27/jhhalchemy/migrate.py#L64-L90
0.002481
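Stripped of the locking and error handling, the upgrade record is a create-if-missing followed by an alembic upgrade to head. A minimal sketch of that sequence; the connection string and ini path are placeholders, and the record's upgrade lock is deliberately omitted:

import sqlalchemy_utils
import alembic.command
import alembic.config

url = 'mysql://user:pass@localhost/mydb'  # placeholder connection string
if not sqlalchemy_utils.database_exists(url):
    sqlalchemy_utils.create_database(url)

cfg = alembic.config.Config('alembic.ini')  # placeholder ini path
alembic.command.upgrade(cfg, 'head')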
cloudera/cm_api
python/src/cm_api/endpoints/clusters.py
ApiCluster.enter_maintenance_mode
def enter_maintenance_mode(self): """ Put the cluster in maintenance mode. @return: Reference to the completed command. @since: API v2 """ cmd = self._cmd('enterMaintenanceMode') if cmd.success: self._update(get_cluster(self._get_resource_root(), self.name)) return cmd
python
def enter_maintenance_mode(self): """ Put the cluster in maintenance mode. @return: Reference to the completed command. @since: API v2 """ cmd = self._cmd('enterMaintenanceMode') if cmd.success: self._update(get_cluster(self._get_resource_root(), self.name)) return cmd
[ "def", "enter_maintenance_mode", "(", "self", ")", ":", "cmd", "=", "self", ".", "_cmd", "(", "'enterMaintenanceMode'", ")", "if", "cmd", ".", "success", ":", "self", ".", "_update", "(", "get_cluster", "(", "self", ".", "_get_resource_root", "(", ")", ",", "self", ".", "name", ")", ")", "return", "cmd" ]
Put the cluster in maintenance mode. @return: Reference to the completed command. @since: API v2
[ "Put", "the", "cluster", "in", "maintenance", "mode", "." ]
train
https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/src/cm_api/endpoints/clusters.py#L340-L350
0.006494
dmwm/DBS
Server/Python/src/dbs/business/DBSDataType.py
DBSDataType.listDataType
def listDataType(self, dataType="", dataset=""):
    """
    List data-type/primary-ds-type
    """
    conn = self.dbi.connection()
    try:
        if dataset and dataType:
            dbsExceptionHandler('dbsException-invalid-input', "DBSDataType/listDataType. Data Type can only be searched by data_type or by dataset, not both.")
        else:
            result = self.dataType.execute(conn, dataType, dataset)
            return result
    finally:
        if conn:
            conn.close()
python
def listDataType(self, dataType="", dataset=""):
    """
    List data-type/primary-ds-type
    """
    conn = self.dbi.connection()
    try:
        if dataset and dataType:
            dbsExceptionHandler('dbsException-invalid-input', "DBSDataType/listDataType. Data Type can only be searched by data_type or by dataset, not both.")
        else:
            result = self.dataType.execute(conn, dataType, dataset)
            return result
    finally:
        if conn:
            conn.close()
[ "def", "listDataType", "(", "self", ",", "dataType", "=", "\"\"", ",", "dataset", "=", "\"\"", ")", ":", "conn", "=", "self", ".", "dbi", ".", "connection", "(", ")", "try", ":", "if", "dataset", "and", "dataType", ":", "dbsExceptionHandler", "(", "'dbsException-invalid-input'", ",", "\"DBSDataType/listDataType. Data Type can be only searched by data_type or by dataset, not both.\"", ")", "else", ":", "result", "=", "self", ".", "dataType", ".", "execute", "(", "conn", ",", "dataType", ",", "dataset", ")", "return", "result", "finally", ":", "if", "conn", ":", "conn", ".", "close", "(", ")" ]
List data-type/primary-ds-type
[ "List", "data", "-", "type", "/", "primary", "-", "ds", "-", "type" ]
train
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/business/DBSDataType.py#L22-L36
0.008772
UCL-INGI/INGInious
inginious/client/client.py
Client._handle_job_queue_update
async def _handle_job_queue_update(self, message: BackendGetQueue): """ Handles a BackendGetQueue containing a snapshot of the job queue """ self._logger.debug("Received job queue update") self._queue_update_last_attempt = 0 self._queue_cache = message # Do some precomputation new_job_queue_cache = {} # format is job_id: (nb_jobs_before, max_remaining_time) for (job_id, is_local, _, _2, _3, _4, max_end) in message.jobs_running: if is_local: new_job_queue_cache[job_id] = (-1, max_end - time.time()) wait_time = 0 nb_tasks = 0 for (job_id, is_local, _, _2, timeout) in message.jobs_waiting: if timeout > 0: wait_time += timeout if is_local: new_job_queue_cache[job_id] = (nb_tasks, wait_time) nb_tasks += 1 self._queue_job_cache = new_job_queue_cache
python
async def _handle_job_queue_update(self, message: BackendGetQueue): """ Handles a BackendGetQueue containing a snapshot of the job queue """ self._logger.debug("Received job queue update") self._queue_update_last_attempt = 0 self._queue_cache = message # Do some precomputation new_job_queue_cache = {} # format is job_id: (nb_jobs_before, max_remaining_time) for (job_id, is_local, _, _2, _3, _4, max_end) in message.jobs_running: if is_local: new_job_queue_cache[job_id] = (-1, max_end - time.time()) wait_time = 0 nb_tasks = 0 for (job_id, is_local, _, _2, timeout) in message.jobs_waiting: if timeout > 0: wait_time += timeout if is_local: new_job_queue_cache[job_id] = (nb_tasks, wait_time) nb_tasks += 1 self._queue_job_cache = new_job_queue_cache
[ "async", "def", "_handle_job_queue_update", "(", "self", ",", "message", ":", "BackendGetQueue", ")", ":", "self", ".", "_logger", ".", "debug", "(", "\"Received job queue update\"", ")", "self", ".", "_queue_update_last_attempt", "=", "0", "self", ".", "_queue_cache", "=", "message", "# Do some precomputation", "new_job_queue_cache", "=", "{", "}", "# format is job_id: (nb_jobs_before, max_remaining_time)", "for", "(", "job_id", ",", "is_local", ",", "_", ",", "_2", ",", "_3", ",", "_4", ",", "max_end", ")", "in", "message", ".", "jobs_running", ":", "if", "is_local", ":", "new_job_queue_cache", "[", "job_id", "]", "=", "(", "-", "1", ",", "max_end", "-", "time", ".", "time", "(", ")", ")", "wait_time", "=", "0", "nb_tasks", "=", "0", "for", "(", "job_id", ",", "is_local", ",", "_", ",", "_2", ",", "timeout", ")", "in", "message", ".", "jobs_waiting", ":", "if", "timeout", ">", "0", ":", "wait_time", "+=", "timeout", "if", "is_local", ":", "new_job_queue_cache", "[", "job_id", "]", "=", "(", "nb_tasks", ",", "wait_time", ")", "nb_tasks", "+=", "1", "self", ".", "_queue_job_cache", "=", "new_job_queue_cache" ]
Handles a BackendGetQueue containing a snapshot of the job queue
[ "Handles", "a", "BackendGetQueue", "containing", "a", "snapshot", "of", "the", "job", "queue" ]
train
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/client/client.py#L164-L185
0.003168
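The precomputation in _handle_job_queue_update can be read in isolation: running local jobs get position -1 plus their remaining wall time, and each waiting local job gets its queue rank plus the cumulative timeout including its own. A simplified sketch with flattened tuples; the field layout is reduced from the record's, and whether the rank counter advances for remote jobs too is flattened away in the dump, so this sketch assumes it counts every waiting job:

import time

def precompute(jobs_running, jobs_waiting):
    # jobs_running: (job_id, is_local, max_end); jobs_waiting: (job_id, is_local, timeout)
    cache, now = {}, time.time()
    for job_id, is_local, max_end in jobs_running:
        if is_local:
            cache[job_id] = (-1, max_end - now)
    wait_time, nb_tasks = 0, 0
    for job_id, is_local, timeout in jobs_waiting:
        if timeout > 0:
            wait_time += timeout
        if is_local:
            cache[job_id] = (nb_tasks, wait_time)
        nb_tasks += 1
    return cache

print(precompute([('a', True, time.time() + 30)],
                 [('b', True, 60), ('c', True, 60)]))
# -> {'a': (-1, ~30.0), 'b': (0, 60), 'c': (1, 120)}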
mitsei/dlkit
dlkit/json_/repository/searches.py
RepositorySearchResults.get_repositories
def get_repositories(self): """Gets the repository list resulting from the search. return: (osid.repository.RepositoryList) - the repository list raise: IllegalState - the list has already been retrieved *compliance: mandatory -- This method must be implemented.* """ if self.retrieved: raise errors.IllegalState('List has already been retrieved.') self.retrieved = True return objects.RepositoryList(self._results, runtime=self._runtime)
python
def get_repositories(self): """Gets the repository list resulting from the search. return: (osid.repository.RepositoryList) - the repository list raise: IllegalState - the list has already been retrieved *compliance: mandatory -- This method must be implemented.* """ if self.retrieved: raise errors.IllegalState('List has already been retrieved.') self.retrieved = True return objects.RepositoryList(self._results, runtime=self._runtime)
[ "def", "get_repositories", "(", "self", ")", ":", "if", "self", ".", "retrieved", ":", "raise", "errors", ".", "IllegalState", "(", "'List has already been retrieved.'", ")", "self", ".", "retrieved", "=", "True", "return", "objects", ".", "RepositoryList", "(", "self", ".", "_results", ",", "runtime", "=", "self", ".", "_runtime", ")" ]
Gets the repository list resulting from the search. return: (osid.repository.RepositoryList) - the repository list raise: IllegalState - the list has already been retrieved *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "repository", "list", "resulting", "from", "the", "search", "." ]
train
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/searches.py#L342-L353
0.003868
eliangcs/http-prompt
http_prompt/context/transform.py
format_to_httpie
def format_to_httpie(context, method=None): """Format a Context object to an HTTPie command.""" cmd = ['http'] + _extract_httpie_options(context, quote=True, join_key_value=True) if method: cmd.append(method.upper()) cmd.append(context.url) cmd += _extract_httpie_request_items(context, quote=True) return ' '.join(cmd) + '\n'
python
def format_to_httpie(context, method=None): """Format a Context object to an HTTPie command.""" cmd = ['http'] + _extract_httpie_options(context, quote=True, join_key_value=True) if method: cmd.append(method.upper()) cmd.append(context.url) cmd += _extract_httpie_request_items(context, quote=True) return ' '.join(cmd) + '\n'
[ "def", "format_to_httpie", "(", "context", ",", "method", "=", "None", ")", ":", "cmd", "=", "[", "'http'", "]", "+", "_extract_httpie_options", "(", "context", ",", "quote", "=", "True", ",", "join_key_value", "=", "True", ")", "if", "method", ":", "cmd", ".", "append", "(", "method", ".", "upper", "(", ")", ")", "cmd", ".", "append", "(", "context", ".", "url", ")", "cmd", "+=", "_extract_httpie_request_items", "(", "context", ",", "quote", "=", "True", ")", "return", "' '", ".", "join", "(", "cmd", ")", "+", "'\\n'" ]
Format a Context object to an HTTPie command.
[ "Format", "a", "Context", "object", "to", "an", "HTTPie", "command", "." ]
train
https://github.com/eliangcs/http-prompt/blob/189321f25e3526fa1b79a9dc38c317892c478986/http_prompt/context/transform.py#L97-L105
0.002481
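format_to_httpie assembles an HTTPie command line: options first, then the upper-cased method, the URL, and finally the request items. A reduced sketch with explicit inputs standing in for the Context object, which is internal to http-prompt:

def to_httpie(url, method=None, options=(), items=()):
    # Same assembly order as the record: http [options] [METHOD] URL [items]
    cmd = ['http'] + list(options)
    if method:
        cmd.append(method.upper())
    cmd.append(url)
    cmd += list(items)
    return ' '.join(cmd) + '\n'

print(to_httpie('http://example.com', 'get', ['--verify=no'],
                ['Accept:application/json']), end='')
# -> http --verify=no GET http://example.com Accept:application/json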
henzk/ape
ape/container_mode/validators/feature_order_validator.py
FeatureOrderValidator.check_order
def check_order(self):
    """
    Performs the check and stores the violations in self.violations.
    :return: True if no ordering violations were found
    """
    for feature, info in self.constraints.items():
        self._check_feature(feature, info, 'before')
        self._check_feature(feature, info, 'after')
        self._check_position(feature, info)

    return not self.has_errors()
python
def check_order(self):
    """
    Performs the check and stores the violations in self.violations.
    :return: True if no ordering violations were found
    """
    for feature, info in self.constraints.items():
        self._check_feature(feature, info, 'before')
        self._check_feature(feature, info, 'after')
        self._check_position(feature, info)

    return not self.has_errors()
[ "def", "check_order", "(", "self", ")", ":", "for", "feature", ",", "info", "in", "self", ".", "constraints", ".", "items", "(", ")", ":", "self", ".", "_check_feature", "(", "feature", ",", "info", ",", "'before'", ")", "self", ".", "_check_feature", "(", "feature", ",", "info", ",", "'after'", ")", "self", ".", "_check_position", "(", "feature", ",", "info", ")", "return", "not", "self", ".", "has_errors", "(", ")" ]
Performs the check and stores the violations in self.violations.
:return: True if no ordering violations were found
[ "Performs", "the", "check", "and", "store", "the", "violations", "in", "self", ".", "violations", ".", ":", "return", ":", "boolean", "indicating", "the", "error", "state" ]
train
https://github.com/henzk/ape/blob/a1b7ea5e5b25c42beffeaaa5c32d94ad82634819/ape/container_mode/validators/feature_order_validator.py#L25-L36
0.004706
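The validator pattern in check_order collects violations instead of raising and reports them via has_errors(). A toy version of the 'before' check against a concrete feature order; the constraint schema here is a guess based on the record's field names:

order = ['a', 'b', 'c']
constraints = {'c': {'before': ['a'], 'after': []}}

violations = []
for feature, info in constraints.items():
    for dep in info['before']:
        if order.index(dep) > order.index(feature):
            violations.append((feature, 'must come after', dep))

print(violations or 'order is valid')  # -> order is valid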
osrg/ryu
ryu/services/protocols/bgp/bgpspeaker.py
BGPSpeaker.neighbor_add
def neighbor_add(self, address, remote_as, remote_port=DEFAULT_BGP_PORT, enable_ipv4=DEFAULT_CAP_MBGP_IPV4, enable_ipv6=DEFAULT_CAP_MBGP_IPV6, enable_vpnv4=DEFAULT_CAP_MBGP_VPNV4, enable_vpnv6=DEFAULT_CAP_MBGP_VPNV6, enable_evpn=DEFAULT_CAP_MBGP_EVPN, enable_ipv4fs=DEFAULT_CAP_MBGP_IPV4FS, enable_ipv6fs=DEFAULT_CAP_MBGP_IPV6FS, enable_vpnv4fs=DEFAULT_CAP_MBGP_VPNV4FS, enable_vpnv6fs=DEFAULT_CAP_MBGP_VPNV6FS, enable_l2vpnfs=DEFAULT_CAP_MBGP_L2VPNFS, enable_enhanced_refresh=DEFAULT_CAP_ENHANCED_REFRESH, enable_four_octet_as_number=DEFAULT_CAP_FOUR_OCTET_AS_NUMBER, next_hop=None, password=None, multi_exit_disc=None, site_of_origins=None, is_route_server_client=DEFAULT_IS_ROUTE_SERVER_CLIENT, is_route_reflector_client=DEFAULT_IS_ROUTE_REFLECTOR_CLIENT, is_next_hop_self=DEFAULT_IS_NEXT_HOP_SELF, local_address=None, local_port=None, local_as=None, connect_mode=DEFAULT_CONNECT_MODE): """ This method registers a new neighbor. The BGP speaker tries to establish a bgp session with the peer (accepts a connection from the peer and also tries to connect to it). ``address`` specifies the IP address of the peer. It must be the string representation of an IP address. Only IPv4 is supported now. ``remote_as`` specifies the AS number of the peer. It must be an integer between 1 and 65535. ``remote_port`` specifies the TCP port number of the peer. ``enable_ipv4`` enables IPv4 address family for this neighbor. ``enable_ipv6`` enables IPv6 address family for this neighbor. ``enable_vpnv4`` enables VPNv4 address family for this neighbor. ``enable_vpnv6`` enables VPNv6 address family for this neighbor. ``enable_evpn`` enables Ethernet VPN address family for this neighbor. ``enable_ipv4fs`` enables IPv4 Flow Specification address family for this neighbor. ``enable_ipv6fs`` enables IPv6 Flow Specification address family for this neighbor. ``enable_vpnv4fs`` enables VPNv4 Flow Specification address family for this neighbor. ``enable_vpnv6fs`` enables VPNv6 Flow Specification address family for this neighbor. ``enable_l2vpnfs`` enables L2VPN Flow Specification address family for this neighbor. ``enable_enhanced_refresh`` enables Enhanced Route Refresh for this neighbor. ``enable_four_octet_as_number`` enables Four-Octet AS Number capability for this neighbor. ``next_hop`` specifies the next hop IP address. If not specified, host's ip address to access to a peer is used. ``password`` is used for the MD5 authentication if it's specified. By default, the MD5 authentication is disabled. ``multi_exit_disc`` specifies multi exit discriminator (MED) value as an int type value. If omitted, MED is not sent to the neighbor. ``site_of_origins`` specifies site_of_origin values. This parameter must be a list of string. ``is_route_server_client`` specifies whether this neighbor is a router server's client or not. ``is_route_reflector_client`` specifies whether this neighbor is a router reflector's client or not. ``is_next_hop_self`` specifies whether the BGP speaker announces its own ip address to iBGP neighbor or not as path's next_hop address. ``local_address`` specifies Loopback interface address for iBGP peering. ``local_port`` specifies source TCP port for iBGP peering. ``local_as`` specifies local AS number per-peer. If omitted, the AS number of BGPSpeaker instance is used. ``connect_mode`` specifies how to connect to this neighbor. This parameter must be one of the following. 
- CONNECT_MODE_ACTIVE = 'active' - CONNECT_MODE_PASSIVE = 'passive' - CONNECT_MODE_BOTH (default) = 'both' """ bgp_neighbor = { neighbors.IP_ADDRESS: address, neighbors.REMOTE_AS: remote_as, REMOTE_PORT: remote_port, PEER_NEXT_HOP: next_hop, PASSWORD: password, IS_ROUTE_SERVER_CLIENT: is_route_server_client, IS_ROUTE_REFLECTOR_CLIENT: is_route_reflector_client, IS_NEXT_HOP_SELF: is_next_hop_self, CONNECT_MODE: connect_mode, CAP_ENHANCED_REFRESH: enable_enhanced_refresh, CAP_FOUR_OCTET_AS_NUMBER: enable_four_octet_as_number, CAP_MBGP_IPV4: enable_ipv4, CAP_MBGP_IPV6: enable_ipv6, CAP_MBGP_VPNV4: enable_vpnv4, CAP_MBGP_VPNV6: enable_vpnv6, CAP_MBGP_EVPN: enable_evpn, CAP_MBGP_IPV4FS: enable_ipv4fs, CAP_MBGP_IPV6FS: enable_ipv6fs, CAP_MBGP_VPNV4FS: enable_vpnv4fs, CAP_MBGP_VPNV6FS: enable_vpnv6fs, CAP_MBGP_L2VPNFS: enable_l2vpnfs, } if multi_exit_disc: bgp_neighbor[MULTI_EXIT_DISC] = multi_exit_disc if site_of_origins: bgp_neighbor[SITE_OF_ORIGINS] = site_of_origins if local_address: bgp_neighbor[LOCAL_ADDRESS] = local_address if local_port: bgp_neighbor[LOCAL_PORT] = local_port if local_as: bgp_neighbor[LOCAL_AS] = local_as call('neighbor.create', **bgp_neighbor)
python
def neighbor_add(self, address, remote_as, remote_port=DEFAULT_BGP_PORT, enable_ipv4=DEFAULT_CAP_MBGP_IPV4, enable_ipv6=DEFAULT_CAP_MBGP_IPV6, enable_vpnv4=DEFAULT_CAP_MBGP_VPNV4, enable_vpnv6=DEFAULT_CAP_MBGP_VPNV6, enable_evpn=DEFAULT_CAP_MBGP_EVPN, enable_ipv4fs=DEFAULT_CAP_MBGP_IPV4FS, enable_ipv6fs=DEFAULT_CAP_MBGP_IPV6FS, enable_vpnv4fs=DEFAULT_CAP_MBGP_VPNV4FS, enable_vpnv6fs=DEFAULT_CAP_MBGP_VPNV6FS, enable_l2vpnfs=DEFAULT_CAP_MBGP_L2VPNFS, enable_enhanced_refresh=DEFAULT_CAP_ENHANCED_REFRESH, enable_four_octet_as_number=DEFAULT_CAP_FOUR_OCTET_AS_NUMBER, next_hop=None, password=None, multi_exit_disc=None, site_of_origins=None, is_route_server_client=DEFAULT_IS_ROUTE_SERVER_CLIENT, is_route_reflector_client=DEFAULT_IS_ROUTE_REFLECTOR_CLIENT, is_next_hop_self=DEFAULT_IS_NEXT_HOP_SELF, local_address=None, local_port=None, local_as=None, connect_mode=DEFAULT_CONNECT_MODE): """ This method registers a new neighbor. The BGP speaker tries to establish a bgp session with the peer (accepts a connection from the peer and also tries to connect to it). ``address`` specifies the IP address of the peer. It must be the string representation of an IP address. Only IPv4 is supported now. ``remote_as`` specifies the AS number of the peer. It must be an integer between 1 and 65535. ``remote_port`` specifies the TCP port number of the peer. ``enable_ipv4`` enables IPv4 address family for this neighbor. ``enable_ipv6`` enables IPv6 address family for this neighbor. ``enable_vpnv4`` enables VPNv4 address family for this neighbor. ``enable_vpnv6`` enables VPNv6 address family for this neighbor. ``enable_evpn`` enables Ethernet VPN address family for this neighbor. ``enable_ipv4fs`` enables IPv4 Flow Specification address family for this neighbor. ``enable_ipv6fs`` enables IPv6 Flow Specification address family for this neighbor. ``enable_vpnv4fs`` enables VPNv4 Flow Specification address family for this neighbor. ``enable_vpnv6fs`` enables VPNv6 Flow Specification address family for this neighbor. ``enable_l2vpnfs`` enables L2VPN Flow Specification address family for this neighbor. ``enable_enhanced_refresh`` enables Enhanced Route Refresh for this neighbor. ``enable_four_octet_as_number`` enables Four-Octet AS Number capability for this neighbor. ``next_hop`` specifies the next hop IP address. If not specified, host's ip address to access to a peer is used. ``password`` is used for the MD5 authentication if it's specified. By default, the MD5 authentication is disabled. ``multi_exit_disc`` specifies multi exit discriminator (MED) value as an int type value. If omitted, MED is not sent to the neighbor. ``site_of_origins`` specifies site_of_origin values. This parameter must be a list of string. ``is_route_server_client`` specifies whether this neighbor is a router server's client or not. ``is_route_reflector_client`` specifies whether this neighbor is a router reflector's client or not. ``is_next_hop_self`` specifies whether the BGP speaker announces its own ip address to iBGP neighbor or not as path's next_hop address. ``local_address`` specifies Loopback interface address for iBGP peering. ``local_port`` specifies source TCP port for iBGP peering. ``local_as`` specifies local AS number per-peer. If omitted, the AS number of BGPSpeaker instance is used. ``connect_mode`` specifies how to connect to this neighbor. This parameter must be one of the following. 
- CONNECT_MODE_ACTIVE = 'active' - CONNECT_MODE_PASSIVE = 'passive' - CONNECT_MODE_BOTH (default) = 'both' """ bgp_neighbor = { neighbors.IP_ADDRESS: address, neighbors.REMOTE_AS: remote_as, REMOTE_PORT: remote_port, PEER_NEXT_HOP: next_hop, PASSWORD: password, IS_ROUTE_SERVER_CLIENT: is_route_server_client, IS_ROUTE_REFLECTOR_CLIENT: is_route_reflector_client, IS_NEXT_HOP_SELF: is_next_hop_self, CONNECT_MODE: connect_mode, CAP_ENHANCED_REFRESH: enable_enhanced_refresh, CAP_FOUR_OCTET_AS_NUMBER: enable_four_octet_as_number, CAP_MBGP_IPV4: enable_ipv4, CAP_MBGP_IPV6: enable_ipv6, CAP_MBGP_VPNV4: enable_vpnv4, CAP_MBGP_VPNV6: enable_vpnv6, CAP_MBGP_EVPN: enable_evpn, CAP_MBGP_IPV4FS: enable_ipv4fs, CAP_MBGP_IPV6FS: enable_ipv6fs, CAP_MBGP_VPNV4FS: enable_vpnv4fs, CAP_MBGP_VPNV6FS: enable_vpnv6fs, CAP_MBGP_L2VPNFS: enable_l2vpnfs, } if multi_exit_disc: bgp_neighbor[MULTI_EXIT_DISC] = multi_exit_disc if site_of_origins: bgp_neighbor[SITE_OF_ORIGINS] = site_of_origins if local_address: bgp_neighbor[LOCAL_ADDRESS] = local_address if local_port: bgp_neighbor[LOCAL_PORT] = local_port if local_as: bgp_neighbor[LOCAL_AS] = local_as call('neighbor.create', **bgp_neighbor)
[ "def", "neighbor_add", "(", "self", ",", "address", ",", "remote_as", ",", "remote_port", "=", "DEFAULT_BGP_PORT", ",", "enable_ipv4", "=", "DEFAULT_CAP_MBGP_IPV4", ",", "enable_ipv6", "=", "DEFAULT_CAP_MBGP_IPV6", ",", "enable_vpnv4", "=", "DEFAULT_CAP_MBGP_VPNV4", ",", "enable_vpnv6", "=", "DEFAULT_CAP_MBGP_VPNV6", ",", "enable_evpn", "=", "DEFAULT_CAP_MBGP_EVPN", ",", "enable_ipv4fs", "=", "DEFAULT_CAP_MBGP_IPV4FS", ",", "enable_ipv6fs", "=", "DEFAULT_CAP_MBGP_IPV6FS", ",", "enable_vpnv4fs", "=", "DEFAULT_CAP_MBGP_VPNV4FS", ",", "enable_vpnv6fs", "=", "DEFAULT_CAP_MBGP_VPNV6FS", ",", "enable_l2vpnfs", "=", "DEFAULT_CAP_MBGP_L2VPNFS", ",", "enable_enhanced_refresh", "=", "DEFAULT_CAP_ENHANCED_REFRESH", ",", "enable_four_octet_as_number", "=", "DEFAULT_CAP_FOUR_OCTET_AS_NUMBER", ",", "next_hop", "=", "None", ",", "password", "=", "None", ",", "multi_exit_disc", "=", "None", ",", "site_of_origins", "=", "None", ",", "is_route_server_client", "=", "DEFAULT_IS_ROUTE_SERVER_CLIENT", ",", "is_route_reflector_client", "=", "DEFAULT_IS_ROUTE_REFLECTOR_CLIENT", ",", "is_next_hop_self", "=", "DEFAULT_IS_NEXT_HOP_SELF", ",", "local_address", "=", "None", ",", "local_port", "=", "None", ",", "local_as", "=", "None", ",", "connect_mode", "=", "DEFAULT_CONNECT_MODE", ")", ":", "bgp_neighbor", "=", "{", "neighbors", ".", "IP_ADDRESS", ":", "address", ",", "neighbors", ".", "REMOTE_AS", ":", "remote_as", ",", "REMOTE_PORT", ":", "remote_port", ",", "PEER_NEXT_HOP", ":", "next_hop", ",", "PASSWORD", ":", "password", ",", "IS_ROUTE_SERVER_CLIENT", ":", "is_route_server_client", ",", "IS_ROUTE_REFLECTOR_CLIENT", ":", "is_route_reflector_client", ",", "IS_NEXT_HOP_SELF", ":", "is_next_hop_self", ",", "CONNECT_MODE", ":", "connect_mode", ",", "CAP_ENHANCED_REFRESH", ":", "enable_enhanced_refresh", ",", "CAP_FOUR_OCTET_AS_NUMBER", ":", "enable_four_octet_as_number", ",", "CAP_MBGP_IPV4", ":", "enable_ipv4", ",", "CAP_MBGP_IPV6", ":", "enable_ipv6", ",", "CAP_MBGP_VPNV4", ":", "enable_vpnv4", ",", "CAP_MBGP_VPNV6", ":", "enable_vpnv6", ",", "CAP_MBGP_EVPN", ":", "enable_evpn", ",", "CAP_MBGP_IPV4FS", ":", "enable_ipv4fs", ",", "CAP_MBGP_IPV6FS", ":", "enable_ipv6fs", ",", "CAP_MBGP_VPNV4FS", ":", "enable_vpnv4fs", ",", "CAP_MBGP_VPNV6FS", ":", "enable_vpnv6fs", ",", "CAP_MBGP_L2VPNFS", ":", "enable_l2vpnfs", ",", "}", "if", "multi_exit_disc", ":", "bgp_neighbor", "[", "MULTI_EXIT_DISC", "]", "=", "multi_exit_disc", "if", "site_of_origins", ":", "bgp_neighbor", "[", "SITE_OF_ORIGINS", "]", "=", "site_of_origins", "if", "local_address", ":", "bgp_neighbor", "[", "LOCAL_ADDRESS", "]", "=", "local_address", "if", "local_port", ":", "bgp_neighbor", "[", "LOCAL_PORT", "]", "=", "local_port", "if", "local_as", ":", "bgp_neighbor", "[", "LOCAL_AS", "]", "=", "local_as", "call", "(", "'neighbor.create'", ",", "*", "*", "bgp_neighbor", ")" ]
This method registers a new neighbor. The BGP speaker tries to establish a bgp session with the peer (accepts a connection from the peer and also tries to connect to it). ``address`` specifies the IP address of the peer. It must be the string representation of an IP address. Only IPv4 is supported now. ``remote_as`` specifies the AS number of the peer. It must be an integer between 1 and 65535. ``remote_port`` specifies the TCP port number of the peer. ``enable_ipv4`` enables IPv4 address family for this neighbor. ``enable_ipv6`` enables IPv6 address family for this neighbor. ``enable_vpnv4`` enables VPNv4 address family for this neighbor. ``enable_vpnv6`` enables VPNv6 address family for this neighbor. ``enable_evpn`` enables Ethernet VPN address family for this neighbor. ``enable_ipv4fs`` enables IPv4 Flow Specification address family for this neighbor. ``enable_ipv6fs`` enables IPv6 Flow Specification address family for this neighbor. ``enable_vpnv4fs`` enables VPNv4 Flow Specification address family for this neighbor. ``enable_vpnv6fs`` enables VPNv6 Flow Specification address family for this neighbor. ``enable_l2vpnfs`` enables L2VPN Flow Specification address family for this neighbor. ``enable_enhanced_refresh`` enables Enhanced Route Refresh for this neighbor. ``enable_four_octet_as_number`` enables Four-Octet AS Number capability for this neighbor. ``next_hop`` specifies the next hop IP address. If not specified, host's ip address to access to a peer is used. ``password`` is used for the MD5 authentication if it's specified. By default, the MD5 authentication is disabled. ``multi_exit_disc`` specifies multi exit discriminator (MED) value as an int type value. If omitted, MED is not sent to the neighbor. ``site_of_origins`` specifies site_of_origin values. This parameter must be a list of string. ``is_route_server_client`` specifies whether this neighbor is a router server's client or not. ``is_route_reflector_client`` specifies whether this neighbor is a router reflector's client or not. ``is_next_hop_self`` specifies whether the BGP speaker announces its own ip address to iBGP neighbor or not as path's next_hop address. ``local_address`` specifies Loopback interface address for iBGP peering. ``local_port`` specifies source TCP port for iBGP peering. ``local_as`` specifies local AS number per-peer. If omitted, the AS number of BGPSpeaker instance is used. ``connect_mode`` specifies how to connect to this neighbor. This parameter must be one of the following. - CONNECT_MODE_ACTIVE = 'active' - CONNECT_MODE_PASSIVE = 'passive' - CONNECT_MODE_BOTH (default) = 'both'
[ "This", "method", "registers", "a", "new", "neighbor", ".", "The", "BGP", "speaker", "tries", "to", "establish", "a", "bgp", "session", "with", "the", "peer", "(", "accepts", "a", "connection", "from", "the", "peer", "and", "also", "tries", "to", "connect", "to", "it", ")", "." ]
train
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/services/protocols/bgp/bgpspeaker.py#L410-L557
0.004222
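A minimal usage sketch for the neighbor_add API documented above. The AS numbers and peer address are illustrative, and the speaker construction follows Ryu's standard BGPSpeaker example; treat this as a sketch, not a verbatim excerpt from the repository.

    from ryu.services.protocols.bgp.bgpspeaker import BGPSpeaker

    # Start a speaker in AS 64512 (values are illustrative).
    speaker = BGPSpeaker(as_number=64512, router_id='10.0.0.1')

    # Register an eBGP peer; only address and remote_as are required,
    # everything else falls back to the defaults listed in the docstring.
    speaker.neighbor_add(address='192.168.177.32', remote_as=65001,
                         enable_ipv4=True, connect_mode='both')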
sparknetworks/pgpm
pgpm/lib/deploy.py
DeploymentManager._reorder_types
def _reorder_types(self, types_script): """ Takes type scripts and reorders them to avoid Type doesn't exist exception """ self._logger.debug('Running types definitions scripts') self._logger.debug('Reordering types definitions scripts to avoid "type does not exist" exceptions') _type_statements = sqlparse.split(types_script) # TODO: move up to classes _type_statements_dict = {} # dictionary that store statements with type and order. type_unordered_scripts = [] # scripts to execute without order type_drop_scripts = [] # drop scripts to execute first for _type_statement in _type_statements: _type_statement_parsed = sqlparse.parse(_type_statement) if len(_type_statement_parsed) > 0: # can be empty parsed object so need to check # we need only type declarations to be ordered if _type_statement_parsed[0].get_type() == 'CREATE': _type_body_r = r'\bcreate\s+\b(?:type|domain)\s+\b(\w+\.\w+|\w+)\b' _type_name = re.compile(_type_body_r, flags=re.IGNORECASE).findall(_type_statement)[0] _type_statements_dict[str(_type_name)] = \ {'script': _type_statement, 'deps': []} elif _type_statement_parsed[0].get_type() == 'DROP': type_drop_scripts.append(_type_statement) else: type_unordered_scripts.append(_type_statement) # now let's add dependant types to dictionary with types # _type_statements_list = [] # list of statements to be ordered for _type_key in _type_statements_dict.keys(): for _type_key_sub, _type_value in _type_statements_dict.items(): if _type_key != _type_key_sub: if pgpm.lib.utils.misc.find_whole_word(_type_key)(_type_value['script']): _type_value['deps'].append(_type_key) # now let's add order to type scripts and put them ordered to list _deps_unresolved = True _type_script_order = 0 _type_names = [] type_ordered_scripts = [] # ordered list with scripts to execute while _deps_unresolved: for k, v in _type_statements_dict.items(): if not v['deps']: _type_names.append(k) v['order'] = _type_script_order _type_script_order += 1 if not v['script'] in type_ordered_scripts: type_ordered_scripts.append(v['script']) else: _dep_exists = True for _dep in v['deps']: if _dep not in _type_names: _dep_exists = False if _dep_exists: _type_names.append(k) v['order'] = _type_script_order _type_script_order += 1 if not v['script'] in type_ordered_scripts: type_ordered_scripts.append(v['script']) else: v['order'] = -1 _deps_unresolved = False for k, v in _type_statements_dict.items(): if v['order'] == -1: _deps_unresolved = True return type_drop_scripts, type_ordered_scripts, type_unordered_scripts
python
def _reorder_types(self, types_script): """ Takes type scripts and reorders them to avoid Type doesn't exist exception """ self._logger.debug('Running types definitions scripts') self._logger.debug('Reordering types definitions scripts to avoid "type does not exist" exceptions') _type_statements = sqlparse.split(types_script) # TODO: move up to classes _type_statements_dict = {} # dictionary that store statements with type and order. type_unordered_scripts = [] # scripts to execute without order type_drop_scripts = [] # drop scripts to execute first for _type_statement in _type_statements: _type_statement_parsed = sqlparse.parse(_type_statement) if len(_type_statement_parsed) > 0: # can be empty parsed object so need to check # we need only type declarations to be ordered if _type_statement_parsed[0].get_type() == 'CREATE': _type_body_r = r'\bcreate\s+\b(?:type|domain)\s+\b(\w+\.\w+|\w+)\b' _type_name = re.compile(_type_body_r, flags=re.IGNORECASE).findall(_type_statement)[0] _type_statements_dict[str(_type_name)] = \ {'script': _type_statement, 'deps': []} elif _type_statement_parsed[0].get_type() == 'DROP': type_drop_scripts.append(_type_statement) else: type_unordered_scripts.append(_type_statement) # now let's add dependant types to dictionary with types # _type_statements_list = [] # list of statements to be ordered for _type_key in _type_statements_dict.keys(): for _type_key_sub, _type_value in _type_statements_dict.items(): if _type_key != _type_key_sub: if pgpm.lib.utils.misc.find_whole_word(_type_key)(_type_value['script']): _type_value['deps'].append(_type_key) # now let's add order to type scripts and put them ordered to list _deps_unresolved = True _type_script_order = 0 _type_names = [] type_ordered_scripts = [] # ordered list with scripts to execute while _deps_unresolved: for k, v in _type_statements_dict.items(): if not v['deps']: _type_names.append(k) v['order'] = _type_script_order _type_script_order += 1 if not v['script'] in type_ordered_scripts: type_ordered_scripts.append(v['script']) else: _dep_exists = True for _dep in v['deps']: if _dep not in _type_names: _dep_exists = False if _dep_exists: _type_names.append(k) v['order'] = _type_script_order _type_script_order += 1 if not v['script'] in type_ordered_scripts: type_ordered_scripts.append(v['script']) else: v['order'] = -1 _deps_unresolved = False for k, v in _type_statements_dict.items(): if v['order'] == -1: _deps_unresolved = True return type_drop_scripts, type_ordered_scripts, type_unordered_scripts
[ "def", "_reorder_types", "(", "self", ",", "types_script", ")", ":", "self", ".", "_logger", ".", "debug", "(", "'Running types definitions scripts'", ")", "self", ".", "_logger", ".", "debug", "(", "'Reordering types definitions scripts to avoid \"type does not exist\" exceptions'", ")", "_type_statements", "=", "sqlparse", ".", "split", "(", "types_script", ")", "# TODO: move up to classes", "_type_statements_dict", "=", "{", "}", "# dictionary that store statements with type and order.", "type_unordered_scripts", "=", "[", "]", "# scripts to execute without order", "type_drop_scripts", "=", "[", "]", "# drop scripts to execute first", "for", "_type_statement", "in", "_type_statements", ":", "_type_statement_parsed", "=", "sqlparse", ".", "parse", "(", "_type_statement", ")", "if", "len", "(", "_type_statement_parsed", ")", ">", "0", ":", "# can be empty parsed object so need to check", "# we need only type declarations to be ordered", "if", "_type_statement_parsed", "[", "0", "]", ".", "get_type", "(", ")", "==", "'CREATE'", ":", "_type_body_r", "=", "r'\\bcreate\\s+\\b(?:type|domain)\\s+\\b(\\w+\\.\\w+|\\w+)\\b'", "_type_name", "=", "re", ".", "compile", "(", "_type_body_r", ",", "flags", "=", "re", ".", "IGNORECASE", ")", ".", "findall", "(", "_type_statement", ")", "[", "0", "]", "_type_statements_dict", "[", "str", "(", "_type_name", ")", "]", "=", "{", "'script'", ":", "_type_statement", ",", "'deps'", ":", "[", "]", "}", "elif", "_type_statement_parsed", "[", "0", "]", ".", "get_type", "(", ")", "==", "'DROP'", ":", "type_drop_scripts", ".", "append", "(", "_type_statement", ")", "else", ":", "type_unordered_scripts", ".", "append", "(", "_type_statement", ")", "# now let's add dependant types to dictionary with types", "# _type_statements_list = [] # list of statements to be ordered", "for", "_type_key", "in", "_type_statements_dict", ".", "keys", "(", ")", ":", "for", "_type_key_sub", ",", "_type_value", "in", "_type_statements_dict", ".", "items", "(", ")", ":", "if", "_type_key", "!=", "_type_key_sub", ":", "if", "pgpm", ".", "lib", ".", "utils", ".", "misc", ".", "find_whole_word", "(", "_type_key", ")", "(", "_type_value", "[", "'script'", "]", ")", ":", "_type_value", "[", "'deps'", "]", ".", "append", "(", "_type_key", ")", "# now let's add order to type scripts and put them ordered to list", "_deps_unresolved", "=", "True", "_type_script_order", "=", "0", "_type_names", "=", "[", "]", "type_ordered_scripts", "=", "[", "]", "# ordered list with scripts to execute", "while", "_deps_unresolved", ":", "for", "k", ",", "v", "in", "_type_statements_dict", ".", "items", "(", ")", ":", "if", "not", "v", "[", "'deps'", "]", ":", "_type_names", ".", "append", "(", "k", ")", "v", "[", "'order'", "]", "=", "_type_script_order", "_type_script_order", "+=", "1", "if", "not", "v", "[", "'script'", "]", "in", "type_ordered_scripts", ":", "type_ordered_scripts", ".", "append", "(", "v", "[", "'script'", "]", ")", "else", ":", "_dep_exists", "=", "True", "for", "_dep", "in", "v", "[", "'deps'", "]", ":", "if", "_dep", "not", "in", "_type_names", ":", "_dep_exists", "=", "False", "if", "_dep_exists", ":", "_type_names", ".", "append", "(", "k", ")", "v", "[", "'order'", "]", "=", "_type_script_order", "_type_script_order", "+=", "1", "if", "not", "v", "[", "'script'", "]", "in", "type_ordered_scripts", ":", "type_ordered_scripts", ".", "append", "(", "v", "[", "'script'", "]", ")", "else", ":", "v", "[", "'order'", "]", "=", "-", "1", "_deps_unresolved", "=", "False", "for", "k", ",", 
"v", "in", "_type_statements_dict", ".", "items", "(", ")", ":", "if", "v", "[", "'order'", "]", "==", "-", "1", ":", "_deps_unresolved", "=", "True", "return", "type_drop_scripts", ",", "type_ordered_scripts", ",", "type_unordered_scripts" ]
Takes type scripts and reorders them to avoid Type doesn't exist exception
[ "Takes", "type", "scripts", "and", "reorders", "them", "to", "avoid", "Type", "doesn", "t", "exist", "exception" ]
train
https://github.com/sparknetworks/pgpm/blob/1a060df46a886095181f692ea870a73a32510a2e/pgpm/lib/deploy.py#L507-L568
0.002595
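The ordering loop in _reorder_types is a fixed-point pass: on every sweep it emits any type whose dependencies have already been emitted, and repeats while something is still unordered. Below is a self-contained sketch of that idea (names are illustrative; unlike the original, it simply stops when no further progress is possible, so a dependency cycle is dropped rather than looping forever).

    def order_by_deps(deps_by_name):
        # Emit every item whose dependencies were already emitted,
        # repeating until a full pass adds nothing new.
        ordered, emitted = [], set()
        changed = True
        while changed:
            changed = False
            for name, deps in deps_by_name.items():
                if name not in emitted and all(d in emitted for d in deps):
                    emitted.add(name)
                    ordered.append(name)
                    changed = True
        return ordered

    # 'address' depends on 'street', so 'street' is emitted first.
    print(order_by_deps({'address': ['street'], 'street': []}))
    # -> ['street', 'address']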
cloud-custodian/cloud-custodian
tools/sandbox/c7n_sphere11/c7n_sphere11/cli.py
lock_status
def lock_status(account_id, resource_id, parent_id): """Show extant locks' status """ return output( Client(BASE_URL, account_id).lock_status(resource_id, parent_id))
python
def lock_status(account_id, resource_id, parent_id): """Show extant locks' status """ return output( Client(BASE_URL, account_id).lock_status(resource_id, parent_id))
[ "def", "lock_status", "(", "account_id", ",", "resource_id", ",", "parent_id", ")", ":", "return", "output", "(", "Client", "(", "BASE_URL", ",", "account_id", ")", ".", "lock_status", "(", "resource_id", ",", "parent_id", ")", ")" ]
Show extant locks' status
[ "Show", "extant", "locks", "status" ]
train
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/tools/sandbox/c7n_sphere11/c7n_sphere11/cli.py#L67-L71
0.005376
ff0000/scarlet
scarlet/cms/internal_tags/fields.py
TaggedRelationWidget.get_add_link
def get_add_link(self): """ Appends the popup=1 query string to the url so the destination url treats it as a popup. """ url = super(TaggedRelationWidget, self).get_add_link() if url: qs = self.get_add_qs() if qs: url = "%s&%s" % (url, urllib.urlencode(qs)) return url
python
def get_add_link(self): """ Appends the popup=1 query string to the url so the destination url treats it as a popup. """ url = super(TaggedRelationWidget, self).get_add_link() if url: qs = self.get_add_qs() if qs: url = "%s&%s" % (url, urllib.urlencode(qs)) return url
[ "def", "get_add_link", "(", "self", ")", ":", "url", "=", "super", "(", "TaggedRelationWidget", ",", "self", ")", ".", "get_add_link", "(", ")", "if", "url", ":", "qs", "=", "self", ".", "get_add_qs", "(", ")", "if", "qs", ":", "url", "=", "\"%s&%s\"", "%", "(", "url", ",", "urllib", ".", "urlencode", "(", "qs", ")", ")", "return", "url" ]
Appends the popup=1 query string to the url so the destination url treats it as a popup.
[ "Appends", "the", "popup", "=", "1", "query", "string", "to", "the", "url", "so", "the", "destination", "url", "treats", "it", "as", "a", "popup", "." ]
train
https://github.com/ff0000/scarlet/blob/6c37befd810916a2d7ffff2cdb2dab57bcb6d12e/scarlet/cms/internal_tags/fields.py#L41-L52
0.005479
django-dbbackup/django-dbbackup
dbbackup/management/commands/_base.py
BaseDbBackupCommand._cleanup_old_backups
def _cleanup_old_backups(self, database=None, servername=None): """ Cleanup old backups, keeping the number of backups specified by DBBACKUP_CLEANUP_KEEP and any backups that occur on first of the month. """ self.storage.clean_old_backups(encrypted=self.encrypt, compressed=self.compress, content_type=self.content_type, database=database, servername=servername)
python
def _cleanup_old_backups(self, database=None, servername=None): """ Cleanup old backups, keeping the number of backups specified by DBBACKUP_CLEANUP_KEEP and any backups that occur on first of the month. """ self.storage.clean_old_backups(encrypted=self.encrypt, compressed=self.compress, content_type=self.content_type, database=database, servername=servername)
[ "def", "_cleanup_old_backups", "(", "self", ",", "database", "=", "None", ",", "servername", "=", "None", ")", ":", "self", ".", "storage", ".", "clean_old_backups", "(", "encrypted", "=", "self", ".", "encrypt", ",", "compressed", "=", "self", ".", "compress", ",", "content_type", "=", "self", ".", "content_type", ",", "database", "=", "database", ",", "servername", "=", "servername", ")" ]
Cleanup old backups, keeping the number of backups specified by DBBACKUP_CLEANUP_KEEP and any backups that occur on first of the month.
[ "Cleanup", "old", "backups", "keeping", "the", "number", "of", "backups", "specified", "by", "DBBACKUP_CLEANUP_KEEP", "and", "any", "backups", "that", "occur", "on", "first", "of", "the", "month", "." ]
train
https://github.com/django-dbbackup/django-dbbackup/blob/77de209e2d5317e51510d0f888e085ee0c400d66/dbbackup/management/commands/_base.py#L123-L132
0.003584
Microsoft/nni
src/sdk/pynni/nni/curvefitting_assessor/curvefunctions.py
pow4
def pow4(x, alpha, a, b, c): """pow4 Parameters ---------- x: int alpha: float a: float b: float c: float Returns ------- float c - (a*x+b)**-alpha """ return c - (a*x+b)**-alpha
python
def pow4(x, alpha, a, b, c): """pow4 Parameters ---------- x: int alpha: float a: float b: float c: float Returns ------- float c - (a*x+b)**-alpha """ return c - (a*x+b)**-alpha
[ "def", "pow4", "(", "x", ",", "alpha", ",", "a", ",", "b", ",", "c", ")", ":", "return", "c", "-", "(", "a", "*", "x", "+", "b", ")", "**", "-", "alpha" ]
pow4 Parameters ---------- x: int alpha: float a: float b: float c: float Returns ------- float c - (a*x+b)**-alpha
[ "pow4" ]
train
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/src/sdk/pynni/nni/curvefitting_assessor/curvefunctions.py#L152-L168
0.004167
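A quick numerical check of the pow4 family above, c - (a*x + b)**-alpha, one of the parametric curves the assessor fits to learning curves. With alpha = a = b = c = 1:

    # At x = 1: 1 - (1*1 + 1)**-1 = 1 - 0.5 = 0.5
    assert pow4(x=1, alpha=1, a=1, b=1, c=1) == 0.5

    # For large x the term (a*x + b)**-alpha vanishes, so the curve
    # approaches its asymptote c:
    print(pow4(x=10**6, alpha=1, a=1, b=1, c=1))  # ~0.999999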
google/google-visualization-python
gviz_api.py
DataTable.CoerceValue
def CoerceValue(value, value_type): """Coerces a single value into the type expected for its column. Internal helper method. Args: value: The value which should be converted value_type: One of "string", "number", "boolean", "date", "datetime" or "timeofday". Returns: An item of the Python type appropriate to the given value_type. Strings are also converted to Unicode using UTF-8 encoding if necessary. If a tuple is given, it should be in one of the following forms: - (value, formatted value) - (value, formatted value, custom properties) where the formatted value is a string, and custom properties is a dictionary of the custom properties for this cell. To specify custom properties without specifying formatted value, one can pass None as the formatted value. One can also have a null-valued cell with formatted value and/or custom properties by specifying None for the value. This method ignores the custom properties except for checking that it is a dictionary. The custom properties are handled in the ToJSon and ToJSCode methods. The real type of the given value is not strictly checked. For example, any type can be used for string - as we simply take its str( ) and for boolean value we just check "if value". Examples: CoerceValue(None, "string") returns None CoerceValue((5, "5$"), "number") returns (5, "5$") CoerceValue(100, "string") returns "100" CoerceValue(0, "boolean") returns False Raises: DataTableException: The value and type did not match in a not-recoverable way, for example given value 'abc' for type 'number'. """ if isinstance(value, tuple): # In case of a tuple, we run the same function on the value itself and # add the formatted value. if (len(value) not in [2, 3] or (len(value) == 3 and not isinstance(value[2], dict))): raise DataTableException("Wrong format for value and formatting - %s." % str(value)) if not isinstance(value[1], six.string_types + (type(None),)): raise DataTableException("Formatted value is not string, given %s." % type(value[1])) js_value = DataTable.CoerceValue(value[0], value_type) return (js_value,) + value[1:] t_value = type(value) if value is None: return value if value_type == "boolean": return bool(value) elif value_type == "number": if isinstance(value, six.integer_types + (float,)): return value raise DataTableException("Wrong type %s when expected number" % t_value) elif value_type == "string": if isinstance(value, six.text_type): return value if isinstance(value, bytes): return six.text_type(value, encoding="utf-8") else: return six.text_type(value) elif value_type == "date": if isinstance(value, datetime.datetime): return datetime.date(value.year, value.month, value.day) elif isinstance(value, datetime.date): return value else: raise DataTableException("Wrong type %s when expected date" % t_value) elif value_type == "timeofday": if isinstance(value, datetime.datetime): return datetime.time(value.hour, value.minute, value.second) elif isinstance(value, datetime.time): return value else: raise DataTableException("Wrong type %s when expected time" % t_value) elif value_type == "datetime": if isinstance(value, datetime.datetime): return value else: raise DataTableException("Wrong type %s when expected datetime" % t_value) # If we got here, it means the given value_type was not one of the # supported types. raise DataTableException("Unsupported type %s" % value_type)
python
def CoerceValue(value, value_type): """Coerces a single value into the type expected for its column. Internal helper method. Args: value: The value which should be converted value_type: One of "string", "number", "boolean", "date", "datetime" or "timeofday". Returns: An item of the Python type appropriate to the given value_type. Strings are also converted to Unicode using UTF-8 encoding if necessary. If a tuple is given, it should be in one of the following forms: - (value, formatted value) - (value, formatted value, custom properties) where the formatted value is a string, and custom properties is a dictionary of the custom properties for this cell. To specify custom properties without specifying formatted value, one can pass None as the formatted value. One can also have a null-valued cell with formatted value and/or custom properties by specifying None for the value. This method ignores the custom properties except for checking that it is a dictionary. The custom properties are handled in the ToJSon and ToJSCode methods. The real type of the given value is not strictly checked. For example, any type can be used for string - as we simply take its str( ) and for boolean value we just check "if value". Examples: CoerceValue(None, "string") returns None CoerceValue((5, "5$"), "number") returns (5, "5$") CoerceValue(100, "string") returns "100" CoerceValue(0, "boolean") returns False Raises: DataTableException: The value and type did not match in a not-recoverable way, for example given value 'abc' for type 'number'. """ if isinstance(value, tuple): # In case of a tuple, we run the same function on the value itself and # add the formatted value. if (len(value) not in [2, 3] or (len(value) == 3 and not isinstance(value[2], dict))): raise DataTableException("Wrong format for value and formatting - %s." % str(value)) if not isinstance(value[1], six.string_types + (type(None),)): raise DataTableException("Formatted value is not string, given %s." % type(value[1])) js_value = DataTable.CoerceValue(value[0], value_type) return (js_value,) + value[1:] t_value = type(value) if value is None: return value if value_type == "boolean": return bool(value) elif value_type == "number": if isinstance(value, six.integer_types + (float,)): return value raise DataTableException("Wrong type %s when expected number" % t_value) elif value_type == "string": if isinstance(value, six.text_type): return value if isinstance(value, bytes): return six.text_type(value, encoding="utf-8") else: return six.text_type(value) elif value_type == "date": if isinstance(value, datetime.datetime): return datetime.date(value.year, value.month, value.day) elif isinstance(value, datetime.date): return value else: raise DataTableException("Wrong type %s when expected date" % t_value) elif value_type == "timeofday": if isinstance(value, datetime.datetime): return datetime.time(value.hour, value.minute, value.second) elif isinstance(value, datetime.time): return value else: raise DataTableException("Wrong type %s when expected time" % t_value) elif value_type == "datetime": if isinstance(value, datetime.datetime): return value else: raise DataTableException("Wrong type %s when expected datetime" % t_value) # If we got here, it means the given value_type was not one of the # supported types. raise DataTableException("Unsupported type %s" % value_type)
[ "def", "CoerceValue", "(", "value", ",", "value_type", ")", ":", "if", "isinstance", "(", "value", ",", "tuple", ")", ":", "# In case of a tuple, we run the same function on the value itself and", "# add the formatted value.", "if", "(", "len", "(", "value", ")", "not", "in", "[", "2", ",", "3", "]", "or", "(", "len", "(", "value", ")", "==", "3", "and", "not", "isinstance", "(", "value", "[", "2", "]", ",", "dict", ")", ")", ")", ":", "raise", "DataTableException", "(", "\"Wrong format for value and formatting - %s.\"", "%", "str", "(", "value", ")", ")", "if", "not", "isinstance", "(", "value", "[", "1", "]", ",", "six", ".", "string_types", "+", "(", "type", "(", "None", ")", ",", ")", ")", ":", "raise", "DataTableException", "(", "\"Formatted value is not string, given %s.\"", "%", "type", "(", "value", "[", "1", "]", ")", ")", "js_value", "=", "DataTable", ".", "CoerceValue", "(", "value", "[", "0", "]", ",", "value_type", ")", "return", "(", "js_value", ",", ")", "+", "value", "[", "1", ":", "]", "t_value", "=", "type", "(", "value", ")", "if", "value", "is", "None", ":", "return", "value", "if", "value_type", "==", "\"boolean\"", ":", "return", "bool", "(", "value", ")", "elif", "value_type", "==", "\"number\"", ":", "if", "isinstance", "(", "value", ",", "six", ".", "integer_types", "+", "(", "float", ",", ")", ")", ":", "return", "value", "raise", "DataTableException", "(", "\"Wrong type %s when expected number\"", "%", "t_value", ")", "elif", "value_type", "==", "\"string\"", ":", "if", "isinstance", "(", "value", ",", "six", ".", "text_type", ")", ":", "return", "value", "if", "isinstance", "(", "value", ",", "bytes", ")", ":", "return", "six", ".", "text_type", "(", "value", ",", "encoding", "=", "\"utf-8\"", ")", "else", ":", "return", "six", ".", "text_type", "(", "value", ")", "elif", "value_type", "==", "\"date\"", ":", "if", "isinstance", "(", "value", ",", "datetime", ".", "datetime", ")", ":", "return", "datetime", ".", "date", "(", "value", ".", "year", ",", "value", ".", "month", ",", "value", ".", "day", ")", "elif", "isinstance", "(", "value", ",", "datetime", ".", "date", ")", ":", "return", "value", "else", ":", "raise", "DataTableException", "(", "\"Wrong type %s when expected date\"", "%", "t_value", ")", "elif", "value_type", "==", "\"timeofday\"", ":", "if", "isinstance", "(", "value", ",", "datetime", ".", "datetime", ")", ":", "return", "datetime", ".", "time", "(", "value", ".", "hour", ",", "value", ".", "minute", ",", "value", ".", "second", ")", "elif", "isinstance", "(", "value", ",", "datetime", ".", "time", ")", ":", "return", "value", "else", ":", "raise", "DataTableException", "(", "\"Wrong type %s when expected time\"", "%", "t_value", ")", "elif", "value_type", "==", "\"datetime\"", ":", "if", "isinstance", "(", "value", ",", "datetime", ".", "datetime", ")", ":", "return", "value", "else", ":", "raise", "DataTableException", "(", "\"Wrong type %s when expected datetime\"", "%", "t_value", ")", "# If we got here, it means the given value_type was not one of the", "# supported types.", "raise", "DataTableException", "(", "\"Unsupported type %s\"", "%", "value_type", ")" ]
Coerces a single value into the type expected for its column. Internal helper method. Args: value: The value which should be converted value_type: One of "string", "number", "boolean", "date", "datetime" or "timeofday". Returns: An item of the Python type appropriate to the given value_type. Strings are also converted to Unicode using UTF-8 encoding if necessary. If a tuple is given, it should be in one of the following forms: - (value, formatted value) - (value, formatted value, custom properties) where the formatted value is a string, and custom properties is a dictionary of the custom properties for this cell. To specify custom properties without specifying formatted value, one can pass None as the formatted value. One can also have a null-valued cell with formatted value and/or custom properties by specifying None for the value. This method ignores the custom properties except for checking that it is a dictionary. The custom properties are handled in the ToJSon and ToJSCode methods. The real type of the given value is not strictly checked. For example, any type can be used for string - as we simply take its str( ) and for boolean value we just check "if value". Examples: CoerceValue(None, "string") returns None CoerceValue((5, "5$"), "number") returns (5, "5$") CoerceValue(100, "string") returns "100" CoerceValue(0, "boolean") returns False Raises: DataTableException: The value and type did not match in a not-recoverable way, for example given value 'abc' for type 'number'.
[ "Coerces", "a", "single", "value", "into", "the", "type", "expected", "for", "its", "column", "." ]
train
https://github.com/google/google-visualization-python/blob/cbfb4d69ad2f4ca30dc55791629280aa3214c8e3/gviz_api.py#L176-L270
0.006252
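CoerceValue is a static helper, so it can be exercised directly; the assertions below just replay the conversions its own docstring documents:

    assert DataTable.CoerceValue(None, "string") is None
    assert DataTable.CoerceValue(100, "string") == u"100"
    assert DataTable.CoerceValue(0, "boolean") is False
    # A (value, formatted value) tuple keeps its formatting untouched:
    assert DataTable.CoerceValue((5, "5$"), "number") == (5, "5$")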
svartalf/python-opus
opus/api/encoder.py
encode_float
def encode_float(encoder, pcm, frame_size, max_data_bytes): """Encodes an Opus frame from floating point input""" pcm = ctypes.cast(pcm, c_float_pointer) data = (ctypes.c_char * max_data_bytes)() result = _encode_float(encoder, pcm, frame_size, data, max_data_bytes) if result < 0: raise OpusError(result) return array.array('c', data[:result]).tostring()
python
def encode_float(encoder, pcm, frame_size, max_data_bytes): """Encodes an Opus frame from floating point input""" pcm = ctypes.cast(pcm, c_float_pointer) data = (ctypes.c_char * max_data_bytes)() result = _encode_float(encoder, pcm, frame_size, data, max_data_bytes) if result < 0: raise OpusError(result) return array.array('c', data[:result]).tostring()
[ "def", "encode_float", "(", "encoder", ",", "pcm", ",", "frame_size", ",", "max_data_bytes", ")", ":", "pcm", "=", "ctypes", ".", "cast", "(", "pcm", ",", "c_float_pointer", ")", "data", "=", "(", "ctypes", ".", "c_char", "*", "max_data_bytes", ")", "(", ")", "result", "=", "_encode_float", "(", "encoder", ",", "pcm", ",", "frame_size", ",", "data", ",", "max_data_bytes", ")", "if", "result", "<", "0", ":", "raise", "OpusError", "(", "result", ")", "return", "array", ".", "array", "(", "'c'", ",", "data", "[", ":", "result", "]", ")", ".", "tostring", "(", ")" ]
Encodes an Opus frame from floating point input
[ "Encodes", "an", "Opus", "frame", "from", "floating", "point", "input" ]
train
https://github.com/svartalf/python-opus/blob/a3c1d556d2772b5be659ddd08c033ddd4d566b3a/opus/api/encoder.py#L89-L99
0.002564
juju/python-libjuju
juju/user.py
User.enable
async def enable(self): """Re-enable this user. """ await self.controller.enable_user(self.username) self._user_info.disabled = False
python
async def enable(self): """Re-enable this user. """ await self.controller.enable_user(self.username) self._user_info.disabled = False
[ "async", "def", "enable", "(", "self", ")", ":", "await", "self", ".", "controller", ".", "enable_user", "(", "self", ".", "username", ")", "self", ".", "_user_info", ".", "disabled", "=", "False" ]
Re-enable this user.
[ "Re", "-", "enable", "this", "user", "." ]
train
https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/user.py#L82-L86
0.012121
PyHDI/Pyverilog
pyverilog/vparser/parser.py
VerilogParser.p_expression_Or
def p_expression_Or(self, p): 'expression : expression OR expression' p[0] = Or(p[1], p[3], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
python
def p_expression_Or(self, p): 'expression : expression OR expression' p[0] = Or(p[1], p[3], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
[ "def", "p_expression_Or", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "Or", "(", "p", "[", "1", "]", ",", "p", "[", "3", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")", "p", ".", "set_lineno", "(", "0", ",", "p", ".", "lineno", "(", "1", ")", ")" ]
expression : expression OR expression
[ "expression", ":", "expression", "OR", "expression" ]
train
https://github.com/PyHDI/Pyverilog/blob/b852cc5ed6a7a2712e33639f9d9782d0d1587a53/pyverilog/vparser/parser.py#L1088-L1091
0.012195
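In PLY (the parser generator Pyverilog builds on), a rule's docstring is the grammar production itself, which is why the one-line docstring above carries real semantics. A sibling rule for another binary operator would look nearly identical; this AND variant is illustrative, not quoted from the parser:

    def p_expression_And(self, p):
        'expression : expression AND expression'
        p[0] = And(p[1], p[3], lineno=p.lineno(1))
        p.set_lineno(0, p.lineno(1))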
mehmetg/streak_client
streak_client/streak_client.py
StreakClient.get_box_reminders
def get_box_reminders(self, box_key):
		'''Gets all reminders for a box
		Args:
			box_key		key of the box whose reminders are fetched
		return		(status code, reminder dict)
		'''
		#required sanity check
		if not box_key:
			return requests.codes.bad_request, None

		uri = '/'.join([self.api_uri,
						self.boxes_suffix,
						box_key,
						self.reminders_suffix
						])
		return self._req('get', uri)
python
def get_box_reminders(self, box_key):
		'''Gets all reminders for a box
		Args:
			box_key		key of the box whose reminders are fetched
		return		(status code, reminder dict)
		'''
		#required sanity check
		if not box_key:
			return requests.codes.bad_request, None

		uri = '/'.join([self.api_uri,
						self.boxes_suffix,
						box_key,
						self.reminders_suffix
						])
		return self._req('get', uri)
[ "def", "get_box_reminders", "(", "self", ",", "box_key", ")", ":", "#required sanity check", "if", "box_key", ":", "return", "requests", ".", "codes", ".", "bad_request", ",", "None", "uri", "=", "'/'", ".", "join", "(", "[", "self", ".", "api_uri", ",", "self", ".", "boxes_suffix", ",", "box_key", ",", "self", ".", "reminders_suffix", "]", ")", "return", "self", ".", "_req", "(", "'get'", ",", "uri", ")" ]
Gets all reminders for a box
		Args:
			box_key		key of the box whose reminders are fetched
		return		(status code, reminder dict)
[ "Gets", "all", "reminders", "for", "a", "box", "Args", ":", "reminder", "updated", "reminder", "of", "StreakReminder", "type", "return", "(", "status", "code", "reminder", "dict", ")" ]
train
https://github.com/mehmetg/streak_client/blob/46575510b4e4163a4a3cc06f7283a1ae377cdce6/streak_client/streak_client.py#L950-L966
0.054455
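A hypothetical call site for get_box_reminders (the client object and box key are placeholders, not values from the repository):

    status, reminders = client.get_box_reminders('BOX_KEY')  # placeholder key
    if status == requests.codes.ok:
        for reminder in reminders:
            print(reminder)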
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/generators.py
register_standard
def register_standard (id, source_types, target_types, requirements = []):
    """ Creates a new instance of the 'generator' class and registers it.
        Returns the created instance.
        Rationale: the instance is returned so that it's possible to first register
        a generator and then call 'run' method on that generator, bypassing all
        generator selection.
    """
    g = Generator (id, False, source_types, target_types, requirements)
    register (g)
    return g
python
def register_standard (id, source_types, target_types, requirements = []):
    """ Creates a new instance of the 'generator' class and registers it.
        Returns the created instance.
        Rationale: the instance is returned so that it's possible to first register
        a generator and then call 'run' method on that generator, bypassing all
        generator selection.
    """
    g = Generator (id, False, source_types, target_types, requirements)
    register (g)
    return g
[ "def", "register_standard", "(", "id", ",", "source_types", ",", "target_types", ",", "requirements", "=", "[", "]", ")", ":", "g", "=", "Generator", "(", "id", ",", "False", ",", "source_types", ",", "target_types", ",", "requirements", ")", "register", "(", "g", ")", "return", "g" ]
Creates a new instance of the 'generator' class and registers it. Returns the created instance. Rationale: the instance is returned so that it's possible to first register a generator and then call 'run' method on that generator, bypassing all generator selection.
[ "Creates", "new", "instance", "of", "the", "generator", "class", "and", "registers", "it", ".", "Returns", "the", "creates", "instance", ".", "Rationale", ":", "the", "instance", "is", "returned", "so", "that", "it", "s", "possible", "to", "first", "register", "a", "generator", "and", "then", "call", "run", "method", "on", "that", "generator", "bypassing", "all", "generator", "selection", "." ]
train
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/generators.py#L723-L732
0.014374
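Illustrative use of register_standard; the id follows b2's usual TOOL.ACTION naming and the type names mirror built-in C++ types, but this exact generator is hypothetical:

    # Register a generator turning C++ sources into object files.
    g = register_standard('mytool.compile.c++', ['CPP'], ['OBJ'])

    # Per the rationale above, the returned instance can be invoked
    # directly, bypassing generator selection:
    # g.run(project, name, prop_set, sources)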
manns/pyspread
pyspread/src/actions/_grid_cell_actions.py
CellActions.change_frozen_attr
def change_frozen_attr(self):
        """Changes frozen state of cell if there is no selection"""

        # Selections are not supported

        if self.grid.selection:
            statustext = _("Freezing selections is not supported.")
            post_command_event(self.main_window, self.StatusBarMsg,
                               text=statustext)

        cursor = self.grid.actions.cursor

        frozen = self.grid.code_array.cell_attributes[cursor]["frozen"]

        if frozen:
            # We have a frozen cell that has to be unfrozen

            # Delete frozen cache content
            self.grid.code_array.frozen_cache.pop(repr(cursor))

        else:
            # We have a non-frozen cell that has to be frozen

            # Add frozen cache content
            res_obj = self.grid.code_array[cursor]
            self.grid.code_array.frozen_cache[repr(cursor)] = res_obj

        # Set the new frozen state / code
        selection = Selection([], [], [], [], [cursor[:2]])
        self.set_attr("frozen", not frozen, selection=selection)
python
def change_frozen_attr(self):
        """Changes frozen state of cell if there is no selection"""

        # Selections are not supported

        if self.grid.selection:
            statustext = _("Freezing selections is not supported.")
            post_command_event(self.main_window, self.StatusBarMsg,
                               text=statustext)

        cursor = self.grid.actions.cursor

        frozen = self.grid.code_array.cell_attributes[cursor]["frozen"]

        if frozen:
            # We have a frozen cell that has to be unfrozen

            # Delete frozen cache content
            self.grid.code_array.frozen_cache.pop(repr(cursor))

        else:
            # We have a non-frozen cell that has to be frozen

            # Add frozen cache content
            res_obj = self.grid.code_array[cursor]
            self.grid.code_array.frozen_cache[repr(cursor)] = res_obj

        # Set the new frozen state / code
        selection = Selection([], [], [], [], [cursor[:2]])
        self.set_attr("frozen", not frozen, selection=selection)
[ "def", "change_frozen_attr", "(", "self", ")", ":", "# Selections are not supported", "if", "self", ".", "grid", ".", "selection", ":", "statustext", "=", "_", "(", "\"Freezing selections is not supported.\"", ")", "post_command_event", "(", "self", ".", "main_window", ",", "self", ".", "StatusBarMsg", ",", "text", "=", "statustext", ")", "cursor", "=", "self", ".", "grid", ".", "actions", ".", "cursor", "frozen", "=", "self", ".", "grid", ".", "code_array", ".", "cell_attributes", "[", "cursor", "]", "[", "\"frozen\"", "]", "if", "frozen", ":", "# We have an frozen cell that has to be unfrozen", "# Delete frozen cache content", "self", ".", "grid", ".", "code_array", ".", "frozen_cache", ".", "pop", "(", "repr", "(", "cursor", ")", ")", "else", ":", "# We have an non-frozen cell that has to be frozen", "# Add frozen cache content", "res_obj", "=", "self", ".", "grid", ".", "code_array", "[", "cursor", "]", "self", ".", "grid", ".", "code_array", ".", "frozen_cache", "[", "repr", "(", "cursor", ")", "]", "=", "res_obj", "# Set the new frozen state / code", "selection", "=", "Selection", "(", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "cursor", "[", ":", "2", "]", "]", ")", "self", ".", "set_attr", "(", "\"frozen\"", ",", "not", "frozen", ",", "selection", "=", "selection", ")" ]
Changes frozen state of cell if there is no selection
[ "Changes", "frozen", "state", "of", "cell", "if", "there", "is", "no", "selection" ]
train
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/actions/_grid_cell_actions.py#L308-L337
0.001878
nickpandolfi/Cyther
cyther/searcher.py
search_file
def search_file(pattern, file_path): """ Search a given file's contents for the regex pattern given as 'pattern' """ try: with open(file_path) as file: string = file.read() except PermissionError: return [] matches = re.findall(pattern, string) return matches
python
def search_file(pattern, file_path): """ Search a given file's contents for the regex pattern given as 'pattern' """ try: with open(file_path) as file: string = file.read() except PermissionError: return [] matches = re.findall(pattern, string) return matches
[ "def", "search_file", "(", "pattern", ",", "file_path", ")", ":", "try", ":", "with", "open", "(", "file_path", ")", "as", "file", ":", "string", "=", "file", ".", "read", "(", ")", "except", "PermissionError", ":", "return", "[", "]", "matches", "=", "re", ".", "findall", "(", "pattern", ",", "string", ")", "return", "matches" ]
Search a given file's contents for the regex pattern given as 'pattern'
[ "Search", "a", "given", "file", "s", "contents", "for", "the", "regex", "pattern", "given", "as", "pattern" ]
train
https://github.com/nickpandolfi/Cyther/blob/9fb0bd77af594008aa6ee8af460aa8c953abf5bc/cyther/searcher.py#L34-L46
0.003155
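Example use of search_file; note it returns an empty list on PermissionError but propagates other I/O errors (such as a missing file):

    # Collect TODO markers from a source file (pattern and path illustrative).
    for match in search_file(r'TODO:.*', 'setup.py'):
        print(match)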
openstack/networking-cisco
networking_cisco/plugins/cisco/db/device_manager/hosting_device_manager_db.py
HostingDeviceManagerMixin._update_hosting_device_exclusivity
def _update_hosting_device_exclusivity(self, context, hosting_device, tenant_id): """Make <hosting device> bound or unbound to <tenant_id>. If <tenant_id> is None the device is unbound, otherwise it gets bound to that <tenant_id> """ with context.session.begin(subtransactions=True): hosting_device['tenant_bound'] = tenant_id context.session.add(hosting_device) for item in (context.session.query(hd_models.SlotAllocation). filter_by(hosting_device_id=hosting_device['id'])): item['tenant_bound'] = tenant_id context.session.add(item)
python
def _update_hosting_device_exclusivity(self, context, hosting_device, tenant_id): """Make <hosting device> bound or unbound to <tenant_id>. If <tenant_id> is None the device is unbound, otherwise it gets bound to that <tenant_id> """ with context.session.begin(subtransactions=True): hosting_device['tenant_bound'] = tenant_id context.session.add(hosting_device) for item in (context.session.query(hd_models.SlotAllocation). filter_by(hosting_device_id=hosting_device['id'])): item['tenant_bound'] = tenant_id context.session.add(item)
[ "def", "_update_hosting_device_exclusivity", "(", "self", ",", "context", ",", "hosting_device", ",", "tenant_id", ")", ":", "with", "context", ".", "session", ".", "begin", "(", "subtransactions", "=", "True", ")", ":", "hosting_device", "[", "'tenant_bound'", "]", "=", "tenant_id", "context", ".", "session", ".", "add", "(", "hosting_device", ")", "for", "item", "in", "(", "context", ".", "session", ".", "query", "(", "hd_models", ".", "SlotAllocation", ")", ".", "filter_by", "(", "hosting_device_id", "=", "hosting_device", "[", "'id'", "]", ")", ")", ":", "item", "[", "'tenant_bound'", "]", "=", "tenant_id", "context", ".", "session", ".", "add", "(", "item", ")" ]
Make <hosting device> bound or unbound to <tenant_id>. If <tenant_id> is None the device is unbound, otherwise it gets bound to that <tenant_id>
[ "Make", "<hosting", "device", ">", "bound", "or", "unbound", "to", "<tenant_id", ">", "." ]
train
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/db/device_manager/hosting_device_manager_db.py#L900-L913
0.004213
jldbc/pybaseball
pybaseball/retrosheet.py
world_series_logs
def world_series_logs(): """ Pull Retrosheet World Series Game Logs """ file_name = 'GLWS.TXT' z = get_zip_file(world_series_url) data = pd.read_csv(z.open(file_name), header=None, sep=',', quotechar='"') data.columns = gamelog_columns return data
python
def world_series_logs(): """ Pull Retrosheet World Series Game Logs """ file_name = 'GLWS.TXT' z = get_zip_file(world_series_url) data = pd.read_csv(z.open(file_name), header=None, sep=',', quotechar='"') data.columns = gamelog_columns return data
[ "def", "world_series_logs", "(", ")", ":", "file_name", "=", "'GLWS.TXT'", "z", "=", "get_zip_file", "(", "world_series_url", ")", "data", "=", "pd", ".", "read_csv", "(", "z", ".", "open", "(", "file_name", ")", ",", "header", "=", "None", ",", "sep", "=", "','", ",", "quotechar", "=", "'\"'", ")", "data", ".", "columns", "=", "gamelog_columns", "return", "data" ]
Pull Retrosheet World Series Game Logs
[ "Pull", "Retrosheet", "World", "Series", "Game", "Logs" ]
train
https://github.com/jldbc/pybaseball/blob/085ea26bfd1b5f5926d79d4fac985c88278115f2/pybaseball/retrosheet.py#L103-L111
0.003584
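Typical exploration of the frame returned by world_series_logs; the column names come from gamelog_columns, the shared Retrosheet game-log layout referenced above:

    ws = world_series_logs()
    print(ws.shape)        # one row per World Series game on record
    print(ws.columns[:5])  # first few Retrosheet game-log fields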
MacHu-GWU/single_file_module-project
sfm/fingerprint.py
FingerPrint.use
def use(self, algorithm):
        """Change the hash algorithm you are going to use.
        """
        try:
            self.hash_algo = self._mapper[algorithm.strip().lower()]
        except KeyError: # pragma: no cover
            template = "'%s' is not supported, try one of %s."
            raise ValueError(template % (algorithm, list(self._mapper)))
python
def use(self, algorithm):
        """Change the hash algorithm you are going to use.
        """
        try:
            self.hash_algo = self._mapper[algorithm.strip().lower()]
        except KeyError: # pragma: no cover
            template = "'%s' is not supported, try one of %s."
            raise ValueError(template % (algorithm, list(self._mapper)))
[ "def", "use", "(", "self", ",", "algorithm", ")", ":", "try", ":", "self", ".", "hash_algo", "=", "self", ".", "_mapper", "[", "algorithm", ".", "strip", "(", ")", ".", "lower", "(", ")", "]", "except", "IndexError", ":", "# pragma: no cover", "template", "=", "\"'%s' is not supported, try one of %s.\"", "raise", "ValueError", "(", "template", "%", "(", "algorithm", ",", "list", "(", "self", ".", "_mapper", ")", ")", ")" ]
Change the hash algorithm you are going to use.
[ "Change", "the", "hash", "algorithm", "you", "gonna", "use", "." ]
train
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/fingerprint.py#L76-L83
0.00565
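Hypothetical use of FingerPrint.use, assuming _mapper keys are lower-case algorithm names such as 'md5' and 'sha256' (the strip()/lower() call normalizes case and surrounding whitespace):

    fp = FingerPrint()
    fp.use(' SHA256 ')   # normalized to 'sha256' before the lookup
    fp.use('md5')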
user-cont/conu
conu/backend/docker/container.py
DockerContainer.get_port_mappings
def get_port_mappings(self, port=None): """ Get list of port mappings between container and host. The format of dicts is: {"HostIp": XX, "HostPort": YY}; When port is None - return all port mappings. The container needs to be running, otherwise this returns an empty list. :param port: int or None, container port :return: list of dict or None; dict when port=None """ port_mappings = self.inspect(refresh=True)["NetworkSettings"]["Ports"] if not port: return port_mappings if str(port) not in self.get_ports(): return [] for p in port_mappings: if p.split("/")[0] == str(port): return port_mappings[p]
python
def get_port_mappings(self, port=None): """ Get list of port mappings between container and host. The format of dicts is: {"HostIp": XX, "HostPort": YY}; When port is None - return all port mappings. The container needs to be running, otherwise this returns an empty list. :param port: int or None, container port :return: list of dict or None; dict when port=None """ port_mappings = self.inspect(refresh=True)["NetworkSettings"]["Ports"] if not port: return port_mappings if str(port) not in self.get_ports(): return [] for p in port_mappings: if p.split("/")[0] == str(port): return port_mappings[p]
[ "def", "get_port_mappings", "(", "self", ",", "port", "=", "None", ")", ":", "port_mappings", "=", "self", ".", "inspect", "(", "refresh", "=", "True", ")", "[", "\"NetworkSettings\"", "]", "[", "\"Ports\"", "]", "if", "not", "port", ":", "return", "port_mappings", "if", "str", "(", "port", ")", "not", "in", "self", ".", "get_ports", "(", ")", ":", "return", "[", "]", "for", "p", "in", "port_mappings", ":", "if", "p", ".", "split", "(", "\"/\"", ")", "[", "0", "]", "==", "str", "(", "port", ")", ":", "return", "port_mappings", "[", "p", "]" ]
Get list of port mappings between container and host. The format of dicts is: {"HostIp": XX, "HostPort": YY}; When port is None - return all port mappings. The container needs to be running, otherwise this returns an empty list. :param port: int or None, container port :return: list of dict or None; dict when port=None
[ "Get", "list", "of", "port", "mappings", "between", "container", "and", "host", ".", "The", "format", "of", "dicts", "is", ":" ]
train
https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/backend/docker/container.py#L378-L400
0.003947
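Illustrative call against a running conu container (the container object and port are placeholders):

    for mapping in container.get_port_mappings(port=8080):
        print(mapping['HostIp'], mapping['HostPort'])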
bioidiap/bob.ip.facedetect
bob/ip/facedetect/train/Bootstrap.py
Bootstrap._load
def _load(self, hdf5): """Loads the intermediate state of the bootstrapping from file.""" positives = set(hdf5.get("PositiveIndices")) negatives = set(hdf5.get("NegativeIndices")) hdf5.cd("Model") model = bob.learn.boosting.BoostedMachine(hdf5) return model, positives, negatives
python
def _load(self, hdf5): """Loads the intermediate state of the bootstrapping from file.""" positives = set(hdf5.get("PositiveIndices")) negatives = set(hdf5.get("NegativeIndices")) hdf5.cd("Model") model = bob.learn.boosting.BoostedMachine(hdf5) return model, positives, negatives
[ "def", "_load", "(", "self", ",", "hdf5", ")", ":", "positives", "=", "set", "(", "hdf5", ".", "get", "(", "\"PositiveIndices\"", ")", ")", "negatives", "=", "set", "(", "hdf5", ".", "get", "(", "\"NegativeIndices\"", ")", ")", "hdf5", ".", "cd", "(", "\"Model\"", ")", "model", "=", "bob", ".", "learn", ".", "boosting", ".", "BoostedMachine", "(", "hdf5", ")", "return", "model", ",", "positives", ",", "negatives" ]
Loads the intermediate state of the bootstrapping from file.
[ "Loads", "the", "intermediate", "state", "of", "the", "bootstrapping", "from", "file", "." ]
train
https://github.com/bioidiap/bob.ip.facedetect/blob/601da5141ca7302ad36424d1421b33190ba46779/bob/ip/facedetect/train/Bootstrap.py#L132-L138
0.0033
magrathealabs/feito
feito/github/api.py
API.create_comment_commit
def create_comment_commit(self, body, commit_id, path, position, pr_id): """ Posts a comment to a given commit at a certain pull request. Check https://developer.github.com/v3/pulls/comments/#create-a-comment param body: str -> Comment text param commit_id: str -> SHA of the commit param path: str -> Relative path of the file to be commented param position: int -> The position in the diff to add a review comment param pr_id: int -> Github pull request id """ comments_url = f"{self.GITHUB_API_URL}/repos/{self.user}/{self.repo}/pulls/{pr_id}/comments" data = {'body': body, 'commit_id': commit_id, 'path': path, 'position': position} return requests.post(comments_url, json=data, headers=self.auth_header)
python
def create_comment_commit(self, body, commit_id, path, position, pr_id): """ Posts a comment to a given commit at a certain pull request. Check https://developer.github.com/v3/pulls/comments/#create-a-comment param body: str -> Comment text param commit_id: str -> SHA of the commit param path: str -> Relative path of the file to be commented param position: int -> The position in the diff to add a review comment param pr_id: int -> Github pull request id """ comments_url = f"{self.GITHUB_API_URL}/repos/{self.user}/{self.repo}/pulls/{pr_id}/comments" data = {'body': body, 'commit_id': commit_id, 'path': path, 'position': position} return requests.post(comments_url, json=data, headers=self.auth_header)
[ "def", "create_comment_commit", "(", "self", ",", "body", ",", "commit_id", ",", "path", ",", "position", ",", "pr_id", ")", ":", "comments_url", "=", "f\"{self.GITHUB_API_URL}/repos/{self.user}/{self.repo}/pulls/{pr_id}/comments\"", "data", "=", "{", "'body'", ":", "body", ",", "'commit_id'", ":", "commit_id", ",", "'path'", ":", "path", ",", "'position'", ":", "position", "}", "return", "requests", ".", "post", "(", "comments_url", ",", "json", "=", "data", ",", "headers", "=", "self", ".", "auth_header", ")" ]
Posts a comment to a given commit at a certain pull request. Check https://developer.github.com/v3/pulls/comments/#create-a-comment param body: str -> Comment text param commit_id: str -> SHA of the commit param path: str -> Relative path of the file to be commented param position: int -> The position in the diff to add a review comment param pr_id: int -> Github pull request id
[ "Posts", "a", "comment", "to", "a", "given", "commit", "at", "a", "certain", "pull", "request", ".", "Check", "https", ":", "//", "developer", ".", "github", ".", "com", "/", "v3", "/", "pulls", "/", "comments", "/", "#create", "-", "a", "-", "comment" ]
train
https://github.com/magrathealabs/feito/blob/4179e40233ccf6e5a6c9892e528595690ce9ef43/feito/github/api.py#L23-L37
0.004957
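A hypothetical review comment posted through the method above; the SHA is the sample commit id from GitHub's API docs and the PR number is made up:

    resp = api.create_comment_commit(
        body='Consider extracting this into a helper.',
        commit_id='6dcb09b5b57875f334f61aebed695e2e4193db5e',
        path='feito/github/api.py',
        position=3,
        pr_id=42,
    )
    print(resp.status_code)  # 201 when the comment was created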
inveniosoftware-attic/invenio-comments
invenio_comments/api.py
perform_request_vote
def perform_request_vote(cmt_id, client_ip_address, value, uid=-1): """ Vote positively or negatively for a comment/review :param cmt_id: review id :param value: +1 for voting positively -1 for voting negatively :return: integer 1 if successful, integer 0 if not """ cmt_id = wash_url_argument(cmt_id, 'int') client_ip_address = wash_url_argument(client_ip_address, 'str') value = wash_url_argument(value, 'int') uid = wash_url_argument(uid, 'int') if cmt_id > 0 and value in [-1, 1] and check_user_can_vote(cmt_id, client_ip_address, uid): action_date = convert_datestruct_to_datetext(time.localtime()) action_code = CFG_WEBCOMMENT_ACTION_CODE['VOTE'] # FIXME compatibility with postgresql query = """INSERT INTO "cmtACTIONHISTORY" ("id_cmtRECORDCOMMENT", id_bibrec, id_user, client_host, action_time, action_code) VALUES (%s, NULL ,%s, inet_aton(%s), %s, %s)""" params = (cmt_id, uid, client_ip_address, action_date, action_code) run_sql(query, params) return query_record_useful_review(cmt_id, value) else: return 0
python
def perform_request_vote(cmt_id, client_ip_address, value, uid=-1): """ Vote positively or negatively for a comment/review :param cmt_id: review id :param value: +1 for voting positively -1 for voting negatively :return: integer 1 if successful, integer 0 if not """ cmt_id = wash_url_argument(cmt_id, 'int') client_ip_address = wash_url_argument(client_ip_address, 'str') value = wash_url_argument(value, 'int') uid = wash_url_argument(uid, 'int') if cmt_id > 0 and value in [-1, 1] and check_user_can_vote(cmt_id, client_ip_address, uid): action_date = convert_datestruct_to_datetext(time.localtime()) action_code = CFG_WEBCOMMENT_ACTION_CODE['VOTE'] # FIXME compatibility with postgresql query = """INSERT INTO "cmtACTIONHISTORY" ("id_cmtRECORDCOMMENT", id_bibrec, id_user, client_host, action_time, action_code) VALUES (%s, NULL ,%s, inet_aton(%s), %s, %s)""" params = (cmt_id, uid, client_ip_address, action_date, action_code) run_sql(query, params) return query_record_useful_review(cmt_id, value) else: return 0
[ "def", "perform_request_vote", "(", "cmt_id", ",", "client_ip_address", ",", "value", ",", "uid", "=", "-", "1", ")", ":", "cmt_id", "=", "wash_url_argument", "(", "cmt_id", ",", "'int'", ")", "client_ip_address", "=", "wash_url_argument", "(", "client_ip_address", ",", "'str'", ")", "value", "=", "wash_url_argument", "(", "value", ",", "'int'", ")", "uid", "=", "wash_url_argument", "(", "uid", ",", "'int'", ")", "if", "cmt_id", ">", "0", "and", "value", "in", "[", "-", "1", ",", "1", "]", "and", "check_user_can_vote", "(", "cmt_id", ",", "client_ip_address", ",", "uid", ")", ":", "action_date", "=", "convert_datestruct_to_datetext", "(", "time", ".", "localtime", "(", ")", ")", "action_code", "=", "CFG_WEBCOMMENT_ACTION_CODE", "[", "'VOTE'", "]", "# FIXME compatibility with postgresql", "query", "=", "\"\"\"INSERT INTO \"cmtACTIONHISTORY\" (\"id_cmtRECORDCOMMENT\",\n id_bibrec, id_user, client_host, action_time,\n action_code)\n VALUES (%s, NULL ,%s, inet_aton(%s), %s, %s)\"\"\"", "params", "=", "(", "cmt_id", ",", "uid", ",", "client_ip_address", ",", "action_date", ",", "action_code", ")", "run_sql", "(", "query", ",", "params", ")", "return", "query_record_useful_review", "(", "cmt_id", ",", "value", ")", "else", ":", "return", "0" ]
Vote positively or negatively for a comment/review :param cmt_id: review id :param value: +1 for voting positively -1 for voting negatively :return: integer 1 if successful, integer 0 if not
[ "Vote", "positively", "or", "negatively", "for", "a", "comment", "/", "review", ":", "param", "cmt_id", ":", "review", "id", ":", "param", "value", ":", "+", "1", "for", "voting", "positively", "-", "1", "for", "voting", "negatively", ":", "return", ":", "integer", "1", "if", "successful", "integer", "0", "if", "not" ]
train
https://github.com/inveniosoftware-attic/invenio-comments/blob/62bb6e07c146baf75bf8de80b5896ab2a01a8423/invenio_comments/api.py#L378-L405
0.000737
sdispater/orator
orator/dbal/platforms/platform.py
Platform._get_create_table_sql
def _get_create_table_sql(self, table_name, columns, options=None): """ Returns the SQL used to create a table. :param table_name: The name of the table to create :type table_name: str :param columns: The table columns :type columns: dict :param options: The options :type options: dict :rtype: list """ options = options or {} column_list_sql = self.get_column_declaration_list_sql(columns) if options.get("unique_constraints"): for name, definition in options["unique_constraints"].items(): column_list_sql += ", %s" % self.get_unique_constraint_declaration_sql( name, definition ) if options.get("primary"): column_list_sql += ", PRIMARY KEY(%s)" % ", ".join(options["primary"]) if options.get("indexes"): for index, definition in options["indexes"]: column_list_sql += ", %s" % self.get_index_declaration_sql( index, definition ) query = "CREATE TABLE %s (%s" % (table_name, column_list_sql) check = self.get_check_declaration_sql(columns) if check: query += ", %s" % check query += ")" sql = [query] if options.get("foreign_keys"): for definition in options["foreign_keys"]: sql.append(self.get_create_foreign_key_sql(definition, table_name)) return sql
python
def _get_create_table_sql(self, table_name, columns, options=None):
        """
        Returns the SQL used to create a table.

        :param table_name: The name of the table to create
        :type table_name: str

        :param columns: The table columns
        :type columns: dict

        :param options: The options
        :type options: dict

        :rtype: list
        """
        options = options or {}

        column_list_sql = self.get_column_declaration_list_sql(columns)

        if options.get("unique_constraints"):
            for name, definition in options["unique_constraints"].items():
                column_list_sql += ", %s" % self.get_unique_constraint_declaration_sql(
                    name, definition
                )

        if options.get("primary"):
            column_list_sql += ", PRIMARY KEY(%s)" % ", ".join(options["primary"])

        if options.get("indexes"):
            for index, definition in options["indexes"]:
                column_list_sql += ", %s" % self.get_index_declaration_sql(
                    index, definition
                )

        query = "CREATE TABLE %s (%s" % (table_name, column_list_sql)

        check = self.get_check_declaration_sql(columns)
        if check:
            query += ", %s" % check

        query += ")"

        sql = [query]

        if options.get("foreign_keys"):
            for definition in options["foreign_keys"]:
                sql.append(self.get_create_foreign_key_sql(definition, table_name))

        return sql
[ "def", "_get_create_table_sql", "(", "self", ",", "table_name", ",", "columns", ",", "options", "=", "None", ")", ":", "options", "=", "options", "or", "{", "}", "column_list_sql", "=", "self", ".", "get_column_declaration_list_sql", "(", "columns", ")", "if", "options", ".", "get", "(", "\"unique_constraints\"", ")", ":", "for", "name", ",", "definition", "in", "options", "[", "\"unique_constraints\"", "]", ".", "items", "(", ")", ":", "column_list_sql", "+=", "\", %s\"", "%", "self", ".", "get_unique_constraint_declaration_sql", "(", "name", ",", "definition", ")", "if", "options", ".", "get", "(", "\"primary\"", ")", ":", "column_list_sql", "+=", "\", PRIMARY KEY(%s)\"", "%", "\", \"", ".", "join", "(", "options", "[", "\"primary\"", "]", ")", "if", "options", ".", "get", "(", "\"indexes\"", ")", ":", "for", "index", ",", "definition", "in", "options", "[", "\"indexes\"", "]", ":", "column_list_sql", "+=", "\", %s\"", "%", "self", ".", "get_index_declaration_sql", "(", "index", ",", "definition", ")", "query", "=", "\"CREATE TABLE %s (%s\"", "%", "(", "table_name", ",", "column_list_sql", ")", "check", "=", "self", ".", "get_check_declaration_sql", "(", "columns", ")", "if", "check", ":", "query", "+=", "\", %s\"", "%", "check", "query", "+=", "\")\"", "sql", "=", "[", "query", "]", "if", "options", ".", "get", "(", "\"foreign_keys\"", ")", ":", "for", "definition", "in", "options", "[", "\"foreign_keys\"", "]", ":", "sql", ".", "append", "(", "self", ".", "get_create_foreign_key_sql", "(", "definition", ",", "table_name", ")", ")", "return", "sql" ]
Returns the SQL used to create a table.

:param table_name: The name of the table to create
:type table_name: str

:param columns: The table columns
:type columns: dict

:param options: The options
:type options: dict

:rtype: list
[ "Returns", "the", "SQL", "used", "to", "create", "a", "table", "." ]
train
https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/dbal/platforms/platform.py#L605-L653
0.003274
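The assembly order in the record above — column declarations, then PRIMARY KEY, then the closing parenthesis, then any trailing foreign-key statements — can be sketched without Orator's platform classes. The column format below is an assumption for illustration, not Orator's real declaration syntax:

def build_create_table(table_name, columns, primary=None):
    # columns: mapping of column name -> SQL type fragment
    parts = ["%s %s" % (name, decl) for name, decl in columns.items()]
    if primary:
        parts.append("PRIMARY KEY(%s)" % ", ".join(primary))
    return "CREATE TABLE %s (%s)" % (table_name, ", ".join(parts))

print(build_create_table(
    "users",
    {"id": "INTEGER NOT NULL", "email": "VARCHAR(255)"},
    primary=["id"],
))
# CREATE TABLE users (id INTEGER NOT NULL, email VARCHAR(255), PRIMARY KEY(id))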
fabioz/PyDev.Debugger
_pydev_bundle/pydev_umd.py
_get_globals
def _get_globals():
    """Return current Python interpreter globals namespace"""
    if _get_globals_callback is not None:
        return _get_globals_callback()
    else:
        try:
            from __main__ import __dict__ as namespace
        except ImportError:
            try:
                # The import fails on IronPython
                import __main__
                namespace = __main__.__dict__
            except:
                # fall back to an empty namespace; the original bare
                # 'namespace' expression raised NameError when reached
                namespace = {}
        shell = namespace.get('__ipythonshell__')
        if shell is not None and hasattr(shell, 'user_ns'):
            # IPython 0.12+ kernel
            return shell.user_ns
        else:
            # Python interpreter
            return namespace
python
def _get_globals():
    """Return current Python interpreter globals namespace"""
    if _get_globals_callback is not None:
        return _get_globals_callback()
    else:
        try:
            from __main__ import __dict__ as namespace
        except ImportError:
            try:
                # The import fails on IronPython
                import __main__
                namespace = __main__.__dict__
            except:
                # fall back to an empty namespace; the original bare
                # 'namespace' expression raised NameError when reached
                namespace = {}
        shell = namespace.get('__ipythonshell__')
        if shell is not None and hasattr(shell, 'user_ns'):
            # IPython 0.12+ kernel
            return shell.user_ns
        else:
            # Python interpreter
            return namespace
[ "def", "_get_globals", "(", ")", ":", "if", "_get_globals_callback", "is", "not", "None", ":", "return", "_get_globals_callback", "(", ")", "else", ":", "try", ":", "from", "__main__", "import", "__dict__", "as", "namespace", "except", "ImportError", ":", "try", ":", "# The import fails on IronPython", "import", "__main__", "namespace", "=", "__main__", ".", "__dict__", "except", ":", "namespace", "shell", "=", "namespace", ".", "get", "(", "'__ipythonshell__'", ")", "if", "shell", "is", "not", "None", "and", "hasattr", "(", "shell", ",", "'user_ns'", ")", ":", "# IPython 0.12+ kernel", "return", "shell", ".", "user_ns", "else", ":", "# Python interpreter", "return", "namespace", "return", "namespace" ]
Return current Python interpreter globals namespace
[ "Return", "current", "Python", "interpreter", "globals", "namespace" ]
train
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/_pydev_bundle/pydev_umd.py#L102-L123
0.002714
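The lookup order — registered callback first, then __main__, then an IPython user namespace — can be exercised standalone. In a plain interpreter '__ipythonshell__' is absent, so this falls through to the raw module namespace:

import __main__

def get_globals():
    namespace = __main__.__dict__
    shell = namespace.get('__ipythonshell__')
    if shell is not None and hasattr(shell, 'user_ns'):
        return shell.user_ns  # IPython 0.12+ kernel
    return namespace          # plain interpreter

print('__name__' in get_globals())  # True -- we got the real __main__ dict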
DLR-RM/RAFCON
source/rafcon/gui/controllers/main_window.py
MainWindowController.on_notebook_tab_switch
def on_notebook_tab_switch(self, notebook, page, page_num, title_label, window, notebook_identifier): """Triggered whenever a left-bar notebook tab is changed. Updates the title of the corresponding notebook and updates the title of the left-bar window in case un-docked. :param notebook: The GTK notebook where a tab-change occurred :param page_num: The page number of the currently-selected tab :param title_label: The label holding the notebook's title :param window: The left-bar window, for which the title should be changed :param notebook_identifier: A string identifying whether the notebook is the upper or the lower one """ title = gui_helper_label.set_notebook_title(notebook, page_num, title_label) window.reset_title(title, notebook_identifier) self.on_switch_page_check_collapse_button(notebook, page_num)
python
def on_notebook_tab_switch(self, notebook, page, page_num, title_label, window, notebook_identifier): """Triggered whenever a left-bar notebook tab is changed. Updates the title of the corresponding notebook and updates the title of the left-bar window in case un-docked. :param notebook: The GTK notebook where a tab-change occurred :param page_num: The page number of the currently-selected tab :param title_label: The label holding the notebook's title :param window: The left-bar window, for which the title should be changed :param notebook_identifier: A string identifying whether the notebook is the upper or the lower one """ title = gui_helper_label.set_notebook_title(notebook, page_num, title_label) window.reset_title(title, notebook_identifier) self.on_switch_page_check_collapse_button(notebook, page_num)
[ "def", "on_notebook_tab_switch", "(", "self", ",", "notebook", ",", "page", ",", "page_num", ",", "title_label", ",", "window", ",", "notebook_identifier", ")", ":", "title", "=", "gui_helper_label", ".", "set_notebook_title", "(", "notebook", ",", "page_num", ",", "title_label", ")", "window", ".", "reset_title", "(", "title", ",", "notebook_identifier", ")", "self", ".", "on_switch_page_check_collapse_button", "(", "notebook", ",", "page_num", ")" ]
Triggered whenever a left-bar notebook tab is changed. Updates the title of the corresponding notebook and updates the title of the left-bar window in case un-docked. :param notebook: The GTK notebook where a tab-change occurred :param page_num: The page number of the currently-selected tab :param title_label: The label holding the notebook's title :param window: The left-bar window, for which the title should be changed :param notebook_identifier: A string identifying whether the notebook is the upper or the lower one
[ "Triggered", "whenever", "a", "left", "-", "bar", "notebook", "tab", "is", "changed", "." ]
train
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/main_window.py#L575-L588
0.007701
galaxy-genome-annotation/python-apollo
apollo/annotations/__init__.py
AnnotationsClient.add_dbxref
def add_dbxref(self, feature_id, db, accession, organism=None, sequence=None): """ Add a dbxref to a feature :type feature_id: str :param feature_id: Feature UUID :type db: str :param db: DB Name (e.g. PMID) :type accession: str :param accession: Accession Value :type organism: str :param organism: Organism Common Name :type sequence: str :param sequence: Sequence Name This seems to show two attributes being added, but it behaves like those two are one. :rtype: dict :return: A standard apollo feature dictionary ({"features": [{...}]}) """ data = { 'features': [ { 'uniquename': feature_id, 'dbxrefs': [ { 'db': db, 'accession': accession, } ] } ] } data = self._update_data(data, organism, sequence) return self.post('addDbxref', data)
python
def add_dbxref(self, feature_id, db, accession, organism=None, sequence=None): """ Add a dbxref to a feature :type feature_id: str :param feature_id: Feature UUID :type db: str :param db: DB Name (e.g. PMID) :type accession: str :param accession: Accession Value :type organism: str :param organism: Organism Common Name :type sequence: str :param sequence: Sequence Name This seems to show two attributes being added, but it behaves like those two are one. :rtype: dict :return: A standard apollo feature dictionary ({"features": [{...}]}) """ data = { 'features': [ { 'uniquename': feature_id, 'dbxrefs': [ { 'db': db, 'accession': accession, } ] } ] } data = self._update_data(data, organism, sequence) return self.post('addDbxref', data)
[ "def", "add_dbxref", "(", "self", ",", "feature_id", ",", "db", ",", "accession", ",", "organism", "=", "None", ",", "sequence", "=", "None", ")", ":", "data", "=", "{", "'features'", ":", "[", "{", "'uniquename'", ":", "feature_id", ",", "'dbxrefs'", ":", "[", "{", "'db'", ":", "db", ",", "'accession'", ":", "accession", ",", "}", "]", "}", "]", "}", "data", "=", "self", ".", "_update_data", "(", "data", ",", "organism", ",", "sequence", ")", "return", "self", ".", "post", "(", "'addDbxref'", ",", "data", ")" ]
Add a dbxref to a feature :type feature_id: str :param feature_id: Feature UUID :type db: str :param db: DB Name (e.g. PMID) :type accession: str :param accession: Accession Value :type organism: str :param organism: Organism Common Name :type sequence: str :param sequence: Sequence Name This seems to show two attributes being added, but it behaves like those two are one. :rtype: dict :return: A standard apollo feature dictionary ({"features": [{...}]})
[ "Add", "a", "dbxref", "to", "a", "feature" ]
train
https://github.com/galaxy-genome-annotation/python-apollo/blob/2bc9991302abe4402ec2885dcaac35915475b387/apollo/annotations/__init__.py#L341-L379
0.00266
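Most of the method is payload shaping; the nested features/dbxrefs structure it builds can be reproduced and inspected on its own (the identifiers below are made up):

import json

def dbxref_payload(feature_id, db, accession):
    # Same shape as the 'data' dict assembled in add_dbxref above.
    return {
        'features': [
            {
                'uniquename': feature_id,
                'dbxrefs': [{'db': db, 'accession': accession}],
            }
        ]
    }

print(json.dumps(dbxref_payload('abc-123', 'PMID', '12345'), indent=2))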
pedrotgn/pyactor
pyactor/green_thread/future.py
Future.exception
def exception(self, timeout=None):
        """Return an exception raised by the call that the future represents.

        :param timeout: The number of seconds to wait for the exception
            if the future has not been completed. None, the default,
            sets no limit.
        :returns: The exception raised by the call that the future
            represents or None if the call completed without raising.
        :raises: TimeoutError: If the timeout is reached before the future
            ends execution.
        """
        # with self.__condition:
        if self.__state == FINISHED:
            return self.__exception
        self.__condition.wait(timeout)

        if self.__state == FINISHED:
            return self.__exception
        else:
            raise TimeoutError('Future: %r' % self.__method)
python
def exception(self, timeout=None):
        """Return an exception raised by the call that the future represents.

        :param timeout: The number of seconds to wait for the exception
            if the future has not been completed. None, the default,
            sets no limit.
        :returns: The exception raised by the call that the future
            represents or None if the call completed without raising.
        :raises: TimeoutError: If the timeout is reached before the future
            ends execution.
        """
        # with self.__condition:
        if self.__state == FINISHED:
            return self.__exception
        self.__condition.wait(timeout)

        if self.__state == FINISHED:
            return self.__exception
        else:
            raise TimeoutError('Future: %r' % self.__method)
[ "def", "exception", "(", "self", ",", "timeout", "=", "None", ")", ":", "# with self.__condition:", "if", "self", ".", "__state", "==", "FINISHED", ":", "return", "self", ".", "__exception", "self", ".", "__condition", ".", "wait", "(", "timeout", ")", "if", "self", ".", "__state", "==", "FINISHED", ":", "return", "self", ".", "__exception", "else", ":", "raise", "TimeoutError", "(", "'Future: %r'", "%", "self", ".", "__method", ")" ]
Return an exception raised by the call that the future represents.

:param timeout: The number of seconds to wait for the exception
    if the future has not been completed. None, the default,
    sets no limit.
:returns: The exception raised by the call that the future
    represents or None if the call completed without raising.
:raises: TimeoutError: If the timeout is reached before the future
    ends execution.
[ "Return", "a", "exception", "raised", "by", "the", "call", "that", "the", "future", "represents", ".", ":", "param", "timeout", ":", "The", "number", "of", "seconds", "to", "wait", "for", "the", "exception", "if", "the", "future", "has", "not", "been", "completed", ".", "None", "the", "default", "sets", "no", "limit", ".", ":", "returns", ":", "The", "exception", "raised", "by", "the", "call", "that", "the", "future", "represents", "or", "None", "if", "the", "call", "completed", "without", "raising", ".", ":", "raises", ":", "TimeoutError", ":", "If", "the", "timeout", "is", "reached", "before", "the", "future", "ends", "execution", "." ]
train
https://github.com/pedrotgn/pyactor/blob/24d98d134dd4228f2ba38e83611e9c3f50ec2fd4/pyactor/green_thread/future.py#L114-L134
0.002398
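The wait-then-recheck shape of exception() is the classic condition-variable idiom. A runnable toy future with the stdlib primitive is sketched below; note it holds the lock around the wait, which the gevent-flavored excerpt above gets away without (its commented-out with hints at the thread-based variant):

import threading

class TinyFuture:
    def __init__(self):
        self._cond = threading.Condition()
        self._done = False
        self._exc = None

    def set_exception(self, exc):
        with self._cond:
            self._exc = exc
            self._done = True
            self._cond.notify_all()

    def exception(self, timeout=None):
        with self._cond:
            if self._done:
                return self._exc
            self._cond.wait(timeout)  # releases the lock while blocked
            if self._done:
                return self._exc
            raise TimeoutError('future still pending')

f = TinyFuture()
threading.Timer(0.1, f.set_exception, args=(ValueError('boom'),)).start()
print(f.exception(timeout=1.0))  # boom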
Alexis-benoist/eralchemy
eralchemy/parser.py
line_iterator_to_intermediary
def line_iterator_to_intermediary(line_iterator):
    """ Parse an iterator of str (one string per line) to the intermediary syntax"""
    current_table = None
    tables = []
    relations = []
    errors = []

    for line_nb, line, raw_line in filter_lines_from_comments(line_iterator):
        try:
            new_obj = parse_line(line)
            current_table, tables, relations = update_models(new_obj, current_table, tables, relations)
        except ParsingException as e:
            e.line_nb = line_nb
            e.line = raw_line
            errors.append(e)
    if len(errors) != 0:
        msg = 'ERAlchemy couldn\'t complete the generation due to the following {} errors'.format(len(errors))
        raise ParsingException(msg + '\n\n' + '\n\n'.join(e.traceback for e in errors))
    return tables, relations
python
def line_iterator_to_intermediary(line_iterator):
    """ Parse an iterator of str (one string per line) to the intermediary syntax"""
    current_table = None
    tables = []
    relations = []
    errors = []

    for line_nb, line, raw_line in filter_lines_from_comments(line_iterator):
        try:
            new_obj = parse_line(line)
            current_table, tables, relations = update_models(new_obj, current_table, tables, relations)
        except ParsingException as e:
            e.line_nb = line_nb
            e.line = raw_line
            errors.append(e)
    if len(errors) != 0:
        msg = 'ERAlchemy couldn\'t complete the generation due to the following {} errors'.format(len(errors))
        raise ParsingException(msg + '\n\n' + '\n\n'.join(e.traceback for e in errors))
    return tables, relations
[ "def", "line_iterator_to_intermediary", "(", "line_iterator", ")", ":", "current_table", "=", "None", "tables", "=", "[", "]", "relations", "=", "[", "]", "errors", "=", "[", "]", "for", "line_nb", ",", "line", ",", "raw_line", "in", "filter_lines_from_comments", "(", "line_iterator", ")", ":", "try", ":", "new_obj", "=", "parse_line", "(", "line", ")", "current_table", ",", "tables", ",", "relations", "=", "update_models", "(", "new_obj", ",", "current_table", ",", "tables", ",", "relations", ")", "except", "ParsingException", "as", "e", ":", "e", ".", "line_nb", "=", "line_nb", "e", ".", "line", "=", "raw_line", "errors", ".", "append", "(", "e", ")", "if", "len", "(", "errors", ")", "!=", "0", ":", "msg", "=", "'ERAlchemy couldn\\'t complete the generation due the {} following errors'", ".", "format", "(", "len", "(", "errors", ")", ")", "raise", "ParsingException", "(", "msg", "+", "'\\n\\n'", ".", "join", "(", "e", ".", "traceback", "for", "e", "in", "errors", ")", ")", "return", "tables", ",", "relations" ]
Parse an iterator of str (one string per line) to the intermediary syntax
[ "Parse", "an", "iterator", "of", "str", "(", "one", "string", "per", "line", ")", "to", "the", "intermediary", "syntax" ]
train
https://github.com/Alexis-benoist/eralchemy/blob/d6fcdc67d6d413bb174bf008fd360044e1dff5a7/eralchemy/parser.py#L126-L143
0.004914
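The collect-then-raise pattern above (keep parsing after a bad line, then fail once with every error attached) is independent of ER syntax; a self-contained version over integer parsing:

def parse_all(lines):
    values, errors = [], []
    for line_nb, raw in enumerate(lines, start=1):
        try:
            values.append(int(raw))
        except ValueError as e:
            errors.append('line %d (%r): %s' % (line_nb, raw, e))
    if errors:
        raise ValueError('%d line(s) failed:\n%s' % (len(errors), '\n'.join(errors)))
    return values

try:
    parse_all(['1', 'two', '3', 'IV'])
except ValueError as e:
    print(e)  # reports lines 2 and 4 in one exception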
CodeReclaimers/neat-python
neat/distributed.py
host_is_local
def host_is_local(hostname, port=22): # no port specified, just use the ssh port """ Returns True if the hostname points to the localhost, otherwise False. """ hostname = socket.getfqdn(hostname) if hostname in ("localhost", "0.0.0.0", "127.0.0.1", "1.0.0.127.in-addr.arpa", "1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa"): return True localhost = socket.gethostname() if hostname == localhost: return True localaddrs = socket.getaddrinfo(localhost, port) targetaddrs = socket.getaddrinfo(hostname, port) for (ignored_family, ignored_socktype, ignored_proto, ignored_canonname, sockaddr) in localaddrs: for (ignored_rfamily, ignored_rsocktype, ignored_rproto, ignored_rcanonname, rsockaddr) in targetaddrs: if rsockaddr[0] == sockaddr[0]: return True return False
python
def host_is_local(hostname, port=22): # no port specified, just use the ssh port """ Returns True if the hostname points to the localhost, otherwise False. """ hostname = socket.getfqdn(hostname) if hostname in ("localhost", "0.0.0.0", "127.0.0.1", "1.0.0.127.in-addr.arpa", "1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa"): return True localhost = socket.gethostname() if hostname == localhost: return True localaddrs = socket.getaddrinfo(localhost, port) targetaddrs = socket.getaddrinfo(hostname, port) for (ignored_family, ignored_socktype, ignored_proto, ignored_canonname, sockaddr) in localaddrs: for (ignored_rfamily, ignored_rsocktype, ignored_rproto, ignored_rcanonname, rsockaddr) in targetaddrs: if rsockaddr[0] == sockaddr[0]: return True return False
[ "def", "host_is_local", "(", "hostname", ",", "port", "=", "22", ")", ":", "# no port specified, just use the ssh port", "hostname", "=", "socket", ".", "getfqdn", "(", "hostname", ")", "if", "hostname", "in", "(", "\"localhost\"", ",", "\"0.0.0.0\"", ",", "\"127.0.0.1\"", ",", "\"1.0.0.127.in-addr.arpa\"", ",", "\"1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa\"", ")", ":", "return", "True", "localhost", "=", "socket", ".", "gethostname", "(", ")", "if", "hostname", "==", "localhost", ":", "return", "True", "localaddrs", "=", "socket", ".", "getaddrinfo", "(", "localhost", ",", "port", ")", "targetaddrs", "=", "socket", ".", "getaddrinfo", "(", "hostname", ",", "port", ")", "for", "(", "ignored_family", ",", "ignored_socktype", ",", "ignored_proto", ",", "ignored_canonname", ",", "sockaddr", ")", "in", "localaddrs", ":", "for", "(", "ignored_rfamily", ",", "ignored_rsocktype", ",", "ignored_rproto", ",", "ignored_rcanonname", ",", "rsockaddr", ")", "in", "targetaddrs", ":", "if", "rsockaddr", "[", "0", "]", "==", "sockaddr", "[", "0", "]", ":", "return", "True", "return", "False" ]
Returns True if the hostname points to the localhost, otherwise False.
[ "Returns", "True", "if", "the", "hostname", "points", "to", "the", "localhost", "otherwise", "False", "." ]
train
https://github.com/CodeReclaimers/neat-python/blob/e3dbe77c0d776eae41d598e6439e6ac02ab90b18/neat/distributed.py#L105-L124
0.005382
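The heart of host_is_local is comparing getaddrinfo() results for the target against the local host. A compact standalone version of that comparison (port 22 kept as the arbitrary resolution port; this does live DNS lookups and assumes the local hostname resolves):

import socket

def is_local(hostname, port=22):
    if socket.getfqdn(hostname) in ('localhost', '127.0.0.1', '0.0.0.0'):
        return True
    # sockaddr is the last element of each getaddrinfo() 5-tuple;
    # sockaddr[0] is the address string the excerpt compares.
    local = {sa[0] for *_, sa in socket.getaddrinfo(socket.gethostname(), port)}
    target = {sa[0] for *_, sa in socket.getaddrinfo(hostname, port)}
    return bool(local & target)

print(is_local('localhost'))  # True

The set intersection condenses the excerpt's nested loops without changing the test.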
andymccurdy/redis-py
redis/client.py
Redis.expireat
def expireat(self, name, when): """ Set an expire flag on key ``name``. ``when`` can be represented as an integer indicating unix time or a Python datetime object. """ if isinstance(when, datetime.datetime): when = int(mod_time.mktime(when.timetuple())) return self.execute_command('EXPIREAT', name, when)
python
def expireat(self, name, when): """ Set an expire flag on key ``name``. ``when`` can be represented as an integer indicating unix time or a Python datetime object. """ if isinstance(when, datetime.datetime): when = int(mod_time.mktime(when.timetuple())) return self.execute_command('EXPIREAT', name, when)
[ "def", "expireat", "(", "self", ",", "name", ",", "when", ")", ":", "if", "isinstance", "(", "when", ",", "datetime", ".", "datetime", ")", ":", "when", "=", "int", "(", "mod_time", ".", "mktime", "(", "when", ".", "timetuple", "(", ")", ")", ")", "return", "self", ".", "execute_command", "(", "'EXPIREAT'", ",", "name", ",", "when", ")" ]
Set an expire flag on key ``name``. ``when`` can be represented as an integer indicating unix time or a Python datetime object.
[ "Set", "an", "expire", "flag", "on", "key", "name", ".", "when", "can", "be", "represented", "as", "an", "integer", "indicating", "unix", "time", "or", "a", "Python", "datetime", "object", "." ]
train
https://github.com/andymccurdy/redis-py/blob/cdfe2befbe00db4a3c48c9ddd6d64dea15f6f0db/redis/client.py#L1253-L1260
0.005479
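The only local work expireat does is the datetime-to-unix conversion; that line is reproducible by itself (mktime interprets the datetime in local time, exactly as redis-py does here):

import datetime
import time

when = datetime.datetime(2030, 1, 1, 12, 0, 0)
unix_ts = int(time.mktime(when.timetuple()))
print(unix_ts)  # seconds since the epoch, local-time interpretation
# The client then issues: EXPIREAT <name> <unix_ts>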
saltstack/salt
salt/modules/mac_system.py
_execute_command
def _execute_command(cmd, at_time=None): ''' Helper function to execute the command :param str cmd: the command to run :param str at_time: If passed, the cmd will be scheduled. Returns: bool ''' if at_time: cmd = 'echo \'{0}\' | at {1}'.format(cmd, _cmd_quote(at_time)) return not bool(__salt__['cmd.retcode'](cmd, python_shell=True))
python
def _execute_command(cmd, at_time=None): ''' Helper function to execute the command :param str cmd: the command to run :param str at_time: If passed, the cmd will be scheduled. Returns: bool ''' if at_time: cmd = 'echo \'{0}\' | at {1}'.format(cmd, _cmd_quote(at_time)) return not bool(__salt__['cmd.retcode'](cmd, python_shell=True))
[ "def", "_execute_command", "(", "cmd", ",", "at_time", "=", "None", ")", ":", "if", "at_time", ":", "cmd", "=", "'echo \\'{0}\\' | at {1}'", ".", "format", "(", "cmd", ",", "_cmd_quote", "(", "at_time", ")", ")", "return", "not", "bool", "(", "__salt__", "[", "'cmd.retcode'", "]", "(", "cmd", ",", "python_shell", "=", "True", ")", ")" ]
Helper function to execute the command :param str cmd: the command to run :param str at_time: If passed, the cmd will be scheduled. Returns: bool
[ "Helper", "function", "to", "execute", "the", "command" ]
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_system.py#L68-L80
0.002653
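The at(1) scheduling path is a one-line string pipeline; a standalone sketch with the stdlib's shlex.quote standing in for Salt's _cmd_quote. As in the excerpt, the command itself is embedded unquoted, which is only safe for trusted input:

from shlex import quote

def schedule(cmd, at_time=None):
    if at_time:
        cmd = "echo '{0}' | at {1}".format(cmd, quote(at_time))
    return cmd  # Salt then runs this via cmd.retcode with python_shell=True

print(schedule('shutdown -h now', 'now + 5 minutes'))
# echo 'shutdown -h now' | at 'now + 5 minutes'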
inveniosoftware/invenio-files-rest
invenio_files_rest/serializer.py
json_serializer
def json_serializer(data=None, code=200, headers=None, context=None, etag=None, task_result=None): """Build a json flask response using the given data. :param data: The data to serialize. (Default: ``None``) :param code: The HTTP status code. (Default: ``200``) :param headers: The HTTP headers to include. (Default: ``None``) :param context: The schema class context. (Default: ``None``) :param etag: The ETag header. (Default: ``None``) :param task_result: Optionally you can pass async task to wait for. (Default: ``None``) :returns: A Flask response with json data. :rtype: :py:class:`flask.Response` """ schema_class, many = schema_from_context(context or {}) if data is not None: # Generate JSON response data = json.dumps( schema_class(context=context).dump(data, many=many).data, **_format_args() ) interval = current_app.config['FILES_REST_TASK_WAIT_INTERVAL'] max_rounds = int( current_app.config['FILES_REST_TASK_WAIT_MAX_SECONDS'] // interval ) response = current_app.response_class( # Stream response if waiting for task result. data if task_result is None else wait_for_taskresult( task_result, data, interval, max_rounds, ), mimetype='application/json' ) else: response = current_app.response_class(mimetype='application/json') response.status_code = code if headers is not None: response.headers.extend(headers) if etag: response.set_etag(etag) return response
python
def json_serializer(data=None, code=200, headers=None, context=None, etag=None, task_result=None): """Build a json flask response using the given data. :param data: The data to serialize. (Default: ``None``) :param code: The HTTP status code. (Default: ``200``) :param headers: The HTTP headers to include. (Default: ``None``) :param context: The schema class context. (Default: ``None``) :param etag: The ETag header. (Default: ``None``) :param task_result: Optionally you can pass async task to wait for. (Default: ``None``) :returns: A Flask response with json data. :rtype: :py:class:`flask.Response` """ schema_class, many = schema_from_context(context or {}) if data is not None: # Generate JSON response data = json.dumps( schema_class(context=context).dump(data, many=many).data, **_format_args() ) interval = current_app.config['FILES_REST_TASK_WAIT_INTERVAL'] max_rounds = int( current_app.config['FILES_REST_TASK_WAIT_MAX_SECONDS'] // interval ) response = current_app.response_class( # Stream response if waiting for task result. data if task_result is None else wait_for_taskresult( task_result, data, interval, max_rounds, ), mimetype='application/json' ) else: response = current_app.response_class(mimetype='application/json') response.status_code = code if headers is not None: response.headers.extend(headers) if etag: response.set_etag(etag) return response
[ "def", "json_serializer", "(", "data", "=", "None", ",", "code", "=", "200", ",", "headers", "=", "None", ",", "context", "=", "None", ",", "etag", "=", "None", ",", "task_result", "=", "None", ")", ":", "schema_class", ",", "many", "=", "schema_from_context", "(", "context", "or", "{", "}", ")", "if", "data", "is", "not", "None", ":", "# Generate JSON response", "data", "=", "json", ".", "dumps", "(", "schema_class", "(", "context", "=", "context", ")", ".", "dump", "(", "data", ",", "many", "=", "many", ")", ".", "data", ",", "*", "*", "_format_args", "(", ")", ")", "interval", "=", "current_app", ".", "config", "[", "'FILES_REST_TASK_WAIT_INTERVAL'", "]", "max_rounds", "=", "int", "(", "current_app", ".", "config", "[", "'FILES_REST_TASK_WAIT_MAX_SECONDS'", "]", "//", "interval", ")", "response", "=", "current_app", ".", "response_class", "(", "# Stream response if waiting for task result.", "data", "if", "task_result", "is", "None", "else", "wait_for_taskresult", "(", "task_result", ",", "data", ",", "interval", ",", "max_rounds", ",", ")", ",", "mimetype", "=", "'application/json'", ")", "else", ":", "response", "=", "current_app", ".", "response_class", "(", "mimetype", "=", "'application/json'", ")", "response", ".", "status_code", "=", "code", "if", "headers", "is", "not", "None", ":", "response", ".", "headers", ".", "extend", "(", "headers", ")", "if", "etag", ":", "response", ".", "set_etag", "(", "etag", ")", "return", "response" ]
Build a json flask response using the given data. :param data: The data to serialize. (Default: ``None``) :param code: The HTTP status code. (Default: ``200``) :param headers: The HTTP headers to include. (Default: ``None``) :param context: The schema class context. (Default: ``None``) :param etag: The ETag header. (Default: ``None``) :param task_result: Optionally you can pass async task to wait for. (Default: ``None``) :returns: A Flask response with json data. :rtype: :py:class:`flask.Response`
[ "Build", "a", "json", "flask", "response", "using", "the", "given", "data", "." ]
train
https://github.com/inveniosoftware/invenio-files-rest/blob/59a950da61cc8d5882a03c6fde6db2e2ed10befd/invenio_files_rest/serializer.py#L268-L312
0.000604
zero-os/0-core
client/py-client/zeroos/core0/client/client.py
BridgeManager.delete
def delete(self, bridge): """ Delete a bridge by name :param bridge: bridge name :return: """ args = { 'name': bridge, } self._bridge_chk.check(args) return self._client.json('bridge.delete', args)
python
def delete(self, bridge): """ Delete a bridge by name :param bridge: bridge name :return: """ args = { 'name': bridge, } self._bridge_chk.check(args) return self._client.json('bridge.delete', args)
[ "def", "delete", "(", "self", ",", "bridge", ")", ":", "args", "=", "{", "'name'", ":", "bridge", ",", "}", "self", ".", "_bridge_chk", ".", "check", "(", "args", ")", "return", "self", ".", "_client", ".", "json", "(", "'bridge.delete'", ",", "args", ")" ]
Delete a bridge by name :param bridge: bridge name :return:
[ "Delete", "a", "bridge", "by", "name" ]
train
https://github.com/zero-os/0-core/blob/69f6ce845ab8b8ad805a79a415227e7ac566c218/client/py-client/zeroos/core0/client/client.py#L1563-L1576
0.007042
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_snmp.py
brocade_snmp.snmp_server_view_mibtree
def snmp_server_view_mibtree(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") snmp_server = ET.SubElement(config, "snmp-server", xmlns="urn:brocade.com:mgmt:brocade-snmp") view = ET.SubElement(snmp_server, "view") viewname_key = ET.SubElement(view, "viewname") viewname_key.text = kwargs.pop('viewname') mibtree = ET.SubElement(view, "mibtree") mibtree.text = kwargs.pop('mibtree') callback = kwargs.pop('callback', self._callback) return callback(config)
python
def snmp_server_view_mibtree(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") snmp_server = ET.SubElement(config, "snmp-server", xmlns="urn:brocade.com:mgmt:brocade-snmp") view = ET.SubElement(snmp_server, "view") viewname_key = ET.SubElement(view, "viewname") viewname_key.text = kwargs.pop('viewname') mibtree = ET.SubElement(view, "mibtree") mibtree.text = kwargs.pop('mibtree') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "snmp_server_view_mibtree", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "snmp_server", "=", "ET", ".", "SubElement", "(", "config", ",", "\"snmp-server\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-snmp\"", ")", "view", "=", "ET", ".", "SubElement", "(", "snmp_server", ",", "\"view\"", ")", "viewname_key", "=", "ET", ".", "SubElement", "(", "view", ",", "\"viewname\"", ")", "viewname_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'viewname'", ")", "mibtree", "=", "ET", ".", "SubElement", "(", "view", ",", "\"mibtree\"", ")", "mibtree", ".", "text", "=", "kwargs", ".", "pop", "(", "'mibtree'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
train
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_snmp.py#L566-L578
0.005272
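With the NETCONF callback stripped away, the body is plain ElementTree assembly; a runnable equivalent that prints the generated XML (the viewname and OID values are hypothetical):

import xml.etree.ElementTree as ET

config = ET.Element('config')
snmp_server = ET.SubElement(config, 'snmp-server',
                            xmlns='urn:brocade.com:mgmt:brocade-snmp')
view = ET.SubElement(snmp_server, 'view')
ET.SubElement(view, 'viewname').text = 'readview'
ET.SubElement(view, 'mibtree').text = '1.3.6.1'

print(ET.tostring(config, encoding='unicode'))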
yoavaviram/python-amazon-simple-product-api
amazon/api.py
AmazonAPI.cart_add
def cart_add(self, items, CartId=None, HMAC=None, **kwargs): """CartAdd. :param items: A dictionary containing the items to be added to the cart. Or a list containing these dictionaries. It is not possible to create an empty cart! example: [{'offer_id': 'rt2ofih3f389nwiuhf8934z87o3f4h', 'quantity': 1}] :param CartId: Id of Cart :param HMAC: HMAC of Cart, see CartCreate for more info :return: An :class:`~.AmazonCart`. """ if not CartId or not HMAC: raise CartException('CartId and HMAC required for CartAdd call') if isinstance(items, dict): items = [items] if len(items) > 10: raise CartException("You can't add more than 10 items at once") offer_id_key_template = 'Item.{0}.OfferListingId' quantity_key_template = 'Item.{0}.Quantity' for i, item in enumerate(items): kwargs[offer_id_key_template.format(i)] = item['offer_id'] kwargs[quantity_key_template.format(i)] = item['quantity'] response = self.api.CartAdd(CartId=CartId, HMAC=HMAC, **kwargs) root = objectify.fromstring(response) new_cart = AmazonCart(root) self._check_for_cart_error(new_cart) return new_cart
python
def cart_add(self, items, CartId=None, HMAC=None, **kwargs): """CartAdd. :param items: A dictionary containing the items to be added to the cart. Or a list containing these dictionaries. It is not possible to create an empty cart! example: [{'offer_id': 'rt2ofih3f389nwiuhf8934z87o3f4h', 'quantity': 1}] :param CartId: Id of Cart :param HMAC: HMAC of Cart, see CartCreate for more info :return: An :class:`~.AmazonCart`. """ if not CartId or not HMAC: raise CartException('CartId and HMAC required for CartAdd call') if isinstance(items, dict): items = [items] if len(items) > 10: raise CartException("You can't add more than 10 items at once") offer_id_key_template = 'Item.{0}.OfferListingId' quantity_key_template = 'Item.{0}.Quantity' for i, item in enumerate(items): kwargs[offer_id_key_template.format(i)] = item['offer_id'] kwargs[quantity_key_template.format(i)] = item['quantity'] response = self.api.CartAdd(CartId=CartId, HMAC=HMAC, **kwargs) root = objectify.fromstring(response) new_cart = AmazonCart(root) self._check_for_cart_error(new_cart) return new_cart
[ "def", "cart_add", "(", "self", ",", "items", ",", "CartId", "=", "None", ",", "HMAC", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "CartId", "or", "not", "HMAC", ":", "raise", "CartException", "(", "'CartId and HMAC required for CartAdd call'", ")", "if", "isinstance", "(", "items", ",", "dict", ")", ":", "items", "=", "[", "items", "]", "if", "len", "(", "items", ")", ">", "10", ":", "raise", "CartException", "(", "\"You can't add more than 10 items at once\"", ")", "offer_id_key_template", "=", "'Item.{0}.OfferListingId'", "quantity_key_template", "=", "'Item.{0}.Quantity'", "for", "i", ",", "item", "in", "enumerate", "(", "items", ")", ":", "kwargs", "[", "offer_id_key_template", ".", "format", "(", "i", ")", "]", "=", "item", "[", "'offer_id'", "]", "kwargs", "[", "quantity_key_template", ".", "format", "(", "i", ")", "]", "=", "item", "[", "'quantity'", "]", "response", "=", "self", ".", "api", ".", "CartAdd", "(", "CartId", "=", "CartId", ",", "HMAC", "=", "HMAC", ",", "*", "*", "kwargs", ")", "root", "=", "objectify", ".", "fromstring", "(", "response", ")", "new_cart", "=", "AmazonCart", "(", "root", ")", "self", ".", "_check_for_cart_error", "(", "new_cart", ")", "return", "new_cart" ]
CartAdd. :param items: A dictionary containing the items to be added to the cart. Or a list containing these dictionaries. It is not possible to create an empty cart! example: [{'offer_id': 'rt2ofih3f389nwiuhf8934z87o3f4h', 'quantity': 1}] :param CartId: Id of Cart :param HMAC: HMAC of Cart, see CartCreate for more info :return: An :class:`~.AmazonCart`.
[ "CartAdd", ".", ":", "param", "items", ":", "A", "dictionary", "containing", "the", "items", "to", "be", "added", "to", "the", "cart", ".", "Or", "a", "list", "containing", "these", "dictionaries", ".", "It", "is", "not", "possible", "to", "create", "an", "empty", "cart!", "example", ":", "[", "{", "offer_id", ":", "rt2ofih3f389nwiuhf8934z87o3f4h", "quantity", ":", "1", "}", "]", ":", "param", "CartId", ":", "Id", "of", "Cart", ":", "param", "HMAC", ":", "HMAC", "of", "Cart", "see", "CartCreate", "for", "more", "info", ":", "return", ":", "An", ":", "class", ":", "~", ".", "AmazonCart", "." ]
train
https://github.com/yoavaviram/python-amazon-simple-product-api/blob/f1cb0e209145fcfac9444e4c733dd19deb59d31a/amazon/api.py#L321-L356
0.001485
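The enumerate/format loop that flattens the items list into Item.N.* request parameters can be checked in isolation (the offer ids here are made up):

def cart_params(items):
    params = {}
    for i, item in enumerate(items):
        params['Item.{0}.OfferListingId'.format(i)] = item['offer_id']
        params['Item.{0}.Quantity'.format(i)] = item['quantity']
    return params

print(cart_params([{'offer_id': 'abc', 'quantity': 1},
                   {'offer_id': 'xyz', 'quantity': 3}]))
# {'Item.0.OfferListingId': 'abc', 'Item.0.Quantity': 1,
#  'Item.1.OfferListingId': 'xyz', 'Item.1.Quantity': 3}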
JukeboxPipeline/jukebox-core
src/jukeboxcore/gui/djitemdata.py
prj_resolution_data
def prj_resolution_data(project, role): """Return the data for resolution :param project: the project that holds the data :type project: :class:`jukeboxcore.djadapter.models.Project` :param role: item data role :type role: QtCore.Qt.ItemDataRole :returns: data for the resolution :rtype: depending on role :raises: None """ if role == QtCore.Qt.DisplayRole: return '%s x %s' % (project.resx, project.resy)
python
def prj_resolution_data(project, role): """Return the data for resolution :param project: the project that holds the data :type project: :class:`jukeboxcore.djadapter.models.Project` :param role: item data role :type role: QtCore.Qt.ItemDataRole :returns: data for the resolution :rtype: depending on role :raises: None """ if role == QtCore.Qt.DisplayRole: return '%s x %s' % (project.resx, project.resy)
[ "def", "prj_resolution_data", "(", "project", ",", "role", ")", ":", "if", "role", "==", "QtCore", ".", "Qt", ".", "DisplayRole", ":", "return", "'%s x %s'", "%", "(", "project", ".", "resx", ",", "project", ".", "resy", ")" ]
Return the data for resolution :param project: the project that holds the data :type project: :class:`jukeboxcore.djadapter.models.Project` :param role: item data role :type role: QtCore.Qt.ItemDataRole :returns: data for the resolution :rtype: depending on role :raises: None
[ "Return", "the", "data", "for", "resolution" ]
train
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/djitemdata.py#L98-L110
0.002203
theislab/scanpy
scanpy/utils.py
identify_groups
def identify_groups(ref_labels, pred_labels, return_overlaps=False): """Which predicted label explains which reference label? A predicted label explains the reference label which maximizes the minimum of ``relative_overlaps_pred`` and ``relative_overlaps_ref``. Compare this with ``compute_association_matrix_of_groups``. Returns ------- A dictionary of length ``len(np.unique(ref_labels))`` that stores for each reference label the predicted label that best explains it. If ``return_overlaps`` is ``True``, this will in addition return the overlap of the reference group with the predicted group; normalized with respect to the reference group size and the predicted group size, respectively. """ ref_unique, ref_counts = np.unique(ref_labels, return_counts=True) ref_dict = dict(zip(ref_unique, ref_counts)) pred_unique, pred_counts = np.unique(pred_labels, return_counts=True) pred_dict = dict(zip(pred_unique, pred_counts)) associated_predictions = {} associated_overlaps = {} for ref_label in ref_unique: sub_pred_unique, sub_pred_counts = np.unique(pred_labels[ref_label == ref_labels], return_counts=True) relative_overlaps_pred = [sub_pred_counts[i] / pred_dict[n] for i, n in enumerate(sub_pred_unique)] relative_overlaps_ref = [sub_pred_counts[i] / ref_dict[ref_label] for i, n in enumerate(sub_pred_unique)] relative_overlaps = np.c_[relative_overlaps_pred, relative_overlaps_ref] relative_overlaps_min = np.min(relative_overlaps, axis=1) pred_best_index = np.argsort(relative_overlaps_min)[::-1] associated_predictions[ref_label] = sub_pred_unique[pred_best_index] associated_overlaps[ref_label] = relative_overlaps[pred_best_index] if return_overlaps: return associated_predictions, associated_overlaps else: return associated_predictions
python
def identify_groups(ref_labels, pred_labels, return_overlaps=False): """Which predicted label explains which reference label? A predicted label explains the reference label which maximizes the minimum of ``relative_overlaps_pred`` and ``relative_overlaps_ref``. Compare this with ``compute_association_matrix_of_groups``. Returns ------- A dictionary of length ``len(np.unique(ref_labels))`` that stores for each reference label the predicted label that best explains it. If ``return_overlaps`` is ``True``, this will in addition return the overlap of the reference group with the predicted group; normalized with respect to the reference group size and the predicted group size, respectively. """ ref_unique, ref_counts = np.unique(ref_labels, return_counts=True) ref_dict = dict(zip(ref_unique, ref_counts)) pred_unique, pred_counts = np.unique(pred_labels, return_counts=True) pred_dict = dict(zip(pred_unique, pred_counts)) associated_predictions = {} associated_overlaps = {} for ref_label in ref_unique: sub_pred_unique, sub_pred_counts = np.unique(pred_labels[ref_label == ref_labels], return_counts=True) relative_overlaps_pred = [sub_pred_counts[i] / pred_dict[n] for i, n in enumerate(sub_pred_unique)] relative_overlaps_ref = [sub_pred_counts[i] / ref_dict[ref_label] for i, n in enumerate(sub_pred_unique)] relative_overlaps = np.c_[relative_overlaps_pred, relative_overlaps_ref] relative_overlaps_min = np.min(relative_overlaps, axis=1) pred_best_index = np.argsort(relative_overlaps_min)[::-1] associated_predictions[ref_label] = sub_pred_unique[pred_best_index] associated_overlaps[ref_label] = relative_overlaps[pred_best_index] if return_overlaps: return associated_predictions, associated_overlaps else: return associated_predictions
[ "def", "identify_groups", "(", "ref_labels", ",", "pred_labels", ",", "return_overlaps", "=", "False", ")", ":", "ref_unique", ",", "ref_counts", "=", "np", ".", "unique", "(", "ref_labels", ",", "return_counts", "=", "True", ")", "ref_dict", "=", "dict", "(", "zip", "(", "ref_unique", ",", "ref_counts", ")", ")", "pred_unique", ",", "pred_counts", "=", "np", ".", "unique", "(", "pred_labels", ",", "return_counts", "=", "True", ")", "pred_dict", "=", "dict", "(", "zip", "(", "pred_unique", ",", "pred_counts", ")", ")", "associated_predictions", "=", "{", "}", "associated_overlaps", "=", "{", "}", "for", "ref_label", "in", "ref_unique", ":", "sub_pred_unique", ",", "sub_pred_counts", "=", "np", ".", "unique", "(", "pred_labels", "[", "ref_label", "==", "ref_labels", "]", ",", "return_counts", "=", "True", ")", "relative_overlaps_pred", "=", "[", "sub_pred_counts", "[", "i", "]", "/", "pred_dict", "[", "n", "]", "for", "i", ",", "n", "in", "enumerate", "(", "sub_pred_unique", ")", "]", "relative_overlaps_ref", "=", "[", "sub_pred_counts", "[", "i", "]", "/", "ref_dict", "[", "ref_label", "]", "for", "i", ",", "n", "in", "enumerate", "(", "sub_pred_unique", ")", "]", "relative_overlaps", "=", "np", ".", "c_", "[", "relative_overlaps_pred", ",", "relative_overlaps_ref", "]", "relative_overlaps_min", "=", "np", ".", "min", "(", "relative_overlaps", ",", "axis", "=", "1", ")", "pred_best_index", "=", "np", ".", "argsort", "(", "relative_overlaps_min", ")", "[", ":", ":", "-", "1", "]", "associated_predictions", "[", "ref_label", "]", "=", "sub_pred_unique", "[", "pred_best_index", "]", "associated_overlaps", "[", "ref_label", "]", "=", "relative_overlaps", "[", "pred_best_index", "]", "if", "return_overlaps", ":", "return", "associated_predictions", ",", "associated_overlaps", "else", ":", "return", "associated_predictions" ]
Which predicted label explains which reference label? A predicted label explains the reference label which maximizes the minimum of ``relative_overlaps_pred`` and ``relative_overlaps_ref``. Compare this with ``compute_association_matrix_of_groups``. Returns ------- A dictionary of length ``len(np.unique(ref_labels))`` that stores for each reference label the predicted label that best explains it. If ``return_overlaps`` is ``True``, this will in addition return the overlap of the reference group with the predicted group; normalized with respect to the reference group size and the predicted group size, respectively.
[ "Which", "predicted", "label", "explains", "which", "reference", "label?" ]
train
https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/utils.py#L511-L544
0.004204
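A small concrete run of the overlap logic with two toy labelings makes the min-of-two-fractions criterion visible; this exercises only the counting, not scanpy itself (NumPy assumed available):

import numpy as np

ref = np.array(['a', 'a', 'a', 'b', 'b'])
pred = np.array([0, 0, 1, 1, 1])

pred_unique, pred_counts = np.unique(pred, return_counts=True)
pred_sizes = dict(zip(pred_unique, pred_counts))

for ref_label in np.unique(ref):
    sub_unique, sub_counts = np.unique(pred[ref == ref_label],
                                       return_counts=True)
    for p, c in zip(sub_unique, sub_counts):
        rel_pred = c / pred_sizes[p]            # fraction of predicted group
        rel_ref = c / (ref == ref_label).sum()  # fraction of reference group
        print(ref_label, '->', p, round(min(rel_pred, rel_ref), 2))
# a -> 0 0.67 ; a -> 1 0.33 ; b -> 1 0.67
# so label 'a' is best explained by prediction 0, and 'b' by prediction 1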
pysathq/pysat
examples/rc2.py
RC2.compute_
def compute_(self): """ Main core-guided loop, which iteratively calls a SAT oracle, extracts a new unsatisfiable core and processes it. The loop finishes as soon as a satisfiable formula is obtained. If specified in the command line, the method additionally calls :meth:`adapt_am1` to detect and adapt intrinsic AtMost1 constraints before executing the loop. :rtype: bool """ # trying to adapt (simplify) the formula # by detecting and using atmost1 constraints if self.adapt: self.adapt_am1() # main solving loop while not self.oracle.solve(assumptions=self.sels + self.sums): self.get_core() if not self.core: # core is empty, i.e. hard part is unsatisfiable return False self.process_core() if self.verbose > 1: print('c cost: {0}; core sz: {1}; soft sz: {2}'.format(self.cost, len(self.core), len(self.sels) + len(self.sums))) return True
python
def compute_(self): """ Main core-guided loop, which iteratively calls a SAT oracle, extracts a new unsatisfiable core and processes it. The loop finishes as soon as a satisfiable formula is obtained. If specified in the command line, the method additionally calls :meth:`adapt_am1` to detect and adapt intrinsic AtMost1 constraints before executing the loop. :rtype: bool """ # trying to adapt (simplify) the formula # by detecting and using atmost1 constraints if self.adapt: self.adapt_am1() # main solving loop while not self.oracle.solve(assumptions=self.sels + self.sums): self.get_core() if not self.core: # core is empty, i.e. hard part is unsatisfiable return False self.process_core() if self.verbose > 1: print('c cost: {0}; core sz: {1}; soft sz: {2}'.format(self.cost, len(self.core), len(self.sels) + len(self.sums))) return True
[ "def", "compute_", "(", "self", ")", ":", "# trying to adapt (simplify) the formula", "# by detecting and using atmost1 constraints", "if", "self", ".", "adapt", ":", "self", ".", "adapt_am1", "(", ")", "# main solving loop", "while", "not", "self", ".", "oracle", ".", "solve", "(", "assumptions", "=", "self", ".", "sels", "+", "self", ".", "sums", ")", ":", "self", ".", "get_core", "(", ")", "if", "not", "self", ".", "core", ":", "# core is empty, i.e. hard part is unsatisfiable", "return", "False", "self", ".", "process_core", "(", ")", "if", "self", ".", "verbose", ">", "1", ":", "print", "(", "'c cost: {0}; core sz: {1}; soft sz: {2}'", ".", "format", "(", "self", ".", "cost", ",", "len", "(", "self", ".", "core", ")", ",", "len", "(", "self", ".", "sels", ")", "+", "len", "(", "self", ".", "sums", ")", ")", ")", "return", "True" ]
Main core-guided loop, which iteratively calls a SAT oracle, extracts a new unsatisfiable core and processes it. The loop finishes as soon as a satisfiable formula is obtained. If specified in the command line, the method additionally calls :meth:`adapt_am1` to detect and adapt intrinsic AtMost1 constraints before executing the loop. :rtype: bool
[ "Main", "core", "-", "guided", "loop", "which", "iteratively", "calls", "a", "SAT", "oracle", "extracts", "a", "new", "unsatisfiable", "core", "and", "processes", "it", ".", "The", "loop", "finishes", "as", "soon", "as", "a", "satisfiable", "formula", "is", "obtained", ".", "If", "specified", "in", "the", "command", "line", "the", "method", "additionally", "calls", ":", "meth", ":", "adapt_am1", "to", "detect", "and", "adapt", "intrinsic", "AtMost1", "constraints", "before", "executing", "the", "loop", "." ]
train
https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/examples/rc2.py#L491-L522
0.003552
UCSBarchlab/PyRTL
pyrtl/helperfuncs.py
val_to_signed_integer
def val_to_signed_integer(value, bitwidth):
    """ Return value interpreted as a signed integer under two's complement.

    :param value: a python integer holding the value to convert
    :param bitwidth: the length of the integer in bits to assume for conversion

    Given an unsigned integer (not a wirevector!) convert that to a signed
    integer.  This is useful for printing and interpreting values which are
    negative numbers in two's complement. ::

        val_to_signed_integer(0xff, 8) == -1
    """
    if isinstance(value, WireVector) or isinstance(bitwidth, WireVector):
        raise PyrtlError('inputs must not be wirevectors')
    if bitwidth < 1:
        raise PyrtlError('bitwidth must be a positive integer')

    neg_mask = 1 << (bitwidth - 1)
    neg_part = value & neg_mask

    pos_mask = neg_mask - 1
    pos_part = value & pos_mask

    return pos_part - neg_part
python
def val_to_signed_integer(value, bitwidth):
    """ Return value interpreted as a signed integer under two's complement.

    :param value: a python integer holding the value to convert
    :param bitwidth: the length of the integer in bits to assume for conversion

    Given an unsigned integer (not a wirevector!) convert that to a signed
    integer.  This is useful for printing and interpreting values which are
    negative numbers in two's complement. ::

        val_to_signed_integer(0xff, 8) == -1
    """
    if isinstance(value, WireVector) or isinstance(bitwidth, WireVector):
        raise PyrtlError('inputs must not be wirevectors')
    if bitwidth < 1:
        raise PyrtlError('bitwidth must be a positive integer')

    neg_mask = 1 << (bitwidth - 1)
    neg_part = value & neg_mask

    pos_mask = neg_mask - 1
    pos_part = value & pos_mask

    return pos_part - neg_part
[ "def", "val_to_signed_integer", "(", "value", ",", "bitwidth", ")", ":", "if", "isinstance", "(", "value", ",", "WireVector", ")", "or", "isinstance", "(", "bitwidth", ",", "WireVector", ")", ":", "raise", "PyrtlError", "(", "'inputs must not be wirevectors'", ")", "if", "bitwidth", "<", "1", ":", "raise", "PyrtlError", "(", "'bitwidth must be a positive integer'", ")", "neg_mask", "=", "1", "<<", "(", "bitwidth", "-", "1", ")", "neg_part", "=", "value", "&", "neg_mask", "pos_mask", "=", "neg_mask", "-", "1", "pos_part", "=", "value", "&", "pos_mask", "return", "pos_part", "-", "neg_part" ]
Return value interpreted as a signed integer under two's complement.

:param value: a python integer holding the value to convert
:param bitwidth: the length of the integer in bits to assume for conversion

Given an unsigned integer (not a wirevector!) convert that to a signed
integer.  This is useful for printing and interpreting values which are
negative numbers in two's complement. ::

    val_to_signed_integer(0xff, 8) == -1
[ "Return", "value", "as", "intrepreted", "as", "a", "signed", "integer", "under", "twos", "complement", "." ]
train
https://github.com/UCSBarchlab/PyRTL/blob/0988e5c9c10ededd5e1f58d5306603f9edf4b3e2/pyrtl/helperfuncs.py#L200-L223
0.001117
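The mask arithmetic is easiest to trust after seeing concrete bytes; a standalone copy of the core three lines:

def to_signed(value, bitwidth):
    neg_mask = 1 << (bitwidth - 1)  # weight of the sign bit
    return (value & (neg_mask - 1)) - (value & neg_mask)

print(to_signed(0xff, 8))  # -1
print(to_signed(0x7f, 8))  # 127
print(to_signed(0x80, 8))  # -128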
GeospatialPython/pyshp
shapefile.py
Reader.iterShapes
def iterShapes(self): """Serves up shapes in a shapefile as an iterator. Useful for handling large shapefiles.""" shp = self.__getFileObj(self.shp) shp.seek(0,2) self.shpLength = shp.tell() shp.seek(100) while shp.tell() < self.shpLength: yield self.__shape()
python
def iterShapes(self): """Serves up shapes in a shapefile as an iterator. Useful for handling large shapefiles.""" shp = self.__getFileObj(self.shp) shp.seek(0,2) self.shpLength = shp.tell() shp.seek(100) while shp.tell() < self.shpLength: yield self.__shape()
[ "def", "iterShapes", "(", "self", ")", ":", "shp", "=", "self", ".", "__getFileObj", "(", "self", ".", "shp", ")", "shp", ".", "seek", "(", "0", ",", "2", ")", "self", ".", "shpLength", "=", "shp", ".", "tell", "(", ")", "shp", ".", "seek", "(", "100", ")", "while", "shp", ".", "tell", "(", ")", "<", "self", ".", "shpLength", ":", "yield", "self", ".", "__shape", "(", ")" ]
Serves up shapes in a shapefile as an iterator. Useful for handling large shapefiles.
[ "Serves", "up", "shapes", "in", "a", "shapefile", "as", "an", "iterator", ".", "Useful", "for", "handling", "large", "shapefiles", "." ]
train
https://github.com/GeospatialPython/pyshp/blob/71231ddc5aa54f155d4f0563c56006fffbfc84e7/shapefile.py#L871-L879
0.008955
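The seek-to-end / remember-length / yield-until-tell-catches-up pattern generalizes to any record stream; a runnable toy over an in-memory buffer of fixed 4-byte records (the shapefile version instead seeks past a 100-byte header and parses variable-length shape records):

import io
import struct

def iter_records(f, record_size=4):
    f.seek(0, 2)        # seek to end to learn the stream length
    length = f.tell()
    f.seek(0)           # rewind to the first record
    while f.tell() < length:
        yield struct.unpack('>i', f.read(record_size))[0]

buf = io.BytesIO(struct.pack('>3i', 10, 20, 30))
print(list(iter_records(buf)))  # [10, 20, 30]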
google/python-gflags
gflags/flag.py
MultiFlag.parse
def parse(self, arguments): """Parses one or more arguments with the installed parser. Args: arguments: a single argument or a list of arguments (typically a list of default values); a single argument is converted internally into a list containing one item. """ if not isinstance(arguments, list): # Default value may be a list of values. Most other arguments # will not be, so convert them into a single-item list to make # processing simpler below. arguments = [arguments] if self.present: # keep a backup reference to list of previously supplied option values values = self.value else: # "erase" the defaults with an empty list values = [] for item in arguments: # have Flag superclass parse argument, overwriting self.value reference Flag.Parse(self, item) # also increments self.present values.append(self.value) # put list of option values back in the 'value' attribute self.value = values
python
def parse(self, arguments): """Parses one or more arguments with the installed parser. Args: arguments: a single argument or a list of arguments (typically a list of default values); a single argument is converted internally into a list containing one item. """ if not isinstance(arguments, list): # Default value may be a list of values. Most other arguments # will not be, so convert them into a single-item list to make # processing simpler below. arguments = [arguments] if self.present: # keep a backup reference to list of previously supplied option values values = self.value else: # "erase" the defaults with an empty list values = [] for item in arguments: # have Flag superclass parse argument, overwriting self.value reference Flag.Parse(self, item) # also increments self.present values.append(self.value) # put list of option values back in the 'value' attribute self.value = values
[ "def", "parse", "(", "self", ",", "arguments", ")", ":", "if", "not", "isinstance", "(", "arguments", ",", "list", ")", ":", "# Default value may be a list of values. Most other arguments", "# will not be, so convert them into a single-item list to make", "# processing simpler below.", "arguments", "=", "[", "arguments", "]", "if", "self", ".", "present", ":", "# keep a backup reference to list of previously supplied option values", "values", "=", "self", ".", "value", "else", ":", "# \"erase\" the defaults with an empty list", "values", "=", "[", "]", "for", "item", "in", "arguments", ":", "# have Flag superclass parse argument, overwriting self.value reference", "Flag", ".", "Parse", "(", "self", ",", "item", ")", "# also increments self.present", "values", ".", "append", "(", "self", ".", "value", ")", "# put list of option values back in the 'value' attribute", "self", ".", "value", "=", "values" ]
Parses one or more arguments with the installed parser. Args: arguments: a single argument or a list of arguments (typically a list of default values); a single argument is converted internally into a list containing one item.
[ "Parses", "one", "or", "more", "arguments", "with", "the", "installed", "parser", "." ]
train
https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags/flag.py#L359-L386
0.011707
dcaune/perseus-lib-python-common
majormode/perseus/utils/cast.py
string_to_ipv4
def string_to_ipv4(value, strict=False):
    """
    Return a tuple corresponding to the string representation of an IPv4
    address.

    An IPv4 address is canonically represented in dot-decimal notation,
    which consists of four decimal numbers, each ranging from 0 to 255,
    separated by dots, e.g., ``172.16.254.1``.


    @param value: a dotted-decimal notation of an IPv4 address, consisting
        of four decimal numbers, each ranging from ``0`` to ``255``,
        separated by dots.

    @param strict: indicate whether the ``None`` value is accepted.


    @return: a tuple of four decimal numbers ``(byte1, byte2, byte3,
        byte4)``, each ranging from ``0`` to ``255``.
    """
    if is_undefined(value):
        if strict:
            raise ValueError('The value cannot be null')
        return None

    if not REGEX_IPV4.match(value):
        raise ValueError('The specified string "%s" does not represent an IPv4 address' % value)

    ipv4 = [int(byte) for byte in value.split('.') if int(byte) < 256]

    if len(ipv4) != 4:
        raise ValueError('The IPv4 "%s" has invalid byte(s)' % value)

    return tuple(ipv4)
python
def string_to_ipv4(value, strict=False):
    """
    Return a tuple corresponding to the string representation of an IPv4
    address.

    An IPv4 address is canonically represented in dot-decimal notation,
    which consists of four decimal numbers, each ranging from 0 to 255,
    separated by dots, e.g., ``172.16.254.1``.


    @param value: a dotted-decimal notation of an IPv4 address, consisting
        of four decimal numbers, each ranging from ``0`` to ``255``,
        separated by dots.

    @param strict: indicate whether the ``None`` value is accepted.


    @return: a tuple of four decimal numbers ``(byte1, byte2, byte3,
        byte4)``, each ranging from ``0`` to ``255``.
    """
    if is_undefined(value):
        if strict:
            raise ValueError('The value cannot be null')
        return None

    if not REGEX_IPV4.match(value):
        raise ValueError('The specified string "%s" does not represent an IPv4 address' % value)

    ipv4 = [int(byte) for byte in value.split('.') if int(byte) < 256]

    if len(ipv4) != 4:
        raise ValueError('The IPv4 "%s" has invalid byte(s)' % value)

    return tuple(ipv4)
[ "def", "string_to_ipv4", "(", "value", ",", "strict", "=", "False", ")", ":", "if", "is_undefined", "(", "value", ")", ":", "if", "strict", ":", "raise", "ValueError", "(", "'The value cannot be null'", ")", "return", "None", "if", "not", "REGEX_IPV4", ".", "match", "(", "value", ")", ":", "raise", "ValueError", "(", "'The specified string \"%s\" does not represent a IPv4'", "%", "value", ")", "ipv4", "=", "[", "int", "(", "byte", ")", "for", "byte", "in", "value", ".", "split", "(", "'.'", ")", "if", "int", "(", "byte", ")", "<", "256", "]", "if", "len", "(", "ipv4", ")", "!=", "4", ":", "raise", "ValueError", "(", "'The IPv4 \"%s\" has invalid byte(s)'", "%", "value", ")", "return", "ipv4" ]
Return a tuple corresponding to the string representation of an IPv4 address. An IPv4 address is canonically represented in dot-decimal notation, which consists of four decimal numbers, each ranging from 0 to 255, separated by dots, e.g., ``172.16.254.1``. @param value: a dotted-decimal notation of an IPv4 address, consisting of four decimal numbers, each ranging from ``0`` to ``255``, separated by dots. @param strict: indicate whether the ``None`` value is accepted. @return: a tuple of four decimal numbers ``(byte1, byte2, byte3, byte4)``, each ranging from ``0`` to ``255``.
[ "Return", "a", "tuple", "corresponding", "to", "the", "string", "representation", "of", "an", "IPv4", "address", "." ]
train
https://github.com/dcaune/perseus-lib-python-common/blob/ba48fe0fd9bb4a75b53e7d10c41ada36a72d4496/majormode/perseus/utils/cast.py#L283-L316
0.002646
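A hedged, self-contained usage sketch of the converter follows. REGEX_IPV4 and is_undefined are private helpers of the module; the versions below are assumptions reconstructed only so the example runs:

import re

# Assumed shapes of the module's helpers, for this sketch only.
REGEX_IPV4 = re.compile(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$')

def is_undefined(value):
    return value is None

def string_to_ipv4(value, strict=False):
    if is_undefined(value):
        if strict:
            raise ValueError('The value cannot be null')
        return None
    if not REGEX_IPV4.match(value):
        raise ValueError('"%s" does not represent an IPv4 address' % value)
    # Bytes above 255 are filtered out here and caught by the length check.
    ipv4 = [int(byte) for byte in value.split('.') if int(byte) < 256]
    if len(ipv4) != 4:
        raise ValueError('The IPv4 "%s" has invalid byte(s)' % value)
    return tuple(ipv4)

print(string_to_ipv4('172.16.254.1'))  # (172, 16, 254, 1)
print(string_to_ipv4(None))            # None (strict defaults to False)
try:
    string_to_ipv4('172.16.254.999')   # regex passes, byte check fails
except ValueError as error:
    print(error)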
quantumlib/Cirq
cirq/google/line/placement/anneal.py
AnnealSequenceSearchStrategy.place_line
def place_line(self, device: 'cirq.google.XmonDevice', length: int) -> GridQubitLineTuple: """Runs line sequence search. Args: device: Chip description. length: Required line length. Returns: List of linear sequences on the chip found by simulated annealing method. """ seqs = AnnealSequenceSearch(device, self.seed).search(self.trace_func) return GridQubitLineTuple.best_of(seqs, length)
python
def place_line(self, device: 'cirq.google.XmonDevice', length: int) -> GridQubitLineTuple: """Runs line sequence search. Args: device: Chip description. length: Required line length. Returns: List of linear sequences on the chip found by simulated annealing method. """ seqs = AnnealSequenceSearch(device, self.seed).search(self.trace_func) return GridQubitLineTuple.best_of(seqs, length)
[ "def", "place_line", "(", "self", ",", "device", ":", "'cirq.google.XmonDevice'", ",", "length", ":", "int", ")", "->", "GridQubitLineTuple", ":", "seqs", "=", "AnnealSequenceSearch", "(", "device", ",", "self", ".", "seed", ")", ".", "search", "(", "self", ".", "trace_func", ")", "return", "GridQubitLineTuple", ".", "best_of", "(", "seqs", ",", "length", ")" ]
Runs line sequence search. Args: device: Chip description. length: Required line length. Returns: List of linear sequences on the chip found by simulated annealing method.
[ "Runs", "line", "sequence", "search", "." ]
train
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/google/line/placement/anneal.py#L370-L384
0.007634
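For orientation, a hedged usage sketch of the strategy above. The cirq.google module path and the Foxtail device handle follow older cirq releases and may have moved in current versions; seed is assumed to be a constructor keyword because place_line reads self.seed:

import cirq

# Build the annealing-based placement strategy; a fixed seed (assumed
# constructor keyword) makes the stochastic search reproducible.
strategy = cirq.google.AnnealSequenceSearchStrategy(seed=42)

# Ask for a line of six adjacent qubits on the (legacy) Foxtail chip.
line = strategy.place_line(cirq.google.Foxtail, length=6)
print(line)  # a GridQubitLineTuple of connected GridQubits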
saltstack/salt
salt/modules/runit.py
_is_svc
def _is_svc(svc_path): ''' Return ``True`` if directory <svc_path> is really a service: file <svc_path>/run exists and is executable svc_path the (absolute) directory to check for compatibility ''' run_file = os.path.join(svc_path, 'run') if (os.path.exists(svc_path) and os.path.exists(run_file) and os.access(run_file, os.X_OK)): return True return False
python
def _is_svc(svc_path): ''' Return ``True`` if directory <svc_path> is really a service: file <svc_path>/run exists and is executable svc_path the (absolute) directory to check for compatibility ''' run_file = os.path.join(svc_path, 'run') if (os.path.exists(svc_path) and os.path.exists(run_file) and os.access(run_file, os.X_OK)): return True return False
[ "def", "_is_svc", "(", "svc_path", ")", ":", "run_file", "=", "os", ".", "path", ".", "join", "(", "svc_path", ",", "'run'", ")", "if", "(", "os", ".", "path", ".", "exists", "(", "svc_path", ")", "and", "os", ".", "path", ".", "exists", "(", "run_file", ")", "and", "os", ".", "access", "(", "run_file", ",", "os", ".", "X_OK", ")", ")", ":", "return", "True", "return", "False" ]
Return ``True`` if directory <svc_path> is really a service: file <svc_path>/run exists and is executable svc_path the (absolute) directory to check for compatibility
[ "Return", "True", "if", "directory", "<svc_path", ">", "is", "really", "a", "service", ":", "file", "<svc_path", ">", "/", "run", "exists", "and", "is", "executable" ]
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/runit.py#L243-L256
0.007092
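To see the check in isolation, here is a self-contained sketch that fabricates a minimal runit-style service directory and applies the same test _is_svc performs; nothing below touches the real Salt module:

import os
import stat
import tempfile

# Fabricate a throwaway service directory with an executable 'run' file.
svc_path = tempfile.mkdtemp()
run_file = os.path.join(svc_path, 'run')
with open(run_file, 'w') as handle:
    handle.write('#!/bin/sh\nexec sleep 1\n')
os.chmod(run_file, os.stat(run_file).st_mode | stat.S_IXUSR)

# The same three conditions _is_svc checks: the directory exists, the run
# file exists, and the run file is executable.
is_service = (os.path.exists(svc_path)
              and os.path.exists(run_file)
              and os.access(run_file, os.X_OK))
print(is_service)  # True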
spyder-ide/spyder
spyder/otherplugins.py
get_spyderplugins_mods
def get_spyderplugins_mods(io=False): """Import modules from plugins package and return the list""" # Create user directory user_plugin_path = osp.join(get_conf_path(), USER_PLUGIN_DIR) if not osp.isdir(user_plugin_path): os.makedirs(user_plugin_path) modlist, modnames = [], [] # The user plugins directory is given the priority when looking for modules for plugin_path in [user_plugin_path] + sys.path: _get_spyderplugins(plugin_path, io, modnames, modlist) return modlist
python
def get_spyderplugins_mods(io=False): """Import modules from plugins package and return the list""" # Create user directory user_plugin_path = osp.join(get_conf_path(), USER_PLUGIN_DIR) if not osp.isdir(user_plugin_path): os.makedirs(user_plugin_path) modlist, modnames = [], [] # The user plugins directory is given the priority when looking for modules for plugin_path in [user_plugin_path] + sys.path: _get_spyderplugins(plugin_path, io, modnames, modlist) return modlist
[ "def", "get_spyderplugins_mods", "(", "io", "=", "False", ")", ":", "# Create user directory\r", "user_plugin_path", "=", "osp", ".", "join", "(", "get_conf_path", "(", ")", ",", "USER_PLUGIN_DIR", ")", "if", "not", "osp", ".", "isdir", "(", "user_plugin_path", ")", ":", "os", ".", "makedirs", "(", "user_plugin_path", ")", "modlist", ",", "modnames", "=", "[", "]", ",", "[", "]", "# The user plugins directory is given the priority when looking for modules\r", "for", "plugin_path", "in", "[", "user_plugin_path", "]", "+", "sys", ".", "path", ":", "_get_spyderplugins", "(", "plugin_path", ",", "io", ",", "modnames", ",", "modlist", ")", "return", "modlist" ]
Import modules from plugins package and return the list
[ "Import", "modules", "from", "plugins", "package", "and", "return", "the", "list" ]
train
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/otherplugins.py#L31-L43
0.001866
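A rough sketch of the discovery pattern above. _get_spyderplugins, get_conf_path, and USER_PLUGIN_DIR are internal to Spyder, so the stand-ins below (including the '~/.spyder' config path and the 'spyder_' name prefix) are assumptions for illustration only:

import os
import os.path as osp
import sys

USER_PLUGIN_DIR = 'spyplugins'  # assumed directory name

def find_plugin_names(prefix='spyder_'):
    # Ensure the user plugin directory exists, mirroring the real function.
    user_plugin_path = osp.join(osp.expanduser('~/.spyder'), USER_PLUGIN_DIR)
    os.makedirs(user_plugin_path, exist_ok=True)
    names = []
    # The user directory is scanned first, so it wins on duplicate names.
    for plugin_path in [user_plugin_path] + sys.path:
        if not osp.isdir(plugin_path):
            continue  # sys.path may contain zip files or stale entries
        for entry in os.listdir(plugin_path):
            if entry.startswith(prefix) and entry not in names:
                names.append(entry)
    return names

print(find_plugin_names())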