Columns:
  text          string    lengths 75 to 104k
  code_tokens   sequence
  avg_line_len  float64   range 7.91 to 980
  score         float64   range 0 to 0.18
def set_mean(self, col, row, mean):
    """
    Sets the mean at this location (if valid location).

    :param col: the 0-based column index
    :type col: int
    :param row: the 0-based row index
    :type row: int
    :param mean: the mean to set
    :type mean: float
    """
    javabridge.call(self.jobject, "setMean", "(IID)V", col, row, mean)
[ "def", "set_mean", "(", "self", ",", "col", ",", "row", ",", "mean", ")", ":", "javabridge", ".", "call", "(", "self", ".", "jobject", ",", "\"setMean\"", ",", "\"(IID)V\"", ",", "col", ",", "row", ",", "mean", ")" ]
avg_line_len: 31.666667, score: 0.005115
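For context on the JNI signature above: "(IID)V" declares two int arguments and one double, returning void. A minimal sketch of calling such a method directly through javabridge, assuming a running JVM and a hypothetical Java class with a matching setMean method:

import javabridge

javabridge.start_vm(run_headless=True)
try:
    # Hypothetical class; stands in for the wrapped matrix object (self.jobject).
    jobj = javabridge.make_instance("com/example/MeanMatrix", "()V")
    # "(IID)V": int col, int row, double mean -> void
    javabridge.call(jobj, "setMean", "(IID)V", 0, 1, 3.14)
finally:
    javabridge.kill_vm()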
def update_show_function(self, show_name, show_func):
    """
    Modifies/overwrites an existing show function in the locally cached
    DesignDocument shows dictionary.

    :param show_name: Name used to identify the show function.
    :param show_func: Javascript show function.
    """
    if self.get_show_function(show_name) is None:
        raise CloudantArgumentError(114, show_name)
    self.shows.__setitem__(show_name, show_func)
[ "def", "update_show_function", "(", "self", ",", "show_name", ",", "show_func", ")", ":", "if", "self", ".", "get_show_function", "(", "show_name", ")", "is", "None", ":", "raise", "CloudantArgumentError", "(", "114", ",", "show_name", ")", "self", ".", "shows", ".", "__setitem__", "(", "show_name", ",", "show_func", ")" ]
avg_line_len: 38.916667, score: 0.004184
def securitycli():
    """
    Entry point for the runner defined in setup.py.
    """
    parser = argparse.ArgumentParser(description="Runner for security test suite")
    parser.add_argument("-l", "--list-test-groups", action="store_true",
                        help="List all logical test groups")
    parser.add_argument("-a", "--list-all-tests", action="store_true",
                        help="List all tests")
    parser.add_argument("-i", "--include", metavar="GROUP", action="append",
                        default=[],
                        help="Only include specified group(s) in run, include several "
                             "groups by repeating flag")
    parser.add_argument("--version", action="store", dest="version",
                        help="B2G version")
    parser.add_argument("--ipython", dest="ipython", action="store_true",
                        help="drop to ipython session")
    parser.add_argument('-H', '--host', help='Hostname or ip for target device',
                        action='store', default='localhost')
    parser.add_argument('-P', '--port', help='Port for target device',
                        action='store', default=2828)
    parser.add_argument('-m', '--mode', help='Test mode (stingray, phone) default (phone)',
                        action='store', default='phone')
    parser.add_argument("-v", dest="verbose", action="store_true",
                        help="Verbose output")

    # add specialized mozilla logger options
    commandline.add_logging_group(parser)
    args = parser.parse_args()

    # set up mozilla logger
    logger = commandline.setup_logging("securitysuite", vars(args), {"raw": sys.stdout})

    try:
        if args.list_test_groups:
            for group in ExtraTest.group_list(args.mode):
                print(group)
        elif args.list_all_tests:
            for test in ExtraTest.test_list(args.mode):
                print("%s.%s" % (test.group, test.__name__))
        elif args.ipython:
            from IPython import embed
            embed()
        elif args.mode == 'stingray':
            logger.debug("security cli running with args %s" % args)
            ExtraTest.run_groups(args.include, version=args.version,
                                 host=args.host, port=int(args.port),
                                 mode=args.mode)
        else:
            logger.debug("security cli running with args %s" % args)
            wait_for_adb_device()
            if not adb_has_root():
                logger.warning("adb has no root. Results will be incomplete.")
            ExtraTest.run_groups(args.include, version=args.version)
    except:
        logger.critical(traceback.format_exc())
        raise
[ "def", "securitycli", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "\"Runner for security test suite\"", ")", "parser", ".", "add_argument", "(", "\"-l\"", ",", "\"--list-test-groups\"", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"List all logical test groups\"", ")", "parser", ".", "add_argument", "(", "\"-a\"", ",", "\"--list-all-tests\"", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"List all tests\"", ")", "parser", ".", "add_argument", "(", "\"-i\"", ",", "\"--include\"", ",", "metavar", "=", "\"GROUP\"", ",", "action", "=", "\"append\"", ",", "default", "=", "[", "]", ",", "help", "=", "\"Only include specified group(s) in run, include several \"", "\"groups by repeating flag\"", ")", "parser", ".", "add_argument", "(", "\"--version\"", ",", "action", "=", "\"store\"", ",", "dest", "=", "\"version\"", ",", "help", "=", "\"B2G version\"", ")", "parser", ".", "add_argument", "(", "\"--ipython\"", ",", "dest", "=", "\"ipython\"", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"drop to ipython session\"", ")", "parser", ".", "add_argument", "(", "'-H'", ",", "'--host'", ",", "help", "=", "'Hostname or ip for target device'", ",", "action", "=", "'store'", ",", "default", "=", "'localhost'", ")", "parser", ".", "add_argument", "(", "'-P'", ",", "'--port'", ",", "help", "=", "'Port for target device'", ",", "action", "=", "'store'", ",", "default", "=", "2828", ")", "parser", ".", "add_argument", "(", "'-m'", ",", "'--mode'", ",", "help", "=", "'Test mode (stingray, phone) default (phone)'", ",", "action", "=", "'store'", ",", "default", "=", "'phone'", ")", "parser", ".", "add_argument", "(", "\"-v\"", ",", "dest", "=", "\"verbose\"", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Verbose output\"", ")", "# add specialized mozilla logger options", "commandline", ".", "add_logging_group", "(", "parser", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "# set up mozilla logger", "logger", "=", "commandline", ".", "setup_logging", "(", "\"securitysuite\"", ",", "vars", "(", "args", ")", ",", "{", "\"raw\"", ":", "sys", ".", "stdout", "}", ")", "try", ":", "if", "args", ".", "list_test_groups", ":", "for", "group", "in", "ExtraTest", ".", "group_list", "(", "args", ".", "mode", ")", ":", "print", "group", "elif", "args", ".", "list_all_tests", ":", "for", "test", "in", "ExtraTest", ".", "test_list", "(", "args", ".", "mode", ")", ":", "print", "\"%s.%s\"", "%", "(", "test", ".", "group", ",", "test", ".", "__name__", ")", "elif", "args", ".", "ipython", ":", "from", "IPython", "import", "embed", "embed", "(", ")", "elif", "args", ".", "mode", "==", "'stingray'", ":", "logger", ".", "debug", "(", "\"security cli runnng with args %s\"", "%", "args", ")", "ExtraTest", ".", "run_groups", "(", "args", ".", "include", ",", "version", "=", "args", ".", "version", ",", "host", "=", "args", ".", "host", ",", "port", "=", "int", "(", "args", ".", "port", ")", ",", "mode", "=", "args", ".", "mode", ")", "else", ":", "logger", ".", "debug", "(", "\"security cli runnng with args %s\"", "%", "args", ")", "wait_for_adb_device", "(", ")", "if", "not", "adb_has_root", "(", ")", ":", "logger", ".", "warning", "(", "\"adb has no root. Results will be incomplete.\"", ")", "ExtraTest", ".", "run_groups", "(", "args", ".", "include", ",", "version", "=", "args", ".", "version", ")", "except", ":", "logger", ".", "critical", "(", "traceback", ".", "format_exc", "(", ")", ")", "raise" ]
avg_line_len: 43.539683, score: 0.002139
def send_with_template(message, api_key=None, secure=None, test=None,
                       **request_args):
    '''Send a message.

    :param message: Message to send.
    :type message: `dict` or :class:`Message`
    :param api_key: Your Postmark API key. Required, if `test` is not `True`.
    :param secure: Use the https scheme for the Postmark API.
        Defaults to `True`
    :param test: Use the Postmark Test API. Defaults to `False`.
    :param \*\*request_args: Keyword arguments to pass to
        :func:`requests.request`.
    :rtype: :class:`SendResponse`
    '''
    return _default_pyst_template_sender.send(message=message, api_key=api_key,
                                              secure=secure, test=test,
                                              **request_args)
[ "def", "send_with_template", "(", "message", ",", "api_key", "=", "None", ",", "secure", "=", "None", ",", "test", "=", "None", ",", "*", "*", "request_args", ")", ":", "return", "_default_pyst_template_sender", ".", "send", "(", "message", "=", "message", ",", "api_key", "=", "api_key", ",", "secure", "=", "secure", ",", "test", "=", "test", ",", "*", "*", "request_args", ")" ]
avg_line_len: 42.545455, score: 0.003135
def mac(self, algorithm, key, data):
    """
    Generate message authentication code.

    Args:
        algorithm(CryptographicAlgorithm): An enumeration specifying the
            algorithm for which the MAC operation will use.
        key(bytes): secret key used in the MAC operation
        data(bytes): The data to be MACed.

    Returns:
        bytes: The MACed data

    Raises:
        InvalidField: Raised when the algorithm is unsupported or the
            length is incompatible with the algorithm.
        CryptographicFailure: Raised when the key generation process
            fails.

    Example:
        >>> engine = CryptographyEngine()
        >>> mac_data = engine.mac(
        ...     CryptographicAlgorithm.HMAC_SHA256, b'\x01\x02\x03\x04',
        ...     b'\x05\x06\x07\x08')
    """
    mac_data = None

    if algorithm in self._hash_algorithms.keys():
        self.logger.info(
            "Generating a hash-based message authentication code using "
            "{0}".format(algorithm.name)
        )
        hash_algorithm = self._hash_algorithms.get(algorithm)
        try:
            h = hmac.HMAC(key, hash_algorithm(), backend=default_backend())
            h.update(data)
            mac_data = h.finalize()
        except Exception as e:
            self.logger.exception(e)
            raise exceptions.CryptographicFailure(
                "An error occurred while computing an HMAC. "
                "See the server log for more information."
            )
    elif algorithm in self._symmetric_key_algorithms.keys():
        self.logger.info(
            "Generating a cipher-based message authentication code using "
            "{0}".format(algorithm.name)
        )
        cipher_algorithm = self._symmetric_key_algorithms.get(algorithm)
        try:
            # ARC4 and IDEA algorithms will raise an exception, as CMAC
            # requires block ciphers
            c = cmac.CMAC(cipher_algorithm(key), backend=default_backend())
            c.update(data)
            mac_data = c.finalize()
        except Exception as e:
            self.logger.exception(e)
            raise exceptions.CryptographicFailure(
                "An error occurred while computing a CMAC. "
                "See the server log for more information."
            )
    else:
        raise exceptions.InvalidField(
            "The cryptographic algorithm ({0}) is not supported "
            "for a MAC operation.".format(algorithm)
        )
    return mac_data
[ "def", "mac", "(", "self", ",", "algorithm", ",", "key", ",", "data", ")", ":", "mac_data", "=", "None", "if", "algorithm", "in", "self", ".", "_hash_algorithms", ".", "keys", "(", ")", ":", "self", ".", "logger", ".", "info", "(", "\"Generating a hash-based message authentication code using \"", "\"{0}\"", ".", "format", "(", "algorithm", ".", "name", ")", ")", "hash_algorithm", "=", "self", ".", "_hash_algorithms", ".", "get", "(", "algorithm", ")", "try", ":", "h", "=", "hmac", ".", "HMAC", "(", "key", ",", "hash_algorithm", "(", ")", ",", "backend", "=", "default_backend", "(", ")", ")", "h", ".", "update", "(", "data", ")", "mac_data", "=", "h", ".", "finalize", "(", ")", "except", "Exception", "as", "e", ":", "self", ".", "logger", ".", "exception", "(", "e", ")", "raise", "exceptions", ".", "CryptographicFailure", "(", "\"An error occurred while computing an HMAC. \"", "\"See the server log for more information.\"", ")", "elif", "algorithm", "in", "self", ".", "_symmetric_key_algorithms", ".", "keys", "(", ")", ":", "self", ".", "logger", ".", "info", "(", "\"Generating a cipher-based message authentication code using \"", "\"{0}\"", ".", "format", "(", "algorithm", ".", "name", ")", ")", "cipher_algorithm", "=", "self", ".", "_symmetric_key_algorithms", ".", "get", "(", "algorithm", ")", "try", ":", "# ARC4 and IDEA algorithms will raise exception as CMAC", "# requires block ciphers", "c", "=", "cmac", ".", "CMAC", "(", "cipher_algorithm", "(", "key", ")", ",", "backend", "=", "default_backend", "(", ")", ")", "c", ".", "update", "(", "data", ")", "mac_data", "=", "c", ".", "finalize", "(", ")", "except", "Exception", "as", "e", ":", "raise", "exceptions", ".", "CryptographicFailure", "(", "\"An error occurred while computing a CMAC. \"", "\"See the server log for more information.\"", ")", "else", ":", "raise", "exceptions", ".", "InvalidField", "(", "\"The cryptographic algorithm ({0}) is not a supported \"", "\"for a MAC operation.\"", ".", "format", "(", "algorithm", ")", ")", "return", "mac_data" ]
avg_line_len: 39.283582, score: 0.000741
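The hash branch above delegates to the pyca/cryptography HMAC primitive. A standalone sketch of the same call sequence outside the engine (with recent versions of the library the backend argument is optional, so it is omitted here):

from cryptography.hazmat.primitives import hashes, hmac

key = b'\x01\x02\x03\x04'
h = hmac.HMAC(key, hashes.SHA256())
h.update(b'\x05\x06\x07\x08')
mac_data = h.finalize()  # 32-byte tag for SHA-256
print(mac_data.hex())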
def adam7_generate(width, height):
    """
    Generate the coordinates for the reduced scanlines of an Adam7
    interlaced image of size `width` by `height` pixels.

    Yields a generator for each pass, and each pass generator yields a
    series of (x, y, xstep) triples, each one identifying a reduced
    scanline consisting of pixels starting at (x, y) and taking every
    xstep pixel to the right.
    """
    for xstart, ystart, xstep, ystep in adam7:
        if xstart >= width:
            continue
        yield ((xstart, y, xstep) for y in range(ystart, height, ystep))
[ "def", "adam7_generate", "(", "width", ",", "height", ")", ":", "for", "xstart", ",", "ystart", ",", "xstep", ",", "ystep", "in", "adam7", ":", "if", "xstart", ">=", "width", ":", "continue", "yield", "(", "(", "xstart", ",", "y", ",", "xstep", ")", "for", "y", "in", "range", "(", "ystart", ",", "height", ",", "ystep", ")", ")" ]
avg_line_len: 35.8125, score: 0.001701
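adam7_generate iterates over a module-level adam7 constant of per-pass offsets. A runnable sketch with the standard Adam7 (xstart, ystart, xstep, ystep) table filled in:

# Standard Adam7 offsets for each of the 7 passes.
adam7 = [(0, 0, 8, 8), (4, 0, 8, 8), (0, 4, 4, 8), (2, 4, 4, 8),
         (0, 2, 2, 4), (1, 2, 2, 4), (0, 1, 1, 2)]

for passnum, scanlines in enumerate(adam7_generate(16, 16), start=1):
    # print the first few (x, y, xstep) scanline triples of each pass
    print("pass %d: %s" % (passnum, list(scanlines)[:3]))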
def apply_network_settings(**settings):
    '''
    Apply global network configuration.

    CLI Example:

    .. code-block:: bash

        salt '*' ip.apply_network_settings
    '''
    if __grains__['lsb_distrib_id'] == 'nilrt':
        raise salt.exceptions.CommandExecutionError('Not supported in this version.')

    if 'require_reboot' not in settings:
        settings['require_reboot'] = False
    if 'apply_hostname' not in settings:
        settings['apply_hostname'] = False

    hostname_res = True
    if settings['apply_hostname'] in _CONFIG_TRUE:
        if 'hostname' in settings:
            hostname_res = __salt__['network.mod_hostname'](settings['hostname'])
        else:
            log.warning(
                'The network state sls is trying to apply hostname '
                'changes but no hostname is defined.'
            )
            hostname_res = False

    res = True
    if settings['require_reboot'] in _CONFIG_TRUE:
        log.warning(
            'The network state sls is requiring a reboot of the system to '
            'properly apply network configuration.'
        )
        res = True
    else:
        stop = __salt__['service.stop']('connman')
        time.sleep(2)
        res = stop and __salt__['service.start']('connman')

    return hostname_res and res
[ "def", "apply_network_settings", "(", "*", "*", "settings", ")", ":", "if", "__grains__", "[", "'lsb_distrib_id'", "]", "==", "'nilrt'", ":", "raise", "salt", ".", "exceptions", ".", "CommandExecutionError", "(", "'Not supported in this version.'", ")", "if", "'require_reboot'", "not", "in", "settings", ":", "settings", "[", "'require_reboot'", "]", "=", "False", "if", "'apply_hostname'", "not", "in", "settings", ":", "settings", "[", "'apply_hostname'", "]", "=", "False", "hostname_res", "=", "True", "if", "settings", "[", "'apply_hostname'", "]", "in", "_CONFIG_TRUE", ":", "if", "'hostname'", "in", "settings", ":", "hostname_res", "=", "__salt__", "[", "'network.mod_hostname'", "]", "(", "settings", "[", "'hostname'", "]", ")", "else", ":", "log", ".", "warning", "(", "'The network state sls is trying to apply hostname '", "'changes but no hostname is defined.'", ")", "hostname_res", "=", "False", "res", "=", "True", "if", "settings", "[", "'require_reboot'", "]", "in", "_CONFIG_TRUE", ":", "log", ".", "warning", "(", "'The network state sls is requiring a reboot of the system to '", "'properly apply network configuration.'", ")", "res", "=", "True", "else", ":", "stop", "=", "__salt__", "[", "'service.stop'", "]", "(", "'connman'", ")", "time", ".", "sleep", "(", "2", ")", "res", "=", "stop", "and", "__salt__", "[", "'service.start'", "]", "(", "'connman'", ")", "return", "hostname_res", "and", "res" ]
avg_line_len: 30.190476, score: 0.002292
def to_find(self):
    '''
    Todo: the name should be changed. Lists the infos.
    '''
    kwd = {'pager': ''}
    self.render('user/info_list/most.html',
                topmenu='',
                userinfo=self.userinfo,
                kwd=kwd)
[ "def", "to_find", "(", "self", ")", ":", "kwd", "=", "{", "'pager'", ":", "''", "}", "self", ".", "render", "(", "'user/info_list/most.html'", ",", "topmenu", "=", "''", ",", "userinfo", "=", "self", ".", "userinfo", ",", "kwd", "=", "kwd", ")" ]
avg_line_len: 28.1, score: 0.006897
def associate(self, eip_or_aid, instance_id='', network_interface_id='',
              private_ip=''):
    """Associate an EIP with a given instance or network interface. If
    the EIP was allocated for a VPC instance, an AllocationId (aid) must
    be provided instead of a PublicIp.
    """
    if "." in eip_or_aid:  # If an IP is given (Classic)
        return self.call("AssociateAddress",
                         PublicIp=eip_or_aid,
                         InstanceId=instance_id,
                         NetworkInterfaceId=network_interface_id,
                         PrivateIpAddress=private_ip)
    else:  # If an AID is given (VPC)
        return self.call("AssociateAddress",
                         AllocationId=eip_or_aid,
                         InstanceId=instance_id,
                         NetworkInterfaceId=network_interface_id,
                         PrivateIpAddress=private_ip)
[ "def", "associate", "(", "self", ",", "eip_or_aid", ",", "instance_id", "=", "''", ",", "network_interface_id", "=", "''", ",", "private_ip", "=", "''", ")", ":", "if", "\".\"", "in", "eip_or_aid", ":", "# If an IP is given (Classic)", "return", "self", ".", "call", "(", "\"AssociateAddress\"", ",", "PublicIp", "=", "eip_or_aid", ",", "InstanceId", "=", "instance_id", ",", "NetworkInterfaceId", "=", "network_interface_id", ",", "PrivateIpAddress", "=", "private_ip", ")", "else", ":", "# If an AID is given (VPC)", "return", "self", ".", "call", "(", "\"AssociateAddress\"", ",", "AllocationId", "=", "eip_or_aid", ",", "InstanceId", "=", "instance_id", ",", "NetworkInterfaceId", "=", "network_interface_id", ",", "PrivateIpAddress", "=", "private_ip", ")" ]
avg_line_len: 55.777778, score: 0.002938
def popone(self, key, *default):
    """Remove first of given key and return corresponding value.

    If key is not found, default is returned if given.

    >>> m = MutableMultiMap([('a', 1), ('b', 2), ('b', 3), ('c', 4)])
    >>> m.popone('b')
    2
    >>> m.items()
    [('a', 1), ('b', 3), ('c', 4)]
    >>> m.popone('b')
    3
    >>> m.popone('b')
    Traceback (most recent call last):
    ...
    KeyError: 'b'
    >>> m.popone('b', 'default')
    'default'

    """
    try:
        value = self[key]
    except KeyError:
        if default:
            return default[0]
        raise
    # Delete this one.
    self._remove_pairs([self._key_ids[self._conform_key(key)].pop(0)])
    return value
[ "def", "popone", "(", "self", ",", "key", ",", "*", "default", ")", ":", "try", ":", "value", "=", "self", "[", "key", "]", "except", "KeyError", ":", "if", "default", ":", "return", "default", "[", "0", "]", "raise", "# Delete this one.", "self", ".", "_remove_pairs", "(", "[", "self", ".", "_key_ids", "[", "self", ".", "_conform_key", "(", "key", ")", "]", ".", "pop", "(", "0", ")", "]", ")", "return", "value" ]
avg_line_len: 26.419355, score: 0.008245
def to_string(type):
    """
    Converts a TypeCode into its string name.

    :param type: the TypeCode to convert into a string.

    :return: the name of the TypeCode passed as a string value.
    """
    if type is None:
        return "unknown"
    elif type == TypeCode.Unknown:
        return "unknown"
    elif type == TypeCode.String:
        return "string"
    elif type == TypeCode.Integer:
        return "integer"
    elif type == TypeCode.Long:
        return "long"
    elif type == TypeCode.Float:
        return "float"
    elif type == TypeCode.Double:
        return "double"
    elif type == TypeCode.Duration:
        return "duration"
    elif type == TypeCode.DateTime:
        return "datetime"
    elif type == TypeCode.Object:
        return "object"
    elif type == TypeCode.Enum:
        return "enum"
    elif type == TypeCode.Array:
        return "array"
    elif type == TypeCode.Map:
        return "map"
    else:
        return "unknown"
[ "def", "to_string", "(", "type", ")", ":", "if", "type", "==", "None", ":", "return", "\"unknown\"", "elif", "type", "==", "TypeCode", ".", "Unknown", ":", "return", "\"unknown\"", "elif", "type", "==", "TypeCode", ".", "String", ":", "return", "\"string\"", "elif", "type", "==", "TypeCode", ".", "Integer", ":", "return", "\"integer\"", "elif", "type", "==", "TypeCode", ".", "Long", ":", "return", "\"long\"", "elif", "type", "==", "TypeCode", ".", "Float", ":", "return", "\"float\"", "elif", "type", "==", "TypeCode", ".", "Double", ":", "return", "\"double\"", "elif", "type", "==", "TypeCode", ".", "Duration", ":", "return", "\"duration\"", "elif", "type", "==", "TypeCode", ".", "DateTime", ":", "return", "\"datetime\"", "elif", "type", "==", "TypeCode", ".", "Object", ":", "return", "\"object\"", "elif", "type", "==", "TypeCode", ".", "Enum", ":", "return", "\"enum\"", "elif", "type", "==", "TypeCode", ".", "Array", ":", "return", "\"array\"", "elif", "type", "==", "TypeCode", ".", "Map", ":", "return", "\"map\"", "else", ":", "return", "\"unknown\"" ]
avg_line_len: 29.777778, score: 0.00271
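Since the chain above is a one-to-one mapping from enum member to name, an equivalent and more compact formulation is a dict lookup; a sketch, not the library's actual code:

_TYPE_NAMES = {
    TypeCode.String: "string", TypeCode.Integer: "integer",
    TypeCode.Long: "long", TypeCode.Float: "float",
    TypeCode.Double: "double", TypeCode.Duration: "duration",
    TypeCode.DateTime: "datetime", TypeCode.Object: "object",
    TypeCode.Enum: "enum", TypeCode.Array: "array", TypeCode.Map: "map",
}

def to_string(type):
    # None and TypeCode.Unknown both fall through to the default
    return _TYPE_NAMES.get(type, "unknown")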
def applymap(self, func, subset=None, **kwargs):
    """
    Apply a function elementwise, updating the HTML
    representation with the result.

    Parameters
    ----------
    func : function
        ``func`` should take a scalar and return a scalar
    subset : IndexSlice
        a valid indexer to limit ``data`` to *before* applying the
        function. Consider using a pandas.IndexSlice
    kwargs : dict
        pass along to ``func``

    Returns
    -------
    self : Styler

    See Also
    --------
    Styler.where
    """
    self._todo.append((lambda instance: getattr(instance, '_applymap'),
                       (func, subset), kwargs))
    return self
[ "def", "applymap", "(", "self", ",", "func", ",", "subset", "=", "None", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_todo", ".", "append", "(", "(", "lambda", "instance", ":", "getattr", "(", "instance", ",", "'_applymap'", ")", ",", "(", "func", ",", "subset", ")", ",", "kwargs", ")", ")", "return", "self" ]
avg_line_len: 28.461538, score: 0.002614
def add_get(self, path, controller, template, raw=False):
    """
    Setup a route of type GET

    Args:
        path (str): URL to listen to
        controller (coroutine): the coroutine to handle the request
        template (str): the template to render the response or None if
            it is a JSON response
        raw (bool): indicates if post-processing (jinja, json, etc) is
            needed or not
    """
    if raw:
        fn = controller
    else:
        fn = self._prepare_controller(controller, template)
    self.app.router.add_get(path, fn)
[ "def", "add_get", "(", "self", ",", "path", ",", "controller", ",", "template", ",", "raw", "=", "False", ")", ":", "if", "raw", ":", "fn", "=", "controller", "else", ":", "fn", "=", "self", ".", "_prepare_controller", "(", "controller", ",", "template", ")", "self", ".", "app", ".", "router", ".", "add_get", "(", "path", ",", "fn", ")" ]
avg_line_len: 35.6875, score: 0.006826
def save_data(self, *, data_filter=None, redownload=False, max_threads=None,
              raise_download_errors=False):
    """
    Saves data to disk. If ``redownload=True`` then the data is redownloaded
    using ``max_threads`` workers.

    :param data_filter: Used to specify which items will be returned by the
        method and in which order. E.g. with `data_filter=[0, 2, -1]` the
        method will return only 1st, 3rd and last item. Default filter is
        ``None``.
    :type data_filter: list(int) or None
    :param redownload: data is redownloaded if ``redownload=True``. Default
        is ``False``
    :type redownload: bool
    :param max_threads: number of threads to use when downloading data;
        default is ``max_threads=None`` which by default uses the number of
        processors on the system
    :type max_threads: int
    :param raise_download_errors: If ``True`` any error in download process
        should be raised as ``DownloadFailedException``. If ``False`` failed
        downloads will only raise warnings.
    :type raise_download_errors: bool
    """
    self._preprocess_request(True, False)
    self._execute_data_download(data_filter, redownload, max_threads,
                                raise_download_errors)
[ "def", "save_data", "(", "self", ",", "*", ",", "data_filter", "=", "None", ",", "redownload", "=", "False", ",", "max_threads", "=", "None", ",", "raise_download_errors", "=", "False", ")", ":", "self", ".", "_preprocess_request", "(", "True", ",", "False", ")", "self", ".", "_execute_data_download", "(", "data_filter", ",", "redownload", ",", "max_threads", ",", "raise_download_errors", ")" ]
avg_line_len: 67.944444, score: 0.008871
def get_paginated_response(data, request):
    """
    Update pagination links in course catalog data and return DRF Response.

    Arguments:
        data (dict): Dictionary containing catalog courses.
        request (HttpRequest): Current request object.

    Returns:
        (Response): DRF response object containing pagination links.
    """
    url = urlparse(request.build_absolute_uri())._replace(query=None).geturl()

    next_page = None
    previous_page = None

    if data['next']:
        next_page = "{base_url}?{query_parameters}".format(
            base_url=url,
            query_parameters=urlparse(data['next']).query,
        )
        next_page = next_page.rstrip('?')
    if data['previous']:
        previous_page = "{base_url}?{query_parameters}".format(
            base_url=url,
            query_parameters=urlparse(data['previous'] or "").query,
        )
        previous_page = previous_page.rstrip('?')

    return Response(OrderedDict([
        ('count', data['count']),
        ('next', next_page),
        ('previous', previous_page),
        ('results', data['results'])
    ]))
[ "def", "get_paginated_response", "(", "data", ",", "request", ")", ":", "url", "=", "urlparse", "(", "request", ".", "build_absolute_uri", "(", ")", ")", ".", "_replace", "(", "query", "=", "None", ")", ".", "geturl", "(", ")", "next_page", "=", "None", "previous_page", "=", "None", "if", "data", "[", "'next'", "]", ":", "next_page", "=", "\"{base_url}?{query_parameters}\"", ".", "format", "(", "base_url", "=", "url", ",", "query_parameters", "=", "urlparse", "(", "data", "[", "'next'", "]", ")", ".", "query", ",", ")", "next_page", "=", "next_page", ".", "rstrip", "(", "'?'", ")", "if", "data", "[", "'previous'", "]", ":", "previous_page", "=", "\"{base_url}?{query_parameters}\"", ".", "format", "(", "base_url", "=", "url", ",", "query_parameters", "=", "urlparse", "(", "data", "[", "'previous'", "]", "or", "\"\"", ")", ".", "query", ",", ")", "previous_page", "=", "previous_page", ".", "rstrip", "(", "'?'", ")", "return", "Response", "(", "OrderedDict", "(", "[", "(", "'count'", ",", "data", "[", "'count'", "]", ")", ",", "(", "'next'", ",", "next_page", ")", ",", "(", "'previous'", ",", "previous_page", ")", ",", "(", "'results'", ",", "data", "[", "'results'", "]", ")", "]", ")", ")" ]
avg_line_len: 30.942857, score: 0.000895
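The core trick above is rebasing the upstream service's next/previous links onto the current request's URL. A self-contained sketch of that logic, with hypothetical URLs:

from urllib.parse import urlparse

# strip the query from the current request URL, keep scheme/host/path
base = urlparse('https://lms.example.com/api/catalogs/?page=2')._replace(query=None).geturl()
# re-attach the query string from the upstream pagination link
upstream_next = 'https://catalog.internal/api/courses/?page=3'
next_page = "{base_url}?{query_parameters}".format(
    base_url=base, query_parameters=urlparse(upstream_next).query).rstrip('?')
print(next_page)  # https://lms.example.com/api/catalogs/?page=3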
def get_sdb_secret_version_paths(self, sdb_id):
    """ Get SDB secret version paths. This function takes the sdb_id """
    sdb_resp = get_with_retry(
        str.join('', [self.cerberus_url, '/v1/sdb-secret-version-paths/', sdb_id]),
        headers=self.HEADERS
    )
    throw_if_bad_response(sdb_resp)
    return sdb_resp.json()
[ "def", "get_sdb_secret_version_paths", "(", "self", ",", "sdb_id", ")", ":", "sdb_resp", "=", "get_with_retry", "(", "str", ".", "join", "(", "''", ",", "[", "self", ".", "cerberus_url", ",", "'/v1/sdb-secret-version-paths/'", ",", "sdb_id", "]", ")", ",", "headers", "=", "self", ".", "HEADERS", ")", "throw_if_bad_response", "(", "sdb_resp", ")", "return", "sdb_resp", ".", "json", "(", ")" ]
avg_line_len: 44.375, score: 0.01105
def js(request):
    """Returns the javascript needed to run persona"""
    userid = authenticated_userid(request)
    user = markupsafe.Markup("'%s'") % userid if userid else "null"

    redirect_parameter = request.registry['persona.redirect_url_parameter']
    came_from = '%s%s' % (request.host_url,
                          request.GET.get(redirect_parameter, request.path_qs))

    data = {
        'user': user,
        'login': request.route_path(request.registry['persona.login_route']),
        'logout': request.route_path(request.registry['persona.logout_route']),
        'csrf_token': request.session.get_csrf_token(),
        'came_from': came_from,
        'request_params': markupsafe.Markup(request.registry['persona.request_params']),
    }
    template = markupsafe.Markup(pkg_resources.resource_string(
        'pyramid_persona', 'templates/persona.js').decode())
    return template % data
[ "def", "js", "(", "request", ")", ":", "userid", "=", "authenticated_userid", "(", "request", ")", "user", "=", "markupsafe", ".", "Markup", "(", "\"'%s'\"", ")", "%", "userid", "if", "userid", "else", "\"null\"", "redirect_paramater", "=", "request", ".", "registry", "[", "'persona.redirect_url_parameter'", "]", "came_from", "=", "'%s%s'", "%", "(", "request", ".", "host_url", ",", "request", ".", "GET", ".", "get", "(", "redirect_paramater", ",", "request", ".", "path_qs", ")", ")", "data", "=", "{", "'user'", ":", "user", ",", "'login'", ":", "request", ".", "route_path", "(", "request", ".", "registry", "[", "'persona.login_route'", "]", ")", ",", "'logout'", ":", "request", ".", "route_path", "(", "request", ".", "registry", "[", "'persona.logout_route'", "]", ")", ",", "'csrf_token'", ":", "request", ".", "session", ".", "get_csrf_token", "(", ")", ",", "'came_from'", ":", "came_from", ",", "'request_params'", ":", "markupsafe", ".", "Markup", "(", "request", ".", "registry", "[", "'persona.request_params'", "]", ")", ",", "}", "template", "=", "markupsafe", ".", "Markup", "(", "pkg_resources", ".", "resource_string", "(", "'pyramid_persona'", ",", "'templates/persona.js'", ")", ".", "decode", "(", ")", ")", "return", "template", "%", "data" ]
avg_line_len: 51.941176, score: 0.004449
def sanitize_command_options(options):
    """
    Sanitizes command options.
    """
    multiples = [
        'badges',
        'exclude_badges',
    ]
    for option in multiples:
        if options.get(option):
            value = options[option]
            if value:
                options[option] = [v for v in value.split(' ') if v]
    return options
[ "def", "sanitize_command_options", "(", "options", ")", ":", "multiples", "=", "[", "'badges'", ",", "'exclude_badges'", ",", "]", "for", "option", "in", "multiples", ":", "if", "options", ".", "get", "(", "option", ")", ":", "value", "=", "options", "[", "option", "]", "if", "value", ":", "options", "[", "option", "]", "=", "[", "v", "for", "v", "in", "value", ".", "split", "(", "' '", ")", "if", "v", "]", "return", "options" ]
avg_line_len: 21.6875, score: 0.002762
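A quick illustration of the option splitting; note that repeated spaces are dropped by the `if v` filter:

options = {'badges': 'gold silver  bronze', 'exclude_badges': None}
print(sanitize_command_options(options))
# {'badges': ['gold', 'silver', 'bronze'], 'exclude_badges': None}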
def _child_list(element, child_tagname):
    """
    Return list containing the direct children of *element* having
    *child_tagname*.
    """
    xpath = './%s' % child_tagname
    return element.xpath(xpath, namespaces=nsmap)
[ "def", "_child_list", "(", "element", ",", "child_tagname", ")", ":", "xpath", "=", "'./%s'", "%", "child_tagname", "return", "element", ".", "xpath", "(", "xpath", ",", "namespaces", "=", "nsmap", ")" ]
avg_line_len: 31.857143, score: 0.004367
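A sketch of the helper in use with lxml; the namespace map here is hypothetical and stands in for the module-level nsmap:

from lxml import etree

nsmap = {'w': 'http://example.com/wordml'}
root = etree.fromstring(
    '<w:doc xmlns:w="http://example.com/wordml"><w:p/><w:p/><w:tbl/></w:doc>')
print(_child_list(root, 'w:p'))  # list of the two direct <w:p> children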
def transform(self, trans):
    """
    Compute a transformation in place using a 4x4 transform.

    Parameters
    ----------
    trans : vtk.vtkMatrix4x4, vtk.vtkTransform, or np.ndarray
        Accepts a vtk transformation object or a 4x4 transformation matrix.
    """
    if isinstance(trans, vtk.vtkMatrix4x4):
        t = vtki.trans_from_matrix(trans)
    elif isinstance(trans, vtk.vtkTransform):
        t = vtki.trans_from_matrix(trans.GetMatrix())
    elif isinstance(trans, np.ndarray):
        if trans.shape[0] != 4 or trans.shape[1] != 4:
            raise Exception('Transformation array must be 4x4')
        t = trans
    else:
        raise TypeError('Input transform must be either:\n' +
                        '\tvtk.vtkMatrix4x4\n' +
                        '\tvtk.vtkTransform\n' +
                        '\t4x4 np.ndarray\n')

    x = (self.points * t[0, :3]).sum(1) + t[0, -1]
    y = (self.points * t[1, :3]).sum(1) + t[1, -1]
    z = (self.points * t[2, :3]).sum(1) + t[2, -1]

    # overwrite points
    self.points[:, 0] = x
    self.points[:, 1] = y
    self.points[:, 2] = z
[ "def", "transform", "(", "self", ",", "trans", ")", ":", "if", "isinstance", "(", "trans", ",", "vtk", ".", "vtkMatrix4x4", ")", ":", "t", "=", "vtki", ".", "trans_from_matrix", "(", "trans", ")", "elif", "isinstance", "(", "trans", ",", "vtk", ".", "vtkTransform", ")", ":", "t", "=", "vtki", ".", "trans_from_matrix", "(", "trans", ".", "GetMatrix", "(", ")", ")", "elif", "isinstance", "(", "trans", ",", "np", ".", "ndarray", ")", ":", "if", "trans", ".", "shape", "[", "0", "]", "!=", "4", "or", "trans", ".", "shape", "[", "1", "]", "!=", "4", ":", "raise", "Exception", "(", "'Transformation array must be 4x4'", ")", "t", "=", "trans", "else", ":", "raise", "TypeError", "(", "'Input transform must be either:\\n'", "+", "'\\tvtk.vtkMatrix4x4\\n'", "+", "'\\tvtk.vtkTransform\\n'", "+", "'\\t4x4 np.ndarray\\n'", ")", "x", "=", "(", "self", ".", "points", "*", "t", "[", "0", ",", ":", "3", "]", ")", ".", "sum", "(", "1", ")", "+", "t", "[", "0", ",", "-", "1", "]", "y", "=", "(", "self", ".", "points", "*", "t", "[", "1", ",", ":", "3", "]", ")", ".", "sum", "(", "1", ")", "+", "t", "[", "1", ",", "-", "1", "]", "z", "=", "(", "self", ".", "points", "*", "t", "[", "2", ",", ":", "3", "]", ")", ".", "sum", "(", "1", ")", "+", "t", "[", "2", ",", "-", "1", "]", "# overwrite points", "self", ".", "points", "[", ":", ",", "0", "]", "=", "x", "self", ".", "points", "[", ":", ",", "1", "]", "=", "y", "self", ".", "points", "[", ":", ",", "2", "]", "=", "z" ]
avg_line_len: 36.875, score: 0.001652
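The three sum lines above are just a homogeneous 4x4 transform applied row by row. A numpy-only sketch of the same arithmetic, using a translation matrix:

import numpy as np

points = np.array([[1.0, 0.0, 0.0],
                   [0.0, 1.0, 0.0]])
t = np.eye(4)
t[:3, -1] = [10, 20, 30]  # translate by (10, 20, 30)

x = (points * t[0, :3]).sum(1) + t[0, -1]
y = (points * t[1, :3]).sum(1) + t[1, -1]
z = (points * t[2, :3]).sum(1) + t[2, -1]
print(np.column_stack((x, y, z)))  # [[11. 20. 30.] [10. 21. 30.]]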
def average(arr):
    """average of the values, must have more than 0 entries.

    :param arr: list of numbers
    :type arr: number[] a number array
    :return: average
    :rtype: float
    """
    if len(arr) == 0:
        sys.stderr.write("ERROR: no content in array to take average\n")
        sys.exit()
    if len(arr) == 1:
        return arr[0]
    return float(sum(arr)) / float(len(arr))
[ "def", "average", "(", "arr", ")", ":", "if", "len", "(", "arr", ")", "==", "0", ":", "sys", ".", "stderr", ".", "write", "(", "\"ERROR: no content in array to take average\\n\"", ")", "sys", ".", "exit", "(", ")", "if", "len", "(", "arr", ")", "==", "1", ":", "return", "arr", "[", "0", "]", "return", "float", "(", "sum", "(", "arr", ")", ")", "/", "float", "(", "len", "(", "arr", ")", ")" ]
avg_line_len: 25.214286, score: 0.019126
def TYPEDEF(self, _cursor_type):
    """
    Handles TYPEDEF statement.
    """
    _decl = _cursor_type.get_declaration()
    name = self.get_unique_name(_decl)
    if self.is_registered(name):
        obj = self.get_registered(name)
    else:
        log.debug('Was in TYPEDEF but had to parse record declaration for %s', name)
        obj = self.parse_cursor(_decl)
    return obj
[ "def", "TYPEDEF", "(", "self", ",", "_cursor_type", ")", ":", "_decl", "=", "_cursor_type", ".", "get_declaration", "(", ")", "name", "=", "self", ".", "get_unique_name", "(", "_decl", ")", "if", "self", ".", "is_registered", "(", "name", ")", ":", "obj", "=", "self", ".", "get_registered", "(", "name", ")", "else", ":", "log", ".", "debug", "(", "'Was in TYPEDEF but had to parse record declaration for %s'", ",", "name", ")", "obj", "=", "self", ".", "parse_cursor", "(", "_decl", ")", "return", "obj" ]
avg_line_len: 34.666667, score: 0.007026
def download_attachments(self):
    """ Downloads this message's attachments into memory.
    Needs a call to 'attachment.save' to save them on disk.

    :return: Success / Failure
    :rtype: bool
    """
    if not self._parent.has_attachments:
        log.debug(
            'Parent {} has no attachments, skipping out early.'.format(
                self._parent.__class__.__name__))
        return False

    if not self._parent.object_id:
        raise RuntimeError(
            'Attempted to download attachments of an unsaved {}'.format(
                self._parent.__class__.__name__))

    url = self.build_url(self._endpoints.get('attachments').format(
        id=self._parent.object_id))

    response = self._parent.con.get(url)
    if not response:
        return False

    attachments = response.json().get('value', [])

    # Everything received from cloud must be passed as self._cloud_data_key
    self.untrack = True
    self.add({self._cloud_data_key: attachments})
    self.untrack = False

    # TODO: when it's an item attachment the attachment itself
    #  is not downloaded. We must download it...
    # TODO: idea: retrieve the attachments ids' only with
    #  select and then download one by one.
    return True
[ "def", "download_attachments", "(", "self", ")", ":", "if", "not", "self", ".", "_parent", ".", "has_attachments", ":", "log", ".", "debug", "(", "'Parent {} has no attachments, skipping out early.'", ".", "format", "(", "self", ".", "_parent", ".", "__class__", ".", "__name__", ")", ")", "return", "False", "if", "not", "self", ".", "_parent", ".", "object_id", ":", "raise", "RuntimeError", "(", "'Attempted to download attachments of an unsaved {}'", ".", "format", "(", "self", ".", "_parent", ".", "__class__", ".", "__name__", ")", ")", "url", "=", "self", ".", "build_url", "(", "self", ".", "_endpoints", ".", "get", "(", "'attachments'", ")", ".", "format", "(", "id", "=", "self", ".", "_parent", ".", "object_id", ")", ")", "response", "=", "self", ".", "_parent", ".", "con", ".", "get", "(", "url", ")", "if", "not", "response", ":", "return", "False", "attachments", "=", "response", ".", "json", "(", ")", ".", "get", "(", "'value'", ",", "[", "]", ")", "# Everything received from cloud must be passed as self._cloud_data_key", "self", ".", "untrack", "=", "True", "self", ".", "add", "(", "{", "self", ".", "_cloud_data_key", ":", "attachments", "}", ")", "self", ".", "untrack", "=", "False", "# TODO: when it's a item attachment the attachment itself", "# is not downloaded. We must download it...", "# TODO: idea: retrieve the attachments ids' only with", "# select and then download one by one.", "return", "True" ]
avg_line_len: 35.621622, score: 0.001477
def _bfill(arr, n=None, axis=-1):
    '''inverse of ffill'''
    import bottleneck as bn

    arr = np.flip(arr, axis=axis)

    # fill
    arr = bn.push(arr, axis=axis, n=n)

    # reverse back to original
    return np.flip(arr, axis=axis)
[ "def", "_bfill", "(", "arr", ",", "n", "=", "None", ",", "axis", "=", "-", "1", ")", ":", "import", "bottleneck", "as", "bn", "arr", "=", "np", ".", "flip", "(", "arr", ",", "axis", "=", "axis", ")", "# fill", "arr", "=", "bn", ".", "push", "(", "arr", ",", "axis", "=", "axis", ",", "n", "=", "n", ")", "# reverse back to original", "return", "np", ".", "flip", "(", "arr", ",", "axis", "=", "axis", ")" ]
avg_line_len: 21, score: 0.004149
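Flipping, forward-filling with bottleneck's push, and flipping back yields a backward fill. A sketch (requires numpy and the bottleneck package):

import numpy as np

arr = np.array([np.nan, 1.0, np.nan, np.nan, 4.0, np.nan])
print(_bfill(arr))  # [ 1.  1.  4.  4.  4. nan]; the trailing NaN has nothing behind it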
def is_ternary(self, keyword):
    """return true if the given keyword is a ternary keyword
    for this ControlLine"""
    return keyword in {
        'if': set(['else', 'elif']),
        'try': set(['except', 'finally']),
        'for': set(['else'])
    }.get(self.keyword, [])
[ "def", "is_ternary", "(", "self", ",", "keyword", ")", ":", "return", "keyword", "in", "{", "'if'", ":", "set", "(", "[", "'else'", ",", "'elif'", "]", ")", ",", "'try'", ":", "set", "(", "[", "'except'", ",", "'finally'", "]", ")", ",", "'for'", ":", "set", "(", "[", "'else'", "]", ")", "}", ".", "get", "(", "self", ".", "keyword", ",", "[", "]", ")" ]
avg_line_len: 33.111111, score: 0.01634
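A sketch of the method in use, with a minimal stand-in for Mako's ControlLine class (just enough state to call it):

class ControlLine:
    def __init__(self, keyword):
        self.keyword = keyword

ControlLine.is_ternary = is_ternary  # attach the function above as a method

print(ControlLine('if').is_ternary('else'))   # True
print(ControlLine('try').is_ternary('else'))  # False: try pairs with except/finally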
def compress(self, setup):
    """
    Returns the compressed graph according to the given experimental setup

    Parameters
    ----------
    setup : :class:`caspo.core.setup.Setup`
        Experimental setup used to compress the graph

    Returns
    -------
    caspo.core.graph.Graph
        Compressed graph
    """
    designated = set(setup.nodes)
    zipped = self.copy()

    marked = [(n, d) for n, d in self.nodes(data=True)
              if n not in designated and not d.get('compressed', False)]
    while marked:
        for node, _ in sorted(marked):
            backward = zipped.predecessors(node)
            forward = zipped.successors(node)
            if not backward or (len(backward) == 1 and not backward[0] in forward):
                self.__merge_source_targets(node, zipped)
            elif not forward or (len(forward) == 1 and not forward[0] in backward):
                self.__merge_target_sources(node, zipped)
            else:
                designated.add(node)

        marked = [(n, d) for n, d in self.nodes(data=True)
                  if n not in designated and not d.get('compressed', False)]

    not_compressed = [(n, d) for n, d in zipped.nodes(data=True)
                      if not d.get('compressed', False)]
    return zipped.subgraph([n for n, _ in not_compressed])
[ "def", "compress", "(", "self", ",", "setup", ")", ":", "designated", "=", "set", "(", "setup", ".", "nodes", ")", "zipped", "=", "self", ".", "copy", "(", ")", "marked", "=", "[", "(", "n", ",", "d", ")", "for", "n", ",", "d", "in", "self", ".", "nodes", "(", "data", "=", "True", ")", "if", "n", "not", "in", "designated", "and", "not", "d", ".", "get", "(", "'compressed'", ",", "False", ")", "]", "while", "marked", ":", "for", "node", ",", "_", "in", "sorted", "(", "marked", ")", ":", "backward", "=", "zipped", ".", "predecessors", "(", "node", ")", "forward", "=", "zipped", ".", "successors", "(", "node", ")", "if", "not", "backward", "or", "(", "len", "(", "backward", ")", "==", "1", "and", "not", "backward", "[", "0", "]", "in", "forward", ")", ":", "self", ".", "__merge_source_targets", "(", "node", ",", "zipped", ")", "elif", "not", "forward", "or", "(", "len", "(", "forward", ")", "==", "1", "and", "not", "forward", "[", "0", "]", "in", "backward", ")", ":", "self", ".", "__merge_target_sources", "(", "node", ",", "zipped", ")", "else", ":", "designated", ".", "add", "(", "node", ")", "marked", "=", "[", "(", "n", ",", "d", ")", "for", "n", ",", "d", "in", "self", ".", "nodes", "(", "data", "=", "True", ")", "if", "n", "not", "in", "designated", "and", "not", "d", ".", "get", "(", "'compressed'", ",", "False", ")", "]", "not_compressed", "=", "[", "(", "n", ",", "d", ")", "for", "n", ",", "d", "in", "zipped", ".", "nodes", "(", "data", "=", "True", ")", "if", "not", "d", ".", "get", "(", "'compressed'", ",", "False", ")", "]", "return", "zipped", ".", "subgraph", "(", "[", "n", "for", "n", ",", "_", "in", "not_compressed", "]", ")" ]
avg_line_len: 37.305556, score: 0.00508
def disable(self, msgid, scope="package", line=None, ignore_unknown=False):
    """don't output message of the given id"""
    self._set_msg_status(
        msgid, enable=False, scope=scope, line=line, ignore_unknown=ignore_unknown
    )
    self._register_by_id_managed_msg(msgid, line)
[ "def", "disable", "(", "self", ",", "msgid", ",", "scope", "=", "\"package\"", ",", "line", "=", "None", ",", "ignore_unknown", "=", "False", ")", ":", "self", ".", "_set_msg_status", "(", "msgid", ",", "enable", "=", "False", ",", "scope", "=", "scope", ",", "line", "=", "line", ",", "ignore_unknown", "=", "ignore_unknown", ")", "self", ".", "_register_by_id_managed_msg", "(", "msgid", ",", "line", ")" ]
avg_line_len: 50.333333, score: 0.009772
def get_groups_from_category(self, category) -> typing.Iterator['Group']:
    """
    Args:
        category: group category

    Returns: generator over all groups from a specific category in this coalition
    """
    Mission.validator_group_category.validate(category, 'get_groups_from_category')
    for group in self.groups:
        if group.group_category == category:
            yield group
[ "def", "get_groups_from_category", "(", "self", ",", "category", ")", "->", "typing", ".", "Iterator", "[", "'Group'", "]", ":", "Mission", ".", "validator_group_category", ".", "validate", "(", "category", ",", "'get_groups_from_category'", ")", "for", "group", "in", "self", ".", "groups", ":", "if", "group", ".", "group_category", "==", "category", ":", "yield", "group" ]
avg_line_len: 35.333333, score: 0.009195
def in_nested_list(nested_list, obj):
    """return true if the object is an element of <nested_list> or of a nested
    list
    """
    for elmt in nested_list:
        if isinstance(elmt, (list, tuple)):
            if in_nested_list(elmt, obj):
                return True
        elif elmt == obj:
            return True
    return False
[ "def", "in_nested_list", "(", "nested_list", ",", "obj", ")", ":", "for", "elmt", "in", "nested_list", ":", "if", "isinstance", "(", "elmt", ",", "(", "list", ",", "tuple", ")", ")", ":", "if", "in_nested_list", "(", "elmt", ",", "obj", ")", ":", "return", "True", "elif", "elmt", "==", "obj", ":", "return", "True", "return", "False" ]
avg_line_len: 30.272727, score: 0.002915
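A short demonstration; the tuple counts as a nested list here because of the isinstance check:

print(in_nested_list([1, [2, (3, 4)], 5], 4))  # True
print(in_nested_list([1, [2, (3, 4)], 5], 6))  # False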
def ack(self, msg):
    """Processes the received message. I don't need to generate an ack message.
    """
    self.log.info("senderID:%s Received: %s " % (self.senderID, msg['body']))
    return stomper.NO_REPONSE_NEEDED
[ "def", "ack", "(", "self", ",", "msg", ")", ":", "self", ".", "log", ".", "info", "(", "\"senderID:%s Received: %s \"", "%", "(", "self", ".", "senderID", ",", "msg", "[", "'body'", "]", ")", ")", "return", "stomper", ".", "NO_REPONSE_NEEDED" ]
avg_line_len: 35.714286, score: 0.019531
def t_IDENTIFIER(t):
    r"[A-Z_a-z][0-9A-Z_a-z]*"
    if t.value in keywords:
        t.type = t.value
    return t
[ "def", "t_IDENTIFIER", "(", "t", ")", ":", "if", "t", ".", "value", "in", "keywords", ":", "t", ".", "type", "=", "t", ".", "value", "return", "t" ]
avg_line_len: 20.4, score: 0.037736
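In PLY, a t_ rule's docstring is the token's regex, so this single rule matches identifiers and then reclassifies keywords. A self-contained lexer module built around it; the keyword set here is illustrative:

import ply.lex as lex

keywords = {'if', 'else', 'while'}
tokens = ['IDENTIFIER'] + list(keywords)

def t_IDENTIFIER(t):
    r"[A-Z_a-z][0-9A-Z_a-z]*"
    if t.value in keywords:
        t.type = t.value  # reclassify matched keywords
    return t

t_ignore = ' \t'

def t_error(t):
    t.lexer.skip(1)

lexer = lex.lex()
lexer.input('if foo else bar')
print([(tok.type, tok.value) for tok in lexer])
# [('if', 'if'), ('IDENTIFIER', 'foo'), ('else', 'else'), ('IDENTIFIER', 'bar')]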
def passthrough_proc(self, inputstring, **kwargs):
    """Process python passthroughs."""
    out = []
    found = None  # store of characters that might be the start of a passthrough
    hold = None  # the contents of the passthrough so far
    count = None  # current parenthetical level (num closes - num opens)
    multiline = None  # if in a passthrough, is it a multiline passthrough
    skips = self.copy_skips()

    for i, c in enumerate(append_it(inputstring, "\n")):
        if hold is not None:
            # we specify that we only care about parens, not brackets or braces
            count += paren_change(c, opens="(", closes=")")
            if count >= 0 and c == hold:
                out.append(self.wrap_passthrough(found, multiline))
                found = None
                hold = None
                count = None
                multiline = None
            else:
                if c == "\n":
                    skips = addskip(skips, self.adjust(lineno(i, inputstring)))
                found += c
        elif found:
            if c == "\\":
                found = ""
                hold = "\n"
                count = 0
                multiline = False
            elif c == "(":
                found = ""
                hold = ")"
                count = -1
                multiline = True
            else:
                out.append("\\" + c)
                found = None
        elif c == "\\":
            found = True
        else:
            out.append(c)

    if hold is not None or found is not None:
        raise self.make_err(CoconutSyntaxError, "unclosed passthrough", inputstring, i)

    self.set_skips(skips)
    return "".join(out)
[ "def", "passthrough_proc", "(", "self", ",", "inputstring", ",", "*", "*", "kwargs", ")", ":", "out", "=", "[", "]", "found", "=", "None", "# store of characters that might be the start of a passthrough", "hold", "=", "None", "# the contents of the passthrough so far", "count", "=", "None", "# current parenthetical level (num closes - num opens)", "multiline", "=", "None", "# if in a passthrough, is it a multiline passthrough", "skips", "=", "self", ".", "copy_skips", "(", ")", "for", "i", ",", "c", "in", "enumerate", "(", "append_it", "(", "inputstring", ",", "\"\\n\"", ")", ")", ":", "if", "hold", "is", "not", "None", ":", "# we specify that we only care about parens, not brackets or braces", "count", "+=", "paren_change", "(", "c", ",", "opens", "=", "\"(\"", ",", "closes", "=", "\")\"", ")", "if", "count", ">=", "0", "and", "c", "==", "hold", ":", "out", ".", "append", "(", "self", ".", "wrap_passthrough", "(", "found", ",", "multiline", ")", ")", "found", "=", "None", "hold", "=", "None", "count", "=", "None", "multiline", "=", "None", "else", ":", "if", "c", "==", "\"\\n\"", ":", "skips", "=", "addskip", "(", "skips", ",", "self", ".", "adjust", "(", "lineno", "(", "i", ",", "inputstring", ")", ")", ")", "found", "+=", "c", "elif", "found", ":", "if", "c", "==", "\"\\\\\"", ":", "found", "=", "\"\"", "hold", "=", "\"\\n\"", "count", "=", "0", "multiline", "=", "False", "elif", "c", "==", "\"(\"", ":", "found", "=", "\"\"", "hold", "=", "\")\"", "count", "=", "-", "1", "multiline", "=", "True", "else", ":", "out", ".", "append", "(", "\"\\\\\"", "+", "c", ")", "found", "=", "None", "elif", "c", "==", "\"\\\\\"", ":", "found", "=", "True", "else", ":", "out", ".", "append", "(", "c", ")", "if", "hold", "is", "not", "None", "or", "found", "is", "not", "None", ":", "raise", "self", ".", "make_err", "(", "CoconutSyntaxError", ",", "\"unclosed passthrough\"", ",", "inputstring", ",", "i", ")", "self", ".", "set_skips", "(", "skips", ")", "return", "\"\"", ".", "join", "(", "out", ")" ]
avg_line_len: 38.638298, score: 0.003222
def converge(self, playbook=None, **kwargs):
    """
    Executes ``ansible-playbook`` against the converge playbook unless
    specified otherwise and returns a string.

    :param playbook: An optional string containing an absolute path to a
        playbook.
    :param kwargs: Optional keyword arguments.
    :return: str
    """
    if playbook is None:
        pb = self._get_ansible_playbook(self.playbooks.converge, **kwargs)
    else:
        pb = self._get_ansible_playbook(playbook, **kwargs)
    return pb.execute()
[ "def", "converge", "(", "self", ",", "playbook", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "playbook", "is", "None", ":", "pb", "=", "self", ".", "_get_ansible_playbook", "(", "self", ".", "playbooks", ".", "converge", ",", "*", "*", "kwargs", ")", "else", ":", "pb", "=", "self", ".", "_get_ansible_playbook", "(", "playbook", ",", "*", "*", "kwargs", ")", "return", "pb", ".", "execute", "(", ")" ]
avg_line_len: 35.3125, score: 0.003448
def add_options(parser):
    """
    Add optional arguments to the parser
    """
    partial_action = common.partial_append_action

    file_mods = parser.add_argument_group("Sequence File Modification")
    file_mods.add_argument('--line-wrap', dest='line_wrap', metavar='N',
        type=int, help='Adjust line wrap for sequence strings. '
        'When N is 0, all line breaks are removed. Only fasta files '
        'are supported for the output format.')
    file_mods.add_argument('--sort', dest='sort',
        choices=['length-asc', 'length-desc', 'name-asc', 'name-desc'],
        help='Perform sorting by length or name, ascending or descending. '
        'ASCII sorting is performed for names')

    parser.epilog = """Filters using regular expressions are case-sensitive
    by default. Append "(?i)" to a pattern to make it case-insensitive."""

    seq_mods = parser.add_argument_group("Sequence Modification")
    seq_mods.add_argument('--apply-function', type=module_function,
        metavar='/path/to/module.py:function_name[:parameter]',
        help="""Specify a custom function to apply to the input sequences,
        specified as /path/to/file.py:function_name. Function should accept
        an iterable of Bio.SeqRecord objects, and yield SeqRecords. If the
        parameter is specified, it will be passed as a string as the second
        argument to the function. Specify more than one to chain.""",
        default=[], action='append')
    seq_mods.add_argument('--cut', dest='transforms',
        metavar="start:end[,start2:end2]",
        type=common.sequence_slices,
        action=partial_action(transform.multi_cut_sequences, 'slices'),
        help="""Keep only the residues within the 1-indexed start and end
        positions specified, : separated. Includes last item. Start or end
        can be left unspecified to indicate start/end of sequence. A
        negative start may be provided to indicate an offset from the end
        of the sequence. Note that to prevent negative numbers being
        interpreted as flags, this should be written with an equals
        sign between `--cut` and the argument, e.g.: `--cut=-10:`""")
    seq_mods.add_argument('--relative-to', dest='cut_relative', metavar='ID',
        help="""Apply --cut relative to the indexes of non-gap residues in
        sequence identified by ID""")
    seq_mods.add_argument('--drop', dest='transforms',
        metavar='start:end[,start2:end2]',
        type=common.sequence_slices,
        action=partial_action(transform.drop_columns, 'slices'),
        help="""Remove the residues at the specified indices. Same format
        as `--cut`.""")
    seq_mods.add_argument('--dash-gap',
        action=partial_action(transform.dashes_cleanup), dest='transforms',
        help="""Replace any of the characters "?.:~" with a "-" for all
        sequences""")
    seq_mods.add_argument('--lower',
        action=partial_action(transform.lower_sequences), dest='transforms',
        help='Translate the sequences to lower case')
    seq_mods.add_argument('--mask', metavar="start1:end1[,start2:end2]",
        action=partial_action(transform.multi_mask_sequences, 'slices'),
        type=common.sequence_slices, dest='transforms',
        help="""Replace residues in 1-indexed slice with gap-characters.
        If --relative-to is also specified, coordinates are relative to the
        sequence ID provided.""")
    seq_mods.add_argument('--reverse',
        action=partial_action(transform.reverse_sequences), dest='transforms',
        help='Reverse the order of sites in sequences')
    seq_mods.add_argument('--reverse-complement', dest='transforms',
        action=partial_action(transform.reverse_complement_sequences),
        help='Convert sequences into reverse complements')
    seq_mods.add_argument('--squeeze',
        action=partial_action(transform.squeeze), dest='transforms',
        help='''Remove any gaps that are present in the same position
        across all sequences in an alignment (equivalent to
        --squeeze-threshold=1.0)''')
    seq_mods.add_argument('--squeeze-threshold', dest='transforms',
        action=partial_action(transform.squeeze, 'gap_threshold'),
        type=common.typed_range(float, 0.0, 1.0),
        metavar='PROP',
        help="""Trim columns from an alignment which have gaps in least the
        specified proportion of sequences.""")
    seq_mods.add_argument('--transcribe', dest='transforms',
        action=partial_action(transform.transcribe, 'transcribe'),
        choices=('dna2rna', 'rna2dna'),
        help="""Transcription and back transcription for generic DNA and
        RNA. Source sequences must be the correct alphabet or this action
        will likely produce incorrect results.""")
    seq_mods.add_argument('--translate', dest='transforms',
        action=partial_action(transform.translate, 'translate'),
        choices=['dna2protein', 'rna2protein',
                 'dna2proteinstop', 'rna2proteinstop'],
        help="""Translate from generic DNA/RNA to proteins. Options with
        "stop" suffix will NOT translate through stop codons. Source
        sequences must be the correct alphabet or this action will likely
        produce incorrect results.""")
    seq_mods.add_argument('--ungap',
        action=partial_action(transform.ungap_sequences), dest='transforms',
        help='Remove gaps in the sequence alignment')
    seq_mods.add_argument('--upper',
        action=partial_action(transform.upper_sequences), dest='transforms',
        help='Translate the sequences to upper case')

    seq_select = parser.add_argument_group("Record Selection")
    seq_select.add_argument('--deduplicate-sequences', action='store_const',
        const=None, default=False, dest='deduplicate_sequences',
        help='Remove any duplicate sequences '
        'by sequence content, keep the first instance seen')
    seq_select.add_argument('--deduplicated-sequences-file', action='store',
        metavar='FILE', dest='deduplicate_sequences', default=False,
        type=common.FileType('wt'),
        help='Write all of the deduplicated sequences to a file')
    seq_select.add_argument('--deduplicate-taxa',
        action=partial_action(transform.deduplicate_taxa), dest='transforms',
        help="""Remove any duplicate sequences by ID, keep the first
        instance seen""")
    seq_select.add_argument('--exclude-from-file', metavar='FILE',
        type=common.FileType('rt'),
        help="""Filter sequences, removing those sequence IDs in the
        specified file""", dest='transforms',
        action=partial_action(transform.exclude_from_file, 'handle'))
    seq_select.add_argument('--include-from-file', metavar='FILE',
        type=common.FileType('rt'),
        help="""Filter sequences, keeping only those sequence IDs in the
        specified file""", dest='transforms',
        action=partial_action(transform.include_from_file, 'handle'))
    seq_select.add_argument('--head', metavar='N', dest='transforms',
        action=partial_action(transform.head, 'head'),
        help="""Trim down to top N sequences. With the leading `-', print
        all but the last N sequences.""")
    seq_select.add_argument('--max-length', dest='transforms', metavar='N',
        action=partial_action(transform.max_length_discard, 'max_length'),
        type=int,
        help="""Discard any sequences beyond the specified maximum length.
        This operation occurs *before* all length-changing options such as
        cut and squeeze.""")
    seq_select.add_argument('--min-length', dest='transforms', metavar='N',
        action=partial_action(transform.min_length_discard, 'min_length'),
        type=int,
        help="""Discard any sequences less than the specified minimum
        length. This operation occurs *before* cut and squeeze.""")
    seq_select.add_argument('--min-ungapped-length', metavar='N',
        action=partial_action(transform.min_ungap_length_discard, 'min_length'),
        type=int,
        help="""Discard any sequences less than the specified minimum
        length, excluding gaps. This operation occurs *before* cut and
        squeeze.""", dest='transforms')
    seq_select.add_argument('--pattern-include', metavar='REGEX',
        action=partial_action(transform.name_include, 'filter_regex'),
        dest='transforms',
        help="""Filter the sequences by regular expression in ID or
        description""")
    seq_select.add_argument('--pattern-exclude', metavar='REGEX',
        action=partial_action(transform.name_exclude, 'filter_regex'),
        dest='transforms',
        help="""Filter the sequences by regular expression in ID or
        description""")
    seq_select.add_argument('--prune-empty',
        action=partial_action(transform.prune_empty), dest='transforms',
        help="Prune sequences containing only gaps ('-')")
    seq_select.add_argument('--sample', metavar='N', dest='transforms',
        type=int,
        action=partial_action(transform.sample, 'k'),
        help="""Select a random sampling of sequences""")
    seq_select.add_argument('--sample-seed', metavar='N', type=int,
        help="""Set random seed for sampling of sequences""")
    seq_select.add_argument('--seq-pattern-include', metavar='REGEX',
        action=partial_action(transform.seq_include, 'filter_regex'),
        dest='transforms',
        help="""Filter the sequences by regular expression in sequence""")
    seq_select.add_argument('--seq-pattern-exclude', metavar='REGEX',
        action=partial_action(transform.seq_exclude, 'filter_regex'),
        dest='transforms',
        help="""Filter the sequences by regular expression in sequence""")
    seq_select.add_argument('--tail', metavar='N', dest='transforms',
        action=partial_action(transform.tail, 'tail'),
        help="""Trim down to bottom N sequences. Use +N to output sequences
        starting with the Nth.""")

    id_mods = parser.add_argument_group("Sequence ID Modification")
    id_mods.add_argument('--first-name',
        action=partial_action(transform.first_name_capture),
        dest='transforms',
        help='''Take only the first whitespace-delimited word as the name
        of the sequence''')
    id_mods.add_argument('--name-suffix', metavar='SUFFIX',
        action=partial_action(transform.name_append_suffix, 'suffix'),
        dest='transforms', help='Append a suffix to all IDs.')
    id_mods.add_argument('--name-prefix', metavar='PREFIX',
        action=partial_action(transform.name_insert_prefix, 'prefix'),
        dest='transforms', help="""Insert a prefix for all IDs.""")
    id_mods.add_argument('--pattern-replace', nargs=2,
        metavar=('search_pattern', 'replace_pattern'),
        action=partial_action(transform.name_replace,
                              ('search_regex', 'replace_pattern')),
        dest='transforms',
        help="""Replace regex pattern "search_pattern" with
        "replace_pattern" in sequence ID and description""")
    id_mods.add_argument('--strip-range', dest='transforms',
        action=partial_action(transform.strip_range),
        help="""Strip ranges from sequences IDs, matching </x-y>""")

    format_group = parser.add_argument_group('Format Options')
    format_group.add_argument('--input-format', metavar='FORMAT',
        help="Input file format (default: determine from extension)")
    format_group.add_argument('--output-format', metavar='FORMAT',
        help="Output file format (default: determine from extension)")

    parser.add_argument('--alphabet', choices=ALPHABETS,
        help="""Input alphabet. Required for writing NEXUS.""")

    return parser
[ "def", "add_options", "(", "parser", ")", ":", "partial_action", "=", "common", ".", "partial_append_action", "file_mods", "=", "parser", ".", "add_argument_group", "(", "\"Sequence File Modification\"", ")", "file_mods", ".", "add_argument", "(", "'--line-wrap'", ",", "dest", "=", "'line_wrap'", ",", "metavar", "=", "'N'", ",", "type", "=", "int", ",", "help", "=", "'Adjust line wrap for sequence strings. '", "'When N is 0, all line breaks are removed. Only fasta files '", "'are supported for the output format.'", ")", "file_mods", ".", "add_argument", "(", "'--sort'", ",", "dest", "=", "'sort'", ",", "choices", "=", "[", "'length-asc'", ",", "'length-desc'", ",", "'name-asc'", ",", "'name-desc'", "]", ",", "help", "=", "'Perform sorting by length or name, ascending or descending. '", "'ASCII sorting is performed for names'", ")", "parser", ".", "epilog", "=", "\"\"\"Filters using regular expressions are case-sensitive\n by default. Append \"(?i)\" to a pattern to make it case-insensitive.\"\"\"", "seq_mods", "=", "parser", ".", "add_argument_group", "(", "\"Sequence Modificaton\"", ")", "seq_mods", ".", "add_argument", "(", "'--apply-function'", ",", "type", "=", "module_function", ",", "metavar", "=", "'/path/to/module.py:function_name[:parameter]'", ",", "help", "=", "\"\"\"Specify a custom function to apply to the input sequences,\n specified as /path/to/file.py:function_name. Function should accept\n an iterable of Bio.SeqRecord objects, and yield SeqRecords. If the\n parameter is specified, it will be passed as a string as the second\n argument to the function. Specify more than one to chain.\"\"\"", ",", "default", "=", "[", "]", ",", "action", "=", "'append'", ")", "seq_mods", ".", "add_argument", "(", "'--cut'", ",", "dest", "=", "'transforms'", ",", "metavar", "=", "\"start:end[,start2:end2]\"", ",", "type", "=", "common", ".", "sequence_slices", ",", "action", "=", "partial_action", "(", "transform", ".", "multi_cut_sequences", ",", "'slices'", ")", ",", "help", "=", "\"\"\"Keep only the residues within the 1-indexed start and end\n positions specified, : separated. Includes last item. Start or end\n can be left unspecified to indicate start/end of sequence. A\n negative start may be provided to indicate an offset from the end\n of the sequence. Note that to prevent negative numbers being\n interpreted as flags, this should be written with an equals\n sign between `--cut` and the argument, e.g.: `--cut=-10:`\"\"\"", ")", "seq_mods", ".", "add_argument", "(", "'--relative-to'", ",", "dest", "=", "'cut_relative'", ",", "metavar", "=", "'ID'", ",", "help", "=", "\"\"\"Apply --cut relative to the indexes of non-gap residues in\n sequence identified by ID\"\"\"", ")", "seq_mods", ".", "add_argument", "(", "'--drop'", ",", "dest", "=", "'transforms'", ",", "metavar", "=", "'start:end[,start2:end2]'", ",", "type", "=", "common", ".", "sequence_slices", ",", "action", "=", "partial_action", "(", "transform", ".", "drop_columns", ",", "'slices'", ")", ",", "help", "=", "\"\"\"Remove the residues at the specified indices. 
Same format as `--cut`.\"\"\"", ")", "seq_mods", ".", "add_argument", "(", "'--dash-gap'", ",", "action", "=", "partial_action", "(", "transform", ".", "dashes_cleanup", ")", ",", "dest", "=", "'transforms'", ",", "help", "=", "\"\"\"Replace any of the characters \"?.:~\" with a \"-\" for all\n sequences\"\"\"", ")", "seq_mods", ".", "add_argument", "(", "'--lower'", ",", "action", "=", "partial_action", "(", "transform", ".", "lower_sequences", ")", ",", "dest", "=", "'transforms'", ",", "help", "=", "'Translate the sequences to lower case'", ")", "seq_mods", ".", "add_argument", "(", "'--mask'", ",", "metavar", "=", "\"start1:end1[,start2:end2]\"", ",", "action", "=", "partial_action", "(", "transform", ".", "multi_mask_sequences", ",", "'slices'", ")", ",", "type", "=", "common", ".", "sequence_slices", ",", "dest", "=", "'transforms'", ",", "help", "=", "\"\"\"Replace\n residues in 1-indexed slice with gap-characters. If --relative-to\n is also specified, coordinates are relative to the sequence ID\n provided.\"\"\"", ")", "seq_mods", ".", "add_argument", "(", "'--reverse'", ",", "action", "=", "partial_action", "(", "transform", ".", "reverse_sequences", ")", ",", "dest", "=", "'transforms'", ",", "help", "=", "'Reverse the order of sites in sequences'", ")", "seq_mods", ".", "add_argument", "(", "'--reverse-complement'", ",", "dest", "=", "'transforms'", ",", "action", "=", "partial_action", "(", "transform", ".", "reverse_complement_sequences", ")", ",", "help", "=", "'Convert sequences into reverse complements'", ")", "seq_mods", ".", "add_argument", "(", "'--squeeze'", ",", "action", "=", "partial_action", "(", "transform", ".", "squeeze", ")", ",", "dest", "=", "'transforms'", ",", "help", "=", "'''Remove any gaps that are present in the same\n position across all sequences in an alignment (equivalent to\n --squeeze-threshold=1.0)'''", ")", "seq_mods", ".", "add_argument", "(", "'--squeeze-threshold'", ",", "dest", "=", "'transforms'", ",", "action", "=", "partial_action", "(", "transform", ".", "squeeze", ",", "'gap_threshold'", ")", ",", "type", "=", "common", ".", "typed_range", "(", "float", ",", "0.0", ",", "1.0", ")", ",", "metavar", "=", "'PROP'", ",", "help", "=", "\"\"\"Trim columns from an alignment which\n have gaps in least the specified proportion of sequences.\"\"\"", ")", "seq_mods", ".", "add_argument", "(", "'--transcribe'", ",", "dest", "=", "'transforms'", ",", "action", "=", "partial_action", "(", "transform", ".", "transcribe", ",", "'transcribe'", ")", ",", "choices", "=", "(", "'dna2rna'", ",", "'rna2dna'", ")", ",", "help", "=", "\"\"\"Transcription and back\n transcription for generic DNA and RNA. Source sequences must be the\n correct alphabet or this action will likely produce incorrect\n results.\"\"\"", ")", "seq_mods", ".", "add_argument", "(", "'--translate'", ",", "dest", "=", "'transforms'", ",", "action", "=", "partial_action", "(", "transform", ".", "translate", ",", "'translate'", ")", ",", "choices", "=", "[", "'dna2protein'", ",", "'rna2protein'", ",", "'dna2proteinstop'", ",", "'rna2proteinstop'", "]", ",", "help", "=", "\"\"\"Translate from generic DNA/RNA to\n proteins. Options with \"stop\" suffix will NOT translate through\n stop codons . 
Source sequences must be the correct alphabet or\n this action will likely produce incorrect results.\"\"\"", ")", "seq_mods", ".", "add_argument", "(", "'--ungap'", ",", "action", "=", "partial_action", "(", "transform", ".", "ungap_sequences", ")", ",", "dest", "=", "'transforms'", ",", "help", "=", "'Remove gaps in the sequence alignment'", ")", "seq_mods", ".", "add_argument", "(", "'--upper'", ",", "action", "=", "partial_action", "(", "transform", ".", "upper_sequences", ")", ",", "dest", "=", "'transforms'", ",", "help", "=", "'Translate the sequences to upper case'", ")", "seq_select", "=", "parser", ".", "add_argument_group", "(", "\"Record Selection\"", ")", "seq_select", ".", "add_argument", "(", "'--deduplicate-sequences'", ",", "action", "=", "'store_const'", ",", "const", "=", "None", ",", "default", "=", "False", ",", "dest", "=", "'deduplicate_sequences'", ",", "help", "=", "'Remove any duplicate sequences '", "'by sequence content, keep the first instance seen'", ")", "seq_select", ".", "add_argument", "(", "'--deduplicated-sequences-file'", ",", "action", "=", "'store'", ",", "metavar", "=", "'FILE'", ",", "dest", "=", "'deduplicate_sequences'", ",", "default", "=", "False", ",", "type", "=", "common", ".", "FileType", "(", "'wt'", ")", ",", "help", "=", "'Write all of the deduplicated sequences to a file'", ")", "seq_select", ".", "add_argument", "(", "'--deduplicate-taxa'", ",", "action", "=", "partial_action", "(", "transform", ".", "deduplicate_taxa", ")", ",", "dest", "=", "'transforms'", ",", "help", "=", "\"\"\"Remove any duplicate sequences by ID,\n keep the first instance seen\"\"\"", ")", "seq_select", ".", "add_argument", "(", "'--exclude-from-file'", ",", "metavar", "=", "'FILE'", ",", "type", "=", "common", ".", "FileType", "(", "'rt'", ")", ",", "help", "=", "\"\"\"Filter sequences, removing\n those sequence IDs in the specified file\"\"\"", ",", "dest", "=", "'transforms'", ",", "action", "=", "partial_action", "(", "transform", ".", "exclude_from_file", ",", "'handle'", ")", ")", "seq_select", ".", "add_argument", "(", "'--include-from-file'", ",", "metavar", "=", "'FILE'", ",", "type", "=", "common", ".", "FileType", "(", "'rt'", ")", ",", "help", "=", "\"\"\"Filter sequences, keeping only\n those sequence IDs in the specified file\"\"\"", ",", "dest", "=", "'transforms'", ",", "action", "=", "partial_action", "(", "transform", ".", "include_from_file", ",", "'handle'", ")", ")", "seq_select", ".", "add_argument", "(", "'--head'", ",", "metavar", "=", "'N'", ",", "dest", "=", "'transforms'", ",", "action", "=", "partial_action", "(", "transform", ".", "head", ",", "'head'", ")", ",", "help", "=", "\"\"\"Trim\n down to top N sequences. With the leading `-', print all but the last N sequences.\"\"\"", ")", "seq_select", ".", "add_argument", "(", "'--max-length'", ",", "dest", "=", "'transforms'", ",", "metavar", "=", "'N'", ",", "action", "=", "partial_action", "(", "transform", ".", "max_length_discard", ",", "'max_length'", ")", ",", "type", "=", "int", ",", "help", "=", "\"\"\"Discard any sequences beyond the specified\n maximum length. 
This operation occurs *before* all length-changing\n options such as cut and squeeze.\"\"\"", ")", "seq_select", ".", "add_argument", "(", "'--min-length'", ",", "dest", "=", "'transforms'", ",", "metavar", "=", "'N'", ",", "action", "=", "partial_action", "(", "transform", ".", "min_length_discard", ",", "'min_length'", ")", ",", "type", "=", "int", ",", "help", "=", "\"\"\"Discard any sequences less than the specified\n minimum length. This operation occurs *before* cut and squeeze.\"\"\"", ")", "seq_select", ".", "add_argument", "(", "'--min-ungapped-length'", ",", "metavar", "=", "'N'", ",", "action", "=", "partial_action", "(", "transform", ".", "min_ungap_length_discard", ",", "'min_length'", ")", ",", "type", "=", "int", ",", "help", "=", "\"\"\"Discard any sequences less\n than the specified minimum length, excluding gaps. This\n operation occurs *before* cut and squeeze.\"\"\"", ",", "dest", "=", "'transforms'", ")", "seq_select", ".", "add_argument", "(", "'--pattern-include'", ",", "metavar", "=", "'REGEX'", ",", "action", "=", "partial_action", "(", "transform", ".", "name_include", ",", "'filter_regex'", ")", ",", "dest", "=", "'transforms'", ",", "help", "=", "\"\"\"Filter the sequences by regular\n expression in ID or description\"\"\"", ")", "seq_select", ".", "add_argument", "(", "'--pattern-exclude'", ",", "metavar", "=", "'REGEX'", ",", "action", "=", "partial_action", "(", "transform", ".", "name_exclude", ",", "'filter_regex'", ")", ",", "dest", "=", "'transforms'", ",", "help", "=", "\"\"\"Filter the sequences by regular\n expression in ID or description\"\"\"", ")", "seq_select", ".", "add_argument", "(", "'--prune-empty'", ",", "action", "=", "partial_action", "(", "transform", ".", "prune_empty", ")", ",", "dest", "=", "'transforms'", ",", "help", "=", "\"Prune sequences containing only gaps ('-')\"", ")", "seq_select", ".", "add_argument", "(", "'--sample'", ",", "metavar", "=", "'N'", ",", "dest", "=", "'transforms'", ",", "type", "=", "int", ",", "action", "=", "partial_action", "(", "transform", ".", "sample", ",", "'k'", ")", ",", "help", "=", "\"\"\" Select a random sampling of sequences \"\"\"", ")", "seq_select", ".", "add_argument", "(", "'--sample-seed'", ",", "metavar", "=", "'N'", ",", "type", "=", "int", ",", "help", "=", "\"\"\"Set random seed for sampling of sequences\"\"\"", ")", "seq_select", ".", "add_argument", "(", "'--seq-pattern-include'", ",", "metavar", "=", "'REGEX'", ",", "action", "=", "partial_action", "(", "transform", ".", "seq_include", ",", "'filter_regex'", ")", ",", "dest", "=", "'transforms'", ",", "help", "=", "\"\"\"Filter the sequences by regular\n expression in sequence\"\"\"", ")", "seq_select", ".", "add_argument", "(", "'--seq-pattern-exclude'", ",", "metavar", "=", "'REGEX'", ",", "action", "=", "partial_action", "(", "transform", ".", "seq_exclude", ",", "'filter_regex'", ")", ",", "dest", "=", "'transforms'", ",", "help", "=", "\"\"\"Filter the sequences by regular\n expression in sequence\"\"\"", ")", "seq_select", ".", "add_argument", "(", "'--tail'", ",", "metavar", "=", "'N'", ",", "dest", "=", "'transforms'", ",", "action", "=", "partial_action", "(", "transform", ".", "tail", ",", "'tail'", ")", ",", "help", "=", "\"\"\"Trim down to bottom N sequences. 
Use +N to output sequences starting with the Nth.\"\"\"", ")", "id_mods", "=", "parser", ".", "add_argument_group", "(", "\"Sequence ID Modification\"", ")", "id_mods", ".", "add_argument", "(", "'--first-name'", ",", "action", "=", "partial_action", "(", "transform", ".", "first_name_capture", ")", ",", "dest", "=", "'transforms'", ",", "help", "=", "'''Take only the first whitespace-delimited\n word as the name of the sequence'''", ")", "id_mods", ".", "add_argument", "(", "'--name-suffix'", ",", "metavar", "=", "'SUFFIX'", ",", "action", "=", "partial_action", "(", "transform", ".", "name_append_suffix", ",", "'suffix'", ")", ",", "dest", "=", "'transforms'", ",", "help", "=", "'Append a suffix to all IDs.'", ")", "id_mods", ".", "add_argument", "(", "'--name-prefix'", ",", "metavar", "=", "'PREFIX'", ",", "action", "=", "partial_action", "(", "transform", ".", "name_insert_prefix", ",", "'prefix'", ")", ",", "dest", "=", "'transforms'", ",", "help", "=", "\"\"\"Insert a prefix for all\n IDs.\"\"\"", ")", "id_mods", ".", "add_argument", "(", "'--pattern-replace'", ",", "nargs", "=", "2", ",", "metavar", "=", "(", "'search_pattern'", ",", "'replace_pattern'", ")", ",", "action", "=", "partial_action", "(", "transform", ".", "name_replace", ",", "(", "'search_regex'", ",", "'replace_pattern'", ")", ")", ",", "dest", "=", "'transforms'", ",", "help", "=", "\"\"\"Replace regex pattern \"search_pattern\"\n with \"replace_pattern\" in sequence ID and description\"\"\"", ")", "id_mods", ".", "add_argument", "(", "'--strip-range'", ",", "dest", "=", "'transforms'", ",", "action", "=", "partial_action", "(", "transform", ".", "strip_range", ")", ",", "help", "=", "\"\"\"Strip ranges\n from sequences IDs, matching </x-y>\"\"\"", ")", "format_group", "=", "parser", ".", "add_argument_group", "(", "'Format Options'", ")", "format_group", ".", "add_argument", "(", "'--input-format'", ",", "metavar", "=", "'FORMAT'", ",", "help", "=", "\"Input file format (default: determine from extension)\"", ")", "format_group", ".", "add_argument", "(", "'--output-format'", ",", "metavar", "=", "'FORMAT'", ",", "help", "=", "\"Output file format (default: determine from extension)\"", ")", "parser", ".", "add_argument", "(", "'--alphabet'", ",", "choices", "=", "ALPHABETS", ",", "help", "=", "\"\"\"Input alphabet. Required for writing NEXUS.\"\"\"", ")", "return", "parser" ]
60.8
0.008133
def download_supplementary_files(self, directory='series', download_sra=True,
                                 email=None, sra_kwargs=None, nproc=1):
        """Download supplementary data.

        .. warning::

            Do not use parallel option (nproc > 1) in the interactive shell.
            For more details see `this issue
            <https://stackoverflow.com/questions/23641475/multiprocessing-working-in-python-but-not-in-ipython/23641560#23641560>`_
            on SO.

        Args:
            directory (:obj:`str`, optional): Directory to download the data to
                (the function will create a new directory with the files inside
                it); by default this will be named with the series name + _Supp.
            download_sra (:obj:`bool`, optional): Indicates whether to download
                SRA raw data too. Defaults to True.
            email (:obj:`str`, optional): E-mail address that will be provided
                to Entrez. Defaults to None.
            sra_kwargs (:obj:`dict`, optional): Kwargs passed to the
                GSM.download_SRA method. Defaults to None.
            nproc (:obj:`int`, optional): Number of processes for SRA download
                (default is 1, no parallelization).

        Returns:
            :obj:`dict`: Downloaded data for each of the GSMs

        """
        if sra_kwargs is None:
            sra_kwargs = dict()
        if directory == 'series':
            dirpath = os.path.abspath(self.get_accession() + "_Supp")
            utils.mkdir_p(dirpath)
        else:
            dirpath = os.path.abspath(directory)
            utils.mkdir_p(dirpath)
        downloaded_paths = dict()
        if nproc == 1:
            # No need to parallelize, running ordinary download in loop
            downloaded_paths = dict()
            for gsm in itervalues(self.gsms):
                logger.info(
                    "Downloading SRA files for %s series\n" % gsm.name)
                paths = gsm.download_supplementary_files(email=email,
                                                         download_sra=download_sra,
                                                         directory=dirpath,
                                                         sra_kwargs=sra_kwargs)
                downloaded_paths[gsm.name] = paths
        elif nproc > 1:
            # Parallelization enabled
            downloaders = list()
            # Collecting params for Pool.map in a loop
            for gsm in itervalues(self.gsms):
                downloaders.append([
                    gsm,
                    download_sra,
                    email,
                    dirpath,
                    sra_kwargs])
            p = Pool(nproc)
            results = p.map(_supplementary_files_download_worker, downloaders)
            downloaded_paths = dict(results)
        else:
            raise ValueError("Nproc should be non-negative: %s" % str(nproc))

        return downloaded_paths
[ "def", "download_supplementary_files", "(", "self", ",", "directory", "=", "'series'", ",", "download_sra", "=", "True", ",", "email", "=", "None", ",", "sra_kwargs", "=", "None", ",", "nproc", "=", "1", ")", ":", "if", "sra_kwargs", "is", "None", ":", "sra_kwargs", "=", "dict", "(", ")", "if", "directory", "==", "'series'", ":", "dirpath", "=", "os", ".", "path", ".", "abspath", "(", "self", ".", "get_accession", "(", ")", "+", "\"_Supp\"", ")", "utils", ".", "mkdir_p", "(", "dirpath", ")", "else", ":", "dirpath", "=", "os", ".", "path", ".", "abspath", "(", "directory", ")", "utils", ".", "mkdir_p", "(", "dirpath", ")", "downloaded_paths", "=", "dict", "(", ")", "if", "nproc", "==", "1", ":", "# No need to parallelize, running ordinary download in loop", "downloaded_paths", "=", "dict", "(", ")", "for", "gsm", "in", "itervalues", "(", "self", ".", "gsms", ")", ":", "logger", ".", "info", "(", "\"Downloading SRA files for %s series\\n\"", "%", "gsm", ".", "name", ")", "paths", "=", "gsm", ".", "download_supplementary_files", "(", "email", "=", "email", ",", "download_sra", "=", "download_sra", ",", "directory", "=", "dirpath", ",", "sra_kwargs", "=", "sra_kwargs", ")", "downloaded_paths", "[", "gsm", ".", "name", "]", "=", "paths", "elif", "nproc", ">", "1", ":", "# Parallelization enabled", "downloaders", "=", "list", "(", ")", "# Collecting params for Pool.map in a loop", "for", "gsm", "in", "itervalues", "(", "self", ".", "gsms", ")", ":", "downloaders", ".", "append", "(", "[", "gsm", ",", "download_sra", ",", "email", ",", "dirpath", ",", "sra_kwargs", "]", ")", "p", "=", "Pool", "(", "nproc", ")", "results", "=", "p", ".", "map", "(", "_supplementary_files_download_worker", ",", "downloaders", ")", "downloaded_paths", "=", "dict", "(", "results", ")", "else", ":", "raise", "ValueError", "(", "\"Nproc should be non-negative: %s\"", "%", "str", "(", "nproc", ")", ")", "return", "downloaded_paths" ]
44.772727
0.001987
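A minimal usage sketch for the method above. It assumes a GEOparse-style series object; the accession, output directory and e-mail address are illustrative placeholders, not values from the source.

import GEOparse

# Fetch a series object that exposes download_supplementary_files (hypothetical accession).
gse = GEOparse.get_GEO("GSE1563")
paths = gse.download_supplementary_files(
    directory="downloads",       # files land under downloads/
    download_sra=False,          # skip the raw SRA data
    email="user@example.com",    # passed to Entrez when SRA is downloaded
)
for gsm_name, files in paths.items():
    print(gsm_name, files)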
def x509_rsa_load(txt):
    """ Load an RSA public key from X.509/PEM text so the output format
    matches what loads() produces.

    :param txt: X.509/PEM text containing the key
    :return: list with a single ("rsa", key) tuple, or None if the key
        is not an RSA public key
    """
    pub_key = import_rsa_key(txt)
    if isinstance(pub_key, rsa.RSAPublicKey):
        return [("rsa", pub_key)]
[ "def", "x509_rsa_load", "(", "txt", ")", ":", "pub_key", "=", "import_rsa_key", "(", "txt", ")", "if", "isinstance", "(", "pub_key", ",", "rsa", ".", "RSAPublicKey", ")", ":", "return", "[", "(", "\"rsa\"", ",", "pub_key", ")", "]" ]
28.125
0.00431
def add_feature_values(self, features):
        """
        Adds feature values of feature 'feature' to all fixations in
        the calling fixmat.

        For fixations out of the image boundaries, NaNs are returned.
        The function generates a new attribute field named with the
        string in features that contains an np.array listing feature
        values for every fixation in the fixmat.

        .. note:: The calling fixmat must have been constructed with a
        stimuli.Categories object

        Parameters:
            features : list of strings
                list of feature names for which feature values are extracted.
        """
        if not 'x' in self.fieldnames():
            raise RuntimeError("""add_feature_values expects to find
            (x,y) locations in self.x and self.y. But self.x does not exist""")

        if not self._categories:
            raise RuntimeError(
            '''"%s" does not exist as a fieldname and the
            fixmat does not have a Categories object (no features
            available. The fixmat has these fields: %s''' \
            %(features, str(self._fields)))
        for feature in features:
            # initialize new field with NaNs
            feat_vals = np.zeros([len(self.x)]) * np.nan
            for (cat_mat, imgs) in self.by_cat():
                for img in np.unique(cat_mat.filenumber).astype(int):
                    fmap = imgs[img][feature]
                    on_image = (self.x >= 0) & (self.x <= self.image_size[1])
                    on_image = on_image & (self.y >= 0) & (self.y <= self.image_size[0])
                    idx = (self.category == imgs.category) & \
                          (self.filenumber == img) & \
                          (on_image.astype('bool'))
                    feat_vals[idx] = fmap[self.y[idx].astype('int'),
                        self.x[idx].astype('int')]
            # setattr(self, feature, feat_vals)
            self.add_field(feature, feat_vals)
[ "def", "add_feature_values", "(", "self", ",", "features", ")", ":", "if", "not", "'x'", "in", "self", ".", "fieldnames", "(", ")", ":", "raise", "RuntimeError", "(", "\"\"\"add_feature_values expects to find\n (x,y) locations in self.x and self.y. But self.x does not exist\"\"\"", ")", "if", "not", "self", ".", "_categories", ":", "raise", "RuntimeError", "(", "'''\"%s\" does not exist as a fieldname and the\n fixmat does not have a Categories object (no features \n available. The fixmat has these fields: %s'''", "%", "(", "features", ",", "str", "(", "self", ".", "_fields", ")", ")", ")", "for", "feature", "in", "features", ":", "# initialize new field with NaNs", "feat_vals", "=", "np", ".", "zeros", "(", "[", "len", "(", "self", ".", "x", ")", "]", ")", "*", "np", ".", "nan", "for", "(", "cat_mat", ",", "imgs", ")", "in", "self", ".", "by_cat", "(", ")", ":", "for", "img", "in", "np", ".", "unique", "(", "cat_mat", ".", "filenumber", ")", ".", "astype", "(", "int", ")", ":", "fmap", "=", "imgs", "[", "img", "]", "[", "feature", "]", "on_image", "=", "(", "self", ".", "x", ">=", "0", ")", "&", "(", "self", ".", "x", "<=", "self", ".", "image_size", "[", "1", "]", ")", "on_image", "=", "on_image", "&", "(", "self", ".", "y", ">=", "0", ")", "&", "(", "self", ".", "y", "<=", "self", ".", "image_size", "[", "0", "]", ")", "idx", "=", "(", "self", ".", "category", "==", "imgs", ".", "category", ")", "&", "(", "self", ".", "filenumber", "==", "img", ")", "&", "(", "on_image", ".", "astype", "(", "'bool'", ")", ")", "feat_vals", "[", "idx", "]", "=", "fmap", "[", "self", ".", "y", "[", "idx", "]", ".", "astype", "(", "'int'", ")", ",", "self", ".", "x", "[", "idx", "]", ".", "astype", "(", "'int'", ")", "]", "# setattr(self, feature, feat_vals)", "self", ".", "add_field", "(", "feature", ",", "feat_vals", ")" ]
47.047619
0.008924
def _find_pair(self, protocol, remote_candidate): """ Find a candidate pair in the check list. """ for pair in self._check_list: if (pair.protocol == protocol and pair.remote_candidate == remote_candidate): return pair return None
[ "def", "_find_pair", "(", "self", ",", "protocol", ",", "remote_candidate", ")", ":", "for", "pair", "in", "self", ".", "_check_list", ":", "if", "(", "pair", ".", "protocol", "==", "protocol", "and", "pair", ".", "remote_candidate", "==", "remote_candidate", ")", ":", "return", "pair", "return", "None" ]
36.375
0.010067
def named_config(self, func): """ Decorator to turn a function into a named configuration. See :ref:`named_configurations`. """ config_scope = ConfigScope(func) self._add_named_config(func.__name__, config_scope) return config_scope
[ "def", "named_config", "(", "self", ",", "func", ")", ":", "config_scope", "=", "ConfigScope", "(", "func", ")", "self", ".", "_add_named_config", "(", "func", ".", "__name__", ",", "config_scope", ")", "return", "config_scope" ]
31.222222
0.00692
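The decorator above mirrors Sacred's named-configuration mechanism. A minimal sketch, assuming the sacred package, showing how a named config overrides a default only when selected on the command line:

from sacred import Experiment

ex = Experiment('demo')

@ex.config
def base():
    learning_rate = 0.01      # default configuration

@ex.named_config
def fast():
    learning_rate = 0.1       # applied only when 'fast' is requested

@ex.main
def run(learning_rate):
    print(learning_rate)

if __name__ == '__main__':
    ex.run_commandline()      # e.g.  python demo.py with fast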
def get_membership_cache(self, group_ids=None, is_active=True):
        """
        Build a dict cache with the group membership info. The cache is keyed
        off the group id, and each value is a list of 2-element lists of
        entity id and entity kind id (same values as the membership model).
        If no group ids are passed, then all groups will be fetched
        :param is_active: Flag indicating whether to filter on entity active status. None will not filter.
        :rtype: dict
        """
        membership_queryset = EntityGroupMembership.objects.filter(
            Q(entity__isnull=True) | (Q(entity__isnull=False) & Q(entity__is_active=is_active))
        )
        if is_active is None:
            membership_queryset = EntityGroupMembership.objects.all()

        if group_ids:
            membership_queryset = membership_queryset.filter(entity_group_id__in=group_ids)
        membership_queryset = membership_queryset.values_list('entity_group_id', 'entity_id', 'sub_entity_kind_id')

        # Iterate over the query results and build the cache dict
        membership_cache = {}
        for entity_group_id, entity_id, sub_entity_kind_id in membership_queryset:
            membership_cache.setdefault(entity_group_id, [])
            membership_cache[entity_group_id].append([entity_id, sub_entity_kind_id])

        return membership_cache
[ "def", "get_membership_cache", "(", "self", ",", "group_ids", "=", "None", ",", "is_active", "=", "True", ")", ":", "membership_queryset", "=", "EntityGroupMembership", ".", "objects", ".", "filter", "(", "Q", "(", "entity__isnull", "=", "True", ")", "|", "(", "Q", "(", "entity__isnull", "=", "False", ")", "&", "Q", "(", "entity__is_active", "=", "is_active", ")", ")", ")", "if", "is_active", "is", "None", ":", "membership_queryset", "=", "EntityGroupMembership", ".", "objects", ".", "all", "(", ")", "if", "group_ids", ":", "membership_queryset", "=", "membership_queryset", ".", "filter", "(", "entity_group_id__in", "=", "group_ids", ")", "membership_queryset", "=", "membership_queryset", ".", "values_list", "(", "'entity_group_id'", ",", "'entity_id'", ",", "'sub_entity_kind_id'", ")", "# Iterate over the query results and build the cache dict", "membership_cache", "=", "{", "}", "for", "entity_group_id", ",", "entity_id", ",", "sub_entity_kind_id", "in", "membership_queryset", ":", "membership_cache", ".", "setdefault", "(", "entity_group_id", ",", "[", "]", ")", "membership_cache", "[", "entity_group_id", "]", ".", "append", "(", "[", "entity_id", ",", "sub_entity_kind_id", "]", ")", "return", "membership_cache" ]
48.925926
0.007424
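A sketch of how the cache might be consumed. The manager attachment (EntityGroup.objects) and all ids below are assumptions for illustration, not taken from the source.

# Hypothetical call; assumes the method lives on an EntityGroup manager.
cache = EntityGroup.objects.get_membership_cache(group_ids=[1, 2])
# Illustrative shape of the result (ids are made up):
# {1: [[10, None], [11, 3]], 2: [[12, None]]}
for group_id, members in cache.items():
    for entity_id, sub_entity_kind_id in members:
        print(group_id, entity_id, sub_entity_kind_id)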
def demonstrate_colored_logging(): """Interactively demonstrate the :mod:`coloredlogs` package.""" # Determine the available logging levels and order them by numeric value. decorated_levels = [] defined_levels = coloredlogs.find_defined_levels() normalizer = coloredlogs.NameNormalizer() for name, level in defined_levels.items(): if name != 'NOTSET': item = (level, normalizer.normalize_name(name)) if item not in decorated_levels: decorated_levels.append(item) ordered_levels = sorted(decorated_levels) # Initialize colored output to the terminal, default to the most # verbose logging level but enable the user the customize it. coloredlogs.install(level=os.environ.get('COLOREDLOGS_LOG_LEVEL', ordered_levels[0][1])) # Print some examples with different timestamps. for level, name in ordered_levels: log_method = getattr(logger, name, None) if log_method: log_method("message with level %s (%i)", name, level) time.sleep(DEMO_DELAY)
[ "def", "demonstrate_colored_logging", "(", ")", ":", "# Determine the available logging levels and order them by numeric value.", "decorated_levels", "=", "[", "]", "defined_levels", "=", "coloredlogs", ".", "find_defined_levels", "(", ")", "normalizer", "=", "coloredlogs", ".", "NameNormalizer", "(", ")", "for", "name", ",", "level", "in", "defined_levels", ".", "items", "(", ")", ":", "if", "name", "!=", "'NOTSET'", ":", "item", "=", "(", "level", ",", "normalizer", ".", "normalize_name", "(", "name", ")", ")", "if", "item", "not", "in", "decorated_levels", ":", "decorated_levels", ".", "append", "(", "item", ")", "ordered_levels", "=", "sorted", "(", "decorated_levels", ")", "# Initialize colored output to the terminal, default to the most", "# verbose logging level but enable the user the customize it.", "coloredlogs", ".", "install", "(", "level", "=", "os", ".", "environ", ".", "get", "(", "'COLOREDLOGS_LOG_LEVEL'", ",", "ordered_levels", "[", "0", "]", "[", "1", "]", ")", ")", "# Print some examples with different timestamps.", "for", "level", ",", "name", "in", "ordered_levels", ":", "log_method", "=", "getattr", "(", "logger", ",", "name", ",", "None", ")", "if", "log_method", ":", "log_method", "(", "\"message with level %s (%i)\"", ",", "name", ",", "level", ")", "time", ".", "sleep", "(", "DEMO_DELAY", ")" ]
50.142857
0.001864
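The core pattern the demo exercises, reduced to documented coloredlogs calls; the level value is just an example.

import logging
import coloredlogs

logger = logging.getLogger(__name__)
coloredlogs.install(level='DEBUG')   # installs a colorized handler on the root logger
logger.debug("this is a debug message")
logger.warning("this is a warning message")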
def _filter_dicts(self): '''Filters out all the report_dicts that do not pass the cutoffs. If any ref sequence loses all of its report_dicts, then it is completely removed.''' keys_to_remove = set() for ref_name in self.report: for ctg_name in self.report[ref_name]: self.report[ref_name][ctg_name] = self._filter_list_of_dicts(self.report[ref_name][ctg_name]) if len(self.report[ref_name][ctg_name]) == 0: keys_to_remove.add((ref_name, ctg_name)) refs_to_remove = set() for ref_name, ctg_name in keys_to_remove: del self.report[ref_name][ctg_name] if len(self.report[ref_name]) == 0: refs_to_remove.add(ref_name) for ref_name in refs_to_remove: del self.report[ref_name]
[ "def", "_filter_dicts", "(", "self", ")", ":", "keys_to_remove", "=", "set", "(", ")", "for", "ref_name", "in", "self", ".", "report", ":", "for", "ctg_name", "in", "self", ".", "report", "[", "ref_name", "]", ":", "self", ".", "report", "[", "ref_name", "]", "[", "ctg_name", "]", "=", "self", ".", "_filter_list_of_dicts", "(", "self", ".", "report", "[", "ref_name", "]", "[", "ctg_name", "]", ")", "if", "len", "(", "self", ".", "report", "[", "ref_name", "]", "[", "ctg_name", "]", ")", "==", "0", ":", "keys_to_remove", ".", "add", "(", "(", "ref_name", ",", "ctg_name", ")", ")", "refs_to_remove", "=", "set", "(", ")", "for", "ref_name", ",", "ctg_name", "in", "keys_to_remove", ":", "del", "self", ".", "report", "[", "ref_name", "]", "[", "ctg_name", "]", "if", "len", "(", "self", ".", "report", "[", "ref_name", "]", ")", "==", "0", ":", "refs_to_remove", ".", "add", "(", "ref_name", ")", "for", "ref_name", "in", "refs_to_remove", ":", "del", "self", ".", "report", "[", "ref_name", "]" ]
41.55
0.004706
def segment_radii(neurites, neurite_type=NeuriteType.all): '''arithmetic mean of the radii of the points in segments in a collection of neurites''' def _seg_radii(sec): '''vectorized mean radii''' pts = sec.points[:, COLS.R] return np.divide(np.add(pts[:-1], pts[1:]), 2.0) return map_segments(_seg_radii, neurites, neurite_type)
[ "def", "segment_radii", "(", "neurites", ",", "neurite_type", "=", "NeuriteType", ".", "all", ")", ":", "def", "_seg_radii", "(", "sec", ")", ":", "'''vectorized mean radii'''", "pts", "=", "sec", ".", "points", "[", ":", ",", "COLS", ".", "R", "]", "return", "np", ".", "divide", "(", "np", ".", "add", "(", "pts", "[", ":", "-", "1", "]", ",", "pts", "[", "1", ":", "]", ")", ",", "2.0", ")", "return", "map_segments", "(", "_seg_radii", ",", "neurites", ",", "neurite_type", ")" ]
44.875
0.005464
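A worked example of the adjacent-point averaging inside _seg_radii, using made-up radii:

import numpy as np

pts = np.array([1.0, 2.0, 4.0])                     # per-point radii along one section
seg_radii = np.divide(np.add(pts[:-1], pts[1:]), 2.0)
print(seg_radii)                                    # [1.5 3. ]  -- one mean per segment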
def load_cache(): """ Load cache from the disk. Return: set: Deserialized data from disk. """ if not os.path.exists(settings.DUP_FILTER_FILE): return set() with open(settings.DUP_FILTER_FILE) as f: return set( json.loads(f.read()) )
[ "def", "load_cache", "(", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "settings", ".", "DUP_FILTER_FILE", ")", ":", "return", "set", "(", ")", "with", "open", "(", "settings", ".", "DUP_FILTER_FILE", ")", "as", "f", ":", "return", "set", "(", "json", ".", "loads", "(", "f", ".", "read", "(", ")", ")", ")" ]
20.642857
0.003311
def _clean_xml(self, path_to_xml):
        """Clean MARCXML harvested from OAI.

        Allows the xml to be used with BibUpload or BibRecord.

        :param path_to_xml: either XML as a string or a path to an XML file
        :return: ElementTree of clean data
        """
        try:
            if os.path.isfile(path_to_xml):
                tree = ET.parse(path_to_xml)
                root = tree.getroot()
            else:
                root = ET.fromstring(path_to_xml)
        except Exception, e:
            self.logger.error("Could not read OAI XML, aborting filter!")
            raise e
        strip_xml_namespace(root)
        return root
[ "def", "_clean_xml", "(", "self", ",", "path_to_xml", ")", ":", "try", ":", "if", "os", ".", "path", ".", "isfile", "(", "path_to_xml", ")", ":", "tree", "=", "ET", ".", "parse", "(", "path_to_xml", ")", "root", "=", "tree", ".", "getroot", "(", ")", "else", ":", "root", "=", "ET", ".", "fromstring", "(", "path_to_xml", ")", "except", "Exception", ",", "e", ":", "self", ".", "logger", ".", "error", "(", "\"Could not read OAI XML, aborting filter!\"", ")", "raise", "e", "strip_xml_namespace", "(", "root", ")", "return", "root" ]
31.6
0.003072
def create_enum_option(self, custom_field, params={}, **options): """Creates an enum option and adds it to this custom field's list of enum options. A custom field can have at most 50 enum options (including disabled options). By default new enum options are inserted at the end of a custom field's list. Returns the full record of the newly created enum option. Parameters ---------- custom_field : {Id} Globally unique identifier for the custom field. [data] : {Object} Data for the request - name : {String} The name of the enum option. - [color] : {String} The color of the enum option. Defaults to 'none'. - [insert_before] : {Id} An existing enum option within this custom field before which the new enum option should be inserted. Cannot be provided together with after_enum_option. - [insert_after] : {Id} An existing enum option within this custom field after which the new enum option should be inserted. Cannot be provided together with before_enum_option. """ path = "/custom_fields/%s/enum_options" % (custom_field) return self.client.post(path, params, **options)
[ "def", "create_enum_option", "(", "self", ",", "custom_field", ",", "params", "=", "{", "}", ",", "*", "*", "options", ")", ":", "path", "=", "\"/custom_fields/%s/enum_options\"", "%", "(", "custom_field", ")", "return", "self", ".", "client", ".", "post", "(", "path", ",", "params", ",", "*", "*", "options", ")" ]
74.0625
0.006667
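A usage sketch, assuming the official python-asana client this resource method appears to belong to; the access token and custom field id are placeholders.

import asana

client = asana.Client.access_token('PERSONAL_ACCESS_TOKEN')
result = client.custom_fields.create_enum_option(
    1199912345678901,                     # hypothetical custom field id
    {'name': 'High', 'color': 'red'},     # the new enum option
)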
def dmol_neg_log_perplexity(predictions, labels, weights_fn=None): """Average log-perplexity excluding padding 0s. No smoothing.""" del weights_fn # Unused num, den = common_layers.dml_loss( predictions, labels, reduce_sum=False) return (-num, den)
[ "def", "dmol_neg_log_perplexity", "(", "predictions", ",", "labels", ",", "weights_fn", "=", "None", ")", ":", "del", "weights_fn", "# Unused", "num", ",", "den", "=", "common_layers", ".", "dml_loss", "(", "predictions", ",", "labels", ",", "reduce_sum", "=", "False", ")", "return", "(", "-", "num", ",", "den", ")" ]
39
0.015674
def listDatasetArray(self):
        """
        API to list datasets in DBS. To be called via the datasetlist URL with a POST request.

        :param dataset: list of datasets [dataset1,dataset2,..,datasetn] (must have either a list of datasets or dataset_id), Max length 1000.
        :type dataset: list
        :param dataset_id: list of dataset ids [dataset_id1,dataset_id2,..,dataset_idn, "dsid_min-dsid_max"] (must have either a list of datasets or dataset_id)
        :type dataset_id: list
        :param dataset_access_type: List only datasets with that dataset access type (Optional)
        :type dataset_access_type: str
        :param detail: brief list or detailed list 1/0
        :type detail: bool
        :returns: List of dictionaries containing the following keys (dataset). If the detail option is used, the dictionary contains the following keys (primary_ds_name, physics_group_name, acquisition_era_name, create_by, dataset_access_type, data_tier_name, last_modified_by, creation_date, processing_version, processed_ds_name, xtcrosssection, last_modification_date, dataset_id, dataset, prep_id, primary_ds_type)
        :rtype: list of dicts

        """
        ret = []
        try :
            body = request.body.read()
            if body:
                data = cjson.decode(body)
                data = validateJSONInputNoCopy("dataset", data, read=True)
                #Because CMSWEB has a 300 seconds responding time. We have to limit the array siz to make sure that
                #the API can be finished in 300 second.
                # YG Nov-05-2015
                max_array_size = 1000
                if ( 'dataset' in data.keys() and isinstance(data['dataset'], list) and len(data['dataset'])>max_array_size)\
                    or ('dataset_id' in data.keys() and isinstance(data['dataset_id'], list) and len(data['dataset_id'])>max_array_size):
                    dbsExceptionHandler("dbsException-invalid-input",
                                        "The Max list length supported in listDatasetArray is %s." %max_array_size, self.logger.exception)
            ret = self.dbsDataset.listDatasetArray(data)
        except cjson.DecodeError as De:
            dbsExceptionHandler('dbsException-invalid-input2', "Invalid input", self.logger.exception, str(De))
        except dbsException as de:
            dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
        except HTTPError as he:
            raise he
        except Exception as ex:
            sError = "DBSReaderModel/listDatasetArray. %s \n Exception trace: \n %s" \
                % (ex, traceback.format_exc())
            dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
        for item in ret:
            yield item
[ "def", "listDatasetArray", "(", "self", ")", ":", "ret", "=", "[", "]", "try", ":", "body", "=", "request", ".", "body", ".", "read", "(", ")", "if", "body", ":", "data", "=", "cjson", ".", "decode", "(", "body", ")", "data", "=", "validateJSONInputNoCopy", "(", "\"dataset\"", ",", "data", ",", "read", "=", "True", ")", "#Because CMSWEB has a 300 seconds responding time. We have to limit the array siz to make sure that", "#the API can be finished in 300 second. ", "# YG Nov-05-2015", "max_array_size", "=", "1000", "if", "(", "'dataset'", "in", "data", ".", "keys", "(", ")", "and", "isinstance", "(", "data", "[", "'dataset'", "]", ",", "list", ")", "and", "len", "(", "data", "[", "'dataset'", "]", ")", ">", "max_array_size", ")", "or", "(", "'dataset_id'", "in", "data", ".", "keys", "(", ")", "and", "isinstance", "(", "data", "[", "'dataset_id'", "]", ",", "list", ")", "and", "len", "(", "data", "[", "'dataset_id'", "]", ")", ">", "max_array_size", ")", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input\"", ",", "\"The Max list length supported in listDatasetArray is %s.\"", "%", "max_array_size", ",", "self", ".", "logger", ".", "exception", ")", "ret", "=", "self", ".", "dbsDataset", ".", "listDatasetArray", "(", "data", ")", "except", "cjson", ".", "DecodeError", "as", "De", ":", "dbsExceptionHandler", "(", "'dbsException-invalid-input2'", ",", "\"Invalid input\"", ",", "self", ".", "logger", ".", "exception", ",", "str", "(", "De", ")", ")", "except", "dbsException", "as", "de", ":", "dbsExceptionHandler", "(", "de", ".", "eCode", ",", "de", ".", "message", ",", "self", ".", "logger", ".", "exception", ",", "de", ".", "serverError", ")", "except", "HTTPError", "as", "he", ":", "raise", "he", "except", "Exception", "as", "ex", ":", "sError", "=", "\"DBSReaderModel/listDatasetArray. %s \\n Exception trace: \\n %s\"", "%", "(", "ex", ",", "traceback", ".", "format_exc", "(", ")", ")", "dbsExceptionHandler", "(", "'dbsException-server-error'", ",", "dbsExceptionCode", "[", "'dbsException-server-error'", "]", ",", "self", ".", "logger", ".", "exception", ",", "sError", ")", "for", "item", "in", "ret", ":", "yield", "item" ]
65.27907
0.010179
def validate_crc(msg): """ Validate CRC of message. :param msg: Byte array with message with CRC. :raise: CRCError. """ if not struct.unpack('<H', get_crc(msg[:-2])) ==\ struct.unpack('<H', msg[-2:]): raise CRCError('CRC validation failed.')
[ "def", "validate_crc", "(", "msg", ")", ":", "if", "not", "struct", ".", "unpack", "(", "'<H'", ",", "get_crc", "(", "msg", "[", ":", "-", "2", "]", ")", ")", "==", "struct", ".", "unpack", "(", "'<H'", ",", "msg", "[", "-", "2", ":", "]", ")", ":", "raise", "CRCError", "(", "'CRC validation failed.'", ")" ]
30.444444
0.003546
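A round-trip sketch, assuming get_crc() from the same module returns the two CRC bytes that validate_crc() checks; the frame body is a made-up Modbus RTU example.

body = b'\x01\x03\x00\x00\x00\x01'   # hypothetical frame body
msg = body + get_crc(body)           # append the CRC bytes
validate_crc(msg)                    # passes silently
validate_crc(msg[:-1] + b'\x00')     # corrupted CRC -> raises CRCError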
def generate_colours(n): """Return a list of `n` distinct colours, each represented as an RGB string suitable for use in CSS. Based on the code at http://martin.ankerl.com/2009/12/09/how-to-create-random-colors-programmatically/ :param n: number of colours to generate :type n: `int` :rtype: `list` of `str` """ colours = [] golden_ratio_conjugate = 0.618033988749895 h = 0.8 # Initial hue s = 0.7 # Fixed saturation v = 0.95 # Fixed value for i in range(n): h += golden_ratio_conjugate h %= 1 colours.append(hsv_to_rgb(h, s, v)) return colours
[ "def", "generate_colours", "(", "n", ")", ":", "colours", "=", "[", "]", "golden_ratio_conjugate", "=", "0.618033988749895", "h", "=", "0.8", "# Initial hue", "s", "=", "0.7", "# Fixed saturation", "v", "=", "0.95", "# Fixed value", "for", "i", "in", "range", "(", "n", ")", ":", "h", "+=", "golden_ratio_conjugate", "h", "%=", "1", "colours", ".", "append", "(", "hsv_to_rgb", "(", "h", ",", "s", ",", "v", ")", ")", "return", "colours" ]
27.863636
0.001577
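A usage sketch; it supplies a stand-in for the module's hsv_to_rgb helper (assumed to return a CSS rgb() string, as the docstring says) so the example is self-contained.

import colorsys

def hsv_to_rgb(h, s, v):
    # Stand-in for the module helper: HSV triple -> CSS 'rgb(r, g, b)' string.
    r, g, b = colorsys.hsv_to_rgb(h, s, v)
    return 'rgb({}, {}, {})'.format(int(r * 255), int(g * 255), int(b * 255))

print(generate_colours(3))   # three visually distinct rgb() strings, deterministic for a given n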
def _find_classes_param(self): """ Searches the wrapped model for the classes_ parameter. """ for attr in ["classes_"]: try: return getattr(self.estimator, attr) except AttributeError: continue raise YellowbrickTypeError( "could not find classes_ param on {}".format( self.estimator.__class__.__name__ ) )
[ "def", "_find_classes_param", "(", "self", ")", ":", "for", "attr", "in", "[", "\"classes_\"", "]", ":", "try", ":", "return", "getattr", "(", "self", ".", "estimator", ",", "attr", ")", "except", "AttributeError", ":", "continue", "raise", "YellowbrickTypeError", "(", "\"could not find classes_ param on {}\"", ".", "format", "(", "self", ".", "estimator", ".", "__class__", ".", "__name__", ")", ")" ]
29.066667
0.004444
def addnot(self, action=None, subject=None, **conditions):
        """
        Defines a rule for an action that cannot be performed on a subject
        (the negation of an ability).
        """
        self.add_rule(Rule(False, action, subject, **conditions))
[ "def", "addnot", "(", "self", ",", "action", "=", "None", ",", "subject", "=", "None", ",", "*", "*", "conditions", ")", ":", "self", ".", "add_rule", "(", "Rule", "(", "False", ",", "action", ",", "subject", ",", "*", "*", "conditions", ")", ")" ]
38.6
0.010152
def main(unused_argv): """Freeze a model to a GraphDef proto.""" if FLAGS.use_tpu: dual_net.freeze_graph_tpu(FLAGS.model_path) else: dual_net.freeze_graph(FLAGS.model_path)
[ "def", "main", "(", "unused_argv", ")", ":", "if", "FLAGS", ".", "use_tpu", ":", "dual_net", ".", "freeze_graph_tpu", "(", "FLAGS", ".", "model_path", ")", "else", ":", "dual_net", ".", "freeze_graph", "(", "FLAGS", ".", "model_path", ")" ]
32.5
0.005
def loadWallet(self, fpath):
        """Load wallet from specified location.

        Returns loaded wallet.

        Error cases:
            - ``fpath`` is not inside the keyrings base dir - ValueError raised
            - ``fpath`` exists and it's a directory - IsADirectoryError raised

        :param fpath: wallet file path, absolute or relative to
            keyrings base dir
        """
        if not fpath:
            raise ValueError("empty path")

        _fpath = self._normalize(fpath)
        _dpath = _fpath.parent
        try:
            _dpath.relative_to(self._baseDir)
        except ValueError:
            raise ValueError(
                "path {} is not relative to the wallets {}".format(
                    fpath, self._baseDir))

        with _fpath.open() as wf:
            wallet = self.decode(wf.read())
        return wallet
[ "def", "loadWallet", "(", "self", ",", "fpath", ")", ":", "if", "not", "fpath", ":", "raise", "ValueError", "(", "\"empty path\"", ")", "_fpath", "=", "self", ".", "_normalize", "(", "fpath", ")", "_dpath", "=", "_fpath", ".", "parent", "try", ":", "_dpath", ".", "relative_to", "(", "self", ".", "_baseDir", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"path {} is not is not relative to the wallets {}\"", ".", "format", "(", "fpath", ",", "self", ".", "_baseDir", ")", ")", "with", "_fpath", ".", "open", "(", ")", "as", "wf", ":", "wallet", "=", "self", ".", "decode", "(", "wf", ".", "read", "(", ")", ")", "return", "wallet" ]
29.068966
0.002296
def retry_connect(self): """Will be called when new channels in the token network are detected. If the minimum number of channels was not yet established, it will try to open new channels. If the connection manager has no funds, this is a noop. """ with self.lock: if self._funds_remaining > 0 and not self._leaving_state: self._open_channels()
[ "def", "retry_connect", "(", "self", ")", ":", "with", "self", ".", "lock", ":", "if", "self", ".", "_funds_remaining", ">", "0", "and", "not", "self", ".", "_leaving_state", ":", "self", ".", "_open_channels", "(", ")" ]
41.2
0.004751
def get_loco_name(self):
        """
        Returns the Provider, Product and Engine name.

        :return: list of [Provider, Product, Engine], or None if unavailable
        """
        ret_str = self.dll.GetLocoName().decode()
        if not ret_str:
            return
        return ret_str.split('.:.')
[ "def", "get_loco_name", "(", "self", ")", ":", "ret_str", "=", "self", ".", "dll", ".", "GetLocoName", "(", ")", ".", "decode", "(", ")", "if", "not", "ret_str", ":", "return", "return", "ret_str", ".", "split", "(", "'.:.'", ")" ]
24.5
0.007874
def check_cmake_exists(cmake_command):
    """
    Check whether CMake is installed. If not, print an informative
    error message and quit.
    """
    from subprocess import Popen, PIPE

    p = Popen(
        '{0} --version'.format(cmake_command),
        shell=True,
        stdin=PIPE,
        stdout=PIPE)
    if not ('cmake version' in p.communicate()[0].decode('UTF-8')):
        sys.stderr.write(' This code is built using CMake\n\n')
        sys.stderr.write(' CMake is not found\n')
        sys.stderr.write(' get CMake at http://www.cmake.org/\n')
        sys.stderr.write(' on many clusters CMake is installed\n')
        sys.stderr.write(' but you have to load it first:\n')
        sys.stderr.write(' $ module load cmake\n')
        sys.exit(1)
[ "def", "check_cmake_exists", "(", "cmake_command", ")", ":", "from", "subprocess", "import", "Popen", ",", "PIPE", "p", "=", "Popen", "(", "'{0} --version'", ".", "format", "(", "cmake_command", ")", ",", "shell", "=", "True", ",", "stdin", "=", "PIPE", ",", "stdout", "=", "PIPE", ")", "if", "not", "(", "'cmake version'", "in", "p", ".", "communicate", "(", ")", "[", "0", "]", ".", "decode", "(", "'UTF-8'", ")", ")", ":", "sys", ".", "stderr", ".", "write", "(", "' This code is built using CMake\\n\\n'", ")", "sys", ".", "stderr", ".", "write", "(", "' CMake is not found\\n'", ")", "sys", ".", "stderr", ".", "write", "(", "' get CMake at http://www.cmake.org/\\n'", ")", "sys", ".", "stderr", ".", "write", "(", "' on many clusters CMake is installed\\n'", ")", "sys", ".", "stderr", ".", "write", "(", "' but you have to load it first:\\n'", ")", "sys", ".", "stderr", ".", "write", "(", "' $ module load cmake\\n'", ")", "sys", ".", "exit", "(", "1", ")" ]
37.55
0.001299
def update_fmt_with_notebook_options(self, metadata): """Update format options with the values in the notebook metadata, and record those options in the notebook metadata""" # format options in notebook have precedence over that in fmt for opt in _VALID_FORMAT_OPTIONS: if opt in metadata.get('jupytext', {}): self.fmt.setdefault(opt, metadata['jupytext'][opt]) if opt in self.fmt: metadata.setdefault('jupytext', {}).setdefault(opt, self.fmt[opt]) # rST to md conversion should happen only once if metadata.get('jupytext', {}).get('rst2md') is True: metadata['jupytext']['rst2md'] = False
[ "def", "update_fmt_with_notebook_options", "(", "self", ",", "metadata", ")", ":", "# format options in notebook have precedence over that in fmt", "for", "opt", "in", "_VALID_FORMAT_OPTIONS", ":", "if", "opt", "in", "metadata", ".", "get", "(", "'jupytext'", ",", "{", "}", ")", ":", "self", ".", "fmt", ".", "setdefault", "(", "opt", ",", "metadata", "[", "'jupytext'", "]", "[", "opt", "]", ")", "if", "opt", "in", "self", ".", "fmt", ":", "metadata", ".", "setdefault", "(", "'jupytext'", ",", "{", "}", ")", ".", "setdefault", "(", "opt", ",", "self", ".", "fmt", "[", "opt", "]", ")", "# rST to md conversion should happen only once", "if", "metadata", ".", "get", "(", "'jupytext'", ",", "{", "}", ")", ".", "get", "(", "'rst2md'", ")", "is", "True", ":", "metadata", "[", "'jupytext'", "]", "[", "'rst2md'", "]", "=", "False" ]
53.384615
0.005666
def _decode_agents_data(self, block): """ decode agents jsons, count diffs """ collect = [] if block: for chunk in block.split('\n'): try: if chunk: prepared_results = {} jsn = json.loads(chunk) for ts, values in jsn.iteritems(): for key, value in values.iteritems(): # key sample: diskio-sda1_io_time # key_group sample: diskio # key_name sample: io_time try: key_group, key_name = key.split('_')[0].split('-')[0], '_'.join(key.split('_')[1:]) except: # noqa: E722 key_group, key_name = key.split('_')[0], '_'.join(key.split('_')[1:]) if key_group in decoder.diff_metrics.keys(): if key_name in decoder.diff_metrics[key_group]: decoded_key = decoder.find_common_names( key) if self.prev_check: try: value = jsn[ts][key] - \ self.prev_check[key] except KeyError: logger.debug( 'There is no diff value for metric %s.\n' 'Timestamp: %s. Is it initial data?', key, ts, exc_info=True) value = 0 prepared_results[decoded_key] = value else: decoded_key = decoder.find_common_names( key) prepared_results[decoded_key] = value else: decoded_key = decoder.find_common_names( key) prepared_results[decoded_key] = value self.prev_check = jsn[ts] collect.append((ts, prepared_results)) except ValueError: logger.error( 'Telegraf agent send trash to output: %s', chunk) logger.debug( 'Telegraf agent data block w/ trash: %s', exc_info=True) return [] except BaseException: logger.error( 'Exception trying to parse agent data: %s', chunk, exc_info=True) return [] if collect: return collect
[ "def", "_decode_agents_data", "(", "self", ",", "block", ")", ":", "collect", "=", "[", "]", "if", "block", ":", "for", "chunk", "in", "block", ".", "split", "(", "'\\n'", ")", ":", "try", ":", "if", "chunk", ":", "prepared_results", "=", "{", "}", "jsn", "=", "json", ".", "loads", "(", "chunk", ")", "for", "ts", ",", "values", "in", "jsn", ".", "iteritems", "(", ")", ":", "for", "key", ",", "value", "in", "values", ".", "iteritems", "(", ")", ":", "# key sample: diskio-sda1_io_time", "# key_group sample: diskio", "# key_name sample: io_time", "try", ":", "key_group", ",", "key_name", "=", "key", ".", "split", "(", "'_'", ")", "[", "0", "]", ".", "split", "(", "'-'", ")", "[", "0", "]", ",", "'_'", ".", "join", "(", "key", ".", "split", "(", "'_'", ")", "[", "1", ":", "]", ")", "except", ":", "# noqa: E722", "key_group", ",", "key_name", "=", "key", ".", "split", "(", "'_'", ")", "[", "0", "]", ",", "'_'", ".", "join", "(", "key", ".", "split", "(", "'_'", ")", "[", "1", ":", "]", ")", "if", "key_group", "in", "decoder", ".", "diff_metrics", ".", "keys", "(", ")", ":", "if", "key_name", "in", "decoder", ".", "diff_metrics", "[", "key_group", "]", ":", "decoded_key", "=", "decoder", ".", "find_common_names", "(", "key", ")", "if", "self", ".", "prev_check", ":", "try", ":", "value", "=", "jsn", "[", "ts", "]", "[", "key", "]", "-", "self", ".", "prev_check", "[", "key", "]", "except", "KeyError", ":", "logger", ".", "debug", "(", "'There is no diff value for metric %s.\\n'", "'Timestamp: %s. Is it initial data?'", ",", "key", ",", "ts", ",", "exc_info", "=", "True", ")", "value", "=", "0", "prepared_results", "[", "decoded_key", "]", "=", "value", "else", ":", "decoded_key", "=", "decoder", ".", "find_common_names", "(", "key", ")", "prepared_results", "[", "decoded_key", "]", "=", "value", "else", ":", "decoded_key", "=", "decoder", ".", "find_common_names", "(", "key", ")", "prepared_results", "[", "decoded_key", "]", "=", "value", "self", ".", "prev_check", "=", "jsn", "[", "ts", "]", "collect", ".", "append", "(", "(", "ts", ",", "prepared_results", ")", ")", "except", "ValueError", ":", "logger", ".", "error", "(", "'Telegraf agent send trash to output: %s'", ",", "chunk", ")", "logger", ".", "debug", "(", "'Telegraf agent data block w/ trash: %s'", ",", "exc_info", "=", "True", ")", "return", "[", "]", "except", "BaseException", ":", "logger", ".", "error", "(", "'Exception trying to parse agent data: %s'", ",", "chunk", ",", "exc_info", "=", "True", ")", "return", "[", "]", "if", "collect", ":", "return", "collect" ]
53
0.00314
def _check_input_directory_listing(base_directory, listing):
    """
    Raises a DirectoryError if files or directories, given in the listing, could not be found in the local filesystem.

    :param base_directory: The path to the directory to check
    :param listing: A listing given as dictionary
    :raise DirectoryError: If the given base directory does not contain all of the subdirectories and subfiles given
                           in the listing.
    """
    for sub in listing:
        path = os.path.join(base_directory, sub['basename'])
        if sub['class'] == 'File':
            if not os.path.isfile(path):
                raise DirectoryError('File \'{}\' not found but specified in listing.'.format(path))
        if sub['class'] == 'Directory':
            if not os.path.isdir(path):
                raise DirectoryError('Directory \'{}\' not found but specified in listing'.format(path))
            sub_listing = sub.get('listing')
            if sub_listing:
                _check_input_directory_listing(path, sub_listing)
[ "def", "_check_input_directory_listing", "(", "base_directory", ",", "listing", ")", ":", "for", "sub", "in", "listing", ":", "path", "=", "os", ".", "path", ".", "join", "(", "base_directory", ",", "sub", "[", "'basename'", "]", ")", "if", "sub", "[", "'class'", "]", "==", "'File'", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "raise", "DirectoryError", "(", "'File \\'{}\\' not found but specified in listing.'", ".", "format", "(", "path", ")", ")", "if", "sub", "[", "'class'", "]", "==", "'Directory'", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "raise", "DirectoryError", "(", "'Directory \\'{}\\' not found but specified in listing'", ".", "format", "(", "path", ")", ")", "sub_listing", "=", "sub", ".", "get", "(", "'listing'", ")", "if", "sub_listing", ":", "_check_input_directory_listing", "(", "path", ",", "sub_listing", ")" ]
48.238095
0.00484
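An illustrative call; the CWL-style listing below and the base directory are made up to show the structure the checker walks.

listing = [
    {'class': 'File', 'basename': 'reads.fastq'},
    {'class': 'Directory', 'basename': 'refs', 'listing': [
        {'class': 'File', 'basename': 'genome.fa'},
    ]},
]
# Raises DirectoryError unless /data/job contains reads.fastq and refs/genome.fa.
_check_input_directory_listing('/data/job', listing)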
def set_ownership(self):
        """Recursively set the parent, section and doc for all children"""
        assert self.section is not None
        for t in self.children:
            t.parent = self
            t._section = self.section
            t.doc = self.doc
            t.set_ownership()
[ "def", "set_ownership", "(", "self", ")", ":", "assert", "self", ".", "section", "is", "not", "None", "for", "t", "in", "self", ".", "children", ":", "t", ".", "parent", "=", "self", "t", ".", "_section", "=", "self", ".", "section", "t", ".", "doc", "=", "self", ".", "doc", "t", ".", "set_ownership", "(", ")" ]
31.888889
0.00678
def extend_extents(extents, factor=1.1): """Extend a given bounding box The bounding box (x1, y1, x2, y2) is centrally stretched by the given factor. :param extents: The bound box extents :param factor: The factor for stretching :return: (x1, y1, x2, y2) of the extended bounding box """ width = extents[2] - extents[0] height = extents[3] - extents[1] add_width = (factor - 1) * width add_height = (factor - 1) * height x1 = extents[0] - add_width / 2 x2 = extents[2] + add_width / 2 y1 = extents[1] - add_height / 2 y2 = extents[3] + add_height / 2 return x1, y1, x2, y2
[ "def", "extend_extents", "(", "extents", ",", "factor", "=", "1.1", ")", ":", "width", "=", "extents", "[", "2", "]", "-", "extents", "[", "0", "]", "height", "=", "extents", "[", "3", "]", "-", "extents", "[", "1", "]", "add_width", "=", "(", "factor", "-", "1", ")", "*", "width", "add_height", "=", "(", "factor", "-", "1", ")", "*", "height", "x1", "=", "extents", "[", "0", "]", "-", "add_width", "/", "2", "x2", "=", "extents", "[", "2", "]", "+", "add_width", "/", "2", "y1", "=", "extents", "[", "1", "]", "-", "add_height", "/", "2", "y2", "=", "extents", "[", "3", "]", "+", "add_height", "/", "2", "return", "x1", ",", "y1", ",", "x2", ",", "y2" ]
34.277778
0.003155
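Two worked calls for the function above, spelling out the central stretch:

print(extend_extents((0, 0, 10, 20)))        # factor 1.1 -> (-0.5, -1.0, 10.5, 21.0)
print(extend_extents((0, 0, 10, 20), 2.0))   # factor 2.0 -> (-5.0, -10.0, 15.0, 30.0)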
def csv_to_map(fields, delimiter=','):
    """
    Convert csv to dict

    :param delimiter: field delimiter used when parsing a csv line
    :param fields: ordered list of field names to use as dict keys
    :return: an applicable function mapping a csv line to a (dict, error) tuple
    """

    def _csv_to_list(csv_input):
        """
        Util function to overcome the use of files by in-memory io buffer

        :param csv_input:
        :return:
        """
        io_file = io.StringIO(csv_input)
        return next(csv.reader(io_file, delimiter=delimiter))

    def _app(current_tuple, e=None):
        if current_tuple is None or len(current_tuple) == 0:
            return None, "no input"
        csv_list = _csv_to_list(current_tuple)
        if len(csv_list) != len(fields):
            e = {"input": "unexpected number of fields {} obtained {} expected".format(len(csv_list), len(fields))}
            return None, e
        return {k: v for (k, v) in zip(fields, csv_list)}, e

    if fields is None or len(fields) == 0:
        return fixed_input(None, "no fields provided, cannot proceed without order")

    return _app
[ "def", "csv_to_map", "(", "fields", ",", "delimiter", "=", "','", ")", ":", "def", "_csv_to_list", "(", "csv_input", ")", ":", "\"\"\"\n Util function to overcome the use of files by in-memory io buffer\n\n :param csv_input:\n :return:\n \"\"\"", "io_file", "=", "io", ".", "StringIO", "(", "csv_input", ")", "return", "next", "(", "csv", ".", "reader", "(", "io_file", ",", "delimiter", "=", "delimiter", ")", ")", "def", "_app", "(", "current_tuple", ",", "e", "=", "None", ")", ":", "if", "current_tuple", "is", "None", "or", "len", "(", "current_tuple", ")", "==", "0", ":", "return", "None", ",", "\"no input\"", "csv_list", "=", "_csv_to_list", "(", "current_tuple", ")", "if", "len", "(", "csv_list", ")", "!=", "len", "(", "fields", ")", ":", "e", "=", "{", "\"input\"", ":", "\"unexpected number of fields {} obtained {} expected\"", ".", "format", "(", "len", "(", "csv_list", ")", ",", "len", "(", "fields", ")", ")", "}", "return", "None", ",", "e", "return", "{", "k", ":", "v", "for", "(", "k", ",", "v", ")", "in", "zip", "(", "fields", ",", "csv_list", ")", "}", ",", "e", "if", "fields", "is", "None", "or", "len", "(", "fields", ")", "==", "0", ":", "return", "fixed_input", "(", "None", ",", "\"no fields provided, cannot proceed without order\"", ")", "return", "_app" ]
31.8
0.003052
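How the returned mapper behaves, assuming `io`, `csv`, and the module's `fixed_input` helper are in scope alongside `csv_to_map`:

    mapper = csv_to_map(["name", "age"])
    print(mapper("alice,30"))  # ({'name': 'alice', 'age': '30'}, None)
    print(mapper("alice"))     # (None, {'input': 'unexpected number of fields 1 obtained 2 expected'})
    print(mapper(""))          # (None, 'no input')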
def on_change_plot_cursor(self,event): """ If the mouse is over a selectable data point, change the cursor shape @param: event -> the wx MouseEvent for that motion """ if not self.xdata or not self.ydata: return pos=event.GetPosition() width, height = self.canvas.get_width_height() pos[1] = height - pos[1] xpick_data,ypick_data = pos xdata_org = self.xdata ydata_org = self.ydata data_corrected = self.map.transData.transform(vstack([xdata_org,ydata_org]).T) xdata,ydata = data_corrected.T xdata = list(map(float,xdata)) ydata = list(map(float,ydata)) e = 4e0 if self.plot_setting == "Zoom": self.canvas.SetCursor(wx.Cursor(wx.CURSOR_CROSS)) else: self.canvas.SetCursor(wx.Cursor(wx.CURSOR_ARROW)) for i,(x,y) in enumerate(zip(xdata,ydata)): if 0 < sqrt((x-xpick_data)**2. + (y-ypick_data)**2.) < e: self.canvas.SetCursor(wx.Cursor(wx.CURSOR_HAND)) break event.Skip()
[ "def", "on_change_plot_cursor", "(", "self", ",", "event", ")", ":", "if", "not", "self", ".", "xdata", "or", "not", "self", ".", "ydata", ":", "return", "pos", "=", "event", ".", "GetPosition", "(", ")", "width", ",", "height", "=", "self", ".", "canvas", ".", "get_width_height", "(", ")", "pos", "[", "1", "]", "=", "height", "-", "pos", "[", "1", "]", "xpick_data", ",", "ypick_data", "=", "pos", "xdata_org", "=", "self", ".", "xdata", "ydata_org", "=", "self", ".", "ydata", "data_corrected", "=", "self", ".", "map", ".", "transData", ".", "transform", "(", "vstack", "(", "[", "xdata_org", ",", "ydata_org", "]", ")", ".", "T", ")", "xdata", ",", "ydata", "=", "data_corrected", ".", "T", "xdata", "=", "list", "(", "map", "(", "float", ",", "xdata", ")", ")", "ydata", "=", "list", "(", "map", "(", "float", ",", "ydata", ")", ")", "e", "=", "4e0", "if", "self", ".", "plot_setting", "==", "\"Zoom\"", ":", "self", ".", "canvas", ".", "SetCursor", "(", "wx", ".", "Cursor", "(", "wx", ".", "CURSOR_CROSS", ")", ")", "else", ":", "self", ".", "canvas", ".", "SetCursor", "(", "wx", ".", "Cursor", "(", "wx", ".", "CURSOR_ARROW", ")", ")", "for", "i", ",", "(", "x", ",", "y", ")", "in", "enumerate", "(", "zip", "(", "xdata", ",", "ydata", ")", ")", ":", "if", "0", "<", "sqrt", "(", "(", "x", "-", "xpick_data", ")", "**", "2.", "+", "(", "y", "-", "ypick_data", ")", "**", "2.", ")", "<", "e", ":", "self", ".", "canvas", ".", "SetCursor", "(", "wx", ".", "Cursor", "(", "wx", ".", "CURSOR_HAND", ")", ")", "break", "event", ".", "Skip", "(", ")" ]
40.037037
0.01355
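The hover detection above reduces to a Euclidean hit test against a pixel threshold; a minimal standalone version of that core, with no wx dependency:

    import math

    def near_point(px, py, x, y, eps=4.0):
        # True when (x, y) lies within eps pixels of the pointer;
        # an exact distance of 0 is excluded, matching the handler above
        return 0 < math.sqrt((x - px) ** 2 + (y - py) ** 2) < eps

    print(near_point(100, 100, 102, 101))  # True: distance ~2.24 px
    print(near_point(100, 100, 110, 110))  # False: distance ~14.1 px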
def get_relationship_search_session(self, proxy=None): """Gets the ``OsidSession`` associated with the relationship search service. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.relationship.RelationshipSearchSession) - a ``RelationshipSearchSession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_relationship_search()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_relationship_search()`` is ``true``.* """ if not self.supports_relationship_search(): raise Unimplemented() try: from . import sessions except ImportError: raise OperationFailed() proxy = self._convert_proxy(proxy) try: session = sessions.RelationshipSearchSession(proxy=proxy, runtime=self._runtime) except AttributeError: raise OperationFailed() return session
[ "def", "get_relationship_search_session", "(", "self", ",", "proxy", "=", "None", ")", ":", "if", "not", "self", ".", "supports_relationship_search", "(", ")", ":", "raise", "Unimplemented", "(", ")", "try", ":", "from", ".", "import", "sessions", "except", "ImportError", ":", "raise", "OperationFailed", "(", ")", "proxy", "=", "self", ".", "_convert_proxy", "(", "proxy", ")", "try", ":", "session", "=", "sessions", ".", "RelationshipSearchSession", "(", "proxy", "=", "proxy", ",", "runtime", "=", "self", ".", "_runtime", ")", "except", "AttributeError", ":", "raise", "OperationFailed", "(", ")", "return", "session" ]
42.730769
0.003521
def old_streamer(main_method): """Open a stream for the first file in arguments, or stdin""" if not arguments: return [sys.stdin] elif arguments[0] == '-c': return [StringIO(get_clipboard_data())] for argument in arguments: if os.path.isfile(argument): return [open(argument, 'r')] return main_method
[ "def", "old_streamer", "(", "main_method", ")", ":", "if", "not", "arguments", ":", "return", "[", "sys", ".", "stdin", "]", "elif", "arguments", "[", "0", "]", "==", "'-c'", ":", "return", "[", "StringIO", "(", "get_clipboard_data", "(", ")", ")", "]", "for", "argument", "in", "arguments", ":", "if", "os", ".", "path", ".", "isfile", "(", "argument", ")", ":", "return", "file", "(", "argument", ",", "'r'", ")", "return", "method" ]
34
0.002865
def fetch(self, tickers, fields=None, date=None, date_from=None, date_to=None, freq='D', only_data=True, static=False): """Fetch data from TR DWE. tickers - ticker or list of tickers fields - list of fields. date - date for a single-date query date_from, date_to - date range (used only if "date" is not specified) freq - frequency of data: daily('D'), weekly('W') or monthly('M') only_data - if True then metadata will not be returned static - if True "static" request is created (i.e. not a series). In this case 'date_from', 'date_to' and 'freq' are ignored In case list of tickers is requested, a MultiIndex-dataframe is returned. Some of available fields: P - adjusted closing price PO - opening price PH - high price PL - low price VO - volume, which is expressed in 1000's of shares. UP - unadjusted price OI - open interest MV - market value EPS - earnings per share DI - dividend index MTVB - market to book value PTVB - price to book value ... The full list of data fields is available at http://dtg.tfn.com/. """ if static: query = self.construct_request(tickers, fields, date, freq='REP') else: query = self.construct_request(tickers, fields, date, date_from, date_to, freq) raw = self.request(query) if static: data, metadata = self.parse_record_static(raw) elif isinstance(tickers, basestring) or len(tickers) == 1: data, metadata = self.parse_record(raw) elif hasattr(tickers, '__len__'): metadata = pd.DataFrame() data = {} for indx in range(len(tickers)): dat, meta = self.parse_record(raw, indx) data[tickers[indx]] = dat metadata = metadata.append(meta, ignore_index=False) data = pd.concat(data) else: raise DatastreamException(('First argument should be either ticker or ' 'list of tickers')) if only_data: return data else: return data, metadata
[ "def", "fetch", "(", "self", ",", "tickers", ",", "fields", "=", "None", ",", "date", "=", "None", ",", "date_from", "=", "None", ",", "date_to", "=", "None", ",", "freq", "=", "'D'", ",", "only_data", "=", "True", ",", "static", "=", "False", ")", ":", "if", "static", ":", "query", "=", "self", ".", "construct_request", "(", "tickers", ",", "fields", ",", "date", ",", "freq", "=", "'REP'", ")", "else", ":", "query", "=", "self", ".", "construct_request", "(", "tickers", ",", "fields", ",", "date", ",", "date_from", ",", "date_to", ",", "freq", ")", "raw", "=", "self", ".", "request", "(", "query", ")", "if", "static", ":", "data", ",", "metadata", "=", "self", ".", "parse_record_static", "(", "raw", ")", "elif", "isinstance", "(", "tickers", ",", "basestring", ")", "or", "len", "(", "tickers", ")", "==", "1", ":", "data", ",", "metadata", "=", "self", ".", "parse_record", "(", "raw", ")", "elif", "hasattr", "(", "tickers", ",", "'__len__'", ")", ":", "metadata", "=", "pd", ".", "DataFrame", "(", ")", "data", "=", "{", "}", "for", "indx", "in", "range", "(", "len", "(", "tickers", ")", ")", ":", "dat", ",", "meta", "=", "self", ".", "parse_record", "(", "raw", ",", "indx", ")", "data", "[", "tickers", "[", "indx", "]", "]", "=", "dat", "metadata", "=", "metadata", ".", "append", "(", "meta", ",", "ignore_index", "=", "False", ")", "data", "=", "pd", ".", "concat", "(", "data", ")", "else", ":", "raise", "DatastreamException", "(", "(", "'First argument should be either ticker or '", "'list of tickers'", ")", ")", "if", "only_data", ":", "return", "data", "else", ":", "return", "data", ",", "metadata" ]
38.032787
0.002941
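Illustrative call shapes for `fetch` (the client construction and ticker symbols below are hypothetical, and no request is actually made here):

    # ds = Datastream(username='...', password='...')            # assumed client class
    # panel = ds.fetch(['@AAPL', 'U:MMM'], fields=['P', 'MV'],   # MultiIndex frame for several tickers
    #                  date_from='2020-01-01', date_to='2020-12-31', freq='D')
    # snapshot = ds.fetch('@AAPL', fields=['NAME', 'ISIN'], static=True)  # one-off static request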
def storage(self, sf=None, args=None): """Common storage interface with :class:`~flask_pluginkit.LocalStorage` or :class:`~flask_pluginkit.RedisStorage`; sf is a custom storage interface class, args is its parameters, and these take the highest priority. :param sf: class based :class:`~flask_pluginkit.BaseStorage` :param args: class init args :returns: class instance """ from .utils import BaseStorage, LocalStorage, RedisStorage if sf and isinstance(sf, BaseStorage): return sf(args) if args else sf() if self.s3 == "local": return LocalStorage() elif self.s3 == "redis": return RedisStorage(self.s3_redis)
[ "def", "storage", "(", "self", ",", "sf", "=", "None", ",", "args", "=", "None", ")", ":", "from", ".", "utils", "import", "BaseStorage", ",", "LocalStorage", ",", "RedisStorage", "if", "sf", "and", "isinstance", "(", "sf", ",", "BaseStorage", ")", ":", "return", "sf", "(", "args", ")", "if", "args", "else", "sf", "(", ")", "if", "self", ".", "s3", "==", "\"local\"", ":", "return", "LocalStorage", "(", ")", "elif", "self", ".", "s3", "==", "\"redis\"", ":", "return", "RedisStorage", "(", "self", ".", "s3_redis", ")" ]
40.941176
0.005618
def get_dict_for_class(self, class_name, state=None, base_name='View'): """The style dict for a given class and state. This collects the style attributes from parent classes and the class of the given object and gives precedence to values thereof to the children. The state attribute of the view instance is taken as the current state if state is None. If the state is not 'normal' then the style definitions for the 'normal' state are mixed-in from the given state style definitions, giving precedence to the non-'normal' style definitions. """ classes = [] klass = class_name while True: classes.append(klass) if klass.__name__ == base_name: break klass = klass.__bases__[0] if state is None: state = 'normal' style = {} for klass in classes: class_name = klass.__name__ try: state_styles = self._styles[class_name][state] except KeyError: state_styles = {} if state != 'normal': try: normal_styles = self._styles[class_name]['normal'] except KeyError: normal_styles = {} state_styles = dict(chain(normal_styles.iteritems(), state_styles.iteritems())) style = dict(chain(state_styles.iteritems(), style.iteritems())) return style
[ "def", "get_dict_for_class", "(", "self", ",", "class_name", ",", "state", "=", "None", ",", "base_name", "=", "'View'", ")", ":", "classes", "=", "[", "]", "klass", "=", "class_name", "while", "True", ":", "classes", ".", "append", "(", "klass", ")", "if", "klass", ".", "__name__", "==", "base_name", ":", "break", "klass", "=", "klass", ".", "__bases__", "[", "0", "]", "if", "state", "is", "None", ":", "state", "=", "'normal'", "style", "=", "{", "}", "for", "klass", "in", "classes", ":", "class_name", "=", "klass", ".", "__name__", "try", ":", "state_styles", "=", "self", ".", "_styles", "[", "class_name", "]", "[", "state", "]", "except", "KeyError", ":", "state_styles", "=", "{", "}", "if", "state", "!=", "'normal'", ":", "try", ":", "normal_styles", "=", "self", ".", "_styles", "[", "class_name", "]", "[", "'normal'", "]", "except", "KeyError", ":", "normal_styles", "=", "{", "}", "state_styles", "=", "dict", "(", "chain", "(", "normal_styles", ".", "iteritems", "(", ")", ",", "state_styles", ".", "iteritems", "(", ")", ")", ")", "style", "=", "dict", "(", "chain", "(", "state_styles", ".", "iteritems", "(", ")", ",", "style", ".", "iteritems", "(", ")", ")", ")", "return", "style" ]
30.490196
0.001246
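The precedence rule in `get_dict_for_class` ("children override parents, state overrides normal") falls out of the fact that later pairs win in `dict(chain(...))`; the core trick in Python 3 syntax (the original uses Python 2's `iteritems`):

    from itertools import chain

    normal = {'color': 'black', 'size': 10}
    hover = {'color': 'red'}
    # Pairs from `hover` come last, so they overwrite duplicate keys from `normal`:
    merged = dict(chain(normal.items(), hover.items()))
    print(merged)  # {'color': 'red', 'size': 10}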
def exists(path, **kwargs): """Check if file or directory exists""" import os.path return os.path.exists(path, **kwargs)
[ "def", "exists", "(", "path", ",", "*", "*", "kwargs", ")", ":", "import", "os", ".", "path", "return", "os", ".", "path", ".", "exists", "(", "path", ",", "*", "*", "kwargs", ")" ]
32.25
0.007576
def _env_is_exposed(env): ''' Check if an environment is exposed by comparing it against a whitelist and blacklist. ''' if __opts__['svnfs_env_whitelist']: salt.utils.versions.warn_until( 'Neon', 'The svnfs_env_whitelist config option has been renamed to ' 'svnfs_saltenv_whitelist. Please update your configuration.' ) whitelist = __opts__['svnfs_env_whitelist'] else: whitelist = __opts__['svnfs_saltenv_whitelist'] if __opts__['svnfs_env_blacklist']: salt.utils.versions.warn_until( 'Neon', 'The svnfs_env_blacklist config option has been renamed to ' 'svnfs_saltenv_blacklist. Please update your configuration.' ) blacklist = __opts__['svnfs_env_blacklist'] else: blacklist = __opts__['svnfs_saltenv_blacklist'] return salt.utils.stringutils.check_whitelist_blacklist( env, whitelist=whitelist, blacklist=blacklist, )
[ "def", "_env_is_exposed", "(", "env", ")", ":", "if", "__opts__", "[", "'svnfs_env_whitelist'", "]", ":", "salt", ".", "utils", ".", "versions", ".", "warn_until", "(", "'Neon'", ",", "'The svnfs_env_whitelist config option has been renamed to '", "'svnfs_saltenv_whitelist. Please update your configuration.'", ")", "whitelist", "=", "__opts__", "[", "'svnfs_env_whitelist'", "]", "else", ":", "whitelist", "=", "__opts__", "[", "'svnfs_saltenv_whitelist'", "]", "if", "__opts__", "[", "'svnfs_env_blacklist'", "]", ":", "salt", ".", "utils", ".", "versions", ".", "warn_until", "(", "'Neon'", ",", "'The svnfs_env_blacklist config option has been renamed to '", "'svnfs_saltenv_blacklist. Please update your configuration.'", ")", "blacklist", "=", "__opts__", "[", "'svnfs_env_blacklist'", "]", "else", ":", "blacklist", "=", "__opts__", "[", "'svnfs_saltenv_blacklist'", "]", "return", "salt", ".", "utils", ".", "stringutils", ".", "check_whitelist_blacklist", "(", "env", ",", "whitelist", "=", "whitelist", ",", "blacklist", "=", "blacklist", ",", ")" ]
33.133333
0.000978
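`salt.utils.stringutils.check_whitelist_blacklist` is a Salt internal; a minimal standalone sketch of the semantics assumed here (glob-style patterns, simplified: a blacklist match rejects, and if a whitelist exists the value must match it):

    import fnmatch

    def check_whitelist_blacklist(value, whitelist=None, blacklist=None):
        # Reject anything the blacklist matches
        if blacklist and any(fnmatch.fnmatch(value, p) for p in blacklist):
            return False
        # If a whitelist is given, the value must match one of its patterns
        if whitelist:
            return any(fnmatch.fnmatch(value, p) for p in whitelist)
        return True

    print(check_whitelist_blacklist('dev', whitelist=['dev*']))  # True
    print(check_whitelist_blacklist('dev', blacklist=['d*']))    # False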
def finish(self): """ Restore the original SIGINT handler after finishing. This should happen regardless of whether the progress display finishes normally, or gets interrupted. """ super(InterruptibleMixin, self).finish() signal(SIGINT, self.original_handler)
[ "def", "finish", "(", "self", ")", ":", "super", "(", "InterruptibleMixin", ",", "self", ")", ".", "finish", "(", ")", "signal", "(", "SIGINT", ",", "self", ".", "original_handler", ")" ]
34.222222
0.006329
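The lifecycle being completed here is install-then-restore of the SIGINT handler; a self-contained sketch of that pattern:

    from signal import SIGINT, getsignal, signal

    def noisy_handler(signum, frame):
        print('interrupted')

    original_handler = getsignal(SIGINT)   # saved up front, as the mixin presumably does on init
    signal(SIGINT, noisy_handler)
    try:
        pass  # progress display runs here
    finally:
        signal(SIGINT, original_handler)   # restored on finish, matching the method above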
def type_object_attrgetter(obj, attr, *defargs): """ This implements an improved attrgetter for type objects (i.e. classes) that can handle class attributes that are implemented as properties on a metaclass. Normally `getattr` on a class with a `property` (say, "foo"), would return the `property` object itself. However, if the class has a metaclass which *also* defines a `property` named "foo", ``getattr(cls, 'foo')`` will find the "foo" property on the metaclass and resolve it. For the purposes of autodoc we just want to document the "foo" property defined on the class, not on the metaclass. For example:: >>> class Meta(type): ... @property ... def foo(cls): ... return 'foo' ... >>> class MyClass(metaclass=Meta): ... @property ... def foo(self): ... \"\"\"Docstring for MyClass.foo property.\"\"\" ... return 'myfoo' ... >>> getattr(MyClass, 'foo') 'foo' >>> type_object_attrgetter(MyClass, 'foo') <property at 0x...> >>> type_object_attrgetter(MyClass, 'foo').__doc__ 'Docstring for MyClass.foo property.' The last line of the example shows the desired behavior for the purposes of autodoc. """ for base in obj.__mro__: if attr in base.__dict__: if isinstance(base.__dict__[attr], property): # Note, this should only be used for properties--for any other # type of descriptor (classmethod, for example) this can mess # up existing expectations of what getattr(cls, ...) returns return base.__dict__[attr] break return getattr(obj, attr, *defargs)
[ "def", "type_object_attrgetter", "(", "obj", ",", "attr", ",", "*", "defargs", ")", ":", "for", "base", "in", "obj", ".", "__mro__", ":", "if", "attr", "in", "base", ".", "__dict__", ":", "if", "isinstance", "(", "base", ".", "__dict__", "[", "attr", "]", ",", "property", ")", ":", "# Note, this should only be used for properties--for any other", "# type of descriptor (classmethod, for example) this can mess", "# up existing expectations of what getattr(cls, ...) returns", "return", "base", ".", "__dict__", "[", "attr", "]", "break", "return", "getattr", "(", "obj", ",", "attr", ",", "*", "defargs", ")" ]
37.361702
0.000555
def doubleMouseButtonLeftWithMods(self, coord, modifiers): """Click the left mouse button with modifiers pressed. Parameters: coordinates to click; modifiers (list) Returns: None """ modFlags = self._pressModifiers(modifiers) self._queueMouseButton(coord, Quartz.kCGMouseButtonLeft, modFlags) self._queueMouseButton(coord, Quartz.kCGMouseButtonLeft, modFlags, clickCount=2) self._releaseModifiers(modifiers) self._postQueuedEvents()
[ "def", "doubleMouseButtonLeftWithMods", "(", "self", ",", "coord", ",", "modifiers", ")", ":", "modFlags", "=", "self", ".", "_pressModifiers", "(", "modifiers", ")", "self", ".", "_queueMouseButton", "(", "coord", ",", "Quartz", ".", "kCGMouseButtonLeft", ",", "modFlags", ")", "self", ".", "_queueMouseButton", "(", "coord", ",", "Quartz", ".", "kCGMouseButtonLeft", ",", "modFlags", ",", "clickCount", "=", "2", ")", "self", ".", "_releaseModifiers", "(", "modifiers", ")", "self", ".", "_postQueuedEvents", "(", ")" ]
43.75
0.003731
def collate_data(in_dir, extension='.csv', out_dir=None): """ Copy all csvs in nested directory to single directory. Function to copy all csvs from a directory, and place them in a new directory. Parameters ---------- in_dir : str Input directory containing csv files in subfolders extension : str The extension that identifies your data files. Defaults to '.csv'. out_dir : str Destination directory Returns ------- None """ if out_dir is None: out_dir = './' + re.search('^\.(.*)', extension).groups(0)[0] if not os.path.isdir(out_dir): os.mkdir(out_dir) for p, d, fs in os.walk(in_dir): for f in fs: if extension in f: shutil.copy(p + '/' + f, out_dir + '/' + f) return
[ "def", "collate_data", "(", "in_dir", ",", "extension", "=", "'.csv'", ",", "out_dir", "=", "None", ")", ":", "if", "out_dir", "is", "None", ":", "out_dir", "=", "'./'", "+", "re", ".", "search", "(", "'^\\.(.*)'", ",", "extension", ")", ".", "groups", "(", "0", ")", "[", "0", "]", "if", "not", "os", ".", "path", ".", "isdir", "(", "out_dir", ")", ":", "os", ".", "mkdir", "(", "out_dir", ")", "for", "p", ",", "d", ",", "fs", "in", "os", ".", "walk", "(", "in_dir", ")", ":", "for", "f", "in", "fs", ":", "if", "extension", "in", "f", ":", "shutil", ".", "copy", "(", "p", "+", "'/'", "+", "f", ",", "out_dir", "+", "'/'", "+", "f", ")", "return" ]
24.9375
0.002413
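A throwaway demonstration using temporary directories (assumes `re`, `os`, and `shutil` are imported by the surrounding module, as the function requires):

    import os, tempfile

    src = tempfile.mkdtemp()
    dst = tempfile.mkdtemp()
    os.makedirs(os.path.join(src, 'run1'))
    open(os.path.join(src, 'run1', 'sample.csv'), 'w').close()
    collate_data(src, extension='.csv', out_dir=dst)
    print(os.listdir(dst))  # ['sample.csv']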
def setContentLen(self, content, len): """Replace the content of a node. NOTE: @content is supposed to be a piece of XML CDATA, so it allows entity references, but XML special chars need to be escaped first by using xmlEncodeEntitiesReentrant() resp. xmlEncodeSpecialChars(). """ libxml2mod.xmlNodeSetContentLen(self._o, content, len)
[ "def", "setContentLen", "(", "self", ",", "content", ",", "len", ")", ":", "libxml2mod", ".", "xmlNodeSetContentLen", "(", "self", ".", "_o", ",", "content", ",", "len", ")" ]
62.666667
0.005249
def _serialize( self, array_parent, # type: ET.Element value, # type: List state # type: _ProcessorState ): # type: (...) -> None """Serialize the array value and add it to the array parent element.""" if not value: # Nothing to do. Avoid attempting to iterate over a possibly # None value. return for i, item_value in enumerate(value): state.push_location(self._item_processor.element_path, i) item_element = self._item_processor.serialize(item_value, state) array_parent.append(item_element) state.pop_location()
[ "def", "_serialize", "(", "self", ",", "array_parent", ",", "# type: ET.Element", "value", ",", "# type: List", "state", "# type: _ProcessorState", ")", ":", "# type: (...) -> None", "if", "not", "value", ":", "# Nothing to do. Avoid attempting to iterate over a possibly", "# None value.", "return", "for", "i", ",", "item_value", "in", "enumerate", "(", "value", ")", ":", "state", ".", "push_location", "(", "self", ".", "_item_processor", ".", "element_path", ",", "i", ")", "item_element", "=", "self", ".", "_item_processor", ".", "serialize", "(", "item_value", ",", "state", ")", "array_parent", ".", "append", "(", "item_element", ")", "state", ".", "pop_location", "(", ")" ]
37.166667
0.005831
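The same append-items-to-parent shape as a standalone ElementTree sketch (the processor/state machinery is specific to the surrounding library and is omitted):

    import xml.etree.ElementTree as ET

    array_parent = ET.Element('values')
    for i, item_value in enumerate(['a', 'b', 'c']):
        item = ET.SubElement(array_parent, 'value')  # one child element per array item
        item.text = item_value
    print(ET.tostring(array_parent).decode())
    # <values><value>a</value><value>b</value><value>c</value></values>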
def statement_prep(link): ''' Prepare a statement into a triple ready for rdflib ''' from rdflib import URIRef, Literal from rdflib import BNode s, p, o = link[:3] if not isinstance(s, BNode): s = URIRef(s) p = URIRef(p) if not isinstance(o, BNode): o = URIRef(o) if isinstance(o, I) else Literal(o) return s, p, o
[ "def", "statement_prep", "(", "link", ")", ":", "from", "rdflib", "import", "URIRef", ",", "Literal", "from", "rdflib", "import", "BNode", "s", ",", "p", ",", "o", "=", "link", "[", ":", "3", "]", "if", "not", "isinstance", "(", "s", ",", "BNode", ")", ":", "s", "=", "URIRef", "(", "s", ")", "p", "=", "URIRef", "(", "p", ")", "if", "not", "isinstance", "(", "o", ",", "BNode", ")", ":", "o", "=", "URIRef", "(", "o", ")", "if", "isinstance", "(", "o", ",", "I", ")", "else", "Literal", "(", "o", ")", "return", "s", ",", "p", ",", "o" ]
31.272727
0.011299
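The target shape after preparation is a plain rdflib triple; for reference, a prepared (s, p, o) tuple can be added directly to a graph:

    from rdflib import Graph, Literal, URIRef

    g = Graph()
    s = URIRef('http://example.org/book/1')
    p = URIRef('http://purl.org/dc/terms/title')
    o = Literal('An Example Title')
    g.add((s, p, o))   # statement_prep above produces exactly this (s, p, o) form
    print(len(g))      # 1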
def _get_all(self, *args, **kwargs): """If 'force' is in the headers, retrieve the list of keys from S3. Otherwise, use the list() function to retrieve the keys from MimicDB. """ headers = kwargs.get('headers', args[2] if len(args) > 2 else None) or dict() if 'force' in headers: keys = super(Bucket, self)._get_all(*args, **kwargs) for key in keys: mimicdb.backend.sadd(tpl.bucket % self.name, key.name) mimicdb.backend.hmset(tpl.key % (self.name, key.name), dict(size=key.size, md5=key.etag.strip('"'))) key.name = key.name return keys prefix = kwargs.get('prefix', '') return list(self.list(prefix=prefix))
[ "def", "_get_all", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "headers", "=", "kwargs", ".", "get", "(", "'headers'", ",", "args", "[", "2", "]", "if", "len", "(", "args", ")", ">", "2", "else", "None", ")", "or", "dict", "(", ")", "if", "'force'", "in", "headers", ":", "keys", "=", "super", "(", "Bucket", ",", "self", ")", ".", "_get_all", "(", "*", "args", ",", "*", "*", "kwargs", ")", "for", "key", "in", "keys", ":", "mimicdb", ".", "backend", ".", "sadd", "(", "tpl", ".", "bucket", "%", "self", ".", "name", ",", "key", ".", "name", ")", "mimicdb", ".", "backend", ".", "hmset", "(", "tpl", ".", "key", "%", "(", "self", ".", "name", ",", "key", ".", "name", ")", ",", "dict", "(", "size", "=", "key", ".", "size", ",", "md5", "=", "key", ".", "etag", ".", "strip", "(", "'\"'", ")", ")", ")", "key", ".", "name", "=", "key", ".", "name", "return", "keys", "prefix", "=", "kwargs", ".", "get", "(", "'prefix'", ",", "''", ")", "return", "list", "(", "self", ".", "list", "(", "prefix", "=", "prefix", ")", ")" ]
36.8
0.005298
def build_additional_match(self, ident, node_set): """ handle additional matches supplied by 'has()' calls """ source_ident = ident for key, value in node_set.must_match.items(): if isinstance(value, dict): label = ':' + value['node_class'].__label__ stmt = _rel_helper(lhs=source_ident, rhs=label, ident='', **value) self._ast['where'].append(stmt) else: raise ValueError("Expecting dict got: " + repr(value)) for key, val in node_set.dont_match.items(): if isinstance(val, dict): label = ':' + val['node_class'].__label__ stmt = _rel_helper(lhs=source_ident, rhs=label, ident='', **val) self._ast['where'].append('NOT ' + stmt) else: raise ValueError("Expecting dict got: " + repr(val))
[ "def", "build_additional_match", "(", "self", ",", "ident", ",", "node_set", ")", ":", "source_ident", "=", "ident", "for", "key", ",", "value", "in", "node_set", ".", "must_match", ".", "items", "(", ")", ":", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "label", "=", "':'", "+", "value", "[", "'node_class'", "]", ".", "__label__", "stmt", "=", "_rel_helper", "(", "lhs", "=", "source_ident", ",", "rhs", "=", "label", ",", "ident", "=", "''", ",", "*", "*", "value", ")", "self", ".", "_ast", "[", "'where'", "]", ".", "append", "(", "stmt", ")", "else", ":", "raise", "ValueError", "(", "\"Expecting dict got: \"", "+", "repr", "(", "value", ")", ")", "for", "key", ",", "val", "in", "node_set", ".", "dont_match", ".", "items", "(", ")", ":", "if", "isinstance", "(", "val", ",", "dict", ")", ":", "label", "=", "':'", "+", "val", "[", "'node_class'", "]", ".", "__label__", "stmt", "=", "_rel_helper", "(", "lhs", "=", "source_ident", ",", "rhs", "=", "label", ",", "ident", "=", "''", ",", "*", "*", "val", ")", "self", ".", "_ast", "[", "'where'", "]", ".", "append", "(", "'NOT '", "+", "stmt", ")", "else", ":", "raise", "ValueError", "(", "\"Expecting dict got: \"", "+", "repr", "(", "val", ")", ")" ]
42.761905
0.004357
def _insert_data(con, data): """ insert line for each cluster """ with con: cur = con.cursor() cur.execute("DROP TABLE IF EXISTS clusters;") cur.execute("CREATE TABLE clusters(Id INT, Description TEXT, Locus TEXT, Annotation TEXT, Sequences TEXT, Profile TXT, Precursor TXT)") for c in data[0]: locus = json.dumps(data[0][c]['loci']) annotation = json.dumps(data[0][c]['ann']) description = _get_description(data[0][c]['ann']) sequences = json.dumps(_get_sequences(data[0][c])) keys = data[0][c]['freq'][0].values()[0].keys() profile = "Not available." if 'profile' in data[0][c]: profile = json.dumps(_set_format(data[0][c]['profile'])) precursor = json.dumps(data[0][c].get('precursor')) cur.execute("INSERT INTO clusters VALUES(%s, '%s', '%s', '%s', '%s', '%s', '%s')" % (c, description, locus, annotation, sequences, profile, precursor))
[ "def", "_insert_data", "(", "con", ",", "data", ")", ":", "with", "con", ":", "cur", "=", "con", ".", "cursor", "(", ")", "cur", ".", "execute", "(", "\"DROP TABLE IF EXISTS clusters;\"", ")", "cur", ".", "execute", "(", "\"CREATE TABLE clusters(Id INT, Description TEXT, Locus TEXT, Annotation TEXT, Sequences TEXT, Profile TXT, Precursor TXT)\"", ")", "for", "c", "in", "data", "[", "0", "]", ":", "locus", "=", "json", ".", "dumps", "(", "data", "[", "0", "]", "[", "c", "]", "[", "'loci'", "]", ")", "annotation", "=", "json", ".", "dumps", "(", "data", "[", "0", "]", "[", "c", "]", "[", "'ann'", "]", ")", "description", "=", "_get_description", "(", "data", "[", "0", "]", "[", "c", "]", "[", "'ann'", "]", ")", "sequences", "=", "json", ".", "dumps", "(", "_get_sequences", "(", "data", "[", "0", "]", "[", "c", "]", ")", ")", "keys", "=", "data", "[", "0", "]", "[", "c", "]", "[", "'freq'", "]", "[", "0", "]", ".", "values", "(", ")", "[", "0", "]", ".", "keys", "(", ")", "profile", "=", "\"Not available.\"", "if", "'profile'", "in", "data", "[", "0", "]", "[", "c", "]", ":", "profile", "=", "json", ".", "dumps", "(", "_set_format", "(", "data", "[", "0", "]", "[", "c", "]", "[", "'profile'", "]", ")", ")", "precursor", "=", "json", ".", "dumps", "(", "data", "[", "0", "]", "[", "c", "]", ".", "get", "(", "'precursor'", ")", ")", "cur", ".", "execute", "(", "\"INSERT INTO clusters VALUES(%s, '%s', '%s', '%s', '%s', '%s', '%s')\"", "%", "(", "c", ",", "description", ",", "locus", ",", "annotation", ",", "sequences", ",", "profile", ",", "precursor", ")", ")" ]
52.368421
0.002962
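The function above interpolates values straight into the SQL string; for contrast, the same insert pattern with sqlite3 placeholders (a standalone sketch, not the module's actual backend):

    import json
    import sqlite3

    con = sqlite3.connect(':memory:')
    with con:
        cur = con.cursor()
        cur.execute("CREATE TABLE clusters(Id INT, Description TEXT, Annotation TEXT)")
        # Placeholders let the driver escape values instead of string formatting
        cur.execute("INSERT INTO clusters VALUES(?, ?, ?)",
                    (1, 'novel cluster', json.dumps({'miRNA': ['mir-1']})))
    print(con.execute("SELECT * FROM clusters").fetchall())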
def cns_vwl_pstr_long(self): """ Return a new IPAString, containing only: 1. the consonants, 2. the vowels, 3. the primary stress diacritics, and 4. the long suprasegmentals in the current string. :rtype: IPAString """ return IPAString(ipa_chars=[c for c in self.ipa_chars if (c.is_letter) or (c.is_suprasegmental and (c.is_primary_stress or c.is_long))])
[ "def", "cns_vwl_pstr_long", "(", "self", ")", ":", "return", "IPAString", "(", "ipa_chars", "=", "[", "c", "for", "c", "in", "self", ".", "ipa_chars", "if", "(", "c", ".", "is_letter", ")", "or", "(", "c", ".", "is_suprasegmental", "and", "(", "c", ".", "is_primary_stress", "or", "c", ".", "is_long", ")", ")", "]", ")" ]
31.142857
0.008909
def file_find(self, load): ''' Convenience function for calls made using the LocalClient ''' path = load.get('path') if not path: return {'path': '', 'rel': ''} tgt_env = load.get('saltenv', 'base') return self.find_file(path, tgt_env)
[ "def", "file_find", "(", "self", ",", "load", ")", ":", "path", "=", "load", ".", "get", "(", "'path'", ")", "if", "not", "path", ":", "return", "{", "'path'", ":", "''", ",", "'rel'", ":", "''", "}", "tgt_env", "=", "load", ".", "get", "(", "'saltenv'", ",", "'base'", ")", "return", "self", ".", "find_file", "(", "path", ",", "tgt_env", ")" ]
31.4
0.006192
def add(config, username, filename): """Add user's SSH public key to their LDAP entry.""" try: client = Client() client.prepare_connection() user_api = UserApi(client) key_api = API(client) key_api.add(username, user_api, filename) except (ldap3.core.exceptions.LDAPNoSuchAttributeResult, ldap_tools.exceptions.InvalidResult, ldap3.core.exceptions.LDAPAttributeOrValueExistsResult ) as err: # pragma: no cover print('{}: {}'.format(type(err), err.args[0])) except Exception as err: # pragma: no cover raise err from None
[ "def", "add", "(", "config", ",", "username", ",", "filename", ")", ":", "try", ":", "client", "=", "Client", "(", ")", "client", ".", "prepare_connection", "(", ")", "user_api", "=", "UserApi", "(", "client", ")", "key_api", "=", "API", "(", "client", ")", "key_api", ".", "add", "(", "username", ",", "user_api", ",", "filename", ")", "except", "(", "ldap3", ".", "core", ".", "exceptions", ".", "LDAPNoSuchAttributeResult", ",", "ldap_tools", ".", "exceptions", ".", "InvalidResult", ",", "ldap3", ".", "core", ".", "exceptions", ".", "LDAPAttributeOrValueExistsResult", ")", "as", "err", ":", "# pragma: no cover", "print", "(", "'{}: {}'", ".", "format", "(", "type", "(", "err", ")", ",", "err", ".", "args", "[", "0", "]", ")", ")", "except", "Exception", "as", "err", ":", "# pragma: no cover", "raise", "err", "from", "None" ]
44.8
0.002915
def delete_operation(self, name, options=None): """ Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns ``google.rpc.Code.UNIMPLEMENTED``. Example: >>> from google.gapic.longrunning import operations_client >>> api = operations_client.OperationsClient() >>> name = '' >>> api.delete_operation(name) Args: name (string): The name of the operation resource to be deleted. options (:class:`google.gax.CallOptions`): Overrides the default settings for this call, e.g, timeout, retries etc. Raises: :exc:`google.gax.errors.GaxError` if the RPC is aborted. :exc:`ValueError` if the parameters are invalid. """ # Create the request object. request = operations_pb2.DeleteOperationRequest(name=name) self._delete_operation(request, options)
[ "def", "delete_operation", "(", "self", ",", "name", ",", "options", "=", "None", ")", ":", "# Create the request object.", "request", "=", "operations_pb2", ".", "DeleteOperationRequest", "(", "name", "=", "name", ")", "self", ".", "_delete_operation", "(", "request", ",", "options", ")" ]
42.24
0.002778
def _bfd_rx(self, **kwargs): """Return the BFD minimum receive interval XML. You should not use this method. You probably want `BGP.bfd`. Args: min_rx (str): BFD receive interval in milliseconds (300, 500, etc) delete (bool): Remove the configuration if ``True``. Returns: XML to be passed to the switch. Raises: None """ int_type = kwargs['int_type'] method_name = 'interface_%s_bfd_interval_min_rx' % int_type bfd_rx = getattr(self._interface, method_name) config = bfd_rx(**kwargs) if kwargs['delete']: tag = 'min-rx' config.find('.//*%s' % tag).set('operation', 'delete') return config
[ "def", "_bfd_rx", "(", "self", ",", "*", "*", "kwargs", ")", ":", "int_type", "=", "kwargs", "[", "'int_type'", "]", "method_name", "=", "'interface_%s_bfd_interval_min_rx'", "%", "int_type", "bfd_rx", "=", "getattr", "(", "self", ".", "_interface", ",", "method_name", ")", "config", "=", "bfd_rx", "(", "*", "*", "kwargs", ")", "if", "kwargs", "[", "'delete'", "]", ":", "tag", "=", "'min-rx'", "config", ".", "find", "(", "'.//*%s'", "%", "tag", ")", ".", "set", "(", "'operation'", ",", "'delete'", ")", "pass", "return", "config" ]
30.48
0.002545
def _get_meta(model): """Return metadata of a model. Model could be a real model or evaluated metadata.""" if isinstance(model, Model): w = model.meta else: w = model # Already metadata return w
[ "def", "_get_meta", "(", "model", ")", ":", "if", "isinstance", "(", "model", ",", "Model", ")", ":", "w", "=", "model", ".", "meta", "else", ":", "w", "=", "model", "# Already metadata", "return", "w" ]
28
0.004329
def _maybe_validate_distributions(distributions, dtype_override, validate_args): """Checks that `distributions` satisfies all assumptions.""" assertions = [] if not _is_iterable(distributions) or not distributions: raise ValueError('`distributions` must be a list of one or more ' 'distributions.') if dtype_override is None: dts = [ dtype_util.base_dtype(d.dtype) for d in distributions if d.dtype is not None ] if dts[1:] != dts[:-1]: raise TypeError('Distributions must have same dtype; found: {}.'.format( set(dtype_util.name(dt) for dt in dts))) # Validate event_ndims. for d in distributions: if tensorshape_util.rank(d.event_shape) is not None: if tensorshape_util.rank(d.event_shape) != 1: raise ValueError('`Distribution` must be vector variate, ' 'found event ndims: {}.'.format( tensorshape_util.rank(d.event_shape))) elif validate_args: assertions.append( assert_util.assert_equal( 1, tf.size(input=d.event_shape_tensor()), message='`Distribution` must be vector variate.')) batch_shapes = [d.batch_shape for d in distributions] if all(tensorshape_util.is_fully_defined(b) for b in batch_shapes): if batch_shapes[1:] != batch_shapes[:-1]: raise ValueError('Distributions must have the same `batch_shape`; ' 'found: {}.'.format(batch_shapes)) elif validate_args: batch_shapes = [ tensorshape_util.as_list(d.batch_shape) # pylint: disable=g-complex-comprehension if tensorshape_util.is_fully_defined(d.batch_shape) else d.batch_shape_tensor() for d in distributions ] assertions.extend( assert_util.assert_equal( # pylint: disable=g-complex-comprehension b1, b2, message='Distribution `batch_shape`s must be identical.') for b1, b2 in zip(batch_shapes[1:], batch_shapes[:-1])) return assertions
[ "def", "_maybe_validate_distributions", "(", "distributions", ",", "dtype_override", ",", "validate_args", ")", ":", "assertions", "=", "[", "]", "if", "not", "_is_iterable", "(", "distributions", ")", "or", "not", "distributions", ":", "raise", "ValueError", "(", "'`distributions` must be a list of one or more '", "'distributions.'", ")", "if", "dtype_override", "is", "None", ":", "dts", "=", "[", "dtype_util", ".", "base_dtype", "(", "d", ".", "dtype", ")", "for", "d", "in", "distributions", "if", "d", ".", "dtype", "is", "not", "None", "]", "if", "dts", "[", "1", ":", "]", "!=", "dts", "[", ":", "-", "1", "]", ":", "raise", "TypeError", "(", "'Distributions must have same dtype; found: {}.'", ".", "format", "(", "set", "(", "dtype_util", ".", "name", "(", "dt", ")", "for", "dt", "in", "dts", ")", ")", ")", "# Validate event_ndims.", "for", "d", "in", "distributions", ":", "if", "tensorshape_util", ".", "rank", "(", "d", ".", "event_shape", ")", "is", "not", "None", ":", "if", "tensorshape_util", ".", "rank", "(", "d", ".", "event_shape", ")", "!=", "1", ":", "raise", "ValueError", "(", "'`Distribution` must be vector variate, '", "'found event nimds: {}.'", ".", "format", "(", "tensorshape_util", ".", "rank", "(", "d", ".", "event_shape", ")", ")", ")", "elif", "validate_args", ":", "assertions", ".", "append", "(", "assert_util", ".", "assert_equal", "(", "1", ",", "tf", ".", "size", "(", "input", "=", "d", ".", "event_shape_tensor", "(", ")", ")", ",", "message", "=", "'`Distribution` must be vector variate.'", ")", ")", "batch_shapes", "=", "[", "d", ".", "batch_shape", "for", "d", "in", "distributions", "]", "if", "all", "(", "tensorshape_util", ".", "is_fully_defined", "(", "b", ")", "for", "b", "in", "batch_shapes", ")", ":", "if", "batch_shapes", "[", "1", ":", "]", "!=", "batch_shapes", "[", ":", "-", "1", "]", ":", "raise", "ValueError", "(", "'Distributions must have the same `batch_shape`; '", "'found: {}.'", ".", "format", "(", "batch_shapes", ")", ")", "elif", "validate_args", ":", "batch_shapes", "=", "[", "tensorshape_util", ".", "as_list", "(", "d", ".", "batch_shape", ")", "# pylint: disable=g-complex-comprehension", "if", "tensorshape_util", ".", "is_fully_defined", "(", "d", ".", "batch_shape", ")", "else", "d", ".", "batch_shape_tensor", "(", ")", "for", "d", "in", "distributions", "]", "assertions", ".", "extend", "(", "assert_util", ".", "assert_equal", "(", "# pylint: disable=g-complex-comprehension", "b1", ",", "b2", ",", "message", "=", "'Distribution `batch_shape`s must be identical.'", ")", "for", "b1", ",", "b2", "in", "zip", "(", "batch_shapes", "[", "1", ":", "]", ",", "batch_shapes", "[", ":", "-", "1", "]", ")", ")", "return", "assertions" ]
40.591837
0.008346
def create(filename, spec): """Create a new segy file. Create a new segy file with the geometry and properties given by `spec`. This enables creating SEGY files from your data. The created file supports all segyio modes, but has an emphasis on writing. The spec must be complete, otherwise an exception will be raised. A default, empty spec can be created with ``segyio.spec()``. Very little data is written to the file, so just calling create is not sufficient to re-read the file with segyio. Rather, every trace header and trace must be written to the file to be considered complete. Create should be used together with python's ``with`` statement. This ensure the data is written. Please refer to the examples. The ``segyio.spec()`` function will default sorting, offsets and everything in the mandatory group, except format and samples, and requires the caller to fill in *all* the fields in either of the exclusive groups. If any field is missing from the first exclusive group, and the tracecount is set, the resulting file will be considered unstructured. If the tracecount is set, and all fields of the first exclusive group are specified, the file is considered structured and the tracecount is inferred from the xlines/ilines/offsets. The offsets are defaulted to ``[1]`` by ``segyio.spec()``. Parameters ---------- filename : str Path to file to create spec : segyio.spec Structure of the segy file Returns ------- file : segyio.SegyFile An open segyio file handle, similar to that returned by `segyio.open` See also -------- segyio.spec : template for the `spec` argument Notes ----- .. versionadded:: 1.1 .. versionchanged:: 1.4 Support for creating unstructured files .. versionchanged:: 1.8 Support for creating lsb files The ``spec`` is any object that has the following attributes Mandatory:: iline : int or segyio.BinField xline : int or segyio.BinField samples : array of int format : { 1, 5 } 1 = IBM float, 5 = IEEE float Exclusive:: ilines : array_like of int xlines : array_like of int offsets : array_like of int sorting : int or segyio.TraceSortingFormat OR tracecount : int Optional:: ext_headers : int endian : str { 'big', 'msb', 'little', 'lsb' } defaults to 'big' Examples -------- Create a file: >>> spec = segyio.spec() >>> spec.ilines = [1, 2, 3, 4] >>> spec.xlines = [11, 12, 13] >>> spec.samples = list(range(50)) >>> spec.sorting = 2 >>> spec.format = 1 >>> with segyio.create(path, spec) as f: ... ## fill the file with data ... pass ... Copy a file, but shorten all traces by 50 samples: >>> with segyio.open(srcpath) as src: ... spec = segyio.spec() ... spec.sorting = src.sorting ... spec.format = src.format ... spec.samples = src.samples[:len(src.samples) - 50] ... spec.ilines = src.ilines ... spec.xline = src.xlines ... with segyio.create(dstpath, spec) as dst: ... dst.text[0] = src.text[0] ... dst.bin = src.bin ... dst.header = src.header ... dst.trace = src.trace Copy a file, but shift samples time by 50: >>> with segyio.open(srcpath) as src: ... delrt = 50 ... spec = segyio.spec() ... spec.samples = src.samples + delrt ... spec.ilines = src.ilines ... spec.xline = src.xlines ... with segyio.create(dstpath, spec) as dst: ... dst.text[0] = src.text[0] ... dst.bin = src.bin ... dst.header = src.header ... dst.header = { TraceField.DelayRecordingTime: delrt } ... dst.trace = src.trace Copy a file, but shorten all traces by 50 samples (since v1.4): >>> with segyio.open(srcpath) as src: ... spec = segyio.tools.metadata(src) ... spec.samples = spec.samples[:len(spec.samples) - 50] ... 
with segyio.create(dstpath, spec) as dst: ... dst.text[0] = src.text[0] ... dst.bin = src.bin ... dst.header = src.header ... dst.trace = src.trace """ from . import _segyio if not structured(spec): tracecount = spec.tracecount else: tracecount = len(spec.ilines) * len(spec.xlines) * len(spec.offsets) ext_headers = spec.ext_headers if hasattr(spec, 'ext_headers') else 0 samples = numpy.asarray(spec.samples) endians = { 'lsb': 256, # (1 << 8) 'little': 256, 'msb': 0, 'big': 0, } endian = spec.endian if hasattr(spec, 'endian') else 'big' if endian is None: endian = 'big' if endian not in endians: problem = 'unknown endianness {}, expected one of: ' opts = ' '.join(endians.keys()) raise ValueError(problem.format(endian) + opts) fd = _segyio.segyiofd(str(filename), 'w+', endians[endian]) fd.segymake( samples = len(samples), tracecount = tracecount, format = int(spec.format), ext_headers = int(ext_headers), ) f = segyio.SegyFile(fd, filename = str(filename), mode = 'w+', iline = int(spec.iline), xline = int(spec.xline), endian = endian, ) f._samples = samples if structured(spec): sorting = spec.sorting if hasattr(spec, 'sorting') else None if sorting is None: sorting = TraceSortingFormat.INLINE_SORTING f.interpret(spec.ilines, spec.xlines, spec.offsets, sorting) f.text[0] = default_text_header(f._il, f._xl, segyio.TraceField.offset) if len(samples) == 1: interval = int(samples[0] * 1000) else: interval = int((samples[1] - samples[0]) * 1000) f.bin.update( ntrpr = tracecount, nart = tracecount, hdt = interval, dto = interval, hns = len(samples), nso = len(samples), format = int(spec.format), exth = ext_headers, ) return f
[ "def", "create", "(", "filename", ",", "spec", ")", ":", "from", ".", "import", "_segyio", "if", "not", "structured", "(", "spec", ")", ":", "tracecount", "=", "spec", ".", "tracecount", "else", ":", "tracecount", "=", "len", "(", "spec", ".", "ilines", ")", "*", "len", "(", "spec", ".", "xlines", ")", "*", "len", "(", "spec", ".", "offsets", ")", "ext_headers", "=", "spec", ".", "ext_headers", "if", "hasattr", "(", "spec", ",", "'ext_headers'", ")", "else", "0", "samples", "=", "numpy", ".", "asarray", "(", "spec", ".", "samples", ")", "endians", "=", "{", "'lsb'", ":", "256", ",", "# (1 << 8)", "'little'", ":", "256", ",", "'msb'", ":", "0", ",", "'big'", ":", "0", ",", "}", "endian", "=", "spec", ".", "endian", "if", "hasattr", "(", "spec", ",", "'endian'", ")", "else", "'big'", "if", "endian", "is", "None", ":", "endian", "=", "'big'", "if", "endian", "not", "in", "endians", ":", "problem", "=", "'unknown endianness {}, expected one of: '", "opts", "=", "' '", ".", "join", "(", "endians", ".", "keys", "(", ")", ")", "raise", "ValueError", "(", "problem", ".", "format", "(", "endian", ")", "+", "opts", ")", "fd", "=", "_segyio", ".", "segyiofd", "(", "str", "(", "filename", ")", ",", "'w+'", ",", "endians", "[", "endian", "]", ")", "fd", ".", "segymake", "(", "samples", "=", "len", "(", "samples", ")", ",", "tracecount", "=", "tracecount", ",", "format", "=", "int", "(", "spec", ".", "format", ")", ",", "ext_headers", "=", "int", "(", "ext_headers", ")", ",", ")", "f", "=", "segyio", ".", "SegyFile", "(", "fd", ",", "filename", "=", "str", "(", "filename", ")", ",", "mode", "=", "'w+'", ",", "iline", "=", "int", "(", "spec", ".", "iline", ")", ",", "xline", "=", "int", "(", "spec", ".", "xline", ")", ",", "endian", "=", "endian", ",", ")", "f", ".", "_samples", "=", "samples", "if", "structured", "(", "spec", ")", ":", "sorting", "=", "spec", ".", "sorting", "if", "hasattr", "(", "spec", ",", "'sorting'", ")", "else", "None", "if", "sorting", "is", "None", ":", "sorting", "=", "TraceSortingFormat", ".", "INLINE_SORTING", "f", ".", "interpret", "(", "spec", ".", "ilines", ",", "spec", ".", "xlines", ",", "spec", ".", "offsets", ",", "sorting", ")", "f", ".", "text", "[", "0", "]", "=", "default_text_header", "(", "f", ".", "_il", ",", "f", ".", "_xl", ",", "segyio", ".", "TraceField", ".", "offset", ")", "if", "len", "(", "samples", ")", "==", "1", ":", "interval", "=", "int", "(", "samples", "[", "0", "]", "*", "1000", ")", "else", ":", "interval", "=", "int", "(", "(", "samples", "[", "1", "]", "-", "samples", "[", "0", "]", ")", "*", "1000", ")", "f", ".", "bin", ".", "update", "(", "ntrpr", "=", "tracecount", ",", "nart", "=", "tracecount", ",", "hdt", "=", "interval", ",", "dto", "=", "interval", ",", "hns", "=", "len", "(", "samples", ")", ",", "nso", "=", "len", "(", "samples", ")", ",", "format", "=", "int", "(", "spec", ".", "format", ")", ",", "exth", "=", "ext_headers", ",", ")", "return", "f" ]
29.368421
0.008194
def set_metadata(self, obj, metadata, clear=False, prefix=None): """ Accepts a dictionary of metadata key/value pairs and updates the specified object metadata with them. If 'clear' is True, any existing metadata is deleted and only the passed metadata is retained. Otherwise, the values passed here update the object's metadata. By default, the standard object metadata prefix ('X-Object-Meta-') is prepended to the header name if it isn't present. For non-standard headers, you must include a non-None prefix, such as an empty string. """ # Add the metadata prefix, if needed. if prefix is None: prefix = OBJECT_META_PREFIX massaged = _massage_metakeys(metadata, prefix) cname = utils.get_name(self.container) oname = utils.get_name(obj) new_meta = {} # Note that the API for object POST is the opposite of that for # container POST: for objects, all current metadata is deleted, # whereas for containers you need to set the values to an empty # string to delete them. if not clear: obj_meta = self.get_metadata(obj, prefix=prefix) new_meta = _massage_metakeys(obj_meta, prefix) utils.case_insensitive_update(new_meta, massaged) # Remove any empty values, since the object metadata API will # store them. to_pop = [] for key, val in six.iteritems(new_meta): if not val: to_pop.append(key) for key in to_pop: new_meta.pop(key) uri = "/%s/%s" % (cname, oname) resp, resp_body = self.api.method_post(uri, headers=new_meta)
[ "def", "set_metadata", "(", "self", ",", "obj", ",", "metadata", ",", "clear", "=", "False", ",", "prefix", "=", "None", ")", ":", "# Add the metadata prefix, if needed.", "if", "prefix", "is", "None", ":", "prefix", "=", "OBJECT_META_PREFIX", "massaged", "=", "_massage_metakeys", "(", "metadata", ",", "prefix", ")", "cname", "=", "utils", ".", "get_name", "(", "self", ".", "container", ")", "oname", "=", "utils", ".", "get_name", "(", "obj", ")", "new_meta", "=", "{", "}", "# Note that the API for object POST is the opposite of that for", "# container POST: for objects, all current metadata is deleted,", "# whereas for containers you need to set the values to an empty", "# string to delete them.", "if", "not", "clear", ":", "obj_meta", "=", "self", ".", "get_metadata", "(", "obj", ",", "prefix", "=", "prefix", ")", "new_meta", "=", "_massage_metakeys", "(", "obj_meta", ",", "prefix", ")", "utils", ".", "case_insensitive_update", "(", "new_meta", ",", "massaged", ")", "# Remove any empty values, since the object metadata API will", "# store them.", "to_pop", "=", "[", "]", "for", "key", ",", "val", "in", "six", ".", "iteritems", "(", "new_meta", ")", ":", "if", "not", "val", ":", "to_pop", ".", "append", "(", "key", ")", "for", "key", "in", "to_pop", ":", "new_meta", ".", "pop", "(", "key", ")", "uri", "=", "\"/%s/%s\"", "%", "(", "cname", ",", "oname", ")", "resp", ",", "resp_body", "=", "self", ".", "api", ".", "method_post", "(", "uri", ",", "headers", "=", "new_meta", ")" ]
44.578947
0.001155
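`_massage_metakeys` is internal to the surrounding library; a plausible minimal sketch of the prefixing it performs, shown only to make the flow above concrete (behavior assumed, not confirmed by the source):

    OBJECT_META_PREFIX = 'X-Object-Meta-'

    def massage_metakeys(metadata, prefix):
        # Assumed behavior: prepend the prefix to any key that does not already carry it
        out = {}
        for key, val in metadata.items():
            if not key.lower().startswith(prefix.lower()):
                key = prefix + key
            out[key] = val
        return out

    print(massage_metakeys({'color': 'blue'}, OBJECT_META_PREFIX))
    # {'X-Object-Meta-color': 'blue'}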
def encoder(f, blocksize, seed=None, c=sampler.DEFAULT_C, delta=sampler.DEFAULT_DELTA): """Generates an infinite sequence of blocks to transmit to the receiver """ # Generate seed if not provided if seed is None: seed = randint(0, 1 << 31 - 1) # get file blocks filesize, blocks = _split_file(f, blocksize) # init stream vars K = len(blocks) prng = sampler.PRNG(params=(K, delta, c)) prng.set_seed(seed) # block generation loop while True: blockseed, d, ix_samples = prng.get_src_blocks() block_data = 0 for ix in ix_samples: block_data ^= blocks[ix] # Generate blocks of XORed data in network byte order block = (filesize, blocksize, blockseed, int.to_bytes(block_data, blocksize, sys.byteorder)) yield pack('!III%ss'%blocksize, *block)
[ "def", "encoder", "(", "f", ",", "blocksize", ",", "seed", "=", "None", ",", "c", "=", "sampler", ".", "DEFAULT_C", ",", "delta", "=", "sampler", ".", "DEFAULT_DELTA", ")", ":", "# Generate seed if not provided", "if", "seed", "is", "None", ":", "seed", "=", "randint", "(", "0", ",", "1", "<<", "31", "-", "1", ")", "# get file blocks", "filesize", ",", "blocks", "=", "_split_file", "(", "f", ",", "blocksize", ")", "# init stream vars", "K", "=", "len", "(", "blocks", ")", "prng", "=", "sampler", ".", "PRNG", "(", "params", "=", "(", "K", ",", "delta", ",", "c", ")", ")", "prng", ".", "set_seed", "(", "seed", ")", "# block generation loop", "while", "True", ":", "blockseed", ",", "d", ",", "ix_samples", "=", "prng", ".", "get_src_blocks", "(", ")", "block_data", "=", "0", "for", "ix", "in", "ix_samples", ":", "block_data", "^=", "blocks", "[", "ix", "]", "# Generate blocks of XORed data in network byte order", "block", "=", "(", "filesize", ",", "blocksize", ",", "blockseed", ",", "int", ".", "to_bytes", "(", "block_data", ",", "blocksize", ",", "sys", ".", "byteorder", ")", ")", "yield", "pack", "(", "'!III%ss'", "%", "blocksize", ",", "*", "block", ")" ]
30.962963
0.00464
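The heart of the encoder is XOR-combining the sampled source blocks and framing the result with struct; a self-contained miniature of those two steps:

    import struct

    blocks = [b'abcd', b'efgh']                 # two 4-byte source blocks
    block_data = 0
    for b in blocks:
        block_data ^= int.from_bytes(b, 'big')  # XOR the selected blocks together
    filesize, blocksize, blockseed = 8, 4, 1234
    packet = struct.pack('!III4s', filesize, blocksize, blockseed,
                         block_data.to_bytes(blocksize, 'big'))
    print(len(packet))  # 16 bytes: three 4-byte header ints + the 4-byte XOR payload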
def from_api_repr(cls, resource, client): """Factory: construct a metric given its API representation :type resource: dict :param resource: metric resource representation returned from the API :type client: :class:`google.cloud.logging.client.Client` :param client: Client which holds credentials and project configuration for the metric. :rtype: :class:`google.cloud.logging.metric.Metric` :returns: Metric parsed from ``resource``. """ metric_name = resource["name"] filter_ = resource["filter"] description = resource.get("description", "") return cls(metric_name, filter_, client=client, description=description)
[ "def", "from_api_repr", "(", "cls", ",", "resource", ",", "client", ")", ":", "metric_name", "=", "resource", "[", "\"name\"", "]", "filter_", "=", "resource", "[", "\"filter\"", "]", "description", "=", "resource", ".", "get", "(", "\"description\"", ",", "\"\"", ")", "return", "cls", "(", "metric_name", ",", "filter_", ",", "client", "=", "client", ",", "description", "=", "description", ")" ]
42.529412
0.00406
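Shape of the resource dict this factory consumes (the client object is elided and the metric name is illustrative):

    resource = {
        'name': 'error-count',
        'filter': 'severity>=ERROR',
        'description': 'Count of error-level log entries',
    }
    # metric = Metric.from_api_repr(resource, client)
    # metric.description == 'Count of error-level log entries'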
def write_str(data, sidx, pnames): """ Write STRUCTURE format for all SNPs and unlinked SNPs """ ## grab snp and bis data from tmparr start = time.time() tmparrs = os.path.join(data.dirs.outfiles, "tmp-{}.h5".format(data.name)) with h5py.File(tmparrs, 'r') as io5: snparr = io5["snparr"] bisarr = io5["bisarr"] ## trim to size b/c it was made longer than actual bend = np.where(np.all(bisarr[:] == "", axis=0))[0] if np.any(bend): bend = bend.min() else: bend = bisarr.shape[1] send = np.where(np.all(snparr[:] == "", axis=0))[0] if np.any(send): send = send.min() else: send = snparr.shape[1] ## write to str and ustr out1 = open(data.outfiles.str, 'w') out2 = open(data.outfiles.ustr, 'w') numdict = {'A': '0', 'T': '1', 'G': '2', 'C': '3', 'N': '-9', '-': '-9'} if data.paramsdict["max_alleles_consens"] > 1: for idx, name in enumerate(pnames): out1.write("{}\t\t\t\t\t{}\n"\ .format(name, "\t".join([numdict[DUCT[i][0]] for i in snparr[idx, :send]]))) out1.write("{}\t\t\t\t\t{}\n"\ .format(name, "\t".join([numdict[DUCT[i][1]] for i in snparr[idx, :send]]))) out2.write("{}\t\t\t\t\t{}\n"\ .format(name, "\t".join([numdict[DUCT[i][0]] for i in bisarr[idx, :bend]]))) out2.write("{}\t\t\t\t\t{}\n"\ .format(name, "\t".join([numdict[DUCT[i][1]] for i in bisarr[idx, :bend]]))) else: ## haploid output for idx, name in enumerate(pnames): out1.write("{}\t\t\t\t\t{}\n"\ .format(name, "\t".join([numdict[DUCT[i][0]] for i in snparr[idx, :send]]))) out2.write("{}\t\t\t\t\t{}\n"\ .format(name, "\t".join([numdict[DUCT[i][0]] for i in bisarr[idx, :bend]]))) out1.close() out2.close() LOGGER.debug("finished writing str in: %s", time.time() - start)
[ "def", "write_str", "(", "data", ",", "sidx", ",", "pnames", ")", ":", "## grab snp and bis data from tmparr", "start", "=", "time", ".", "time", "(", ")", "tmparrs", "=", "os", ".", "path", ".", "join", "(", "data", ".", "dirs", ".", "outfiles", ",", "\"tmp-{}.h5\"", ".", "format", "(", "data", ".", "name", ")", ")", "with", "h5py", ".", "File", "(", "tmparrs", ",", "'r'", ")", "as", "io5", ":", "snparr", "=", "io5", "[", "\"snparr\"", "]", "bisarr", "=", "io5", "[", "\"bisarr\"", "]", "## trim to size b/c it was made longer than actual", "bend", "=", "np", ".", "where", "(", "np", ".", "all", "(", "bisarr", "[", ":", "]", "==", "\"\"", ",", "axis", "=", "0", ")", ")", "[", "0", "]", "if", "np", ".", "any", "(", "bend", ")", ":", "bend", "=", "bend", ".", "min", "(", ")", "else", ":", "bend", "=", "bisarr", ".", "shape", "[", "1", "]", "send", "=", "np", ".", "where", "(", "np", ".", "all", "(", "snparr", "[", ":", "]", "==", "\"\"", ",", "axis", "=", "0", ")", ")", "[", "0", "]", "if", "np", ".", "any", "(", "send", ")", ":", "send", "=", "send", ".", "min", "(", ")", "else", ":", "send", "=", "snparr", ".", "shape", "[", "1", "]", "## write to str and ustr", "out1", "=", "open", "(", "data", ".", "outfiles", ".", "str", ",", "'w'", ")", "out2", "=", "open", "(", "data", ".", "outfiles", ".", "ustr", ",", "'w'", ")", "numdict", "=", "{", "'A'", ":", "'0'", ",", "'T'", ":", "'1'", ",", "'G'", ":", "'2'", ",", "'C'", ":", "'3'", ",", "'N'", ":", "'-9'", ",", "'-'", ":", "'-9'", "}", "if", "data", ".", "paramsdict", "[", "\"max_alleles_consens\"", "]", ">", "1", ":", "for", "idx", ",", "name", "in", "enumerate", "(", "pnames", ")", ":", "out1", ".", "write", "(", "\"{}\\t\\t\\t\\t\\t{}\\n\"", ".", "format", "(", "name", ",", "\"\\t\"", ".", "join", "(", "[", "numdict", "[", "DUCT", "[", "i", "]", "[", "0", "]", "]", "for", "i", "in", "snparr", "[", "idx", ",", ":", "send", "]", "]", ")", ")", ")", "out1", ".", "write", "(", "\"{}\\t\\t\\t\\t\\t{}\\n\"", ".", "format", "(", "name", ",", "\"\\t\"", ".", "join", "(", "[", "numdict", "[", "DUCT", "[", "i", "]", "[", "1", "]", "]", "for", "i", "in", "snparr", "[", "idx", ",", ":", "send", "]", "]", ")", ")", ")", "out2", ".", "write", "(", "\"{}\\t\\t\\t\\t\\t{}\\n\"", ".", "format", "(", "name", ",", "\"\\t\"", ".", "join", "(", "[", "numdict", "[", "DUCT", "[", "i", "]", "[", "0", "]", "]", "for", "i", "in", "bisarr", "[", "idx", ",", ":", "bend", "]", "]", ")", ")", ")", "out2", ".", "write", "(", "\"{}\\t\\t\\t\\t\\t{}\\n\"", ".", "format", "(", "name", ",", "\"\\t\"", ".", "join", "(", "[", "numdict", "[", "DUCT", "[", "i", "]", "[", "1", "]", "]", "for", "i", "in", "bisarr", "[", "idx", ",", ":", "bend", "]", "]", ")", ")", ")", "else", ":", "## haploid output", "for", "idx", ",", "name", "in", "enumerate", "(", "pnames", ")", ":", "out1", ".", "write", "(", "\"{}\\t\\t\\t\\t\\t{}\\n\"", ".", "format", "(", "name", ",", "\"\\t\"", ".", "join", "(", "[", "numdict", "[", "DUCT", "[", "i", "]", "[", "0", "]", "]", "for", "i", "in", "snparr", "[", "idx", ",", ":", "send", "]", "]", ")", ")", ")", "out2", ".", "write", "(", "\"{}\\t\\t\\t\\t\\t{}\\n\"", ".", "format", "(", "name", ",", "\"\\t\"", ".", "join", "(", "[", "numdict", "[", "DUCT", "[", "i", "]", "[", "0", "]", "]", "for", "i", "in", "bisarr", "[", "idx", ",", ":", "bend", "]", "]", ")", ")", ")", "out1", ".", "close", "(", ")", "out2", ".", "close", "(", ")", "LOGGER", ".", "debug", "(", "\"finished writing str in: %s\"", ",", "time", ".", "time", "(", ")", "-", "start", ")" ]
41.54717
0.015084
def get(self, key, default) -> Union[Uniform, UniformBlock, Subroutine, Attribute, Varying]: ''' Returns a Uniform, UniformBlock, Subroutine, Attribute or Varying. Args: default: This is the value to be returned in case key does not exist. Returns: :py:class:`Uniform`, :py:class:`UniformBlock`, :py:class:`Subroutine`, :py:class:`Attribute` or :py:class:`Varying` ''' return self._members.get(key, default)
[ "def", "get", "(", "self", ",", "key", ",", "default", ")", "->", "Union", "[", "Uniform", ",", "UniformBlock", ",", "Subroutine", ",", "Attribute", ",", "Varying", "]", ":", "return", "self", ".", "_members", ".", "get", "(", "key", ",", "default", ")" ]
38.923077
0.009653
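A self-contained sketch of how this get behaves, using an invented stand-in for the member container (FakeUniform and 'u_color' are made up, not part of the API above):

# Stand-in only: `Prog` mimics an object holding a _members dict and the
# `get` above; 'u_color' and FakeUniform are invented for illustration.
class FakeUniform:
    value = None

class Prog:
    def __init__(self):
        self._members = {'u_color': FakeUniform()}
    def get(self, key, default):
        return self._members.get(key, default)

prog = Prog()
u_color = prog.get('u_color', None)
if u_color is not None:
    u_color.value = (1.0, 0.0, 0.0)       # member found, set illustrative data
assert prog.get('missing', None) is None  # default returned when absent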
def add_ubridge_udp_connection(self, bridge_name, source_nio, destination_nio): """ Creates a UDP connection in uBridge. :param bridge_name: bridge name in uBridge :param source_nio: source NIO instance :param destination_nio: destination NIO instance """ yield from self._ubridge_send("bridge create {name}".format(name=bridge_name)) if not isinstance(destination_nio, NIOUDP): raise NodeError("Destination NIO is not UDP") yield from self._ubridge_send('bridge add_nio_udp {name} {lport} {rhost} {rport}'.format(name=bridge_name, lport=source_nio.lport, rhost=source_nio.rhost, rport=source_nio.rport)) yield from self._ubridge_send('bridge add_nio_udp {name} {lport} {rhost} {rport}'.format(name=bridge_name, lport=destination_nio.lport, rhost=destination_nio.rhost, rport=destination_nio.rport)) if destination_nio.capturing: yield from self._ubridge_send('bridge start_capture {name} "{pcap_file}"'.format(name=bridge_name, pcap_file=destination_nio.pcap_output_file)) yield from self._ubridge_send('bridge start {name}'.format(name=bridge_name)) yield from self._ubridge_apply_filters(bridge_name, destination_nio.filters)
[ "def", "add_ubridge_udp_connection", "(", "self", ",", "bridge_name", ",", "source_nio", ",", "destination_nio", ")", ":", "yield", "from", "self", ".", "_ubridge_send", "(", "\"bridge create {name}\"", ".", "format", "(", "name", "=", "bridge_name", ")", ")", "if", "not", "isinstance", "(", "destination_nio", ",", "NIOUDP", ")", ":", "raise", "NodeError", "(", "\"Destination NIO is not UDP\"", ")", "yield", "from", "self", ".", "_ubridge_send", "(", "'bridge add_nio_udp {name} {lport} {rhost} {rport}'", ".", "format", "(", "name", "=", "bridge_name", ",", "lport", "=", "source_nio", ".", "lport", ",", "rhost", "=", "source_nio", ".", "rhost", ",", "rport", "=", "source_nio", ".", "rport", ")", ")", "yield", "from", "self", ".", "_ubridge_send", "(", "'bridge add_nio_udp {name} {lport} {rhost} {rport}'", ".", "format", "(", "name", "=", "bridge_name", ",", "lport", "=", "destination_nio", ".", "lport", ",", "rhost", "=", "destination_nio", ".", "rhost", ",", "rport", "=", "destination_nio", ".", "rport", ")", ")", "if", "destination_nio", ".", "capturing", ":", "yield", "from", "self", ".", "_ubridge_send", "(", "'bridge start_capture {name} \"{pcap_file}\"'", ".", "format", "(", "name", "=", "bridge_name", ",", "pcap_file", "=", "destination_nio", ".", "pcap_output_file", ")", ")", "yield", "from", "self", ".", "_ubridge_send", "(", "'bridge start {name}'", ".", "format", "(", "name", "=", "bridge_name", ")", ")", "yield", "from", "self", ".", "_ubridge_apply_filters", "(", "bridge_name", ",", "destination_nio", ".", "filters", ")" ]
63.666667
0.007736
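A hedged sketch of calling this coroutine, assuming `node` mixes in the method above and both NIOs already exist as NIOUDP instances; the bridge name and the wrapper itself are invented, and the old-style coroutine decorator is used to match the yield-from style of the record:

import asyncio

@asyncio.coroutine
def wire(node, source_nio, destination_nio):
    # `node`, `source_nio` and `destination_nio` are assumed to exist;
    # "bridge-e0" is a made-up uBridge bridge name.
    yield from node.add_ubridge_udp_connection(
        "bridge-e0", source_nio, destination_nio)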
def add_veth(name, interface_name, bridge=None, path=None): ''' Add a veth to a container. Note: this function doesn't update the container config, it just adds the interface at runtime name Name of the container interface_name Name of the interface in the container bridge Name of the bridge to attach the interface to (optional) CLI Examples: .. code-block:: bash salt '*' lxc.add_veth container_name eth1 br1 salt '*' lxc.add_veth container_name eth1 ''' # Get container init PID pid = get_pid(name, path=path) # Generate a random string for the veth and ensure it isn't present on the system while True: random_veth = 'veth'+''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6)) if random_veth not in __salt__['network.interfaces']().keys(): break # Check prerequisites if not __salt__['file.directory_exists']('/var/run/'): raise CommandExecutionError('Directory /var/run required for lxc.add_veth doesn\'t exist') if not __salt__['file.file_exists']('/proc/{0}/ns/net'.format(pid)): raise CommandExecutionError('Proc file for container {0} network namespace doesn\'t exist'.format(name)) if not __salt__['file.directory_exists']('/var/run/netns'): __salt__['file.mkdir']('/var/run/netns') # Ensure that the symlink is up to date (changes on container restart) if __salt__['file.is_link']('/var/run/netns/{0}'.format(name)): __salt__['file.remove']('/var/run/netns/{0}'.format(name)) __salt__['file.symlink']('/proc/{0}/ns/net'.format(pid), '/var/run/netns/{0}'.format(name)) # Ensure that the interface doesn't exist interface_exists = 0 == __salt__['cmd.retcode']('ip netns exec {netns} ip address list {interface}'.format( netns=name, interface=interface_name )) if interface_exists: raise CommandExecutionError('Interface {interface} already exists in {container}'.format( interface=interface_name, container=name )) # Create veth and bring it up if __salt__['cmd.retcode']('ip link add name {veth} type veth peer name {veth}_c'.format(veth=random_veth)) != 0: raise CommandExecutionError('Error while creating the veth pair {0}'.format(random_veth)) if __salt__['cmd.retcode']('ip link set dev {0} up'.format(random_veth)) != 0: raise CommandExecutionError('Error while bringing up host-side veth {0}'.format(random_veth)) # Attach it to the container attached = 0 == __salt__['cmd.retcode']('ip link set dev {veth}_c netns {container} name {interface_name}'.format( veth=random_veth, container=name, interface_name=interface_name )) if not attached: raise CommandExecutionError('Error while attaching the veth {veth} to container {container}'.format( veth=random_veth, container=name )) __salt__['file.remove']('/var/run/netns/{0}'.format(name)) if bridge is not None: __salt__['bridge.addif'](bridge, random_veth)
[ "def", "add_veth", "(", "name", ",", "interface_name", ",", "bridge", "=", "None", ",", "path", "=", "None", ")", ":", "# Get container init PID", "pid", "=", "get_pid", "(", "name", ",", "path", "=", "path", ")", "# Generate a ramdom string for veth and ensure that is isn't present on the system", "while", "True", ":", "random_veth", "=", "'veth'", "+", "''", ".", "join", "(", "random", ".", "choice", "(", "string", ".", "ascii_uppercase", "+", "string", ".", "digits", ")", "for", "_", "in", "range", "(", "6", ")", ")", "if", "random_veth", "not", "in", "__salt__", "[", "'network.interfaces'", "]", "(", ")", ".", "keys", "(", ")", ":", "break", "# Check prerequisites", "if", "not", "__salt__", "[", "'file.directory_exists'", "]", "(", "'/var/run/'", ")", ":", "raise", "CommandExecutionError", "(", "'Directory /var/run required for lxc.add_veth doesn\\'t exists'", ")", "if", "not", "__salt__", "[", "'file.file_exists'", "]", "(", "'/proc/{0}/ns/net'", ".", "format", "(", "pid", ")", ")", ":", "raise", "CommandExecutionError", "(", "'Proc file for container {0} network namespace doesn\\'t exists'", ".", "format", "(", "name", ")", ")", "if", "not", "__salt__", "[", "'file.directory_exists'", "]", "(", "'/var/run/netns'", ")", ":", "__salt__", "[", "'file.mkdir'", "]", "(", "'/var/run/netns'", ")", "# Ensure that the symlink is up to date (change on container restart)", "if", "__salt__", "[", "'file.is_link'", "]", "(", "'/var/run/netns/{0}'", ".", "format", "(", "name", ")", ")", ":", "__salt__", "[", "'file.remove'", "]", "(", "'/var/run/netns/{0}'", ".", "format", "(", "name", ")", ")", "__salt__", "[", "'file.symlink'", "]", "(", "'/proc/{0}/ns/net'", ".", "format", "(", "pid", ")", ",", "'/var/run/netns/{0}'", ".", "format", "(", "name", ")", ")", "# Ensure that interface doesn't exists", "interface_exists", "=", "0", "==", "__salt__", "[", "'cmd.retcode'", "]", "(", "'ip netns exec {netns} ip address list {interface}'", ".", "format", "(", "netns", "=", "name", ",", "interface", "=", "interface_name", ")", ")", "if", "interface_exists", ":", "raise", "CommandExecutionError", "(", "'Interface {interface} already exists in {container}'", ".", "format", "(", "interface", "=", "interface_name", ",", "container", "=", "name", ")", ")", "# Create veth and bring it up", "if", "__salt__", "[", "'cmd.retcode'", "]", "(", "'ip link add name {veth} type veth peer name {veth}_c'", ".", "format", "(", "veth", "=", "random_veth", ")", ")", "!=", "0", ":", "raise", "CommandExecutionError", "(", "'Error while creating the veth pair {0}'", ".", "format", "(", "random_veth", ")", ")", "if", "__salt__", "[", "'cmd.retcode'", "]", "(", "'ip link set dev {0} up'", ".", "format", "(", "random_veth", ")", ")", "!=", "0", ":", "raise", "CommandExecutionError", "(", "'Error while bringing up host-side veth {0}'", ".", "format", "(", "random_veth", ")", ")", "# Attach it to the container", "attached", "=", "0", "==", "__salt__", "[", "'cmd.retcode'", "]", "(", "'ip link set dev {veth}_c netns {container} name {interface_name}'", ".", "format", "(", "veth", "=", "random_veth", ",", "container", "=", "name", ",", "interface_name", "=", "interface_name", ")", ")", "if", "not", "attached", ":", "raise", "CommandExecutionError", "(", "'Error while attaching the veth {veth} to container {container}'", ".", "format", "(", "veth", "=", "random_veth", ",", "container", "=", "name", ")", ")", "__salt__", "[", "'file.remove'", "]", "(", "'/var/run/netns/{0}'", ".", "format", "(", "name", ")", ")", "if", "bridge", "is", 
"not", "None", ":", "__salt__", "[", "'bridge.addif'", "]", "(", "bridge", ",", "random_veth", ")" ]
38.975
0.004692
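Besides the CLI examples in the docstring, the same module function could plausibly be driven from salt's Python client; the minion, container, interface and bridge names below are invented:

# Hypothetical invocation via salt's Python client instead of the CLI;
# 'minion1', 'web1', 'eth1' and 'br0' are all made-up example names.
import salt.client

local = salt.client.LocalClient()
local.cmd('minion1', 'lxc.add_veth', ['web1', 'eth1', 'br0'])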
def check(self): """ Checks the values of the window quads. If any problems are found it flags the offending window by changing the background colour. Returns: status : bool """ status = synced = True xbin = self.xbin.value() ybin = self.ybin.value() nquad = self.nquad.value() g = get_root(self).globals # individual window checks for (xsllw, xsulw, xslrw, xsurw, ysw, nxw, nyw) in zip( self.xsll[:nquad], self.xsul[:nquad], self.xslr[:nquad], self.xsur[:nquad], self.ys[:nquad], self.nx[:nquad], self.ny[:nquad]): all_fields = (xsllw, xsulw, xslrw, xsurw, ysw, nxw, nyw) for field in all_fields: field.config(bg=g.COL['main']) status = status if field.ok() else False xsll = xsllw.value() xsul = xsulw.value() xslr = xslrw.value() xsur = xsurw.value() ys = ysw.value() nx = nxw.value() ny = nyw.value() # Are unbinned dimensions consistent with binning factors? if nx is None or nx % xbin != 0: nxw.config(bg=g.COL['error']) status = False elif (nx // xbin) % 4 != 0: """ The NGC collects pixel data in chunks before transmission. As a result, to avoid loss of data from frames, the binned x-size must be a multiple of 4. """ nxw.config(bg=g.COL['error']) status = False if ny is None or ny % ybin != 0: nyw.config(bg=g.COL['error']) status = False # overlap checks in x direction if xsll is None or xslr is None or xsll >= xslr: xslrw.config(bg=g.COL['error']) status = False if xsul is None or xsur is None or xsul >= xsur: xsurw.config(bg=g.COL['error']) status = False if nx is None or xsll is None or xsll + nx > xslr: xslrw.config(bg=g.COL['error']) status = False if xsul is None or nx is None or xsul + nx > xsur: xsurw.config(bg=g.COL['error']) status = False # Are the windows synchronised? This means that they would # be consistent with the pixels generated were the whole CCD # to be binned by the same factors. If relevant values are not # set, we count that as "synced" because the purpose of this is # to enable / disable the sync button and we don't want it to be # enabled just because xs or ys are not set. perform_check = all([param is not None for param in ( xsll, xslr, ys, nx, ny )]) if (perform_check and ((xsll - 1) % xbin != 0 or (xslr - 1025) % xbin != 0 or (ys - 1) % ybin != 0)): synced = False perform_check = all([param is not None for param in ( xsul, xsur, ys, nx, ny )]) if (perform_check and ((xsul - 1) % xbin != 0 or (xsur - 1025) % xbin != 0 or (ys - 1) % ybin != 0)): synced = False # Range checks rchecks = ((xsll, nx, xsllw), (xslr, nx, xslrw), (xsul, nx, xsulw), (xsur, nx, xsurw), (ys, ny, ysw)) for check in rchecks: val, size, widg = check if val is None or size is None or val + size - 1 > widg.imax: widg.config(bg=g.COL['error']) status = False # Quad overlap checks. Compare one quad with the next one # in the same quadrant if there is one. Only bother if we # have survived so far, which saves a lot of checks. if status: for index in range(nquad-1): ys1 = self.ys[index].value() ny1 = self.ny[index].value() ysw2 = self.ys[index+1] ys2 = ysw2.value() if any([thing is None for thing in (ys1, ny1, ys2)]) or ys1 + ny1 > ys2: ysw2.config(bg=g.COL['error']) status = False if synced: self.sbutt.config(bg=g.COL['main']) self.sbutt.disable() else: if not self.frozen: self.sbutt.enable() self.sbutt.config(bg=g.COL['warn']) return status
[ "def", "check", "(", "self", ")", ":", "status", "=", "synced", "=", "True", "xbin", "=", "self", ".", "xbin", ".", "value", "(", ")", "ybin", "=", "self", ".", "ybin", ".", "value", "(", ")", "nquad", "=", "self", ".", "nquad", ".", "value", "(", ")", "g", "=", "get_root", "(", "self", ")", ".", "globals", "# individual window checks", "for", "(", "xsllw", ",", "xsulw", ",", "xslrw", ",", "xsurw", ",", "ysw", ",", "nxw", ",", "nyw", ")", "in", "zip", "(", "self", ".", "xsll", "[", ":", "nquad", "]", ",", "self", ".", "xsul", "[", ":", "nquad", "]", ",", "self", ".", "xslr", "[", ":", "nquad", "]", ",", "self", ".", "xsur", "[", ":", "nquad", "]", ",", "self", ".", "ys", "[", ":", "nquad", "]", ",", "self", ".", "nx", "[", ":", "nquad", "]", ",", "self", ".", "ny", "[", ":", "nquad", "]", ")", ":", "all_fields", "=", "(", "xsllw", ",", "xsulw", ",", "xslrw", ",", "xsurw", ",", "ysw", ",", "nxw", ",", "nyw", ")", "for", "field", "in", "all_fields", ":", "field", ".", "config", "(", "bg", "=", "g", ".", "COL", "[", "'main'", "]", ")", "status", "=", "status", "if", "field", ".", "ok", "(", ")", "else", "False", "xsll", "=", "xsllw", ".", "value", "(", ")", "xsul", "=", "xsulw", ".", "value", "(", ")", "xslr", "=", "xslrw", ".", "value", "(", ")", "xsur", "=", "xsurw", ".", "value", "(", ")", "ys", "=", "ysw", ".", "value", "(", ")", "nx", "=", "nxw", ".", "value", "(", ")", "ny", "=", "nyw", ".", "value", "(", ")", "# Are unbinned dimensions consistent with binning factors?", "if", "nx", "is", "None", "or", "nx", "%", "xbin", "!=", "0", ":", "nxw", ".", "config", "(", "bg", "=", "g", ".", "COL", "[", "'error'", "]", ")", "status", "=", "False", "elif", "(", "nx", "//", "xbin", ")", "%", "4", "!=", "0", ":", "\"\"\"\n The NGC collects pixel data in chunks before transmission.\n As a result, to avoid loss of data from frames, the binned\n x-size must be a multiple of 4.\n \"\"\"", "nxw", ".", "config", "(", "bg", "=", "g", ".", "COL", "[", "'error'", "]", ")", "status", "=", "False", "if", "ny", "is", "None", "or", "ny", "%", "ybin", "!=", "0", ":", "nyw", ".", "config", "(", "bg", "=", "g", ".", "COL", "[", "'error'", "]", ")", "status", "=", "False", "# overlap checks in x direction", "if", "xsll", "is", "None", "or", "xslr", "is", "None", "or", "xsll", ">=", "xslr", ":", "xslrw", ".", "config", "(", "bg", "=", "g", ".", "COL", "[", "'error'", "]", ")", "status", "=", "False", "if", "xsul", "is", "None", "or", "xsur", "is", "None", "or", "xsul", ">=", "xsur", ":", "xsurw", ".", "config", "(", "bg", "=", "g", ".", "COL", "[", "'error'", "]", ")", "status", "=", "False", "if", "nx", "is", "None", "or", "xsll", "is", "None", "or", "xsll", "+", "nx", ">", "xslr", ":", "xslrw", ".", "config", "(", "bg", "=", "g", ".", "COL", "[", "'error'", "]", ")", "status", "=", "False", "if", "xsul", "is", "None", "or", "nx", "is", "None", "or", "xsul", "+", "nx", ">", "xsur", ":", "xsurw", ".", "config", "(", "bg", "=", "g", ".", "COL", "[", "'error'", "]", ")", "status", "=", "False", "# Are the windows synchronised? This means that they would", "# be consistent with the pixels generated were the whole CCD", "# to be binned by the same factors. 
If relevant values are not", "# set, we count that as \"synced\" because the purpose of this is", "# to enable / disable the sync button and we don't want it to be", "# enabled just because xs or ys are not set.", "perform_check", "=", "all", "(", "[", "param", "is", "not", "None", "for", "param", "in", "(", "xsll", ",", "xslr", ",", "ys", ",", "nx", ",", "ny", ")", "]", ")", "if", "(", "perform_check", "and", "(", "(", "xsll", "-", "1", ")", "%", "xbin", "!=", "0", "or", "(", "xslr", "-", "1025", ")", "%", "xbin", "!=", "0", "or", "(", "ys", "-", "1", ")", "%", "ybin", "!=", "0", ")", ")", ":", "synced", "=", "False", "perform_check", "=", "all", "(", "[", "param", "is", "not", "None", "for", "param", "in", "(", "xsul", ",", "xsur", ",", "ys", ",", "nx", ",", "ny", ")", "]", ")", "if", "(", "perform_check", "and", "(", "(", "xsul", "-", "1", ")", "%", "xbin", "!=", "0", "or", "(", "xsur", "-", "1025", ")", "%", "xbin", "!=", "0", "or", "(", "ys", "-", "1", ")", "%", "ybin", "!=", "0", ")", ")", ":", "synced", "=", "False", "# Range checks", "rchecks", "=", "(", "(", "xsll", ",", "nx", ",", "xsllw", ")", ",", "(", "xslr", ",", "nx", ",", "xslrw", ")", ",", "(", "xsul", ",", "nx", ",", "xsulw", ")", ",", "(", "xsur", ",", "nx", ",", "xsurw", ")", ",", "(", "ys", ",", "ny", ",", "ysw", ")", ")", "for", "check", "in", "rchecks", ":", "val", ",", "size", ",", "widg", "=", "check", "if", "val", "is", "None", "or", "size", "is", "None", "or", "val", "+", "size", "-", "1", ">", "widg", ".", "imax", ":", "widg", ".", "config", "(", "bg", "=", "g", ".", "COL", "[", "'error'", "]", ")", "status", "=", "False", "# Quad overlap checks. Compare one quad with the next one", "# in the same quadrant if there is one. Only bother if we", "# have survived so far, which saves a lot of checks.", "if", "status", ":", "for", "index", "in", "range", "(", "nquad", "-", "1", ")", ":", "ys1", "=", "self", ".", "ys", "[", "index", "]", ".", "value", "(", ")", "ny1", "=", "self", ".", "ny", "[", "index", "]", ".", "value", "(", ")", "ysw2", "=", "self", ".", "ys", "[", "index", "+", "1", "]", "ys2", "=", "ysw2", ".", "value", "(", ")", "if", "any", "(", "[", "thing", "is", "None", "for", "thing", "in", "(", "ys1", ",", "ny1", ",", "ys2", ")", "]", ")", "or", "ys1", "+", "ny1", ">", "ys2", ":", "ysw2", ".", "config", "(", "bg", "=", "g", ".", "COL", "[", "'error'", "]", ")", "status", "=", "False", "if", "synced", ":", "self", ".", "sbutt", ".", "config", "(", "bg", "=", "g", ".", "COL", "[", "'main'", "]", ")", "self", ".", "sbutt", ".", "disable", "(", ")", "else", ":", "if", "not", "self", ".", "frozen", ":", "self", ".", "sbutt", ".", "enable", "(", ")", "self", ".", "sbutt", ".", "config", "(", "bg", "=", "g", ".", "COL", "[", "'warn'", "]", ")", "return", "status" ]
39.37931
0.001281
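The NGC chunking comment inside check reduces to two arithmetic conditions on nx; restated as a standalone sketch (not tied to the widget code):

# Standalone restatement of the nx checks above: nx must divide evenly by
# xbin, and the binned width nx // xbin must be a multiple of 4 (NGC chunks).
def nx_ok(nx, xbin):
    return nx % xbin == 0 and (nx // xbin) % 4 == 0

assert nx_ok(128, 2)       # 128 // 2 = 64, a multiple of 4
assert not nx_ok(130, 2)   # 130 // 2 = 65, rejected
assert not nx_ok(129, 2)   # not divisible by the bin factor at all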
def value_type(type_): """returns reference to `boost::shared_ptr` \ or `std::shared_ptr` value type""" if not smart_pointer_traits.is_smart_pointer(type_): raise TypeError( 'Type "%s" is not an instantiation of boost::shared_ptr or std::shared_ptr' % type_.decl_string) try: return internal_type_traits.get_by_name(type_, "element_type") except runtime_errors.declaration_not_found_t: return _search_in_bases(type_)
[ "def", "value_type", "(", "type_", ")", ":", "if", "not", "smart_pointer_traits", ".", "is_smart_pointer", "(", "type_", ")", ":", "raise", "TypeError", "(", "'Type \"%s\" is not an instantiation of \\\n boost::shared_ptr or std::shared_ptr'", "%", "type_", ".", "decl_string", ")", "try", ":", "return", "internal_type_traits", ".", "get_by_name", "(", "type_", ",", "\"element_type\"", ")", "except", "runtime_errors", ".", "declaration_not_found_t", ":", "return", "_search_in_bases", "(", "type_", ")" ]
44.25
0.00369
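A hypothetical call, assuming shared_ptr_type is a pygccxml type object wrapping boost::shared_ptr<Foo> (Foo is an invented class, and the import path is the usual pygccxml declarations namespace):

# Hypothetical usage: `shared_ptr_type` is assumed to be a pygccxml type
# object for boost::shared_ptr<Foo>; 'Foo' is an invented class name.
from pygccxml.declarations import smart_pointer_traits

element = smart_pointer_traits.value_type(shared_ptr_type)
print(element.decl_string)   # expected to print something like '::Foo'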
def _create_fig( *, x_sc=bq.LinearScale, y_sc=bq.LinearScale, x_ax=bq.Axis, y_ax=bq.Axis, fig=bq.Figure, options={}, params={} ): """ Initializes scales and axes for a bqplot figure and returns the resulting blank figure. Each plot component is passed in as a class. The plot options should be passed into options. Any additional parameters to initialize plot components are passed into params as a dict of { plot_component: { trait: value, ... } } For example, to change the grid lines of the x-axis: params={ 'x_ax': {'grid_lines' : 'solid'} } If the param value is a function, it will be called with the options dict augmented with all previously created plot elements. This permits dependencies on plot elements: params={ 'x_ax': {'scale': lambda opts: opts['x_sc'] } } """ params = _merge_with_defaults(params) x_sc = x_sc(**_call_params(params['x_sc'], options)) y_sc = y_sc(**_call_params(params['y_sc'], options)) options = tz.merge(options, {'x_sc': x_sc, 'y_sc': y_sc}) x_ax = x_ax(**_call_params(params['x_ax'], options)) y_ax = y_ax(**_call_params(params['y_ax'], options)) options = tz.merge(options, {'x_ax': x_ax, 'y_ax': y_ax, 'marks': []}) fig = fig(**_call_params(params['fig'], options)) return fig
[ "def", "_create_fig", "(", "*", ",", "x_sc", "=", "bq", ".", "LinearScale", ",", "y_sc", "=", "bq", ".", "LinearScale", ",", "x_ax", "=", "bq", ".", "Axis", ",", "y_ax", "=", "bq", ".", "Axis", ",", "fig", "=", "bq", ".", "Figure", ",", "options", "=", "{", "}", ",", "params", "=", "{", "}", ")", ":", "params", "=", "_merge_with_defaults", "(", "params", ")", "x_sc", "=", "x_sc", "(", "*", "*", "_call_params", "(", "params", "[", "'x_sc'", "]", ",", "options", ")", ")", "y_sc", "=", "y_sc", "(", "*", "*", "_call_params", "(", "params", "[", "'y_sc'", "]", ",", "options", ")", ")", "options", "=", "tz", ".", "merge", "(", "options", ",", "{", "'x_sc'", ":", "x_sc", ",", "'y_sc'", ":", "y_sc", "}", ")", "x_ax", "=", "x_ax", "(", "*", "*", "_call_params", "(", "params", "[", "'x_ax'", "]", ",", "options", ")", ")", "y_ax", "=", "y_ax", "(", "*", "*", "_call_params", "(", "params", "[", "'y_ax'", "]", ",", "options", ")", ")", "options", "=", "tz", ".", "merge", "(", "options", ",", "{", "'x_ax'", ":", "x_ax", ",", "'y_ax'", ":", "y_ax", ",", "'marks'", ":", "[", "]", "}", ")", "fig", "=", "fig", "(", "*", "*", "_call_params", "(", "params", "[", "'fig'", "]", ",", "options", ")", ")", "return", "fig" ]
34.368421
0.000745
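An illustrative call mirroring the docstring's two params patterns, assuming the default bqplot classes and that _merge_with_defaults fills in the unspecified components; the lambda receives the options dict with the already-built scales:

# Illustrative only: both overrides come straight from the docstring's
# examples; everything else is left to the defaults merged in by the helper.
fig = _create_fig(
    params={
        'x_ax': {'grid_lines': 'solid'},               # static trait override
        'y_ax': {'scale': lambda opts: opts['y_sc']},  # element dependency
    },
)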
def add_member(self, member, dn=False): """Add a member to the bound group Arguments: member -- the CSHMember object (or distinguished name) of the member Keyword arguments: dn -- whether or not member is a distinguished name """ if dn: if self.check_member(member, dn=True): return mod = (ldap.MOD_ADD, 'member', member.encode('ascii')) else: if self.check_member(member): return mod = (ldap.MOD_ADD, 'member', member.get_dn().encode('ascii')) if self.__lib__.__batch_mods__: self.__lib__.enqueue_mod(self.__dn__, mod) elif not self.__lib__.__ro__: mod_attrs = [mod] self.__con__.modify_s(self.__dn__, mod_attrs) else: print("ADD VALUE member = {} FOR {}".format(mod[2], self.__dn__))
[ "def", "add_member", "(", "self", ",", "member", ",", "dn", "=", "False", ")", ":", "if", "dn", ":", "if", "self", ".", "check_member", "(", "member", ",", "dn", "=", "True", ")", ":", "return", "mod", "=", "(", "ldap", ".", "MOD_ADD", ",", "'member'", ",", "member", ".", "encode", "(", "'ascii'", ")", ")", "else", ":", "if", "self", ".", "check_member", "(", "member", ")", ":", "return", "mod", "=", "(", "ldap", ".", "MOD_ADD", ",", "'member'", ",", "member", ".", "get_dn", "(", ")", ".", "encode", "(", "'ascii'", ")", ")", "if", "self", ".", "__lib__", ".", "__batch_mods__", ":", "self", ".", "__lib__", ".", "enqueue_mod", "(", "self", ".", "__dn__", ",", "mod", ")", "elif", "not", "self", ".", "__lib__", ".", "__ro__", ":", "mod_attrs", "=", "[", "mod", "]", "self", ".", "__con__", ".", "modify_s", "(", "self", ".", "__dn__", ",", "mod_attrs", ")", "else", ":", "print", "(", "\"ADD VALUE member = {} FOR {}\"", ".", "format", "(", "mod", "[", "2", "]", ",", "self", ".", "__dn__", ")", ")" ]
33.846154
0.00221
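A hypothetical call pattern, assuming group is a bound group object exposing the method above and member is a CSHMember; the DN below is an invented example:

# Hypothetical usage: `group` and `member` are assumed to exist; the DN is
# a made-up example, not a real directory entry.
group.add_member(member)   # resolve the DN from the member object
group.add_member('uid=alice,ou=Users,dc=csh,dc=rit,dc=edu', dn=True)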