Dataset columns:
    text            string     lengths 75 – 104k
    code_tokens     sequence
    avg_line_len    float64    7.91 – 980
    score           float64    0 – 0.18
def get_compatible_canvas_layers(self, category):
    """Collect layers from map canvas, compatible for the given category
    and selected impact function.

    .. note:: Returns layers with keywords and layermode matching the
        category and compatible with the selected impact function.
        Also returns layers without keywords with layermode compatible
        with the selected impact function.

    :param category: The category to filter for.
    :type category: string

    :returns: Metadata of found layers.
    :rtype: list of dicts
    """
    # Collect compatible layers
    layers = []
    for layer in self.iface.mapCanvas().layers():
        try:
            keywords = self.keyword_io.read_keywords(layer)
            if 'layer_purpose' not in keywords:
                keywords = None
        except (HashNotFoundError, OperationalError, NoKeywordsFoundError,
                KeywordNotFoundError, InvalidParameterError,
                UnsupportedProviderError):
            keywords = None
        if self.is_layer_compatible(layer, category, keywords):
            layers += [
                {'id': layer.id(),
                 'name': layer.name(),
                 'keywords': keywords}]

    # Move layers without keywords to the end
    l1 = [l for l in layers if l['keywords']]
    l2 = [l for l in layers if not l['keywords']]
    layers = l1 + l2

    return layers
[ "def", "get_compatible_canvas_layers", "(", "self", ",", "category", ")", ":", "# Collect compatible layers", "layers", "=", "[", "]", "for", "layer", "in", "self", ".", "iface", ".", "mapCanvas", "(", ")", ".", "layers", "(", ")", ":", "try", ":", "keywords", "=", "self", ".", "keyword_io", ".", "read_keywords", "(", "layer", ")", "if", "'layer_purpose'", "not", "in", "keywords", ":", "keywords", "=", "None", "except", "(", "HashNotFoundError", ",", "OperationalError", ",", "NoKeywordsFoundError", ",", "KeywordNotFoundError", ",", "InvalidParameterError", ",", "UnsupportedProviderError", ")", ":", "keywords", "=", "None", "if", "self", ".", "is_layer_compatible", "(", "layer", ",", "category", ",", "keywords", ")", ":", "layers", "+=", "[", "{", "'id'", ":", "layer", ".", "id", "(", ")", ",", "'name'", ":", "layer", ".", "name", "(", ")", ",", "'keywords'", ":", "keywords", "}", "]", "# Move layers without keywords to the end", "l1", "=", "[", "l", "for", "l", "in", "layers", "if", "l", "[", "'keywords'", "]", "]", "l2", "=", "[", "l", "for", "l", "in", "layers", "if", "not", "l", "[", "'keywords'", "]", "]", "layers", "=", "l1", "+", "l2", "return", "layers" ]
avg_line_len: 36.046512
score: 0.002513
def get_note_names(self):
    """Return a list of unique note names in the Bar."""
    res = []
    for cont in self.bar:
        for x in cont[2].get_note_names():
            if x not in res:
                res.append(x)
    return res
[ "def", "get_note_names", "(", "self", ")", ":", "res", "=", "[", "]", "for", "cont", "in", "self", ".", "bar", ":", "for", "x", "in", "cont", "[", "2", "]", ".", "get_note_names", "(", ")", ":", "if", "x", "not", "in", "res", ":", "res", ".", "append", "(", "x", ")", "return", "res" ]
avg_line_len: 32.375
score: 0.007519
def init_app(self, app, sessionstore=None, register_blueprint=True):
    """Flask application initialization.

    :param app: The Flask application.
    :param sessionstore: store for sessions. Passed to
        ``flask-kvsession``. If ``None`` then Redis is configured.
        (Default: ``None``)
    :param register_blueprint: If ``True``, the application registers the
        blueprints. (Default: ``True``)
    """
    self.make_session_permanent(app)
    return super(InvenioAccountsUI, self).init_app(
        app, sessionstore=sessionstore,
        register_blueprint=register_blueprint
    )
[ "def", "init_app", "(", "self", ",", "app", ",", "sessionstore", "=", "None", ",", "register_blueprint", "=", "True", ")", ":", "self", ".", "make_session_permanent", "(", "app", ")", "return", "super", "(", "InvenioAccountsUI", ",", "self", ")", ".", "init_app", "(", "app", ",", "sessionstore", "=", "sessionstore", ",", "register_blueprint", "=", "register_blueprint", ")" ]
avg_line_len: 42.666667
score: 0.003058
def _expand_base_distribution_mean(self):
    """Ensures `self.distribution.mean()` has `[batch, event]` shape."""
    single_draw_shape = concat_vectors(self.batch_shape_tensor(),
                                       self.event_shape_tensor())
    m = tf.reshape(
        self.distribution.mean(),  # A scalar.
        shape=tf.ones_like(single_draw_shape, dtype=tf.int32))
    m = tf.tile(m, multiples=single_draw_shape)
    tensorshape_util.set_shape(
        m, tensorshape_util.concatenate(self.batch_shape, self.event_shape))
    return m
[ "def", "_expand_base_distribution_mean", "(", "self", ")", ":", "single_draw_shape", "=", "concat_vectors", "(", "self", ".", "batch_shape_tensor", "(", ")", ",", "self", ".", "event_shape_tensor", "(", ")", ")", "m", "=", "tf", ".", "reshape", "(", "self", ".", "distribution", ".", "mean", "(", ")", ",", "# A scalar.", "shape", "=", "tf", ".", "ones_like", "(", "single_draw_shape", ",", "dtype", "=", "tf", ".", "int32", ")", ")", "m", "=", "tf", ".", "tile", "(", "m", ",", "multiples", "=", "single_draw_shape", ")", "tensorshape_util", ".", "set_shape", "(", "m", ",", "tensorshape_util", ".", "concatenate", "(", "self", ".", "batch_shape", ",", "self", ".", "event_shape", ")", ")", "return", "m" ]
avg_line_len: 48.727273
score: 0.001832
def get_3d_markers_no_label(
    self, component_info=None, data=None, component_position=None
):
    """Get 3D markers without label."""
    return self._get_3d_markers(
        RT3DMarkerPositionNoLabel, component_info, data, component_position
    )
[ "def", "get_3d_markers_no_label", "(", "self", ",", "component_info", "=", "None", ",", "data", "=", "None", ",", "component_position", "=", "None", ")", ":", "return", "self", ".", "_get_3d_markers", "(", "RT3DMarkerPositionNoLabel", ",", "component_info", ",", "data", ",", "component_position", ")" ]
avg_line_len: 38.571429
score: 0.01087
def createPartyFromName(apps, name):
    '''
    For creating/matching TransactionParty objects using names alone.
    Look for staff members with the same name and match to them first if
    there is exactly one match. Then, look for users and match them if
    there is exactly one match. Otherwise, just generate a
    TransactionParty for the name only.
    '''
    TransactionParty = apps.get_model('financial', 'TransactionParty')
    StaffMember = apps.get_model('core', 'StaffMember')
    User = apps.get_model('auth', 'User')

    firstName = name.split(' ')[0]
    lastName = ' '.join(name.split(' ')[1:])

    members = StaffMember.objects.filter(
        firstName__istartswith=firstName, lastName__istartswith=lastName
    )
    users = User.objects.filter(
        first_name__istartswith=firstName, last_name__istartswith=lastName
    )

    if members.count() == 1:
        this_member = members.first()
        party = TransactionParty.objects.get_or_create(
            staffMember=this_member,
            defaults={
                'name': getFullName(this_member),
                'user': this_member.userAccount,
            }
        )[0]
    elif users.count() == 1:
        this_user = users.first()
        party = TransactionParty.objects.get_or_create(
            user=this_user,
            defaults={
                'name': getFullName(this_user),
                'staffMember': getattr(this_user, 'staffmember', None),
            }
        )[0]
    else:
        party = TransactionParty.objects.get_or_create(
            name=name
        )[0]

    return party
[ "def", "createPartyFromName", "(", "apps", ",", "name", ")", ":", "TransactionParty", "=", "apps", ".", "get_model", "(", "'financial'", ",", "'TransactionParty'", ")", "StaffMember", "=", "apps", ".", "get_model", "(", "'core'", ",", "'StaffMember'", ")", "User", "=", "apps", ".", "get_model", "(", "'auth'", ",", "'User'", ")", "firstName", "=", "name", ".", "split", "(", "' '", ")", "[", "0", "]", "lastName", "=", "' '", ".", "join", "(", "name", ".", "split", "(", "' '", ")", "[", "1", ":", "]", ")", "members", "=", "StaffMember", ".", "objects", ".", "filter", "(", "firstName__istartswith", "=", "firstName", ",", "lastName__istartswith", "=", "lastName", ")", "users", "=", "User", ".", "objects", ".", "filter", "(", "first_name__istartswith", "=", "firstName", ",", "last_name__istartswith", "=", "lastName", ")", "if", "members", ".", "count", "(", ")", "==", "1", ":", "this_member", "=", "members", ".", "first", "(", ")", "party", "=", "TransactionParty", ".", "objects", ".", "get_or_create", "(", "staffMember", "=", "this_member", ",", "defaults", "=", "{", "'name'", ":", "getFullName", "(", "this_member", ")", ",", "'user'", ":", "this_member", ".", "userAccount", ",", "}", ")", "[", "0", "]", "elif", "users", ".", "count", "(", ")", "==", "1", ":", "this_user", "=", "users", ".", "first", "(", ")", "party", "=", "TransactionParty", ".", "objects", ".", "get_or_create", "(", "user", "=", "this_user", ",", "defaults", "=", "{", "'name'", ":", "getFullName", "(", "this_user", ")", ",", "'staffMember'", ":", "getattr", "(", "this_user", ",", "'staffmember'", ",", "None", ")", ",", "}", ")", "[", "0", "]", "else", ":", "party", "=", "TransactionParty", ".", "objects", ".", "get_or_create", "(", "name", "=", "name", ")", "[", "0", "]", "return", "party" ]
avg_line_len: 34.444444
score: 0.001882
def _validate_config(self):
    """ Handle and check configuration. """
    groups = dict(
        job=defaultdict(Bunch),
        httpd=defaultdict(Bunch),
    )

    for key, val in config.torque.items():
        # Auto-convert numbers and bools
        if val.isdigit():
            config.torque[key] = val = int(val)
        elif val.lower() in (matching.TRUE | matching.FALSE):
            val = matching.truth(str(val), key)

        # Assemble grouped parameters
        stem = key.split('.', 1)[0]
        if key == "httpd.active":
            groups[stem]["active"] = val
        elif stem in groups:
            try:
                stem, name, param = key.split('.', 2)
            except (TypeError, ValueError):
                self.fatal("Bad %s configuration key %r (expecting %s.NAME.PARAM)" % (stem, key, stem))
            else:
                groups[stem][name][param] = val

    for key, val in groups.iteritems():
        setattr(self, key.replace("job", "jobs"), Bunch(val))

    # Validate httpd config
    if self.httpd.active:
        if self.httpd.waitress.url_scheme not in ("http", "https"):
            self.fatal("HTTP URL scheme must be either 'http' or 'https'")
        if not isinstance(self.httpd.waitress.port, int) or not (1024 <= self.httpd.waitress.port < 65536):
            self.fatal("HTTP port must be a 16 bit number >= 1024")

    # Validate jobs
    for name, params in self.jobs.items():
        for key in ("handler", "schedule"):
            if key not in params:
                self.fatal("Job '%s' is missing the required 'job.%s.%s' parameter" % (name, name, key))

        bool_param = lambda k, default, p=params: matching.truth(p.get(k, default), "job.%s.%s" % (name, k))

        params.job_name = name
        params.dry_run = bool_param("dry_run", False) or self.options.dry_run
        params.active = bool_param("active", True)
        params.schedule = self._parse_schedule(params.schedule)

        if params.active:
            try:
                params.handler = pymagic.import_name(params.handler)
            except ImportError as exc:
                self.fatal("Bad handler name '%s' for job '%s':\n %s" % (params.handler, name, exc))
[ "def", "_validate_config", "(", "self", ")", ":", "groups", "=", "dict", "(", "job", "=", "defaultdict", "(", "Bunch", ")", ",", "httpd", "=", "defaultdict", "(", "Bunch", ")", ",", ")", "for", "key", ",", "val", "in", "config", ".", "torque", ".", "items", "(", ")", ":", "# Auto-convert numbers and bools", "if", "val", ".", "isdigit", "(", ")", ":", "config", ".", "torque", "[", "key", "]", "=", "val", "=", "int", "(", "val", ")", "elif", "val", ".", "lower", "(", ")", "in", "(", "matching", ".", "TRUE", "|", "matching", ".", "FALSE", ")", ":", "val", "=", "matching", ".", "truth", "(", "str", "(", "val", ")", ",", "key", ")", "# Assemble grouped parameters", "stem", "=", "key", ".", "split", "(", "'.'", ",", "1", ")", "[", "0", "]", "if", "key", "==", "\"httpd.active\"", ":", "groups", "[", "stem", "]", "[", "\"active\"", "]", "=", "val", "elif", "stem", "in", "groups", ":", "try", ":", "stem", ",", "name", ",", "param", "=", "key", ".", "split", "(", "'.'", ",", "2", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "self", ".", "fatal", "(", "\"Bad %s configuration key %r (expecting %s.NAME.PARAM)\"", "%", "(", "stem", ",", "key", ",", "stem", ")", ")", "else", ":", "groups", "[", "stem", "]", "[", "name", "]", "[", "param", "]", "=", "val", "for", "key", ",", "val", "in", "groups", ".", "iteritems", "(", ")", ":", "setattr", "(", "self", ",", "key", ".", "replace", "(", "\"job\"", ",", "\"jobs\"", ")", ",", "Bunch", "(", "val", ")", ")", "# Validate httpd config", "if", "self", ".", "httpd", ".", "active", ":", "if", "self", ".", "httpd", ".", "waitress", ".", "url_scheme", "not", "in", "(", "\"http\"", ",", "\"https\"", ")", ":", "self", ".", "fatal", "(", "\"HTTP URL scheme must be either 'http' or 'https'\"", ")", "if", "not", "isinstance", "(", "self", ".", "httpd", ".", "waitress", ".", "port", ",", "int", ")", "or", "not", "(", "1024", "<=", "self", ".", "httpd", ".", "waitress", ".", "port", "<", "65536", ")", ":", "self", ".", "fatal", "(", "\"HTTP port must be a 16 bit number >= 1024\"", ")", "# Validate jobs", "for", "name", ",", "params", "in", "self", ".", "jobs", ".", "items", "(", ")", ":", "for", "key", "in", "(", "\"handler\"", ",", "\"schedule\"", ")", ":", "if", "key", "not", "in", "params", ":", "self", ".", "fatal", "(", "\"Job '%s' is missing the required 'job.%s.%s' parameter\"", "%", "(", "name", ",", "name", ",", "key", ")", ")", "bool_param", "=", "lambda", "k", ",", "default", ",", "p", "=", "params", ":", "matching", ".", "truth", "(", "p", ".", "get", "(", "k", ",", "default", ")", ",", "\"job.%s.%s\"", "%", "(", "name", ",", "k", ")", ")", "params", ".", "job_name", "=", "name", "params", ".", "dry_run", "=", "bool_param", "(", "\"dry_run\"", ",", "False", ")", "or", "self", ".", "options", ".", "dry_run", "params", ".", "active", "=", "bool_param", "(", "\"active\"", ",", "True", ")", "params", ".", "schedule", "=", "self", ".", "_parse_schedule", "(", "params", ".", "schedule", ")", "if", "params", ".", "active", ":", "try", ":", "params", ".", "handler", "=", "pymagic", ".", "import_name", "(", "params", ".", "handler", ")", "except", "ImportError", "as", "exc", ":", "self", ".", "fatal", "(", "\"Bad handler name '%s' for job '%s':\\n %s\"", "%", "(", "params", ".", "handler", ",", "name", ",", "exc", ")", ")" ]
avg_line_len: 42.6
score: 0.003755
def list(self, policy_id, page=None):
    """
    This API endpoint returns a paginated list of alert conditions
    associated with the given policy_id.

    This API endpoint returns a paginated list of the alert conditions
    associated with your New Relic account. Alert conditions can be
    filtered by their name, list of IDs, type (application,
    key_transaction, or server) or whether or not policies are archived
    (defaults to filtering archived policies).

    :type policy_id: int
    :param policy_id: Alert policy id

    :type page: int
    :param page: Pagination index

    :rtype: dict
    :return: The JSON response of the API, with an additional 'pages' key
        if there are paginated results

    ::

        {
            "conditions": [
                {
                    "id": "integer",
                    "type": "string",
                    "condition_scope": "string",
                    "name": "string",
                    "enabled": "boolean",
                    "entities": [
                        "integer"
                    ],
                    "metric": "string",
                    "runbook_url": "string",
                    "terms": [
                        {
                            "duration": "string",
                            "operator": "string",
                            "priority": "string",
                            "threshold": "string",
                            "time_function": "string"
                        }
                    ],
                    "user_defined": {
                        "metric": "string",
                        "value_function": "string"
                    }
                }
            ]
        }

    """
    filters = [
        'policy_id={0}'.format(policy_id),
        'page={0}'.format(page) if page else None
    ]

    return self._get(
        url='{0}alerts_conditions.json'.format(self.URL),
        headers=self.headers,
        params=self.build_param_string(filters)
    )
[ "def", "list", "(", "self", ",", "policy_id", ",", "page", "=", "None", ")", ":", "filters", "=", "[", "'policy_id={0}'", ".", "format", "(", "policy_id", ")", ",", "'page={0}'", ".", "format", "(", "page", ")", "if", "page", "else", "None", "]", "return", "self", ".", "_get", "(", "url", "=", "'{0}alerts_conditions.json'", ".", "format", "(", "self", ".", "URL", ")", ",", "headers", "=", "self", ".", "headers", ",", "params", "=", "self", ".", "build_param_string", "(", "filters", ")", ")" ]
avg_line_len: 33.9375
score: 0.00179
def merge_ndx(*args):
    """ Takes one or more index files and optionally one structure file
    and returns a path for a new merged index file.

    :param args: index files and zero or one structure file
    :return: path for the new merged index file
    """
    ndxs = []
    struct = None
    for fname in args:
        if fname.endswith('.ndx'):
            ndxs.append(fname)
        else:
            if struct is not None:
                raise ValueError("only one structure file supported")
            struct = fname

    fd, multi_ndx = tempfile.mkstemp(suffix='.ndx', prefix='multi_')
    os.close(fd)
    atexit.register(os.unlink, multi_ndx)

    if struct:
        make_ndx = registry['Make_ndx'](f=struct, n=ndxs, o=multi_ndx)
    else:
        make_ndx = registry['Make_ndx'](n=ndxs, o=multi_ndx)

    _, _, _ = make_ndx(input=['q'], stdout=False, stderr=False)
    return multi_ndx
[ "def", "merge_ndx", "(", "*", "args", ")", ":", "ndxs", "=", "[", "]", "struct", "=", "None", "for", "fname", "in", "args", ":", "if", "fname", ".", "endswith", "(", "'.ndx'", ")", ":", "ndxs", ".", "append", "(", "fname", ")", "else", ":", "if", "struct", "is", "not", "None", ":", "raise", "ValueError", "(", "\"only one structure file supported\"", ")", "struct", "=", "fname", "fd", ",", "multi_ndx", "=", "tempfile", ".", "mkstemp", "(", "suffix", "=", "'.ndx'", ",", "prefix", "=", "'multi_'", ")", "os", ".", "close", "(", "fd", ")", "atexit", ".", "register", "(", "os", ".", "unlink", ",", "multi_ndx", ")", "if", "struct", ":", "make_ndx", "=", "registry", "[", "'Make_ndx'", "]", "(", "f", "=", "struct", ",", "n", "=", "ndxs", ",", "o", "=", "multi_ndx", ")", "else", ":", "make_ndx", "=", "registry", "[", "'Make_ndx'", "]", "(", "n", "=", "ndxs", ",", "o", "=", "multi_ndx", ")", "_", ",", "_", ",", "_", "=", "make_ndx", "(", "input", "=", "[", "'q'", "]", ",", "stdout", "=", "False", ",", "stderr", "=", "False", ")", "return", "multi_ndx" ]
avg_line_len: 31.25
score: 0.001109
def split(self, data):
    """ Split data into list of string, each (self.width() - 1) length
    or less. If nul-length string specified then empty list is returned

    :param data: data to split
    :return: list of str
    """
    line = deepcopy(data)
    line_width = (self.width() - 1)
    lines = []

    while len(line):
        new_line = line[:line_width]
        new_line_pos = new_line.find('\n')
        if new_line_pos >= 0:
            new_line = line[:new_line_pos]
            line = line[(new_line_pos + 1):]
        else:
            line = line[line_width:]
        lines.append(new_line)

    return lines
[ "def", "split", "(", "self", ",", "data", ")", ":", "line", "=", "deepcopy", "(", "data", ")", "line_width", "=", "(", "self", ".", "width", "(", ")", "-", "1", ")", "lines", "=", "[", "]", "while", "len", "(", "line", ")", ":", "new_line", "=", "line", "[", ":", "line_width", "]", "new_line_pos", "=", "new_line", ".", "find", "(", "'\\n'", ")", "if", "new_line_pos", ">=", "0", ":", "new_line", "=", "line", "[", ":", "new_line_pos", "]", "line", "=", "line", "[", "(", "new_line_pos", "+", "1", ")", ":", "]", "else", ":", "line", "=", "line", "[", "line_width", ":", "]", "lines", ".", "append", "(", "new_line", ")", "return", "lines" ]
avg_line_len: 21.48
score: 0.037433
def MakeRequest(self, data):
    """Make a HTTP Post request to the server 'control' endpoint."""
    stats_collector_instance.Get().IncrementCounter("grr_client_sent_bytes",
                                                    len(data))

    # Verify the response is as it should be from the control endpoint.
    response = self.http_manager.OpenServerEndpoint(
        path="control?api=%s" % config.CONFIG["Network.api"],
        verify_cb=self.VerifyServerControlResponse,
        data=data,
        headers={"Content-Type": "binary/octet-stream"})

    if response.code == 406:
        self.InitiateEnrolment()
        return response

    if response.code == 200:
        stats_collector_instance.Get().IncrementCounter(
            "grr_client_received_bytes", len(response.data))
        return response

    # An unspecified error occured.
    return response
[ "def", "MakeRequest", "(", "self", ",", "data", ")", ":", "stats_collector_instance", ".", "Get", "(", ")", ".", "IncrementCounter", "(", "\"grr_client_sent_bytes\"", ",", "len", "(", "data", ")", ")", "# Verify the response is as it should be from the control endpoint.", "response", "=", "self", ".", "http_manager", ".", "OpenServerEndpoint", "(", "path", "=", "\"control?api=%s\"", "%", "config", ".", "CONFIG", "[", "\"Network.api\"", "]", ",", "verify_cb", "=", "self", ".", "VerifyServerControlResponse", ",", "data", "=", "data", ",", "headers", "=", "{", "\"Content-Type\"", ":", "\"binary/octet-stream\"", "}", ")", "if", "response", ".", "code", "==", "406", ":", "self", ".", "InitiateEnrolment", "(", ")", "return", "response", "if", "response", ".", "code", "==", "200", ":", "stats_collector_instance", ".", "Get", "(", ")", ".", "IncrementCounter", "(", "\"grr_client_received_bytes\"", ",", "len", "(", "response", ".", "data", ")", ")", "return", "response", "# An unspecified error occured.", "return", "response" ]
avg_line_len: 36.391304
score: 0.005821
def Filter(self, match=None, **_):
    """Filter the current expression."""
    arg = self.stack.pop(-1)

    # Filters can be specified as a comma separated list.
    for filter_name in match.group(1).split(","):
        filter_object = ConfigFilter.classes_by_name.get(filter_name)
        if filter_object is None:
            raise FilterError("Unknown filter function %r" % filter_name)

        if not filter_object.sensitive_arg:
            logging.debug("Applying filter %s for %s.", filter_name, arg)
        arg = filter_object().Filter(arg)
        precondition.AssertType(arg, Text)

    self.stack[-1] += arg
[ "def", "Filter", "(", "self", ",", "match", "=", "None", ",", "*", "*", "_", ")", ":", "arg", "=", "self", ".", "stack", ".", "pop", "(", "-", "1", ")", "# Filters can be specified as a comma separated list.", "for", "filter_name", "in", "match", ".", "group", "(", "1", ")", ".", "split", "(", "\",\"", ")", ":", "filter_object", "=", "ConfigFilter", ".", "classes_by_name", ".", "get", "(", "filter_name", ")", "if", "filter_object", "is", "None", ":", "raise", "FilterError", "(", "\"Unknown filter function %r\"", "%", "filter_name", ")", "if", "not", "filter_object", ".", "sensitive_arg", ":", "logging", ".", "debug", "(", "\"Applying filter %s for %s.\"", ",", "filter_name", ",", "arg", ")", "arg", "=", "filter_object", "(", ")", ".", "Filter", "(", "arg", ")", "precondition", ".", "AssertType", "(", "arg", ",", "Text", ")", "self", ".", "stack", "[", "-", "1", "]", "+=", "arg" ]
avg_line_len: 36.8125
score: 0.009934
def apply_T5(word):
    '''If a (V)VVV-sequence contains a VV-sequence that could be an
    /i/-final diphthong, there is a syllable boundary between it and the
    third vowel, e.g., [raa.ois.sa], [huo.uim.me], [la.eis.sa],
    [sel.vi.äi.si], [tai.an], [säi.e], [oi.om.me].'''
    WORD = _split_consonants_and_vowels(word)

    for k, v in WORD.iteritems():
        if len(v) >= 3 and is_vowel(v[0]):
            vv = [v.find(i) for i in i_DIPHTHONGS if v.find(i) > 0]

            if any(vv):
                vv = vv[0]

                if vv == v[0]:
                    WORD[k] = v[:2] + '.' + v[2:]
                else:
                    WORD[k] = v[:vv] + '.' + v[vv:]

    word = _compile_dict_into_word(WORD)

    return word
[ "def", "apply_T5", "(", "word", ")", ":", "WORD", "=", "_split_consonants_and_vowels", "(", "word", ")", "for", "k", ",", "v", "in", "WORD", ".", "iteritems", "(", ")", ":", "if", "len", "(", "v", ")", ">=", "3", "and", "is_vowel", "(", "v", "[", "0", "]", ")", ":", "vv", "=", "[", "v", ".", "find", "(", "i", ")", "for", "i", "in", "i_DIPHTHONGS", "if", "v", ".", "find", "(", "i", ")", ">", "0", "]", "if", "any", "(", "vv", ")", ":", "vv", "=", "vv", "[", "0", "]", "if", "vv", "==", "v", "[", "0", "]", ":", "WORD", "[", "k", "]", "=", "v", "[", ":", "2", "]", "+", "'.'", "+", "v", "[", "2", ":", "]", "else", ":", "WORD", "[", "k", "]", "=", "v", "[", ":", "vv", "]", "+", "'.'", "+", "v", "[", "vv", ":", "]", "word", "=", "_compile_dict_into_word", "(", "WORD", ")", "return", "word" ]
avg_line_len: 29.833333
score: 0.001353
def endpoint_delete(auth=None, **kwargs):
    '''
    Delete an endpoint

    CLI Example:

    .. code-block:: bash

        salt '*' keystoneng.endpoint_delete id=3bee4bd8c2b040ee966adfda1f0bfca9
    '''
    cloud = get_operator_cloud(auth)
    kwargs = _clean_kwargs(**kwargs)
    return cloud.delete_endpoint(**kwargs)
[ "def", "endpoint_delete", "(", "auth", "=", "None", ",", "*", "*", "kwargs", ")", ":", "cloud", "=", "get_operator_cloud", "(", "auth", ")", "kwargs", "=", "_clean_kwargs", "(", "*", "*", "kwargs", ")", "return", "cloud", ".", "delete_endpoint", "(", "*", "*", "kwargs", ")" ]
avg_line_len: 23.846154
score: 0.003106
def pingback_url(self, server_name, target_url):
    """
    Do a pingback call for the target URL.
    """
    try:
        server = ServerProxy(server_name)
        reply = server.pingback.ping(self.entry_url, target_url)
    except (Error, socket.error):
        reply = '%s cannot be pinged.' % target_url
    return reply
[ "def", "pingback_url", "(", "self", ",", "server_name", ",", "target_url", ")", ":", "try", ":", "server", "=", "ServerProxy", "(", "server_name", ")", "reply", "=", "server", ".", "pingback", ".", "ping", "(", "self", ".", "entry_url", ",", "target_url", ")", "except", "(", "Error", ",", "socket", ".", "error", ")", ":", "reply", "=", "'%s cannot be pinged.'", "%", "target_url", "return", "reply" ]
avg_line_len: 35.3
score: 0.005525
def write_profile(name, repo, token):
    """Save a profile to the CONFIG_FILE.

    After you use this method to save a profile, you can load it
    anytime later with the ``read_profile()`` function defined above.

    Args:

        name
            The name of the profile to save.

        repo
            The Github repo you want to connect to. For instance,
            this repo is ``jtpaasch/simplygithub``.

        token
            A personal access token to connect to the repo. It is
            a hash that looks something like ``ff20ae42dc...``

    Returns:
        A dictionary with the profile's ``repo`` and ``token`` values.

    """
    make_sure_folder_exists(CONFIG_FOLDER)
    config = configparser.ConfigParser()
    config.read(CONFIG_FILE)
    profile = {"repo": repo, "token": token}
    config[name] = profile
    with open(CONFIG_FILE, "w") as configfile:
        config.write(configfile)
    return profile
[ "def", "write_profile", "(", "name", ",", "repo", ",", "token", ")", ":", "make_sure_folder_exists", "(", "CONFIG_FOLDER", ")", "config", "=", "configparser", ".", "ConfigParser", "(", ")", "config", ".", "read", "(", "CONFIG_FILE", ")", "profile", "=", "{", "\"repo\"", ":", "repo", ",", "\"token\"", ":", "token", "}", "config", "[", "name", "]", "=", "profile", "with", "open", "(", "CONFIG_FILE", ",", "\"w\"", ")", "as", "configfile", ":", "config", ".", "write", "(", "configfile", ")", "return", "profile" ]
avg_line_len: 29.322581
score: 0.001065
def memoized(func=None, key_factory=equal_args, cache_factory=dict):
    """Memoizes the results of a function call.

    By default, exactly one result is memoized for each unique combination
    of function arguments.

    Note that memoization is not thread-safe and the default result cache
    will grow without bound; so care must be taken to only apply this
    decorator to functions with single threaded access and an expected
    reasonably small set of unique call parameters.

    Note that the wrapped function comes equipped with 3 helper function
    attributes:

    + `put(*args, **kwargs)`: A context manager that takes the same
      arguments as the memoized function and yields a setter function to
      set the value in the memoization cache.
    + `forget(*args, **kwargs)`: Takes the same arguments as the memoized
      function and causes the memoization cache to forget the computed
      value, if any, for those arguments.
    + `clear()`: Causes the memoization cache to be fully cleared.

    :API: public

    :param func: The function to wrap. Only generally passed by the python
        runtime and should be omitted when passing a custom `key_factory`
        or `cache_factory`.
    :param key_factory: A function that can form a cache key from the
        arguments passed to the wrapped, memoized function; by default
        uses simple parameter-set equality; ie `equal_args`.
    :param cache_factory: A no-arg callable that produces a mapping object
        to use for the memoized method's value cache. By default the
        `dict` constructor, but could be a a factory for an LRU cache for
        example.
    :raises: `ValueError` if the wrapper is applied to anything other than
        a function.
    :returns: A wrapped function that memoizes its results or else a
        function wrapper that does this.
    """
    if func is None:
        # We're being applied as a decorator factory; ie: the user has supplied args, like so:
        # >>> @memoized(cache_factory=lru_cache)
        # ... def expensive_operation(user):
        # ...   pass
        # So we return a decorator with the user-supplied args curried in for the python decorator
        # machinery to use to wrap the upcoming func.
        #
        # NB: This is just a tricky way to allow for both `@memoized` and `@memoized(...params...)`
        # application forms.  Without this trick, ie: using a decorator class or nested decorator
        # function, the no-params application would have to be `@memoized()`.  It still can, but need
        # not be and a bare `@memoized` will work as well as a `@memoized()`.
        return functools.partial(memoized, key_factory=key_factory, cache_factory=cache_factory)

    if not inspect.isfunction(func):
        raise ValueError('The @memoized decorator must be applied innermost of all decorators.')

    key_func = key_factory or equal_args
    memoized_results = cache_factory() if cache_factory else {}

    @functools.wraps(func)
    def memoize(*args, **kwargs):
        key = key_func(*args, **kwargs)
        if key in memoized_results:
            return memoized_results[key]
        result = func(*args, **kwargs)
        memoized_results[key] = result
        return result

    @contextmanager
    def put(*args, **kwargs):
        key = key_func(*args, **kwargs)
        yield functools.partial(memoized_results.__setitem__, key)
    memoize.put = put

    def forget(*args, **kwargs):
        key = key_func(*args, **kwargs)
        if key in memoized_results:
            del memoized_results[key]
    memoize.forget = forget

    def clear():
        memoized_results.clear()
    memoize.clear = clear

    return memoize
[ "def", "memoized", "(", "func", "=", "None", ",", "key_factory", "=", "equal_args", ",", "cache_factory", "=", "dict", ")", ":", "if", "func", "is", "None", ":", "# We're being applied as a decorator factory; ie: the user has supplied args, like so:", "# >>> @memoized(cache_factory=lru_cache)", "# ... def expensive_operation(user):", "# ... pass", "# So we return a decorator with the user-supplied args curried in for the python decorator", "# machinery to use to wrap the upcoming func.", "#", "# NB: This is just a tricky way to allow for both `@memoized` and `@memoized(...params...)`", "# application forms. Without this trick, ie: using a decorator class or nested decorator", "# function, the no-params application would have to be `@memoized()`. It still can, but need", "# not be and a bare `@memoized` will work as well as a `@memoized()`.", "return", "functools", ".", "partial", "(", "memoized", ",", "key_factory", "=", "key_factory", ",", "cache_factory", "=", "cache_factory", ")", "if", "not", "inspect", ".", "isfunction", "(", "func", ")", ":", "raise", "ValueError", "(", "'The @memoized decorator must be applied innermost of all decorators.'", ")", "key_func", "=", "key_factory", "or", "equal_args", "memoized_results", "=", "cache_factory", "(", ")", "if", "cache_factory", "else", "{", "}", "@", "functools", ".", "wraps", "(", "func", ")", "def", "memoize", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "key", "=", "key_func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "key", "in", "memoized_results", ":", "return", "memoized_results", "[", "key", "]", "result", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "memoized_results", "[", "key", "]", "=", "result", "return", "result", "@", "contextmanager", "def", "put", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "key", "=", "key_func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "yield", "functools", ".", "partial", "(", "memoized_results", ".", "__setitem__", ",", "key", ")", "memoize", ".", "put", "=", "put", "def", "forget", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "key", "=", "key_func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "key", "in", "memoized_results", ":", "del", "memoized_results", "[", "key", "]", "memoize", ".", "forget", "=", "forget", "def", "clear", "(", ")", ":", "memoized_results", ".", "clear", "(", ")", "memoize", ".", "clear", "=", "clear", "return", "memoize" ]
avg_line_len: 45.961538
score: 0.010923
def extract_zipdir(zip_file):
    """ Extract contents of zip file into subfolder in parent directory.

    Parameters
    ----------
    zip_file : str
        Path to zip file

    Returns
    -------
    str : folder where the zip was extracted
    """
    if not os.path.exists(zip_file):
        raise ValueError('{} does not exist'.format(zip_file))

    directory = os.path.dirname(zip_file)
    filename = os.path.basename(zip_file)
    dirpath = os.path.join(directory, filename.replace('.zip', ''))

    with zipfile.ZipFile(zip_file, 'r', zipfile.ZIP_DEFLATED) as zipf:
        zipf.extractall(dirpath)

    return dirpath
[ "def", "extract_zipdir", "(", "zip_file", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "zip_file", ")", ":", "raise", "ValueError", "(", "'{} does not exist'", ".", "format", "(", "zip_file", ")", ")", "directory", "=", "os", ".", "path", ".", "dirname", "(", "zip_file", ")", "filename", "=", "os", ".", "path", ".", "basename", "(", "zip_file", ")", "dirpath", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "filename", ".", "replace", "(", "'.zip'", ",", "''", ")", ")", "with", "zipfile", ".", "ZipFile", "(", "zip_file", ",", "'r'", ",", "zipfile", ".", "ZIP_DEFLATED", ")", "as", "zipf", ":", "zipf", ".", "extractall", "(", "dirpath", ")", "return", "dirpath" ]
avg_line_len: 27.217391
score: 0.00463
def protocol(self):
    """
    - | A (ipproto, proto_start) tuple.
      | ``ipproto`` is the IP protocol in use, e.g. Protocol.TCP or Protocol.UDP.
      | ``proto_start`` denotes the beginning of the protocol data.
      | If the packet does not match our expectations, both ipproto and proto_start are None.
    """
    if self.address_family == socket.AF_INET:
        proto = i(self.raw[9])
        start = (i(self.raw[0]) & 0b1111) * 4
    elif self.address_family == socket.AF_INET6:
        proto = i(self.raw[6])

        # skip over well-known ipv6 headers
        start = 40
        while proto in IPV6_EXT_HEADERS:
            if start >= len(self.raw):
                # less than two bytes left
                start = None
                proto = None
                break
            if proto == Protocol.FRAGMENT:
                hdrlen = 8
            elif proto == Protocol.AH:
                hdrlen = (i(self.raw[start + 1]) + 2) * 4
            else:
                # Protocol.HOPOPT, Protocol.DSTOPTS, Protocol.ROUTING
                hdrlen = (i(self.raw[start + 1]) + 1) * 8
            proto = i(self.raw[start])
            start += hdrlen
    else:
        start = None
        proto = None

    out_of_bounds = (
        (proto == Protocol.TCP and start + 20 > len(self.raw)) or
        (proto == Protocol.UDP and start + 8 > len(self.raw)) or
        (proto in {Protocol.ICMP, Protocol.ICMPV6} and start + 4 > len(self.raw))
    )
    if out_of_bounds:
        # special-case tcp/udp so that we can rely on .protocol for the port properties.
        start = None
        proto = None

    return proto, start
[ "def", "protocol", "(", "self", ")", ":", "if", "self", ".", "address_family", "==", "socket", ".", "AF_INET", ":", "proto", "=", "i", "(", "self", ".", "raw", "[", "9", "]", ")", "start", "=", "(", "i", "(", "self", ".", "raw", "[", "0", "]", ")", "&", "0b1111", ")", "*", "4", "elif", "self", ".", "address_family", "==", "socket", ".", "AF_INET6", ":", "proto", "=", "i", "(", "self", ".", "raw", "[", "6", "]", ")", "# skip over well-known ipv6 headers", "start", "=", "40", "while", "proto", "in", "IPV6_EXT_HEADERS", ":", "if", "start", ">=", "len", "(", "self", ".", "raw", ")", ":", "# less than two bytes left", "start", "=", "None", "proto", "=", "None", "break", "if", "proto", "==", "Protocol", ".", "FRAGMENT", ":", "hdrlen", "=", "8", "elif", "proto", "==", "Protocol", ".", "AH", ":", "hdrlen", "=", "(", "i", "(", "self", ".", "raw", "[", "start", "+", "1", "]", ")", "+", "2", ")", "*", "4", "else", ":", "# Protocol.HOPOPT, Protocol.DSTOPTS, Protocol.ROUTING", "hdrlen", "=", "(", "i", "(", "self", ".", "raw", "[", "start", "+", "1", "]", ")", "+", "1", ")", "*", "8", "proto", "=", "i", "(", "self", ".", "raw", "[", "start", "]", ")", "start", "+=", "hdrlen", "else", ":", "start", "=", "None", "proto", "=", "None", "out_of_bounds", "=", "(", "(", "proto", "==", "Protocol", ".", "TCP", "and", "start", "+", "20", ">", "len", "(", "self", ".", "raw", ")", ")", "or", "(", "proto", "==", "Protocol", ".", "UDP", "and", "start", "+", "8", ">", "len", "(", "self", ".", "raw", ")", ")", "or", "(", "proto", "in", "{", "Protocol", ".", "ICMP", ",", "Protocol", ".", "ICMPV6", "}", "and", "start", "+", "4", ">", "len", "(", "self", ".", "raw", ")", ")", ")", "if", "out_of_bounds", ":", "# special-case tcp/udp so that we can rely on .protocol for the port properties.", "start", "=", "None", "proto", "=", "None", "return", "proto", ",", "start" ]
avg_line_len: 39.133333
score: 0.003324
def power_off(self, timeout_sec=TIMEOUT_SEC):
    """Power off Bluetooth."""
    # Turn off bluetooth.
    self._powered_off.clear()
    IOBluetoothPreferenceSetControllerPowerState(0)
    if not self._powered_off.wait(timeout_sec):
        raise RuntimeError('Exceeded timeout waiting for adapter to power off!')
[ "def", "power_off", "(", "self", ",", "timeout_sec", "=", "TIMEOUT_SEC", ")", ":", "# Turn off bluetooth.", "self", ".", "_powered_off", ".", "clear", "(", ")", "IOBluetoothPreferenceSetControllerPowerState", "(", "0", ")", "if", "not", "self", ".", "_powered_off", ".", "wait", "(", "timeout_sec", ")", ":", "raise", "RuntimeError", "(", "'Exceeded timeout waiting for adapter to power off!'", ")" ]
avg_line_len: 47.285714
score: 0.008902
def parse_properties(parent_index_name, parent_name, nested_path, esProperties):
    """
    RETURN THE COLUMN DEFINITIONS IN THE GIVEN esProperties OBJECT
    """
    columns = FlatList()
    for name, property in esProperties.items():
        index_name = parent_index_name
        column_name = concat_field(parent_name, name)
        jx_name = column_name

        if property.type == "nested" and property.properties:
            # NESTED TYPE IS A NEW TYPE DEFINITION
            # MARKUP CHILD COLUMNS WITH THE EXTRA DEPTH
            self_columns = parse_properties(index_name, column_name, [column_name] + nested_path, property.properties)
            columns.extend(self_columns)
            columns.append(Column(
                name=jx_name,
                es_index=index_name,
                es_column=column_name,
                es_type="nested",
                jx_type=NESTED,
                last_updated=Date.now(),
                nested_path=nested_path
            ))
            continue

        if property.properties:
            child_columns = parse_properties(index_name, column_name, nested_path, property.properties)
            columns.extend(child_columns)
            columns.append(Column(
                name=jx_name,
                es_index=index_name,
                es_column=column_name,
                es_type="source" if property.enabled == False else "object",
                jx_type=OBJECT,
                last_updated=Date.now(),
                nested_path=nested_path
            ))

        if property.dynamic:
            continue
        if not property.type:
            continue

        cardinality = 0 if not (property.store or property.enabled) and name != '_id' else None

        if property.fields:
            child_columns = parse_properties(index_name, column_name, nested_path, property.fields)
            if cardinality is None:
                for cc in child_columns:
                    cc.cardinality = None
            columns.extend(child_columns)

        if property.type in es_type_to_json_type.keys():
            columns.append(Column(
                name=jx_name,
                es_index=index_name,
                es_column=column_name,
                es_type=property.type,
                jx_type=es_type_to_json_type[property.type],
                cardinality=cardinality,
                last_updated=Date.now(),
                nested_path=nested_path
            ))
            if property.index_name and name != property.index_name:
                columns.append(Column(
                    name=jx_name,
                    es_index=index_name,
                    es_column=column_name,
                    es_type=property.type,
                    jx_type=es_type_to_json_type[property.type],
                    cardinality=0 if property.store else None,
                    last_updated=Date.now(),
                    nested_path=nested_path
                ))
        elif property.enabled == None or property.enabled == False:
            columns.append(Column(
                name=jx_name,
                es_index=index_name,
                es_column=column_name,
                es_type="source" if property.enabled == False else "object",
                jx_type=OBJECT,
                cardinality=0 if property.store else None,
                last_updated=Date.now(),
                nested_path=nested_path
            ))
        else:
            Log.warning("unknown type {{type}} for property {{path}}", type=property.type, path=parent_name)

    return columns
[ "def", "parse_properties", "(", "parent_index_name", ",", "parent_name", ",", "nested_path", ",", "esProperties", ")", ":", "columns", "=", "FlatList", "(", ")", "for", "name", ",", "property", "in", "esProperties", ".", "items", "(", ")", ":", "index_name", "=", "parent_index_name", "column_name", "=", "concat_field", "(", "parent_name", ",", "name", ")", "jx_name", "=", "column_name", "if", "property", ".", "type", "==", "\"nested\"", "and", "property", ".", "properties", ":", "# NESTED TYPE IS A NEW TYPE DEFINITION", "# MARKUP CHILD COLUMNS WITH THE EXTRA DEPTH", "self_columns", "=", "parse_properties", "(", "index_name", ",", "column_name", ",", "[", "column_name", "]", "+", "nested_path", ",", "property", ".", "properties", ")", "columns", ".", "extend", "(", "self_columns", ")", "columns", ".", "append", "(", "Column", "(", "name", "=", "jx_name", ",", "es_index", "=", "index_name", ",", "es_column", "=", "column_name", ",", "es_type", "=", "\"nested\"", ",", "jx_type", "=", "NESTED", ",", "last_updated", "=", "Date", ".", "now", "(", ")", ",", "nested_path", "=", "nested_path", ")", ")", "continue", "if", "property", ".", "properties", ":", "child_columns", "=", "parse_properties", "(", "index_name", ",", "column_name", ",", "nested_path", ",", "property", ".", "properties", ")", "columns", ".", "extend", "(", "child_columns", ")", "columns", ".", "append", "(", "Column", "(", "name", "=", "jx_name", ",", "es_index", "=", "index_name", ",", "es_column", "=", "column_name", ",", "es_type", "=", "\"source\"", "if", "property", ".", "enabled", "==", "False", "else", "\"object\"", ",", "jx_type", "=", "OBJECT", ",", "last_updated", "=", "Date", ".", "now", "(", ")", ",", "nested_path", "=", "nested_path", ")", ")", "if", "property", ".", "dynamic", ":", "continue", "if", "not", "property", ".", "type", ":", "continue", "cardinality", "=", "0", "if", "not", "(", "property", ".", "store", "or", "property", ".", "enabled", ")", "and", "name", "!=", "'_id'", "else", "None", "if", "property", ".", "fields", ":", "child_columns", "=", "parse_properties", "(", "index_name", ",", "column_name", ",", "nested_path", ",", "property", ".", "fields", ")", "if", "cardinality", "is", "None", ":", "for", "cc", "in", "child_columns", ":", "cc", ".", "cardinality", "=", "None", "columns", ".", "extend", "(", "child_columns", ")", "if", "property", ".", "type", "in", "es_type_to_json_type", ".", "keys", "(", ")", ":", "columns", ".", "append", "(", "Column", "(", "name", "=", "jx_name", ",", "es_index", "=", "index_name", ",", "es_column", "=", "column_name", ",", "es_type", "=", "property", ".", "type", ",", "jx_type", "=", "es_type_to_json_type", "[", "property", ".", "type", "]", ",", "cardinality", "=", "cardinality", ",", "last_updated", "=", "Date", ".", "now", "(", ")", ",", "nested_path", "=", "nested_path", ")", ")", "if", "property", ".", "index_name", "and", "name", "!=", "property", ".", "index_name", ":", "columns", ".", "append", "(", "Column", "(", "name", "=", "jx_name", ",", "es_index", "=", "index_name", ",", "es_column", "=", "column_name", ",", "es_type", "=", "property", ".", "type", ",", "jx_type", "=", "es_type_to_json_type", "[", "property", ".", "type", "]", ",", "cardinality", "=", "0", "if", "property", ".", "store", "else", "None", ",", "last_updated", "=", "Date", ".", "now", "(", ")", ",", "nested_path", "=", "nested_path", ")", ")", "elif", "property", ".", "enabled", "==", "None", "or", "property", ".", "enabled", "==", "False", ":", "columns", ".", "append", "(", 
"Column", "(", "name", "=", "jx_name", ",", "es_index", "=", "index_name", ",", "es_column", "=", "column_name", ",", "es_type", "=", "\"source\"", "if", "property", ".", "enabled", "==", "False", "else", "\"object\"", ",", "jx_type", "=", "OBJECT", ",", "cardinality", "=", "0", "if", "property", ".", "store", "else", "None", ",", "last_updated", "=", "Date", ".", "now", "(", ")", ",", "nested_path", "=", "nested_path", ")", ")", "else", ":", "Log", ".", "warning", "(", "\"unknown type {{type}} for property {{path}}\"", ",", "type", "=", "property", ".", "type", ",", "path", "=", "parent_name", ")", "return", "columns" ]
avg_line_len: 38.395604
score: 0.003069
def _read_content(self):
    """ Reads metadata from location (and path_in_arc if archive)

    This function is called during the init process and
    should not be used in isolation: it overwrites unsafed metadata.
    """
    if self._path_in_arc:
        with zipfile.ZipFile(file=str(self._metadata_file)) as zf:
            self._content = json.loads(
                zf.read(self._path_in_arc).decode('utf-8'),
                object_pairs_hook=OrderedDict)
    else:
        with self._metadata_file.open('r') as mdf:
            self._content = json.load(mdf,
                                      object_pairs_hook=OrderedDict)
[ "def", "_read_content", "(", "self", ")", ":", "if", "self", ".", "_path_in_arc", ":", "with", "zipfile", ".", "ZipFile", "(", "file", "=", "str", "(", "self", ".", "_metadata_file", ")", ")", "as", "zf", ":", "self", ".", "_content", "=", "json", ".", "loads", "(", "zf", ".", "read", "(", "self", ".", "_path_in_arc", ")", ".", "decode", "(", "'utf-8'", ")", ",", "object_pairs_hook", "=", "OrderedDict", ")", "else", ":", "with", "self", ".", "_metadata_file", ".", "open", "(", "'r'", ")", "as", "mdf", ":", "self", ".", "_content", "=", "json", ".", "load", "(", "mdf", ",", "object_pairs_hook", "=", "OrderedDict", ")" ]
avg_line_len: 42.625
score: 0.002869
def load_tags(self, max_pages=30):
    """
    Load all WordPress tags from the given site.

    :param max_pages: kill counter to avoid infinite looping
    :return: None
    """
    logger.info("loading tags")

    # clear them all out so we don't get dupes if requested
    if self.purge_first:
        Tag.objects.filter(site_id=self.site_id).delete()

    path = "sites/{}/tags".format(self.site_id)
    params = {"number": 1000}
    page = 1
    response = self.get(path, params)

    if not response.ok:
        logger.warning("Response NOT OK! status_code=%s\n%s", response.status_code, response.text)

    while response.ok and response.text and page < max_pages:
        logger.info(" - page: %d", page)

        api_tags = response.json().get("tags")
        if not api_tags:
            # we're done here
            break

        tags = []
        for api_tag in api_tags:
            # if it exists locally, update local version if anything has changed
            existing_tag = Tag.objects.filter(site_id=self.site_id, wp_id=api_tag["ID"]).first()
            if existing_tag:
                self.update_existing_tag(existing_tag, api_tag)
            else:
                tags.append(self.get_new_tag(api_tag))

        if tags:
            Tag.objects.bulk_create(tags)
        elif not self.full:
            # we're done here
            break

        # get next page
        page += 1
        params["page"] = page
        response = self.get(path, params)
        if not response.ok:
            logger.warning("Response NOT OK! status_code=%s\n%s", response.status_code, response.text)

    return
[ "def", "load_tags", "(", "self", ",", "max_pages", "=", "30", ")", ":", "logger", ".", "info", "(", "\"loading tags\"", ")", "# clear them all out so we don't get dupes if requested", "if", "self", ".", "purge_first", ":", "Tag", ".", "objects", ".", "filter", "(", "site_id", "=", "self", ".", "site_id", ")", ".", "delete", "(", ")", "path", "=", "\"sites/{}/tags\"", ".", "format", "(", "self", ".", "site_id", ")", "params", "=", "{", "\"number\"", ":", "1000", "}", "page", "=", "1", "response", "=", "self", ".", "get", "(", "path", ",", "params", ")", "if", "not", "response", ".", "ok", ":", "logger", ".", "warning", "(", "\"Response NOT OK! status_code=%s\\n%s\"", ",", "response", ".", "status_code", ",", "response", ".", "text", ")", "while", "response", ".", "ok", "and", "response", ".", "text", "and", "page", "<", "max_pages", ":", "logger", ".", "info", "(", "\" - page: %d\"", ",", "page", ")", "api_tags", "=", "response", ".", "json", "(", ")", ".", "get", "(", "\"tags\"", ")", "if", "not", "api_tags", ":", "# we're done here", "break", "tags", "=", "[", "]", "for", "api_tag", "in", "api_tags", ":", "# if it exists locally, update local version if anything has changed", "existing_tag", "=", "Tag", ".", "objects", ".", "filter", "(", "site_id", "=", "self", ".", "site_id", ",", "wp_id", "=", "api_tag", "[", "\"ID\"", "]", ")", ".", "first", "(", ")", "if", "existing_tag", ":", "self", ".", "update_existing_tag", "(", "existing_tag", ",", "api_tag", ")", "else", ":", "tags", ".", "append", "(", "self", ".", "get_new_tag", "(", "api_tag", ")", ")", "if", "tags", ":", "Tag", ".", "objects", ".", "bulk_create", "(", "tags", ")", "elif", "not", "self", ".", "full", ":", "# we're done here", "break", "# get next page", "page", "+=", "1", "params", "[", "\"page\"", "]", "=", "page", "response", "=", "self", ".", "get", "(", "path", ",", "params", ")", "if", "not", "response", ".", "ok", ":", "logger", ".", "warning", "(", "\"Response NOT OK! status_code=%s\\n%s\"", ",", "response", ".", "status_code", ",", "response", ".", "text", ")", "return" ]
avg_line_len: 32.314815
score: 0.003337
def check_rights(self, resources, request=None):
    """ Check rights for resources.

    :return bool: True if operation is success else HTTP_403_FORBIDDEN
    """
    if not self.auth:
        return True

    try:
        if not self.auth.test_rights(resources, request=request):
            raise AssertionError()
    except AssertionError, e:
        raise HttpError("Access forbiden. {0}".format(e), status=status.HTTP_403_FORBIDDEN)
[ "def", "check_rights", "(", "self", ",", "resources", ",", "request", "=", "None", ")", ":", "if", "not", "self", ".", "auth", ":", "return", "True", "try", ":", "if", "not", "self", ".", "auth", ".", "test_rights", "(", "resources", ",", "request", "=", "request", ")", ":", "raise", "AssertionError", "(", ")", "except", "AssertionError", ",", "e", ":", "raise", "HttpError", "(", "\"Access forbiden. {0}\"", ".", "format", "(", "e", ")", ",", "status", "=", "status", ".", "HTTP_403_FORBIDDEN", ")" ]
avg_line_len: 31.133333
score: 0.006237
def preamble():
    """
    Log the Andes command-line preamble at the `logging.INFO` level

    Returns
    -------
    None
    """
    from . import __version__ as version
    logger.info('ANDES {ver} (Build {b}, Python {p} on {os})'
                .format(ver=version[:5], b=version[-8:],
                        p=platform.python_version(),
                        os=platform.system()))
    try:
        username = os.getlogin() + ', '
    except OSError:
        username = ''
    logger.info('Session: {}{}'.format(username, strftime("%m/%d/%Y %I:%M:%S %p")))
    logger.info('')
[ "def", "preamble", "(", ")", ":", "from", ".", "import", "__version__", "as", "version", "logger", ".", "info", "(", "'ANDES {ver} (Build {b}, Python {p} on {os})'", ".", "format", "(", "ver", "=", "version", "[", ":", "5", "]", ",", "b", "=", "version", "[", "-", "8", ":", "]", ",", "p", "=", "platform", ".", "python_version", "(", ")", ",", "os", "=", "platform", ".", "system", "(", ")", ")", ")", "try", ":", "username", "=", "os", ".", "getlogin", "(", ")", "+", "', '", "except", "OSError", ":", "username", "=", "''", "logger", ".", "info", "(", "'Session: {}{}'", ".", "format", "(", "username", ",", "strftime", "(", "\"%m/%d/%Y %I:%M:%S %p\"", ")", ")", ")", "logger", ".", "info", "(", "''", ")" ]
avg_line_len: 27.857143
score: 0.001653
def execute(self, globals_=None, _locals=None):
    """ Execute a code object

    The inputs and behavior of this function should match those of
    eval_ and exec_.

    .. _eval: https://docs.python.org/3/library/functions.html?highlight=eval#eval
    .. _exec: https://docs.python.org/3/library/functions.html?highlight=exec#exec

    .. note:: Need to figure out how the internals of this function must
        change for ``eval`` or ``exec``.

    :param code: a python code object
    :param globals_: optional globals dictionary
    :param _locals: optional locals dictionary
    """
    if globals_ is None:
        globals_ = globals()
    if _locals is None:
        self._locals = globals_
    else:
        self._locals = _locals
    self.globals_ = globals_

    if self.contains_op("YIELD_VALUE"):
        return self.iterate_instructions()
    else:
        return self.execute_instructions()
[ "def", "execute", "(", "self", ",", "globals_", "=", "None", ",", "_locals", "=", "None", ")", ":", "if", "globals_", "is", "None", ":", "globals_", "=", "globals", "(", ")", "if", "_locals", "is", "None", ":", "self", ".", "_locals", "=", "globals_", "else", ":", "self", ".", "_locals", "=", "_locals", "self", ".", "globals_", "=", "globals_", "if", "self", ".", "contains_op", "(", "\"YIELD_VALUE\"", ")", ":", "return", "self", ".", "iterate_instructions", "(", ")", "else", ":", "return", "self", ".", "execute_instructions", "(", ")" ]
avg_line_len: 33
score: 0.006869
def safe_to_exit(self, *args, **kargs):
    """
    Overrided to prevent user from exiting selection until they have
    selected the right amount of instances
    """
    if len(self.value) == self.instance_num:
        return True
    return False
[ "def", "safe_to_exit", "(", "self", ",", "*", "args", ",", "*", "*", "kargs", ")", ":", "if", "len", "(", "self", ".", "value", ")", "==", "self", ".", "instance_num", ":", "return", "True", "return", "False" ]
avg_line_len: 33.75
score: 0.00722
def alltoall(self, x, mesh_axis, split_axis, concat_axis):
    """Grouped alltoall (like MPI alltoall with splitting and concatenation).

    Args:
        x: a LaidOutTensor
        mesh_axis: an integer the mesh axis along which to group
        split_axis: an integer (the Tensor axis along which to split)
        concat_axis: an integer (the Tensor axis along which to concatenate)

    Returns:
        a LaidOutTensor
    """
    x = x.to_laid_out_tensor()
    t = x.one_slice
    group_assignment = self._create_group_assignment([mesh_axis])

    dtype = t.dtype
    if dtype == tf.float32:
        # There seems to be a bug with float32 alltoall.
        # Do it in bfloat16 until the bug is fixed.
        # TODO(noam): file a bug
        t = tf.to_bfloat16(t)
    t = tpu_ops.all_to_all(
        t,
        concat_dimension=concat_axis,
        split_dimension=split_axis,
        split_count=len(group_assignment[0]),
        group_assignment=group_assignment)
    t = tf.cast(t, dtype)
    x = self.LaidOutTensor([t])
    return x
[ "def", "alltoall", "(", "self", ",", "x", ",", "mesh_axis", ",", "split_axis", ",", "concat_axis", ")", ":", "x", "=", "x", ".", "to_laid_out_tensor", "(", ")", "t", "=", "x", ".", "one_slice", "group_assignment", "=", "self", ".", "_create_group_assignment", "(", "[", "mesh_axis", "]", ")", "dtype", "=", "t", ".", "dtype", "if", "dtype", "==", "tf", ".", "float32", ":", "# There seems to be a bug with float32 alltoall.", "# Do it in bfloat16 until the bug is fixed.", "# TODO(noam): file a bug", "t", "=", "tf", ".", "to_bfloat16", "(", "t", ")", "t", "=", "tpu_ops", ".", "all_to_all", "(", "t", ",", "concat_dimension", "=", "concat_axis", ",", "split_dimension", "=", "split_axis", ",", "split_count", "=", "len", "(", "group_assignment", "[", "0", "]", ")", ",", "group_assignment", "=", "group_assignment", ")", "t", "=", "tf", ".", "cast", "(", "t", ",", "dtype", ")", "x", "=", "self", ".", "LaidOutTensor", "(", "[", "t", "]", ")", "return", "x" ]
avg_line_len: 34.310345
score: 0.004888
def publish_events(
        self, topic_hostname, events, custom_headers=None, raw=False, **operation_config):
    """Publishes a batch of events to an Azure Event Grid topic.

    :param topic_hostname: The host name of the topic, e.g.
        topic1.westus2-1.eventgrid.azure.net
    :type topic_hostname: str
    :param events: An array of events to be published to Event Grid.
    :type events: list[~azure.eventgrid.models.EventGridEvent]
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
        deserialized response
    :param operation_config: :ref:`Operation configuration
        overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
        :class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
    """
    # Construct URL
    url = self.publish_events.metadata['url']
    path_format_arguments = {
        'topicHostname': self._serialize.url("topic_hostname", topic_hostname, 'str', skip_quote=True)
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if custom_headers:
        header_parameters.update(custom_headers)

    # Construct body
    body_content = self._serialize.body(events, '[EventGridEvent]')

    # Construct and send request
    request = self._client.post(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, stream=False, **operation_config)

    if response.status_code not in [200]:
        raise HttpOperationError(self._deserialize, response)

    if raw:
        client_raw_response = ClientRawResponse(None, response)
        return client_raw_response
[ "def", "publish_events", "(", "self", ",", "topic_hostname", ",", "events", ",", "custom_headers", "=", "None", ",", "raw", "=", "False", ",", "*", "*", "operation_config", ")", ":", "# Construct URL", "url", "=", "self", ".", "publish_events", ".", "metadata", "[", "'url'", "]", "path_format_arguments", "=", "{", "'topicHostname'", ":", "self", ".", "_serialize", ".", "url", "(", "\"topic_hostname\"", ",", "topic_hostname", ",", "'str'", ",", "skip_quote", "=", "True", ")", "}", "url", "=", "self", ".", "_client", ".", "format_url", "(", "url", ",", "*", "*", "path_format_arguments", ")", "# Construct parameters", "query_parameters", "=", "{", "}", "query_parameters", "[", "'api-version'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "\"self.api_version\"", ",", "self", ".", "api_version", ",", "'str'", ")", "# Construct headers", "header_parameters", "=", "{", "}", "header_parameters", "[", "'Content-Type'", "]", "=", "'application/json; charset=utf-8'", "if", "custom_headers", ":", "header_parameters", ".", "update", "(", "custom_headers", ")", "# Construct body", "body_content", "=", "self", ".", "_serialize", ".", "body", "(", "events", ",", "'[EventGridEvent]'", ")", "# Construct and send request", "request", "=", "self", ".", "_client", ".", "post", "(", "url", ",", "query_parameters", ")", "response", "=", "self", ".", "_client", ".", "send", "(", "request", ",", "header_parameters", ",", "body_content", ",", "stream", "=", "False", ",", "*", "*", "operation_config", ")", "if", "response", ".", "status_code", "not", "in", "[", "200", "]", ":", "raise", "HttpOperationError", "(", "self", ".", "_deserialize", ",", "response", ")", "if", "raw", ":", "client_raw_response", "=", "ClientRawResponse", "(", "None", ",", "response", ")", "return", "client_raw_response" ]
43.32
0.002709
def hardware_version(self):
        """Get a hardware identification string."""

        hardware_string = self.hardware_string
        if not isinstance(hardware_string, bytes):
            hardware_string = self.hardware_string.encode('utf-8')

        if len(hardware_string) > 10:
            self._logger.warn("Truncating hardware string that was longer than 10 bytes: %s", self.hardware_string)
            hardware_string = hardware_string[:10]

        if len(hardware_string) < 10:
            hardware_string += b'\0'*(10 - len(hardware_string))

        return [hardware_string]
[ "def", "hardware_version", "(", "self", ")", ":", "hardware_string", "=", "self", ".", "hardware_string", "if", "not", "isinstance", "(", "hardware_string", ",", "bytes", ")", ":", "hardware_string", "=", "self", ".", "hardware_string", ".", "encode", "(", "'utf-8'", ")", "if", "len", "(", "hardware_string", ")", ">", "10", ":", "self", ".", "_logger", ".", "warn", "(", "\"Truncating hardware string that was longer than 10 bytes: %s\"", ",", "self", ".", "hardware_string", ")", "if", "len", "(", "hardware_string", ")", "<", "10", ":", "hardware_string", "+=", "b'\\0'", "*", "(", "10", "-", "len", "(", "hardware_string", ")", ")", "return", "[", "hardware_string", "]" ]
35
0.005566
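The pad-or-truncate contract in the record above reduces to a slice plus bytes.ljust. A minimal standalone sketch (hypothetical helper name, 10-byte width as in the record):

def fixed_width_id(value, size=10):
    # encode if needed, cut to at most `size` bytes, then NUL-pad to exactly `size`
    if not isinstance(value, bytes):
        value = value.encode('utf-8')
    return value[:size].ljust(size, b'\0')

assert fixed_width_id('hw-rev-a') == b'hw-rev-a\0\0'
assert fixed_width_id('a-very-long-hardware-name') == b'a-very-lon'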
def demultiplex_cells(fastq, out_dir, readnumber, prefix, cb_histogram, cb_cutoff): ''' Demultiplex a fastqtransformed FASTQ file into a FASTQ file for each cell. ''' annotations = detect_fastq_annotations(fastq) re_string = construct_transformed_regex(annotations) parser_re = re.compile(re_string) readstring = "" if not readnumber else "_R{}".format(readnumber) filestring = "{prefix}{sample}{readstring}.fq" cb_set = set() if cb_histogram: cb_set = get_cb_depth_set(cb_histogram, cb_cutoff) sample_set = set() batch = collections.defaultdict(list) parsed = 0 safe_makedir(out_dir) for read in read_fastq(fastq): parsed += 1 match = parser_re.search(read).groupdict() sample = match['CB'] if cb_set and sample not in cb_set: continue sample_set.add(sample) batch[sample].append(read) # write in batches to avoid opening up file handles repeatedly if not parsed % 10000000: for sample, reads in batch.items(): out_file = os.path.join(out_dir, filestring.format(**locals())) with open(out_file, "a") as out_handle: for read in reads: out_handle.write(read) batch = collections.defaultdict(list) for sample, reads in batch.items(): out_file = os.path.join(out_dir, filestring.format(**locals())) with open(out_file, "a") as out_handle: for read in reads: out_handle.write(read)
[ "def", "demultiplex_cells", "(", "fastq", ",", "out_dir", ",", "readnumber", ",", "prefix", ",", "cb_histogram", ",", "cb_cutoff", ")", ":", "annotations", "=", "detect_fastq_annotations", "(", "fastq", ")", "re_string", "=", "construct_transformed_regex", "(", "annotations", ")", "parser_re", "=", "re", ".", "compile", "(", "re_string", ")", "readstring", "=", "\"\"", "if", "not", "readnumber", "else", "\"_R{}\"", ".", "format", "(", "readnumber", ")", "filestring", "=", "\"{prefix}{sample}{readstring}.fq\"", "cb_set", "=", "set", "(", ")", "if", "cb_histogram", ":", "cb_set", "=", "get_cb_depth_set", "(", "cb_histogram", ",", "cb_cutoff", ")", "sample_set", "=", "set", "(", ")", "batch", "=", "collections", ".", "defaultdict", "(", "list", ")", "parsed", "=", "0", "safe_makedir", "(", "out_dir", ")", "for", "read", "in", "read_fastq", "(", "fastq", ")", ":", "parsed", "+=", "1", "match", "=", "parser_re", ".", "search", "(", "read", ")", ".", "groupdict", "(", ")", "sample", "=", "match", "[", "'CB'", "]", "if", "cb_set", "and", "sample", "not", "in", "cb_set", ":", "continue", "sample_set", ".", "add", "(", "sample", ")", "batch", "[", "sample", "]", ".", "append", "(", "read", ")", "# write in batches to avoid opening up file handles repeatedly", "if", "not", "parsed", "%", "10000000", ":", "for", "sample", ",", "reads", "in", "batch", ".", "items", "(", ")", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "out_dir", ",", "filestring", ".", "format", "(", "*", "*", "locals", "(", ")", ")", ")", "with", "open", "(", "out_file", ",", "\"a\"", ")", "as", "out_handle", ":", "for", "read", "in", "reads", ":", "out_handle", ".", "write", "(", "read", ")", "batch", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "sample", ",", "reads", "in", "batch", ".", "items", "(", ")", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "out_dir", ",", "filestring", ".", "format", "(", "*", "*", "locals", "(", ")", ")", ")", "with", "open", "(", "out_file", ",", "\"a\"", ")", "as", "out_handle", ":", "for", "read", "in", "reads", ":", "out_handle", ".", "write", "(", "read", ")" ]
39.871795
0.001255
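A self-contained sketch of the batched-append pattern demultiplex_cells relies on; toy records and a small flush interval stand in for FASTQ reads and the 10,000,000-record threshold:

import collections
import os

def flush(batch, out_dir):
    # append each key's buffered records to its own file, then drop the buffer
    for key, records in batch.items():
        with open(os.path.join(out_dir, key + '.txt'), 'a') as handle:
            handle.writelines(records)

batch = collections.defaultdict(list)
for n, (key, record) in enumerate([('A', 'r1\n'), ('B', 'r2\n'), ('A', 'r3\n')], 1):
    batch[key].append(record)
    if n % 2 == 0:  # periodic flush keeps file handles short-lived
        flush(batch, '.')
        batch = collections.defaultdict(list)
flush(batch, '.')  # final flush for whatever remains in the buffer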
def do_loop_turn(self): """Receiver daemon main loop :return: None """ # Begin to clean modules self.check_and_del_zombie_modules() # Maybe the arbiter pushed a new configuration... if self.watch_for_new_conf(timeout=0.05): logger.info("I got a new configuration...") # Manage the new configuration self.setup_new_conf() # Maybe external modules raised 'objects' # we should get them _t0 = time.time() self.get_objects_from_from_queues() statsmgr.timer('core.get-objects-from-queues', time.time() - _t0) # Get external commands from the arbiters... _t0 = time.time() self.get_external_commands_from_arbiters() statsmgr.timer('external-commands.got.time', time.time() - _t0) statsmgr.gauge('external-commands.got.count', len(self.unprocessed_external_commands)) _t0 = time.time() self.push_external_commands_to_schedulers() statsmgr.timer('external-commands.pushed.time', time.time() - _t0) # Say to modules it's a new tick :) _t0 = time.time() self.hook_point('tick') statsmgr.timer('hook.tick', time.time() - _t0)
[ "def", "do_loop_turn", "(", "self", ")", ":", "# Begin to clean modules", "self", ".", "check_and_del_zombie_modules", "(", ")", "# Maybe the arbiter pushed a new configuration...", "if", "self", ".", "watch_for_new_conf", "(", "timeout", "=", "0.05", ")", ":", "logger", ".", "info", "(", "\"I got a new configuration...\"", ")", "# Manage the new configuration", "self", ".", "setup_new_conf", "(", ")", "# Maybe external modules raised 'objects'", "# we should get them", "_t0", "=", "time", ".", "time", "(", ")", "self", ".", "get_objects_from_from_queues", "(", ")", "statsmgr", ".", "timer", "(", "'core.get-objects-from-queues'", ",", "time", ".", "time", "(", ")", "-", "_t0", ")", "# Get external commands from the arbiters...", "_t0", "=", "time", ".", "time", "(", ")", "self", ".", "get_external_commands_from_arbiters", "(", ")", "statsmgr", ".", "timer", "(", "'external-commands.got.time'", ",", "time", ".", "time", "(", ")", "-", "_t0", ")", "statsmgr", ".", "gauge", "(", "'external-commands.got.count'", ",", "len", "(", "self", ".", "unprocessed_external_commands", ")", ")", "_t0", "=", "time", ".", "time", "(", ")", "self", ".", "push_external_commands_to_schedulers", "(", ")", "statsmgr", ".", "timer", "(", "'external-commands.pushed.time'", ",", "time", ".", "time", "(", ")", "-", "_t0", ")", "# Say to modules it's a new tick :)", "_t0", "=", "time", ".", "time", "(", ")", "self", ".", "hook_point", "(", "'tick'", ")", "statsmgr", ".", "timer", "(", "'hook.tick'", ",", "time", ".", "time", "(", ")", "-", "_t0", ")" ]
34.714286
0.002402
def maps_get_default_rules_output_rules_groupname(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") maps_get_default_rules = ET.Element("maps_get_default_rules") config = maps_get_default_rules output = ET.SubElement(maps_get_default_rules, "output") rules = ET.SubElement(output, "rules") groupname = ET.SubElement(rules, "groupname") groupname.text = kwargs.pop('groupname') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "maps_get_default_rules_output_rules_groupname", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "maps_get_default_rules", "=", "ET", ".", "Element", "(", "\"maps_get_default_rules\"", ")", "config", "=", "maps_get_default_rules", "output", "=", "ET", ".", "SubElement", "(", "maps_get_default_rules", ",", "\"output\"", ")", "rules", "=", "ET", ".", "SubElement", "(", "output", ",", "\"rules\"", ")", "groupname", "=", "ET", ".", "SubElement", "(", "rules", ",", "\"groupname\"", ")", "groupname", ".", "text", "=", "kwargs", ".", "pop", "(", "'groupname'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
42.384615
0.003552
def _send(self, data):
        """
        Send data to statsite. Data that can not be sent will be queued.
        """
        retry = self.RETRY
        # Attempt to send any data in the queue
        while retry > 0:
            # Check socket
            if not self.socket:
                # Log Error
                self.log.error("StatsiteHandler: Socket unavailable.")
                # Attempt to reestablish connection
                self._connect()
                # Decrement retry
                retry -= 1
                # Try again
                continue
            try:
                # Send data to socket; build the payload locally so that a
                # retry does not re-split an already formatted string
                parts = data.split()
                payload = parts[0] + ":" + parts[1] + "|kv\n"
                self.socket.sendall(payload)
                # Done
                break
            except socket.error as e:
                # Log Error
                self.log.error("StatsiteHandler: Failed sending data. %s.", e)
                # Attempt to reestablish connection
                self._close()
                # Decrement retry
                retry -= 1
                # Try again
                continue
[ "def", "_send", "(", "self", ",", "data", ")", ":", "retry", "=", "self", ".", "RETRY", "# Attempt to send any data in the queue", "while", "retry", ">", "0", ":", "# Check socket", "if", "not", "self", ".", "socket", ":", "# Log Error", "self", ".", "log", ".", "error", "(", "\"StatsiteHandler: Socket unavailable.\"", ")", "# Attempt to restablish connection", "self", ".", "_connect", "(", ")", "# Decrement retry", "retry", "-=", "1", "# Try again", "continue", "try", ":", "# Send data to socket", "data", "=", "data", ".", "split", "(", ")", "data", "=", "data", "[", "0", "]", "+", "\":\"", "+", "data", "[", "1", "]", "+", "\"|kv\\n\"", "self", ".", "socket", ".", "sendall", "(", "data", ")", "# Done", "break", "except", "socket", ".", "error", "as", "e", ":", "# Log Error", "self", ".", "log", ".", "error", "(", "\"StatsiteHandler: Failed sending data. %s.\"", ",", "e", ")", "# Attempt to restablish connection", "self", ".", "_close", "(", ")", "# Decrement retry", "retry", "-=", "1", "# try again", "continue" ]
33.848485
0.001741
def _redirect_complete(self, text: str, line: str, begidx: int, endidx: int, compfunc: Callable) -> List[str]: """Called by complete() as the first tab completion function for all commands It determines if it should tab complete for redirection (|, <, >, >>) or use the completer function for the current command :param text: the string prefix we are attempting to match (all returned matches must begin with it) :param line: the current input line with leading whitespace removed :param begidx: the beginning index of the prefix text :param endidx: the ending index of the prefix text :param compfunc: the completer function for the current command this will be called if we aren't completing for redirection :return: a list of possible tab completions """ if self.allow_redirection: # Get all tokens through the one being completed. We want the raw tokens # so we can tell if redirection strings are quoted and ignore them. _, raw_tokens = self.tokens_for_completion(line, begidx, endidx) if not raw_tokens: return [] if len(raw_tokens) > 1: # Check if there are redirection strings prior to the token being completed seen_pipe = False has_redirection = False for cur_token in raw_tokens[:-1]: if cur_token in constants.REDIRECTION_TOKENS: has_redirection = True if cur_token == constants.REDIRECTION_PIPE: seen_pipe = True # Get token prior to the one being completed prior_token = raw_tokens[-2] # If a pipe is right before the token being completed, complete a shell command as the piped process if prior_token == constants.REDIRECTION_PIPE: return self.shell_cmd_complete(text, line, begidx, endidx) # Otherwise do path completion either as files to redirectors or arguments to the piped process elif prior_token in constants.REDIRECTION_TOKENS or seen_pipe: return self.path_complete(text, line, begidx, endidx) # If there were redirection strings anywhere on the command line, then we # are no longer tab completing for the current command elif has_redirection: return [] # Call the command's completer function return compfunc(text, line, begidx, endidx)
[ "def", "_redirect_complete", "(", "self", ",", "text", ":", "str", ",", "line", ":", "str", ",", "begidx", ":", "int", ",", "endidx", ":", "int", ",", "compfunc", ":", "Callable", ")", "->", "List", "[", "str", "]", ":", "if", "self", ".", "allow_redirection", ":", "# Get all tokens through the one being completed. We want the raw tokens", "# so we can tell if redirection strings are quoted and ignore them.", "_", ",", "raw_tokens", "=", "self", ".", "tokens_for_completion", "(", "line", ",", "begidx", ",", "endidx", ")", "if", "not", "raw_tokens", ":", "return", "[", "]", "if", "len", "(", "raw_tokens", ")", ">", "1", ":", "# Check if there are redirection strings prior to the token being completed", "seen_pipe", "=", "False", "has_redirection", "=", "False", "for", "cur_token", "in", "raw_tokens", "[", ":", "-", "1", "]", ":", "if", "cur_token", "in", "constants", ".", "REDIRECTION_TOKENS", ":", "has_redirection", "=", "True", "if", "cur_token", "==", "constants", ".", "REDIRECTION_PIPE", ":", "seen_pipe", "=", "True", "# Get token prior to the one being completed", "prior_token", "=", "raw_tokens", "[", "-", "2", "]", "# If a pipe is right before the token being completed, complete a shell command as the piped process", "if", "prior_token", "==", "constants", ".", "REDIRECTION_PIPE", ":", "return", "self", ".", "shell_cmd_complete", "(", "text", ",", "line", ",", "begidx", ",", "endidx", ")", "# Otherwise do path completion either as files to redirectors or arguments to the piped process", "elif", "prior_token", "in", "constants", ".", "REDIRECTION_TOKENS", "or", "seen_pipe", ":", "return", "self", ".", "path_complete", "(", "text", ",", "line", ",", "begidx", ",", "endidx", ")", "# If there were redirection strings anywhere on the command line, then we", "# are no longer tab completing for the current command", "elif", "has_redirection", ":", "return", "[", "]", "# Call the command's completer function", "return", "compfunc", "(", "text", ",", "line", ",", "begidx", ",", "endidx", ")" ]
49.846154
0.00454
def imagenet_clamp_batch(batch, low, high):
    """ Not necessary in practice """
    # F.clip returns a new array rather than mutating in place, so the
    # clamped channels must be assigned back into the batch
    batch[:,0,:,:] = F.clip(batch[:,0,:,:], low-123.680, high-123.680)
    batch[:,1,:,:] = F.clip(batch[:,1,:,:], low-116.779, high-116.779)
    batch[:,2,:,:] = F.clip(batch[:,2,:,:], low-103.939, high-103.939)
[ "def", "imagenet_clamp_batch", "(", "batch", ",", "low", ",", "high", ")", ":", "F", ".", "clip", "(", "batch", "[", ":", ",", "0", ",", ":", ",", ":", "]", ",", "low", "-", "123.680", ",", "high", "-", "123.680", ")", "F", ".", "clip", "(", "batch", "[", ":", ",", "1", ",", ":", ",", ":", "]", ",", "low", "-", "116.779", ",", "high", "-", "116.779", ")", "F", ".", "clip", "(", "batch", "[", ":", ",", "2", ",", ":", ",", ":", "]", ",", "low", "-", "103.939", ",", "high", "-", "103.939", ")" ]
47.2
0.054167
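The same per-channel clamp restated with numpy, which makes the assignment requirement explicit: clip returns a new array instead of mutating its input (a 0-255 pixel range is assumed here for illustration):

import numpy as np

batch = np.random.randn(2, 3, 8, 8) * 300.0
offsets = (123.680, 116.779, 103.939)  # per-channel ImageNet means
for channel, offset in enumerate(offsets):
    # assign the clipped values back; np.clip does not modify in place
    batch[:, channel, :, :] = np.clip(batch[:, channel, :, :], 0.0 - offset, 255.0 - offset)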
def create_rules(self): '''Adds rules for the command line options''' dmp = apps.get_app_config('django_mako_plus') # the default rules = [ # files are included by default Rule('*', level=None, filetype=TYPE_FILE, score=1), # files at the app level are skipped Rule('*', level=0, filetype=TYPE_FILE, score=-2), # directories are recursed by default Rule('*', level=None, filetype=TYPE_DIRECTORY, score=1), # directories at the app level are skipped Rule('*', level=0, filetype=TYPE_DIRECTORY, score=-2), # media, scripts, styles directories are what we want to copy Rule('media', level=0, filetype=TYPE_DIRECTORY, score=6), Rule('scripts', level=0, filetype=TYPE_DIRECTORY, score=6), Rule('styles', level=0, filetype=TYPE_DIRECTORY, score=6), # ignore the template cache directories Rule(dmp.options['TEMPLATES_CACHE_DIR'], level=None, filetype=TYPE_DIRECTORY, score=-3), # ignore python cache directories Rule('__pycache__', level=None, filetype=TYPE_DIRECTORY, score=-3), # ignore compiled python files Rule('*.pyc', level=None, filetype=TYPE_FILE, score=-3), ] # include rules have score of 50 because they trump all initial rules for pattern in (self.options.get('include_dir') or []): self.message('Setting rule - recurse directories: {}'.format(pattern), 1) rules.append(Rule(pattern, level=None, filetype=TYPE_DIRECTORY, score=50)) for pattern in (self.options.get('include_file') or []): self.message('Setting rule - include files: {}'.format(pattern), 1) rules.append(Rule(pattern, level=None, filetype=TYPE_FILE, score=50)) # skip rules have score of 100 because they trump everything, including the includes from the command line for pattern in (self.options.get('skip_dir') or []): self.message('Setting rule - skip directories: {}'.format(pattern), 1) rules.append(Rule(pattern, level=None, filetype=TYPE_DIRECTORY, score=-100)) for pattern in (self.options.get('skip_file') or []): self.message('Setting rule - skip files: {}'.format(pattern), 1) rules.append(Rule(pattern, level=None, filetype=TYPE_FILE, score=-100)) return rules
[ "def", "create_rules", "(", "self", ")", ":", "dmp", "=", "apps", ".", "get_app_config", "(", "'django_mako_plus'", ")", "# the default", "rules", "=", "[", "# files are included by default", "Rule", "(", "'*'", ",", "level", "=", "None", ",", "filetype", "=", "TYPE_FILE", ",", "score", "=", "1", ")", ",", "# files at the app level are skipped", "Rule", "(", "'*'", ",", "level", "=", "0", ",", "filetype", "=", "TYPE_FILE", ",", "score", "=", "-", "2", ")", ",", "# directories are recursed by default", "Rule", "(", "'*'", ",", "level", "=", "None", ",", "filetype", "=", "TYPE_DIRECTORY", ",", "score", "=", "1", ")", ",", "# directories at the app level are skipped", "Rule", "(", "'*'", ",", "level", "=", "0", ",", "filetype", "=", "TYPE_DIRECTORY", ",", "score", "=", "-", "2", ")", ",", "# media, scripts, styles directories are what we want to copy", "Rule", "(", "'media'", ",", "level", "=", "0", ",", "filetype", "=", "TYPE_DIRECTORY", ",", "score", "=", "6", ")", ",", "Rule", "(", "'scripts'", ",", "level", "=", "0", ",", "filetype", "=", "TYPE_DIRECTORY", ",", "score", "=", "6", ")", ",", "Rule", "(", "'styles'", ",", "level", "=", "0", ",", "filetype", "=", "TYPE_DIRECTORY", ",", "score", "=", "6", ")", ",", "# ignore the template cache directories", "Rule", "(", "dmp", ".", "options", "[", "'TEMPLATES_CACHE_DIR'", "]", ",", "level", "=", "None", ",", "filetype", "=", "TYPE_DIRECTORY", ",", "score", "=", "-", "3", ")", ",", "# ignore python cache directories", "Rule", "(", "'__pycache__'", ",", "level", "=", "None", ",", "filetype", "=", "TYPE_DIRECTORY", ",", "score", "=", "-", "3", ")", ",", "# ignore compiled python files", "Rule", "(", "'*.pyc'", ",", "level", "=", "None", ",", "filetype", "=", "TYPE_FILE", ",", "score", "=", "-", "3", ")", ",", "]", "# include rules have score of 50 because they trump all initial rules", "for", "pattern", "in", "(", "self", ".", "options", ".", "get", "(", "'include_dir'", ")", "or", "[", "]", ")", ":", "self", ".", "message", "(", "'Setting rule - recurse directories: {}'", ".", "format", "(", "pattern", ")", ",", "1", ")", "rules", ".", "append", "(", "Rule", "(", "pattern", ",", "level", "=", "None", ",", "filetype", "=", "TYPE_DIRECTORY", ",", "score", "=", "50", ")", ")", "for", "pattern", "in", "(", "self", ".", "options", ".", "get", "(", "'include_file'", ")", "or", "[", "]", ")", ":", "self", ".", "message", "(", "'Setting rule - include files: {}'", ".", "format", "(", "pattern", ")", ",", "1", ")", "rules", ".", "append", "(", "Rule", "(", "pattern", ",", "level", "=", "None", ",", "filetype", "=", "TYPE_FILE", ",", "score", "=", "50", ")", ")", "# skip rules have score of 100 because they trump everything, including the includes from the command line", "for", "pattern", "in", "(", "self", ".", "options", ".", "get", "(", "'skip_dir'", ")", "or", "[", "]", ")", ":", "self", ".", "message", "(", "'Setting rule - skip directories: {}'", ".", "format", "(", "pattern", ")", ",", "1", ")", "rules", ".", "append", "(", "Rule", "(", "pattern", ",", "level", "=", "None", ",", "filetype", "=", "TYPE_DIRECTORY", ",", "score", "=", "-", "100", ")", ")", "for", "pattern", "in", "(", "self", ".", "options", ".", "get", "(", "'skip_file'", ")", "or", "[", "]", ")", ":", "self", ".", "message", "(", "'Setting rule - skip files: {}'", ".", "format", "(", "pattern", ")", ",", "1", ")", "rules", ".", "append", "(", "Rule", "(", "pattern", ",", "level", "=", "None", ",", "filetype", "=", "TYPE_FILE", ",", "score", "=", "-", "100", ")", ")", "return", 
"rules" ]
66.707317
0.006847
def gut_message(message: Message) -> Message: """ Remove body from a message, and wrap in a message/external-body. """ wrapper = Message() wrapper.add_header('Content-Type', 'message/external-body', access_type='x-spam-deleted', expiration=time.strftime("%a, %d %b %Y %H:%M:%S %z"), size=str(len(message.get_payload()))) message.set_payload('') wrapper.set_payload([message]) return wrapper
[ "def", "gut_message", "(", "message", ":", "Message", ")", "->", "Message", ":", "wrapper", "=", "Message", "(", ")", "wrapper", ".", "add_header", "(", "'Content-Type'", ",", "'message/external-body'", ",", "access_type", "=", "'x-spam-deleted'", ",", "expiration", "=", "time", ".", "strftime", "(", "\"%a, %d %b %Y %H:%M:%S %z\"", ")", ",", "size", "=", "str", "(", "len", "(", "message", ".", "get_payload", "(", ")", ")", ")", ")", "message", ".", "set_payload", "(", "''", ")", "wrapper", ".", "set_payload", "(", "[", "message", "]", ")", "return", "wrapper" ]
34.285714
0.002028
def pipe(self, func: Union[Callable[..., T], Tuple[Callable[..., T], str]], *args, **kwargs) -> T: """ Apply func(self, *args, **kwargs) This method replicates the pandas method of the same name. Parameters ---------- func : function function to apply to this xarray object (Dataset/DataArray). ``args``, and ``kwargs`` are passed into ``func``. Alternatively a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a string indicating the keyword of ``callable`` that expects the xarray object. args : positional arguments passed into ``func``. kwargs : a dictionary of keyword arguments passed into ``func``. Returns ------- object : the return type of ``func``. Notes ----- Use ``.pipe`` when chaining together functions that expect xarray or pandas objects, e.g., instead of writing >>> f(g(h(ds), arg1=a), arg2=b, arg3=c) You can write >>> (ds.pipe(h) ... .pipe(g, arg1=a) ... .pipe(f, arg2=b, arg3=c) ... ) If you have a function that takes the data as (say) the second argument, pass a tuple indicating which keyword expects the data. For example, suppose ``f`` takes its data as ``arg2``: >>> (ds.pipe(h) ... .pipe(g, arg1=a) ... .pipe((f, 'arg2'), arg1=a, arg3=c) ... ) See Also -------- pandas.DataFrame.pipe """ if isinstance(func, tuple): func, target = func if target in kwargs: raise ValueError('%s is both the pipe target and a keyword ' 'argument' % target) kwargs[target] = self return func(*args, **kwargs) else: return func(self, *args, **kwargs)
[ "def", "pipe", "(", "self", ",", "func", ":", "Union", "[", "Callable", "[", "...", ",", "T", "]", ",", "Tuple", "[", "Callable", "[", "...", ",", "T", "]", ",", "str", "]", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")", "->", "T", ":", "if", "isinstance", "(", "func", ",", "tuple", ")", ":", "func", ",", "target", "=", "func", "if", "target", "in", "kwargs", ":", "raise", "ValueError", "(", "'%s is both the pipe target and a keyword '", "'argument'", "%", "target", ")", "kwargs", "[", "target", "]", "=", "self", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "return", "func", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
32.067797
0.001538
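A self-contained re-creation of the tuple-form dispatch at the end of pipe, with a hypothetical Pipeable class standing in for the xarray object:

class Pipeable:
    def __init__(self, value):
        self.value = value

    def pipe(self, func, *args, **kwargs):
        # mirrors the branch above: unpack (callable, data_keyword) tuples
        if isinstance(func, tuple):
            func, target = func
            kwargs[target] = self
            return func(*args, **kwargs)
        return func(self, *args, **kwargs)

def scale(factor, data):
    return Pipeable(data.value * factor)

# obj.pipe((scale, 'data'), 2) calls scale(2, data=obj)
assert Pipeable(3).pipe((scale, 'data'), 2).value == 6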
def get_area_code(self, ip): ''' Get area_code ''' rec = self.get_all(ip) return rec and rec.area_code
[ "def", "get_area_code", "(", "self", ",", "ip", ")", ":", "rec", "=", "self", ".", "get_all", "(", "ip", ")", "return", "rec", "and", "rec", ".", "area_code" ]
30.75
0.015873
def error(self, msg, *args, **kwargs) -> Task: # type: ignore """ Log msg with severity 'ERROR'. To pass exception information, use the keyword argument exc_info with a true value, e.g. await logger.error("Houston, we have a major problem", exc_info=1) """ return self._make_log_task(logging.ERROR, msg, args, **kwargs)
[ "def", "error", "(", "self", ",", "msg", ",", "*", "args", ",", "*", "*", "kwargs", ")", "->", "Task", ":", "# type: ignore", "return", "self", ".", "_make_log_task", "(", "logging", ".", "ERROR", ",", "msg", ",", "args", ",", "*", "*", "kwargs", ")" ]
36.9
0.005291
def ignore(self, filename): """Ignore a given filename or not.""" _, ext = os.path.splitext(filename) return ext in ['.pyc', '.pyo', '.o', '.swp']
[ "def", "ignore", "(", "self", ",", "filename", ")", ":", "_", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "return", "ext", "in", "[", "'.pyc'", ",", "'.pyo'", ",", "'.o'", ",", "'.swp'", "]" ]
41.75
0.011765
def apps(self): """ Dictionary with loaded applications. """ logger.debug("initialize applications ...") enabled = None apps = self.args.apps or self._config_apps.keys() unknown = set(apps) - set(self._config_apps.keys()) if unknown: raise LogRaptorArgumentError("--apps", "not found apps %r" % list(unknown)) if apps or enabled is None: return {k: v for k, v in self._config_apps.items() if k in apps} else: return {k: v for k, v in self._config_apps.items() if k in apps and v.enabled == enabled}
[ "def", "apps", "(", "self", ")", ":", "logger", ".", "debug", "(", "\"initialize applications ...\"", ")", "enabled", "=", "None", "apps", "=", "self", ".", "args", ".", "apps", "or", "self", ".", "_config_apps", ".", "keys", "(", ")", "unknown", "=", "set", "(", "apps", ")", "-", "set", "(", "self", ".", "_config_apps", ".", "keys", "(", ")", ")", "if", "unknown", ":", "raise", "LogRaptorArgumentError", "(", "\"--apps\"", ",", "\"not found apps %r\"", "%", "list", "(", "unknown", ")", ")", "if", "apps", "or", "enabled", "is", "None", ":", "return", "{", "k", ":", "v", "for", "k", ",", "v", "in", "self", ".", "_config_apps", ".", "items", "(", ")", "if", "k", "in", "apps", "}", "else", ":", "return", "{", "k", ":", "v", "for", "k", ",", "v", "in", "self", ".", "_config_apps", ".", "items", "(", ")", "if", "k", "in", "apps", "and", "v", ".", "enabled", "==", "enabled", "}" ]
40.066667
0.006504
def versions_from_trove(trove):
    """Finds supported Python versions from a list of trove classifiers.

    Args:
        trove: list of trove classifiers

    Returns:
        sorted list of major Python version strings, e.g. ['2', '3']
    """
    versions = set()
    for classifier in trove:
        if 'Programming Language :: Python ::' in classifier:
            ver = classifier.split('::')[-1]
            major = ver.split('.')[0].strip()
            if major:
                versions.add(major)
    return sorted(
        set([v for v in versions if v.replace('.', '', 1).isdigit()]))
[ "def", "versions_from_trove", "(", "trove", ")", ":", "versions", "=", "set", "(", ")", "for", "classifier", "in", "trove", ":", "if", "'Programming Language :: Python ::'", "in", "classifier", ":", "ver", "=", "classifier", ".", "split", "(", "'::'", ")", "[", "-", "1", "]", "major", "=", "ver", ".", "split", "(", "'.'", ")", "[", "0", "]", ".", "strip", "(", ")", "if", "major", ":", "versions", ".", "add", "(", "major", ")", "return", "sorted", "(", "set", "(", "[", "v", "for", "v", "in", "versions", "if", "v", ".", "replace", "(", "'.'", ",", "''", ",", "1", ")", ".", "isdigit", "(", ")", "]", ")", ")" ]
33.3125
0.001825
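An illustrative round trip through versions_from_trove: numeric majors are collected once each, and non-numeric entries such as implementation classifiers are dropped by the isdigit() filter:

classifiers = [
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: Implementation :: PyPy',
]
assert versions_from_trove(classifiers) == ['2', '3']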
def get_line_pts(pt0i, pt0j, pt1i, pt1j):
    '''Retrieve the coordinates of the points along lines

    pt0i, pt0j - the starting coordinates of the lines (1-d nparray)
    pt1i, pt1j - the ending coordinates of the lines (1-d nparray)

    use the Bresenham algorithm to find the coordinates along the lines
    connecting pt0 and pt1. pt0i, pt0j, pt1i and pt1j must be 1-d arrays
    of similar size and must be of integer type. The results are returned
    as four vectors - index, count, i, j.

    index is the index of the first point in the line for each coordinate pair
    count is the # of points in the line
    i is the I coordinates for each point
    j is the J coordinate for each point
    '''
    assert len(pt0i) == len(pt0j)
    assert len(pt0i) == len(pt1i)
    assert len(pt0i) == len(pt1j)
    pt0i = np.array(pt0i, int)
    pt0j = np.array(pt0j, int)
    pt1i = np.array(pt1i, int)
    pt1j = np.array(pt1j, int)
    if len(pt0i) == 0:
        # Return four zero-length arrays if nothing passed in
        return [np.zeros((0,),int)] * 4
    #
    # The Bresenham algorithm picks the coordinate that varies the most
    # and generates one point for each step in that coordinate. Add one
    # for the start point.
    #
    diff_i = np.abs(pt0i - pt1i)
    diff_j = np.abs(pt0j - pt1j)
    count = np.maximum(diff_i, diff_j).astype(int) + 1
    #
    # The indexes of the ends of the coordinate vectors are at the
    # cumulative sum of the counts. We get to the starts by subtracting
    # the count.
    #
    index = np.cumsum(count) - count
    #
    # Find the step directions per coordinate pair.
    # True = 1, False = 0
    # True * 2 - 1 = 1, False * 2 - 1 = -1
    #
    step_i = (pt1i > pt0i).astype(int) * 2 - 1
    step_j = (pt1j > pt0j).astype(int) * 2 - 1
    #
    # Make arrays to hold the results
    #
    n_pts = index[-1] + count[-1]
    i = np.zeros(n_pts, int)
    j = np.zeros(n_pts, int)
    #
    # Put pt0 into the arrays
    #
    i[index] = pt0i
    j[index] = pt0j
    #
    # # # # # # # # # #
    #
    # Do the points for which I varies most (or it's a tie).
    #
    mask = (diff_i >= diff_j)
    count_t = count[mask]
    if len(count_t) > 0:
        last_n = np.max(count_t)
        diff_i_t = diff_i[mask]
        diff_j_t = diff_j[mask]
        remainder = diff_j_t * 2 - diff_i_t
        current_i = pt0i[mask]
        current_j = pt0j[mask]
        index_t = index[mask]
        step_i_t = step_i[mask]
        step_j_t = step_j[mask]
        for n in range(1,last_n+1):
            #
            # Eliminate all points that are done
            #
            mask = (count_t > n)
            remainder = remainder[mask]
            current_i = current_i[mask]
            current_j = current_j[mask]
            index_t = index_t[mask]
            count_t = count_t[mask]
            diff_i_t = diff_i_t[mask]
            diff_j_t = diff_j_t[mask]
            step_i_t = step_i_t[mask]
            step_j_t = step_j_t[mask]
            #
            # Take a step in the J direction if the remainder is positive
            #
            remainder_mask = (remainder >= 0)
            current_j[remainder_mask] += step_j_t[remainder_mask]
            remainder[remainder_mask] -= diff_i_t[remainder_mask] * 2
            #
            # Always take a step in the I direction
            #
            current_i += step_i_t
            remainder += diff_j_t * 2
            i[index_t+n] = current_i
            j[index_t+n] = current_j
    #
    # # # # # # # # # #
    #
    # Do the points for which J varies most
    #
    mask = (diff_j > diff_i)
    count_t = count[mask]
    if len(count_t) > 0:
        last_n = np.max(count_t)
        diff_i_t = diff_i[mask]
        diff_j_t = diff_j[mask]
        remainder = diff_i_t * 2 - diff_j_t
        current_i = pt0i[mask]
        current_j = pt0j[mask]
        index_t = index[mask]
        step_i_t = step_i[mask]
        step_j_t = step_j[mask]
        for n in range(1,last_n+1):
            #
            # Eliminate all points that are done
            #
            mask = (count_t > n)
            remainder = remainder[mask]
            current_i = current_i[mask]
            current_j = current_j[mask]
            index_t = index_t[mask]
            count_t = count_t[mask]
            diff_i_t = diff_i_t[mask]
            diff_j_t = diff_j_t[mask]
            step_i_t = step_i_t[mask]
            step_j_t = step_j_t[mask]
            #
            # Take a step in the I direction if the remainder is positive
            #
            remainder_mask = (remainder >= 0)
            current_i[remainder_mask] += step_i_t[remainder_mask]
            remainder[remainder_mask] -= diff_j_t[remainder_mask] * 2
            #
            # Always take a step in the J direction
            #
            current_j += step_j_t
            remainder += diff_i_t * 2
            i[index_t+n] = current_i
            j[index_t+n] = current_j
    return index, count, i, j
[ "def", "get_line_pts", "(", "pt0i", ",", "pt0j", ",", "pt1i", ",", "pt1j", ")", ":", "assert", "len", "(", "pt0i", ")", "==", "len", "(", "pt0j", ")", "assert", "len", "(", "pt0i", ")", "==", "len", "(", "pt1i", ")", "assert", "len", "(", "pt0i", ")", "==", "len", "(", "pt1j", ")", "pt0i", "=", "np", ".", "array", "(", "pt0i", ",", "int", ")", "pt0j", "=", "np", ".", "array", "(", "pt0j", ",", "int", ")", "pt1i", "=", "np", ".", "array", "(", "pt1i", ",", "int", ")", "pt1j", "=", "np", ".", "array", "(", "pt1j", ",", "int", ")", "if", "len", "(", "pt0i", ")", "==", "0", ":", "# Return four zero-length arrays if nothing passed in", "return", "[", "np", ".", "zeros", "(", "(", "0", ",", ")", ",", "int", ")", "]", "*", "4", "#", "# The Bresenham algorithm picks the coordinate that varies the most", "# and generates one point for each step in that coordinate. Add one", "# for the start point.", "#", "diff_i", "=", "np", ".", "abs", "(", "pt0i", "-", "pt1i", ")", "diff_j", "=", "np", ".", "abs", "(", "pt0j", "-", "pt1j", ")", "count", "=", "np", ".", "maximum", "(", "diff_i", ",", "diff_j", ")", ".", "astype", "(", "int", ")", "+", "1", "#", "# The indexes of the ends of the coordinate vectors are at the", "# cumulative sum of the counts. We get to the starts by subtracting", "# the count.", "#", "index", "=", "np", ".", "cumsum", "(", "count", ")", "-", "count", "#", "# Find the step directions per coordinate pair. ", "# True = 1, False = 0", "# True * 2 - 1 = 1, False * 2 - 1 = -1", "#", "step_i", "=", "(", "pt1i", ">", "pt0i", ")", ".", "astype", "(", "int", ")", "*", "2", "-", "1", "step_j", "=", "(", "pt1j", ">", "pt0j", ")", ".", "astype", "(", "int", ")", "*", "2", "-", "1", "#", "# Make arrays to hold the results", "#", "n_pts", "=", "index", "[", "-", "1", "]", "+", "count", "[", "-", "1", "]", "i", "=", "np", ".", "zeros", "(", "n_pts", ",", "int", ")", "j", "=", "np", ".", "zeros", "(", "n_pts", ",", "int", ")", "#", "# Put pt0 into the arrays", "#", "i", "[", "index", "]", "=", "pt0i", "j", "[", "index", "]", "=", "pt0j", "#", "# # # # # # # # # #", "#", "# Do the points for which I varies most (or it's a tie).", "#", "mask", "=", "(", "diff_i", ">=", "diff_j", ")", "count_t", "=", "count", "[", "mask", "]", "if", "len", "(", "count_t", ")", ">", "0", ":", "last_n", "=", "np", ".", "max", "(", "count_t", ")", "diff_i_t", "=", "diff_i", "[", "mask", "]", "diff_j_t", "=", "diff_j", "[", "mask", "]", "remainder", "=", "diff_j_t", "*", "2", "-", "diff_i_t", "current_i", "=", "pt0i", "[", "mask", "]", "current_j", "=", "pt0j", "[", "mask", "]", "index_t", "=", "index", "[", "mask", "]", "step_i_t", "=", "step_i", "[", "mask", "]", "step_j_t", "=", "step_j", "[", "mask", "]", "for", "n", "in", "range", "(", "1", ",", "last_n", "+", "1", ")", ":", "#", "# Eliminate all points that are done", "#", "mask", "=", "(", "count_t", ">", "n", ")", "remainder", "=", "remainder", "[", "mask", "]", "current_i", "=", "current_i", "[", "mask", "]", "current_j", "=", "current_j", "[", "mask", "]", "index_t", "=", "index_t", "[", "mask", "]", "count_t", "=", "count_t", "[", "mask", "]", "diff_i_t", "=", "diff_i_t", "[", "mask", "]", "diff_j_t", "=", "diff_j_t", "[", "mask", "]", "step_i_t", "=", "step_i_t", "[", "mask", "]", "step_j_t", "=", "step_j_t", "[", "mask", "]", "#", "# Take a step in the J direction if the remainder is positive", "#", "remainder_mask", "=", "(", "remainder", ">=", "0", ")", "current_j", "[", "remainder_mask", "]", "+=", "step_j_t", "[", "remainder_mask", "]", "remainder", 
"[", "remainder_mask", "]", "-=", "diff_i_t", "[", "remainder_mask", "]", "*", "2", "#", "# Always take a step in the I direction", "#", "current_i", "+=", "step_i_t", "remainder", "+=", "diff_j_t", "*", "2", "i", "[", "index_t", "+", "n", "]", "=", "current_i", "j", "[", "index_t", "+", "n", "]", "=", "current_j", "#", "# # # # # # # # # #", "#", "# Do the points for which J varies most", "#", "mask", "=", "(", "diff_j", ">", "diff_i", ")", "count_t", "=", "count", "[", "mask", "]", "if", "len", "(", "count_t", ")", ">", "0", ":", "last_n", "=", "np", ".", "max", "(", "count_t", ")", "diff_i_t", "=", "diff_i", "[", "mask", "]", "diff_j_t", "=", "diff_j", "[", "mask", "]", "remainder", "=", "diff_i_t", "*", "2", "-", "diff_j_t", "current_i", "=", "pt0i", "[", "mask", "]", "current_j", "=", "pt0j", "[", "mask", "]", "index_t", "=", "index", "[", "mask", "]", "step_i_t", "=", "step_i", "[", "mask", "]", "step_j_t", "=", "step_j", "[", "mask", "]", "for", "n", "in", "range", "(", "1", ",", "last_n", "+", "1", ")", ":", "#", "# Eliminate all points that are done", "#", "mask", "=", "(", "count_t", ">", "n", ")", "remainder", "=", "remainder", "[", "mask", "]", "current_i", "=", "current_i", "[", "mask", "]", "current_j", "=", "current_j", "[", "mask", "]", "index_t", "=", "index_t", "[", "mask", "]", "count_t", "=", "count_t", "[", "mask", "]", "diff_i_t", "=", "diff_i_t", "[", "mask", "]", "diff_j_t", "=", "diff_j_t", "[", "mask", "]", "step_i_t", "=", "step_i_t", "[", "mask", "]", "step_j_t", "=", "step_j_t", "[", "mask", "]", "#", "# Take a step in the I direction if the remainder is positive", "#", "remainder_mask", "=", "(", "remainder", ">=", "0", ")", "current_i", "[", "remainder_mask", "]", "+=", "step_i_t", "[", "remainder_mask", "]", "remainder", "[", "remainder_mask", "]", "-=", "diff_j_t", "[", "remainder_mask", "]", "*", "2", "#", "# Always take a step in the J direction", "#", "current_j", "+=", "step_j_t", "remainder", "+=", "diff_i_t", "*", "2", "i", "[", "index_t", "+", "n", "]", "=", "current_i", "j", "[", "index_t", "+", "n", "]", "=", "current_j", "return", "index", ",", "count", ",", "i", ",", "j" ]
33.081633
0.001597
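A small usage sketch for get_line_pts (assuming numpy is imported as np, as the function requires): trace one line and unpack the flat i/j vectors with index and count:

index, count, i, j = get_line_pts([0], [0], [2], [5])
assert count[0] == 6  # max(|di|, |dj|) + 1 points for the single line
for start, n in zip(index, count):
    print(list(zip(i[start:start + n], j[start:start + n])))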
def _distribution(gtfs, table, column): """Count occurrences of values AND return it as a string. Example return value: '1:5 2:15'""" cur = gtfs.conn.cursor() cur.execute('SELECT {column}, count(*) ' 'FROM {table} GROUP BY {column} ' 'ORDER BY {column}'.format(column=column, table=table)) return ' '.join('%s:%s' % (t, c) for t, c in cur)
[ "def", "_distribution", "(", "gtfs", ",", "table", ",", "column", ")", ":", "cur", "=", "gtfs", ".", "conn", ".", "cursor", "(", ")", "cur", ".", "execute", "(", "'SELECT {column}, count(*) '", "'FROM {table} GROUP BY {column} '", "'ORDER BY {column}'", ".", "format", "(", "column", "=", "column", ",", "table", "=", "table", ")", ")", "return", "' '", ".", "join", "(", "'%s:%s'", "%", "(", "t", ",", "c", ")", "for", "t", ",", "c", "in", "cur", ")" ]
42.888889
0.002538
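The value:count summary _distribution builds, sketched standalone against an in-memory SQLite table (the real function runs the same query over a GTFS connection):

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE routes (route_type INTEGER)')
conn.executemany('INSERT INTO routes VALUES (?)', [(1,), (2,), (2,)])
cur = conn.execute('SELECT route_type, count(*) FROM routes '
                   'GROUP BY route_type ORDER BY route_type')
print(' '.join('%s:%s' % (t, c) for t, c in cur))  # prints "1:1 2:2"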
def _filter_commands(ctx, commands=None): """Return list of used commands.""" lookup = getattr(ctx.command, 'commands', {}) if not lookup and isinstance(ctx.command, click.MultiCommand): lookup = _get_lazyload_commands(ctx.command) if commands is None: return sorted(lookup.values(), key=lambda item: item.name) names = [name.strip() for name in commands.split(',')] return [lookup[name] for name in names if name in lookup]
[ "def", "_filter_commands", "(", "ctx", ",", "commands", "=", "None", ")", ":", "lookup", "=", "getattr", "(", "ctx", ".", "command", ",", "'commands'", ",", "{", "}", ")", "if", "not", "lookup", "and", "isinstance", "(", "ctx", ".", "command", ",", "click", ".", "MultiCommand", ")", ":", "lookup", "=", "_get_lazyload_commands", "(", "ctx", ".", "command", ")", "if", "commands", "is", "None", ":", "return", "sorted", "(", "lookup", ".", "values", "(", ")", ",", "key", "=", "lambda", "item", ":", "item", ".", "name", ")", "names", "=", "[", "name", ".", "strip", "(", ")", "for", "name", "in", "commands", ".", "split", "(", "','", ")", "]", "return", "[", "lookup", "[", "name", "]", "for", "name", "in", "names", "if", "name", "in", "lookup", "]" ]
41.454545
0.002146
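The comma-list branch of _filter_commands, checked standalone with a plain dict in place of a real click command lookup:

lookup = {'init': 'cmd-init', 'run': 'cmd-run', 'stop': 'cmd-stop'}
names = [name.strip() for name in 'run, stop, missing'.split(',')]
# unknown names are silently skipped, matching the function's behavior
assert [lookup[name] for name in names if name in lookup] == ['cmd-run', 'cmd-stop']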
def validate_file(parser, arg): """Validates that `arg` is a valid file.""" if not os.path.isfile(arg): parser.error("%s is not a file." % arg) return arg
[ "def", "validate_file", "(", "parser", ",", "arg", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "arg", ")", ":", "parser", ".", "error", "(", "\"%s is not a file.\"", "%", "arg", ")", "return", "arg" ]
34
0.005747
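Typical argparse wiring for validate_file, sketched with a temporary file so the check passes (POSIX assumed, since NamedTemporaryFile is reopened by name while still open):

import argparse
import tempfile

parser = argparse.ArgumentParser()
parser.add_argument('config', type=lambda arg: validate_file(parser, arg))
with tempfile.NamedTemporaryFile() as handle:
    args = parser.parse_args([handle.name])  # parser.error() fires for non-files
    assert args.config == handle.name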
def parse_address(address: str) -> str: """ Parse an email address, falling back to the raw string given. """ display_name, parsed_address = email.utils.parseaddr(address) return parsed_address or address
[ "def", "parse_address", "(", "address", ":", "str", ")", "->", "str", ":", "display_name", ",", "parsed_address", "=", "email", ".", "utils", ".", "parseaddr", "(", "address", ")", "return", "parsed_address", "or", "address" ]
31.285714
0.004444
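Two illustrative calls (assuming the email.utils import the function relies on): parseaddr strips the display name from an RFC 5322 address, and a bare address falls through unchanged:

assert parse_address('Jane Doe <jane@example.com>') == 'jane@example.com'
assert parse_address('jane@example.com') == 'jane@example.com'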
def search(cls, five9, filters): """Search for a record on the remote and return the results. Args: five9 (five9.Five9): The authenticated Five9 remote. filters (dict): A dictionary of search parameters, keyed by the name of the field to search. This should conform to the schema defined in :func:`five9.Five9.create_criteria`. Returns: list[BaseModel]: A list of records representing the result. """ return cls._name_search(five9.configuration.getWebConnectors, filters)
[ "def", "search", "(", "cls", ",", "five9", ",", "filters", ")", ":", "return", "cls", ".", "_name_search", "(", "five9", ".", "configuration", ".", "getWebConnectors", ",", "filters", ")" ]
43.769231
0.003442
def hrule(n=1, width=WIDTH, linestyle=LineStyle('', '─', '─', '')): """Returns a formatted string used as a border between table rows Parameters ---------- n : int The number of columns in the table width : int The width of each column (Default: 11) linestyle : tuple A LineStyle namedtuple containing the characters for (begin, hr, sep, end). (Default: ('', '─', '─', '')) Returns ------- rowstr : string A string consisting of the row border to print """ widths = parse_width(width, n) hrstr = linestyle.sep.join([('{:%s^%i}' % (linestyle.hline, width)).format('') for width in widths]) return linestyle.begin + hrstr + linestyle.end
[ "def", "hrule", "(", "n", "=", "1", ",", "width", "=", "WIDTH", ",", "linestyle", "=", "LineStyle", "(", "''", ",", "'─', ", "'", "', ''", ")", ":", "", "", "", "widths", "=", "parse_width", "(", "width", ",", "n", ")", "hrstr", "=", "linestyle", ".", "sep", ".", "join", "(", "[", "(", "'{:%s^%i}'", "%", "(", "linestyle", ".", "hline", ",", "width", ")", ")", ".", "format", "(", "''", ")", "for", "width", "in", "widths", "]", ")", "return", "linestyle", ".", "begin", "+", "hrstr", "+", "linestyle", ".", "end" ]
30.833333
0.003932
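What hrule assembles, sketched standalone with an ASCII style: a namedtuple mirroring the begin/hline/sep/end attributes the function reads, and a fixed width list in place of parse_width:

from collections import namedtuple

LineStyle = namedtuple('LineStyle', ['begin', 'hline', 'sep', 'end'])
style = LineStyle('+', '-', '+', '+')
widths = [5, 5, 5]
rule = style.begin + style.sep.join(
    ('{:%s^%i}' % (style.hline, w)).format('') for w in widths) + style.end
assert rule == '+-----+-----+-----+'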
def route_handler(context, content, pargs, kwargs): """ Route shortcode works a lot like rendering a page based on the url or route. This allows inserting in rendered HTML within another page. Activate it with the 'shortcodes' template filter. Within the content use the chill route shortcode: "[chill route /path/to/something/]" where the '[chill' and ']' are the shortcode starting and ending tags. And 'route' is this route handler that takes one argument which is the url. """ (node, rule_kw) = node_from_uri(pargs[0]) if node == None: return u"<!-- 404 '{0}' -->".format(pargs[0]) rule_kw.update( node ) values = rule_kw values.update( request.form.to_dict(flat=True) ) values.update( request.args.to_dict(flat=True) ) values['method'] = request.method noderequest = values.copy() noderequest.pop('node_id') noderequest.pop('name') noderequest.pop('value') rendered = render_node(node['id'], noderequest=noderequest, **values) if rendered: if not isinstance(rendered, (str, unicode, int, float)): # return a json string return encoder.encode(rendered) return rendered # Nothing to show, so nothing found return "<!-- 404 '{0}' -->".format(pargs[0])
[ "def", "route_handler", "(", "context", ",", "content", ",", "pargs", ",", "kwargs", ")", ":", "(", "node", ",", "rule_kw", ")", "=", "node_from_uri", "(", "pargs", "[", "0", "]", ")", "if", "node", "==", "None", ":", "return", "u\"<!-- 404 '{0}' -->\"", ".", "format", "(", "pargs", "[", "0", "]", ")", "rule_kw", ".", "update", "(", "node", ")", "values", "=", "rule_kw", "values", ".", "update", "(", "request", ".", "form", ".", "to_dict", "(", "flat", "=", "True", ")", ")", "values", ".", "update", "(", "request", ".", "args", ".", "to_dict", "(", "flat", "=", "True", ")", ")", "values", "[", "'method'", "]", "=", "request", ".", "method", "noderequest", "=", "values", ".", "copy", "(", ")", "noderequest", ".", "pop", "(", "'node_id'", ")", "noderequest", ".", "pop", "(", "'name'", ")", "noderequest", ".", "pop", "(", "'value'", ")", "rendered", "=", "render_node", "(", "node", "[", "'id'", "]", ",", "noderequest", "=", "noderequest", ",", "*", "*", "values", ")", "if", "rendered", ":", "if", "not", "isinstance", "(", "rendered", ",", "(", "str", ",", "unicode", ",", "int", ",", "float", ")", ")", ":", "# return a json string", "return", "encoder", ".", "encode", "(", "rendered", ")", "return", "rendered", "# Nothing to show, so nothing found", "return", "\"<!-- 404 '{0}' -->\"", ".", "format", "(", "pargs", "[", "0", "]", ")" ]
35.166667
0.006149
def _update_proxy(self, change): """ An observer which sends the state change to the proxy. """ if change['type'] == 'event' and self.proxy_is_active: self.proxy.set_opened(change['name'] == 'show') else: super(ActionMenuView, self)._update_proxy(change)
[ "def", "_update_proxy", "(", "self", ",", "change", ")", ":", "if", "change", "[", "'type'", "]", "==", "'event'", "and", "self", ".", "proxy_is_active", ":", "self", ".", "proxy", ".", "set_opened", "(", "change", "[", "'name'", "]", "==", "'show'", ")", "else", ":", "super", "(", "ActionMenuView", ",", "self", ")", ".", "_update_proxy", "(", "change", ")" ]
38
0.006431
def from_fault_data(cls, fault_trace, upper_seismogenic_depth,
                        lower_seismogenic_depth, dip, mesh_spacing):
        """
        Create and return a fault surface using fault source data.

        :param openquake.hazardlib.geo.line.Line fault_trace:
            Geographical line representing the intersection between the fault
            surface and the earth surface. The line must be horizontal (i.e.
            all depth values must be equal). If the depths are not given, they
            are assumed to be zero, meaning the trace intersects the surface
            at sea level, e.g. fault_trace = Line([Point(1, 1), Point(1, 2)]).
        :param upper_seismogenic_depth:
            Minimum depth ruptures can reach, in km (i.e. depth
            to fault's top edge).
        :param lower_seismogenic_depth:
            Maximum depth ruptures can reach, in km (i.e. depth
            to fault's bottom edge).
        :param dip:
            Dip angle (i.e. angle between fault surface
            and earth surface), in degrees.
        :param mesh_spacing:
            Distance between two subsequent points in a mesh, in km.
        :returns:
            An instance of :class:`SimpleFaultSurface` created using
            that data.

        Uses :meth:`check_fault_data` for checking parameters.
        """
        cls.check_fault_data(fault_trace, upper_seismogenic_depth,
                             lower_seismogenic_depth, dip, mesh_spacing)
        # Loops over points in the top edge, for each point
        # on the top edge compute corresponding point on the bottom edge, then
        # computes equally spaced points between top and bottom points.
        vdist_top = upper_seismogenic_depth - fault_trace[0].depth
        vdist_bottom = lower_seismogenic_depth - fault_trace[0].depth

        hdist_top = vdist_top / math.tan(math.radians(dip))
        hdist_bottom = vdist_bottom / math.tan(math.radians(dip))

        strike = fault_trace[0].azimuth(fault_trace[-1])
        azimuth = (strike + 90.0) % 360

        mesh = []
        for point in fault_trace.resample(mesh_spacing):
            top = point.point_at(hdist_top, vdist_top, azimuth)
            bottom = point.point_at(hdist_bottom, vdist_bottom, azimuth)
            mesh.append(top.equally_spaced_points(bottom, mesh_spacing))

        # number of rows corresponds to number of points along dip
        # number of columns corresponds to number of points along strike
        surface_points = numpy.array(mesh).transpose().tolist()
        mesh = RectangularMesh.from_points_list(surface_points)
        assert 1 not in mesh.shape, (
            "Mesh must have at least 2 nodes along both length and width."
            " Possible cause: Mesh spacing could be too large with respect to"
            " the fault length and width."
        )
        self = cls(mesh)
        self.surface_nodes = [simple_fault_node(
            fault_trace, dip,
            upper_seismogenic_depth, lower_seismogenic_depth)]
        return self
[ "def", "from_fault_data", "(", "cls", ",", "fault_trace", ",", "upper_seismogenic_depth", ",", "lower_seismogenic_depth", ",", "dip", ",", "mesh_spacing", ")", ":", "cls", ".", "check_fault_data", "(", "fault_trace", ",", "upper_seismogenic_depth", ",", "lower_seismogenic_depth", ",", "dip", ",", "mesh_spacing", ")", "# Loops over points in the top edge, for each point", "# on the top edge compute corresponding point on the bottom edge, then", "# computes equally spaced points between top and bottom points.", "vdist_top", "=", "upper_seismogenic_depth", "-", "fault_trace", "[", "0", "]", ".", "depth", "vdist_bottom", "=", "lower_seismogenic_depth", "-", "fault_trace", "[", "0", "]", ".", "depth", "hdist_top", "=", "vdist_top", "/", "math", ".", "tan", "(", "math", ".", "radians", "(", "dip", ")", ")", "hdist_bottom", "=", "vdist_bottom", "/", "math", ".", "tan", "(", "math", ".", "radians", "(", "dip", ")", ")", "strike", "=", "fault_trace", "[", "0", "]", ".", "azimuth", "(", "fault_trace", "[", "-", "1", "]", ")", "azimuth", "=", "(", "strike", "+", "90.0", ")", "%", "360", "mesh", "=", "[", "]", "for", "point", "in", "fault_trace", ".", "resample", "(", "mesh_spacing", ")", ":", "top", "=", "point", ".", "point_at", "(", "hdist_top", ",", "vdist_top", ",", "azimuth", ")", "bottom", "=", "point", ".", "point_at", "(", "hdist_bottom", ",", "vdist_bottom", ",", "azimuth", ")", "mesh", ".", "append", "(", "top", ".", "equally_spaced_points", "(", "bottom", ",", "mesh_spacing", ")", ")", "# number of rows corresponds to number of points along dip", "# number of columns corresponds to number of points along strike", "surface_points", "=", "numpy", ".", "array", "(", "mesh", ")", ".", "transpose", "(", ")", ".", "tolist", "(", ")", "mesh", "=", "RectangularMesh", ".", "from_points_list", "(", "surface_points", ")", "assert", "1", "not", "in", "mesh", ".", "shape", ",", "(", "\"Mesh must have at least 2 nodes along both length and width.\"", "\" Possible cause: Mesh spacing could be too large with respect to\"", "\" the fault length and width.\"", ")", "self", "=", "cls", "(", "mesh", ")", "self", ".", "surface_nodes", "=", "[", "simple_fault_node", "(", "fault_trace", ",", "dip", ",", "upper_seismogenic_depth", ",", "lower_seismogenic_depth", ")", "]", "return", "self" ]
47.806452
0.000992
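The top/bottom edge offsets in from_fault_data reduce to one trigonometric identity: a point at depth d on a plane dipping `dip` degrees lies d / tan(dip) km horizontally from the trace. A quick check at 45 degrees, where the offset equals the depth:

import math

dip = 45.0
vdist = 10.0  # km below the fault trace
hdist = vdist / math.tan(math.radians(dip))
assert abs(hdist - 10.0) < 1e-9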
def _get_values(self): """ Get unique index value for each bar """ (gi, _), (ci, _), (si, _) = self._get_dims(self.hmap.last) ndims = self.hmap.last.ndims dims = self.hmap.last.kdims dimensions = [] values = {} for vidx, vtype in zip([gi, ci, si], self._dimensions): if vidx < ndims: dim = dims[vidx] dimensions.append(dim) vals = self.hmap.dimension_values(dim.name) else: dimensions.append(None) vals = [None] values[vtype] = list(unique_iterator(vals)) return values, dimensions
[ "def", "_get_values", "(", "self", ")", ":", "(", "gi", ",", "_", ")", ",", "(", "ci", ",", "_", ")", ",", "(", "si", ",", "_", ")", "=", "self", ".", "_get_dims", "(", "self", ".", "hmap", ".", "last", ")", "ndims", "=", "self", ".", "hmap", ".", "last", ".", "ndims", "dims", "=", "self", ".", "hmap", ".", "last", ".", "kdims", "dimensions", "=", "[", "]", "values", "=", "{", "}", "for", "vidx", ",", "vtype", "in", "zip", "(", "[", "gi", ",", "ci", ",", "si", "]", ",", "self", ".", "_dimensions", ")", ":", "if", "vidx", "<", "ndims", ":", "dim", "=", "dims", "[", "vidx", "]", "dimensions", ".", "append", "(", "dim", ")", "vals", "=", "self", ".", "hmap", ".", "dimension_values", "(", "dim", ".", "name", ")", "else", ":", "dimensions", ".", "append", "(", "None", ")", "vals", "=", "[", "None", "]", "values", "[", "vtype", "]", "=", "list", "(", "unique_iterator", "(", "vals", ")", ")", "return", "values", ",", "dimensions" ]
34.684211
0.002954
def set_widgets(self): """Set widgets on the Extra Keywords tab.""" existing_inasafe_default_values = self.parent.get_existing_keyword( 'inasafe_default_values') # Remove old container and parameter if self.parameter_container: self.default_values_grid.removeWidget( self.parameter_container) if self.parameters: self.parameters = [] # Iterate through all inasafe fields # existing_inasafe_default_values for inasafe_field in self.inasafe_fields_for_the_layer(): # Create DefaultSelectParameter parameter = DefaultValueParameter() parameter.guid = inasafe_field['key'] parameter.name = inasafe_field['name'] parameter.is_required = False parameter.help_text = inasafe_field['default_value']['description'] # parameter.description = inasafe_field['default_value'] parameter.element_type = str parameter.labels = get_inasafe_default_value_fields( self.parent.setting, inasafe_field['key'])[0] parameter.options = get_inasafe_default_value_fields( self.parent.setting, inasafe_field['key'])[1] if existing_inasafe_default_values: existing_default_value = existing_inasafe_default_values.get( inasafe_field['key']) if existing_default_value: parameter.default = existing_default_value self.parameters.append(parameter) # Create the parameter container and add to the wizard. self.parameter_container = ParameterContainer( self.parameters, extra_parameters=self.extra_parameters) self.parameter_container.setup_ui() self.default_values_grid.addWidget(self.parameter_container) # Set default value to None for parameter_widget in self.parameter_container.\ get_parameter_widgets(): parameter_widget.widget().set_value(None) # Set default value from existing keywords if existing_inasafe_default_values: for guid, default in list(existing_inasafe_default_values.items()): parameter_widget = self.parameter_container.\ get_parameter_widget_by_guid(guid) if isinstance(parameter_widget, DefaultValueParameterWidget): parameter_widget.set_value(default)
[ "def", "set_widgets", "(", "self", ")", ":", "existing_inasafe_default_values", "=", "self", ".", "parent", ".", "get_existing_keyword", "(", "'inasafe_default_values'", ")", "# Remove old container and parameter", "if", "self", ".", "parameter_container", ":", "self", ".", "default_values_grid", ".", "removeWidget", "(", "self", ".", "parameter_container", ")", "if", "self", ".", "parameters", ":", "self", ".", "parameters", "=", "[", "]", "# Iterate through all inasafe fields", "# existing_inasafe_default_values", "for", "inasafe_field", "in", "self", ".", "inasafe_fields_for_the_layer", "(", ")", ":", "# Create DefaultSelectParameter", "parameter", "=", "DefaultValueParameter", "(", ")", "parameter", ".", "guid", "=", "inasafe_field", "[", "'key'", "]", "parameter", ".", "name", "=", "inasafe_field", "[", "'name'", "]", "parameter", ".", "is_required", "=", "False", "parameter", ".", "help_text", "=", "inasafe_field", "[", "'default_value'", "]", "[", "'description'", "]", "# parameter.description = inasafe_field['default_value']", "parameter", ".", "element_type", "=", "str", "parameter", ".", "labels", "=", "get_inasafe_default_value_fields", "(", "self", ".", "parent", ".", "setting", ",", "inasafe_field", "[", "'key'", "]", ")", "[", "0", "]", "parameter", ".", "options", "=", "get_inasafe_default_value_fields", "(", "self", ".", "parent", ".", "setting", ",", "inasafe_field", "[", "'key'", "]", ")", "[", "1", "]", "if", "existing_inasafe_default_values", ":", "existing_default_value", "=", "existing_inasafe_default_values", ".", "get", "(", "inasafe_field", "[", "'key'", "]", ")", "if", "existing_default_value", ":", "parameter", ".", "default", "=", "existing_default_value", "self", ".", "parameters", ".", "append", "(", "parameter", ")", "# Create the parameter container and add to the wizard.", "self", ".", "parameter_container", "=", "ParameterContainer", "(", "self", ".", "parameters", ",", "extra_parameters", "=", "self", ".", "extra_parameters", ")", "self", ".", "parameter_container", ".", "setup_ui", "(", ")", "self", ".", "default_values_grid", ".", "addWidget", "(", "self", ".", "parameter_container", ")", "# Set default value to None", "for", "parameter_widget", "in", "self", ".", "parameter_container", ".", "get_parameter_widgets", "(", ")", ":", "parameter_widget", ".", "widget", "(", ")", ".", "set_value", "(", "None", ")", "# Set default value from existing keywords", "if", "existing_inasafe_default_values", ":", "for", "guid", ",", "default", "in", "list", "(", "existing_inasafe_default_values", ".", "items", "(", ")", ")", ":", "parameter_widget", "=", "self", ".", "parameter_container", ".", "get_parameter_widget_by_guid", "(", "guid", ")", "if", "isinstance", "(", "parameter_widget", ",", "DefaultValueParameterWidget", ")", ":", "parameter_widget", ".", "set_value", "(", "default", ")" ]
46.207547
0.0008
def spin1x_from_xi1_phi_a_phi_s(xi1, phi_a, phi_s): """Returns x-component spin for primary mass. """ phi1 = phi1_from_phi_a_phi_s(phi_a, phi_s) return xi1 * numpy.cos(phi1)
[ "def", "spin1x_from_xi1_phi_a_phi_s", "(", "xi1", ",", "phi_a", ",", "phi_s", ")", ":", "phi1", "=", "phi1_from_phi_a_phi_s", "(", "phi_a", ",", "phi_s", ")", "return", "xi1", "*", "numpy", ".", "cos", "(", "phi1", ")" ]
37
0.005291
def delete(self, index, doc_type, id, bulk=False, **query_params): """ Delete a typed JSON document from a specific index based on its id. If bulk is True, the delete operation is put in bulk mode. """ if bulk: cmd = {"delete": {"_index": index, "_type": doc_type, "_id": id}} self.bulker.add(json.dumps(cmd, cls=self.encoder)) return self.flush_bulk() path = make_path(index, doc_type, id) return self._send_request('DELETE', path, params=query_params)
[ "def", "delete", "(", "self", ",", "index", ",", "doc_type", ",", "id", ",", "bulk", "=", "False", ",", "*", "*", "query_params", ")", ":", "if", "bulk", ":", "cmd", "=", "{", "\"delete\"", ":", "{", "\"_index\"", ":", "index", ",", "\"_type\"", ":", "doc_type", ",", "\"_id\"", ":", "id", "}", "}", "self", ".", "bulker", ".", "add", "(", "json", ".", "dumps", "(", "cmd", ",", "cls", "=", "self", ".", "encoder", ")", ")", "return", "self", ".", "flush_bulk", "(", ")", "path", "=", "make_path", "(", "index", ",", "doc_type", ",", "id", ")", "return", "self", ".", "_send_request", "(", "'DELETE'", ",", "path", ",", "params", "=", "query_params", ")" ]
43.384615
0.003472
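In bulk mode the method above only queues a one-line action document and defers the actual request to flush_bulk. A standalone sketch of the action line it builds (the index, type, and id values here are illustrative):

import json

# Each bulk "delete" action is a single newline-delimited JSON object.
cmd = {"delete": {"_index": "tweets", "_type": "tweet", "_id": "42"}}
line = json.dumps(cmd)
print(line)  # {"delete": {"_index": "tweets", "_type": "tweet", "_id": "42"}}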
def runSearchVariantAnnotationSets(self, request): """ Runs the specified SearchVariantAnnotationSetsRequest. """ return self.runSearchRequest( request, protocol.SearchVariantAnnotationSetsRequest, protocol.SearchVariantAnnotationSetsResponse, self.variantAnnotationSetsGenerator)
[ "def", "runSearchVariantAnnotationSets", "(", "self", ",", "request", ")", ":", "return", "self", ".", "runSearchRequest", "(", "request", ",", "protocol", ".", "SearchVariantAnnotationSetsRequest", ",", "protocol", ".", "SearchVariantAnnotationSetsResponse", ",", "self", ".", "variantAnnotationSetsGenerator", ")" ]
42.625
0.005747
def list_upgrades(refresh=True, **kwargs): # pylint: disable=W0613 ''' List all available package upgrades on this system CLI Example: .. code-block:: bash salt '*' pkgutil.list_upgrades ''' if salt.utils.data.is_true(refresh): refresh_db() upgrades = {} lines = __salt__['cmd.run_stdout']( '/opt/csw/bin/pkgutil -A --parse').splitlines() for line in lines: comps = line.split('\t') if comps[2] == "SAME": continue if comps[2] == "not installed": continue upgrades[comps[0]] = comps[1] return upgrades
[ "def", "list_upgrades", "(", "refresh", "=", "True", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=W0613", "if", "salt", ".", "utils", ".", "data", ".", "is_true", "(", "refresh", ")", ":", "refresh_db", "(", ")", "upgrades", "=", "{", "}", "lines", "=", "__salt__", "[", "'cmd.run_stdout'", "]", "(", "'/opt/csw/bin/pkgutil -A --parse'", ")", ".", "splitlines", "(", ")", "for", "line", "in", "lines", ":", "comps", "=", "line", ".", "split", "(", "'\\t'", ")", "if", "comps", "[", "2", "]", "==", "\"SAME\"", ":", "continue", "if", "comps", "[", "2", "]", "==", "\"not installed\"", ":", "continue", "upgrades", "[", "comps", "[", "0", "]", "]", "=", "comps", "[", "1", "]", "return", "upgrades" ]
26.217391
0.0016
def get_platform():
    """Return a DHT platform interface for the currently detected platform."""
    plat = platform_detect.platform_detect()
    if plat == platform_detect.RASPBERRY_PI:
        # Check for version 1 or 2 of the pi.
        version = platform_detect.pi_version()
        if version == 1:
            from . import Raspberry_Pi
            return Raspberry_Pi
        elif version == 2:
            from . import Raspberry_Pi_2
            return Raspberry_Pi_2
        elif version == 3:
            # Use Pi 2 driver even though running on Pi 3.
            from . import Raspberry_Pi_2
            return Raspberry_Pi_2
        else:
            raise RuntimeError('No driver for detected Raspberry Pi version available!')
    elif plat == platform_detect.BEAGLEBONE_BLACK:
        from . import Beaglebone_Black
        return Beaglebone_Black
    else:
        raise RuntimeError('Unknown platform.')
[ "def", "get_platform", "(", ")", ":", "plat", "=", "platform_detect", ".", "platform_detect", "(", ")", "if", "plat", "==", "platform_detect", ".", "RASPBERRY_PI", ":", "# Check for version 1 or 2 of the pi.", "version", "=", "platform_detect", ".", "pi_version", "(", ")", "if", "version", "==", "1", ":", "from", ".", "import", "Raspberry_Pi", "return", "Raspberry_Pi", "elif", "version", "==", "2", ":", "from", ".", "import", "Raspberry_Pi_2", "return", "Raspberry_Pi_2", "elif", "version", "==", "3", ":", "\"\"\"Use Pi 2 driver even though running on Pi 3\"\"\"", "from", ".", "import", "Raspberry_Pi_2", "return", "Raspberry_Pi_2", "else", ":", "raise", "RuntimeError", "(", "'No driver for detected Raspberry Pi version available!'", ")", "elif", "plat", "==", "platform_detect", ".", "BEAGLEBONE_BLACK", ":", "from", ".", "import", "Beaglebone_Black", "return", "Beaglebone_Black", "else", ":", "raise", "RuntimeError", "(", "'Unknown platform.'", ")" ]
39.304348
0.00216
def update_manual_intervention(self, manual_intervention_update_metadata, project, release_id, manual_intervention_id): """UpdateManualIntervention. [Preview API] Update manual intervention. :param :class:`<ManualInterventionUpdateMetadata> <azure.devops.v5_1.release.models.ManualInterventionUpdateMetadata>` manual_intervention_update_metadata: Meta data to update manual intervention. :param str project: Project ID or project name :param int release_id: Id of the release. :param int manual_intervention_id: Id of the manual intervention. :rtype: :class:`<ManualIntervention> <azure.devops.v5_1.release.models.ManualIntervention>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if release_id is not None: route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int') if manual_intervention_id is not None: route_values['manualInterventionId'] = self._serialize.url('manual_intervention_id', manual_intervention_id, 'int') content = self._serialize.body(manual_intervention_update_metadata, 'ManualInterventionUpdateMetadata') response = self._send(http_method='PATCH', location_id='616c46e4-f370-4456-adaa-fbaf79c7b79e', version='5.1-preview.1', route_values=route_values, content=content) return self._deserialize('ManualIntervention', response)
[ "def", "update_manual_intervention", "(", "self", ",", "manual_intervention_update_metadata", ",", "project", ",", "release_id", ",", "manual_intervention_id", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'project'", ",", "project", ",", "'str'", ")", "if", "release_id", "is", "not", "None", ":", "route_values", "[", "'releaseId'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'release_id'", ",", "release_id", ",", "'int'", ")", "if", "manual_intervention_id", "is", "not", "None", ":", "route_values", "[", "'manualInterventionId'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'manual_intervention_id'", ",", "manual_intervention_id", ",", "'int'", ")", "content", "=", "self", ".", "_serialize", ".", "body", "(", "manual_intervention_update_metadata", ",", "'ManualInterventionUpdateMetadata'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'PATCH'", ",", "location_id", "=", "'616c46e4-f370-4456-adaa-fbaf79c7b79e'", ",", "version", "=", "'5.1-preview.1'", ",", "route_values", "=", "route_values", ",", "content", "=", "content", ")", "return", "self", ".", "_deserialize", "(", "'ManualIntervention'", ",", "response", ")" ]
69.347826
0.006184
def add_replace(self, selector, replacement, upsert=False, collation=None): """Create a replace document and add it to the list of ops. """ validate_ok_for_replace(replacement) cmd = SON([('q', selector), ('u', replacement), ('multi', False), ('upsert', upsert)]) collation = validate_collation_or_none(collation) if collation is not None: self.uses_collation = True cmd['collation'] = collation self.ops.append((_UPDATE, cmd))
[ "def", "add_replace", "(", "self", ",", "selector", ",", "replacement", ",", "upsert", "=", "False", ",", "collation", "=", "None", ")", ":", "validate_ok_for_replace", "(", "replacement", ")", "cmd", "=", "SON", "(", "[", "(", "'q'", ",", "selector", ")", ",", "(", "'u'", ",", "replacement", ")", ",", "(", "'multi'", ",", "False", ")", ",", "(", "'upsert'", ",", "upsert", ")", "]", ")", "collation", "=", "validate_collation_or_none", "(", "collation", ")", "if", "collation", "is", "not", "None", ":", "self", ".", "uses_collation", "=", "True", "cmd", "[", "'collation'", "]", "=", "collation", "self", ".", "ops", ".", "append", "(", "(", "_UPDATE", ",", "cmd", ")", ")" ]
44.583333
0.005495
def segment_raised_funds_average(df):
    """
    Return the mean and standard deviation of raised funds per segment.
    """
    grouped = df.groupby('Segmento')
    aggregated = grouped.agg(['mean', 'std'])
    aggregated.columns = aggregated.columns.droplevel(0)
    return aggregated
[ "def", "segment_raised_funds_average", "(", "df", ")", ":", "grouped", "=", "df", ".", "groupby", "(", "'Segmento'", ")", "aggregated", "=", "grouped", ".", "agg", "(", "[", "'mean'", ",", "'std'", "]", ")", "aggregated", ".", "columns", "=", "aggregated", ".", "columns", ".", "droplevel", "(", "0", ")", "return", "aggregated" ]
27.666667
0.003891
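A toy reproduction of the groupby/agg/droplevel chain above; the funding column name is made up, since the real schema is not shown:

import pandas as pd

df = pd.DataFrame({
    'Segmento': ['Teatro', 'Teatro', 'Dança'],
    'captado': [100.0, 300.0, 50.0],  # hypothetical funding column
})
aggregated = df.groupby('Segmento').agg(['mean', 'std'])
# agg with a list yields MultiIndex columns of (column, statistic);
# dropping level 0 leaves just 'mean' and 'std'.
aggregated.columns = aggregated.columns.droplevel(0)
print(aggregated)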
def select_grid_model_residential(lvgd):
    """Selects typified model grid based on population

    Parameters
    ----------
    lvgd : LVGridDistrictDing0
        Low-voltage grid district object

    Returns
    -------
    :pandas:`pandas.DataFrame<dataframe>`
        Selected string of typified model grid
    :pandas:`pandas.DataFrame<dataframe>`
        Parameters of chosen Transformer

    Notes
    -----
    In total 196 distinct LV grid topologies are available that are chosen
    by population in the LV grid district. Population is translated to
    number of house branches. Each grid model fits a number of house
    branches. If this number exceeds 196, still the grid topology of 196
    house branches is used. The peak load of the LV grid district is
    uniformly distributed across house branches.
    """

    # Load properties of LV typified model grids
    string_properties = lvgd.lv_grid.network.static_data['LV_model_grids_strings']
    # Load relational table of apartment count and strings of model grid
    apartment_string = lvgd.lv_grid.network.static_data[
        'LV_model_grids_strings_per_grid']

    # load assumptions
    apartment_house_branch_ratio = cfg_ding0.get("assumptions",
                                                 "apartment_house_branch_ratio")
    population_per_apartment = cfg_ding0.get("assumptions",
                                             "population_per_apartment")

    # calc count of apartments to select string types
    apartments = round(lvgd.population / population_per_apartment)

    if apartments > 196:
        apartments = 196

    # select set of strings that represent one type of model grid
    strings = apartment_string.loc[apartments]
    selected_strings = [int(s) for s in strings[strings >= 1].index.tolist()]

    # slice dataframe of string parameters
    selected_strings_df = string_properties.loc[selected_strings]

    # add number of occurrences of each branch to df
    occurence_selector = [str(i) for i in selected_strings]
    selected_strings_df['occurence'] = strings.loc[occurence_selector].tolist()

    return selected_strings_df
[ "def", "select_grid_model_residential", "(", "lvgd", ")", ":", "# Load properties of LV typified model grids", "string_properties", "=", "lvgd", ".", "lv_grid", ".", "network", ".", "static_data", "[", "'LV_model_grids_strings'", "]", "# Load relational table of apartment count and strings of model grid", "apartment_string", "=", "lvgd", ".", "lv_grid", ".", "network", ".", "static_data", "[", "'LV_model_grids_strings_per_grid'", "]", "# load assumtions", "apartment_house_branch_ratio", "=", "cfg_ding0", ".", "get", "(", "\"assumptions\"", ",", "\"apartment_house_branch_ratio\"", ")", "population_per_apartment", "=", "cfg_ding0", ".", "get", "(", "\"assumptions\"", ",", "\"population_per_apartment\"", ")", "# calc count of apartments to select string types", "apartments", "=", "round", "(", "lvgd", ".", "population", "/", "population_per_apartment", ")", "if", "apartments", ">", "196", ":", "apartments", "=", "196", "# select set of strings that represent one type of model grid", "strings", "=", "apartment_string", ".", "loc", "[", "apartments", "]", "selected_strings", "=", "[", "int", "(", "s", ")", "for", "s", "in", "strings", "[", "strings", ">=", "1", "]", ".", "index", ".", "tolist", "(", ")", "]", "# slice dataframe of string parameters", "selected_strings_df", "=", "string_properties", ".", "loc", "[", "selected_strings", "]", "# add number of occurences of each branch to df", "occurence_selector", "=", "[", "str", "(", "i", ")", "for", "i", "in", "selected_strings", "]", "selected_strings_df", "[", "'occurence'", "]", "=", "strings", ".", "loc", "[", "occurence_selector", "]", ".", "tolist", "(", ")", "return", "selected_strings_df" ]
37.872727
0.001404
def compute_wcs(key, challenge):
    """
    Compute a WAMP-CRA authentication signature from an authentication
    challenge and a (derived) key.

    :param key: The key derived (via PBKDF2) from the secret.
    :type key: str/bytes
    :param challenge: The authentication challenge to sign.
    :type challenge: str/bytes

    :return: The authentication signature.
    :rtype: bytes
    """
    key = key.encode('utf8')
    challenge = challenge.encode('utf8')
    sig = hmac.new(key, challenge, hashlib.sha256).digest()
    return binascii.b2a_base64(sig).strip()
[ "def", "compute_wcs", "(", "key", ",", "challenge", ")", ":", "key", "=", "key", ".", "encode", "(", "'utf8'", ")", "challenge", "=", "challenge", ".", "encode", "(", "'utf8'", ")", "sig", "=", "hmac", ".", "new", "(", "key", ",", "challenge", ",", "hashlib", ".", "sha256", ")", ".", "digest", "(", ")", "return", "binascii", ".", "b2a_base64", "(", "sig", ")", ".", "strip", "(", ")" ]
32.588235
0.001754
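An end-to-end sketch pairing the function above with the PBKDF2 derivation its docstring mentions. The salt, iteration count, and base64 encoding of the derived key are illustrative choices, not mandated by the source:

import base64
import binascii
import hashlib
import hmac

secret, salt = b"my-secret", b"salt123"  # placeholders
raw = hashlib.pbkdf2_hmac('sha256', secret, salt, 1000, dklen=32)
key = base64.b64encode(raw)  # the "(derived) key" fed into the signing step

challenge = b'{"authid": "alice", "nonce": "abc"}'
sig = hmac.new(key, challenge, hashlib.sha256).digest()
print(binascii.b2a_base64(sig).strip())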
def _decode16(self, offset):
    """
    Decode a UTF-16 string at the given offset

    :param offset: offset of the string inside the data
    :return: str
    """
    str_len, skip = self._decode_length(offset, 2)
    offset += skip

    # The length is given in UTF-16 code units, i.e. two bytes each
    encoded_bytes = str_len * 2

    data = self.m_charbuff[offset: offset + encoded_bytes]

    assert self.m_charbuff[offset + encoded_bytes:offset + encoded_bytes + 2] == b"\x00\x00", \
        "UTF-16 String is not null terminated! At offset={}".format(offset)

    return self._decode_bytes(data, 'utf-16', str_len)
[ "def", "_decode16", "(", "self", ",", "offset", ")", ":", "str_len", ",", "skip", "=", "self", ".", "_decode_length", "(", "offset", ",", "2", ")", "offset", "+=", "skip", "# The len is the string len in utf-16 units", "encoded_bytes", "=", "str_len", "*", "2", "data", "=", "self", ".", "m_charbuff", "[", "offset", ":", "offset", "+", "encoded_bytes", "]", "assert", "self", ".", "m_charbuff", "[", "offset", "+", "encoded_bytes", ":", "offset", "+", "encoded_bytes", "+", "2", "]", "==", "b\"\\x00\\x00\"", ",", "\"UTF-16 String is not null terminated! At offset={}\"", ".", "format", "(", "offset", ")", "return", "self", ".", "_decode_bytes", "(", "data", ",", "'utf-16'", ",", "str_len", ")" ]
33.684211
0.004559
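A self-contained sketch of the same layout, assuming the simple case of a single two-byte little-endian length prefix (the real _decode_length also handles longer strings, which is elided here). The length counts UTF-16 code units and the payload ends with a two-byte null terminator:

import struct

def decode16(buf, offset):
    # Simple case only: one uint16 (little-endian) length prefix.
    (str_len,) = struct.unpack_from('<H', buf, offset)
    offset += 2
    encoded_bytes = str_len * 2  # length is in UTF-16 code units
    data = buf[offset:offset + encoded_bytes]
    assert buf[offset + encoded_bytes:offset + encoded_bytes + 2] == b"\x00\x00"
    return data.decode('utf-16-le')

s = "app_name"
buf = struct.pack('<H', len(s)) + s.encode('utf-16-le') + b"\x00\x00"
print(decode16(buf, 0))  # app_name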
def ancestor_paths(start=None, limit={}):
    """
    Yield ``start`` (defaulting to the current directory) and each of its
    ancestor directories, stopping once a path in ``limit`` has been yielded.
    """
    import utool as ut
    limit = ut.ensure_iterable(limit)
    limit = {expanduser(p) for p in limit}.union(set(limit))
    if start is None:
        start = os.getcwd()
    path = start
    prev = None
    while path != prev and prev not in limit:
        yield path
        prev = path
        path = dirname(path)
[ "def", "ancestor_paths", "(", "start", "=", "None", ",", "limit", "=", "{", "}", ")", ":", "import", "utool", "as", "ut", "limit", "=", "ut", ".", "ensure_iterable", "(", "limit", ")", "limit", "=", "{", "expanduser", "(", "p", ")", "for", "p", "in", "limit", "}", ".", "union", "(", "set", "(", "limit", ")", ")", "if", "start", "is", "None", ":", "start", "=", "os", ".", "getcwd", "(", ")", "path", "=", "start", "prev", "=", "None", "while", "path", "!=", "prev", "and", "prev", "not", "in", "limit", ":", "yield", "path", "prev", "=", "path", "path", "=", "dirname", "(", "path", ")" ]
25.733333
0.0025
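A usage sketch of the same walk without the utool dependency, on POSIX-style paths:

from os.path import dirname, expanduser

def ancestor_paths(start, limit=()):
    limit = {expanduser(p) for p in limit}
    path, prev = start, None
    while path != prev and prev not in limit:
        yield path
        prev = path
        path = dirname(path)

print(list(ancestor_paths('/a/b/c')))
# ['/a/b/c', '/a/b', '/a', '/']
print(list(ancestor_paths('/a/b/c', limit=['/a'])))
# ['/a/b/c', '/a/b', '/a']  (stops once '/a' has been yielded)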
def choices(self):
    """Gets the experiment choices"""
    if self._choices is None:
        self._choices = [ExperimentChoice(self, choice_name) for choice_name in self.choice_names]

    return self._choices
[ "def", "choices", "(", "self", ")", ":", "if", "self", ".", "_choices", "==", "None", ":", "self", ".", "_choices", "=", "[", "ExperimentChoice", "(", "self", ",", "choice_name", ")", "for", "choice_name", "in", "self", ".", "choice_names", "]", "return", "self", ".", "_choices" ]
31.714286
0.017544
def calibrate(self, data, key): """Data calibration.""" # logger.debug('Calibration: %s' % key.calibration) logger.warning('Calibration disabled!') if key.calibration == 'brightness_temperature': # self._ir_calibrate(data, key) pass elif key.calibration == 'reflectance': # self._vis_calibrate(data, key) pass else: pass return data
[ "def", "calibrate", "(", "self", ",", "data", ",", "key", ")", ":", "# logger.debug('Calibration: %s' % key.calibration)", "logger", ".", "warning", "(", "'Calibration disabled!'", ")", "if", "key", ".", "calibration", "==", "'brightness_temperature'", ":", "# self._ir_calibrate(data, key)", "pass", "elif", "key", ".", "calibration", "==", "'reflectance'", ":", "# self._vis_calibrate(data, key)", "pass", "else", ":", "pass", "return", "data" ]
29.066667
0.004444
def kv(d): """Equivalent to dict.items(). Usage:: >>> for key, node in DictTree.kv(d): >>> print(key, DictTree.getattr(node, "population")) MD 200000 VA 100000 """ return ((key, value) for key, value in iteritems(d) if key != _meta)
[ "def", "kv", "(", "d", ")", ":", "return", "(", "(", "key", ",", "value", ")", "for", "key", ",", "value", "in", "iteritems", "(", "d", ")", "if", "key", "!=", "_meta", ")" ]
29.636364
0.014881
def iterateEM(self, count):
    ''' Iterate through all transmissions of English to foreign words.
    keep count of repeated occurrences
    do until convergence
      set count(e|f) to 0 for all e,f
      set total(f) to 0 for all f
      for all sentence pairs (e_s,f_s)
        set total_s(e) = 0 for all e
        for all words e in e_s
          for all words f in f_s
            total_s(e) += t(e|f)
        for all words e in e_s
          for all words f in f_s
            count(e|f) += t(e|f) / total_s(e)
            total(f) += t(e|f) / total_s(e)
      for all f
        for all e
          t(e|f) = count(e|f) / total(f)
    '''
    for _ in range(count):
        countef = {}
        totalf = {}
        # set the count of the words to zero
        for word in self.en_words:
            if word not in self.probs:
                continue
            word_probs = self.probs[word]
            count = dict([(w, 0) for w in word_probs])
            countef[word] = count
            totalf[word] = 0
        self.countef = countef
        self.totalf = totalf
        # NOW iterate over each word pair
        for (es, ds) in self.sent_pairs:
            es_split = es.split()
            ds_split = ds.split()
            for d in ds_split:
                self.totals[d] = 0
                for e in es_split:
                    if e not in self.transmissions:
                        continue
                    e_trans = self.transmissions[e]
                    if d not in e_trans:
                        continue
                    self.totals[d] += e_trans[d]
                # Get count(e|f) and total(f)
                for e in es_split:
                    if e not in self.transmissions:
                        continue
                    if d not in self.transmissions[e]:
                        continue
                    self.countef[e][d] += self.transmissions[e][d] / self.totals[d]
                    self.totalf[e] += self.transmissions[e][d] / self.totals[d]

        for e in self.en_words:
            if e not in self.probs:
                continue
            e_prob = self.probs[e]
            for d in e_prob:
                self.transmissions[e][d] = self.countef[e][d] / self.totalf[e]
[ "def", "iterateEM", "(", "self", ",", "count", ")", ":", "for", "iter", "in", "range", "(", "count", ")", ":", "countef", "=", "{", "}", "totalf", "=", "{", "}", "# set the count of the words to zero", "for", "word", "in", "self", ".", "en_words", ":", "if", "(", "word", "not", "in", "self", ".", "probs", ")", ":", "continue", "word_probs", "=", "self", ".", "probs", "[", "word", "]", "count", "=", "dict", "(", "[", "(", "w", ",", "0", ")", "for", "w", "in", "word_probs", "]", ")", "countef", "[", "word", "]", "=", "count", "totalf", "[", "word", "]", "=", "0", "self", ".", "countef", "=", "countef", "self", ".", "totalf", "=", "totalf", "# NOW iterate over each word pair", "for", "(", "es", ",", "ds", ")", "in", "self", ".", "sent_pairs", ":", "es_split", "=", "es", ".", "split", "(", ")", "ds_split", "=", "ds", ".", "split", "(", ")", "for", "d", "in", "ds_split", ":", "self", ".", "totals", "[", "d", "]", "=", "0", "for", "e", "in", "es_split", ":", "if", "(", "e", "not", "in", "self", ".", "transmissions", ")", ":", "continue", "e_trans", "=", "self", ".", "transmissions", "[", "e", "]", "if", "(", "d", "not", "in", "e_trans", ")", ":", "continue", "self", ".", "totals", "[", "d", "]", "+=", "e_trans", "[", "d", "]", "# Get count(e|f) and total(f)", "for", "e", "in", "es_split", ":", "if", "(", "e", "not", "in", "self", ".", "transmissions", ")", ":", "continue", "if", "(", "d", "not", "in", "self", ".", "transmissions", "[", "e", "]", ")", ":", "continue", "self", ".", "countef", "[", "e", "]", "[", "d", "]", "+=", "self", ".", "transmissions", "[", "e", "]", "[", "d", "]", "/", "self", ".", "totals", "[", "d", "]", "self", ".", "totalf", "[", "e", "]", "+=", "self", ".", "transmissions", "[", "e", "]", "[", "d", "]", "/", "self", ".", "totals", "[", "d", "]", "for", "e", "in", "self", ".", "en_words", ":", "if", "(", "e", "not", "in", "self", ".", "probs", ")", ":", "continue", "e_prob", "=", "self", ".", "probs", "[", "e", "]", "for", "d", "in", "e_prob", ":", "self", ".", "transmissions", "[", "e", "]", "[", "d", "]", "=", "self", ".", "countef", "[", "e", "]", "[", "d", "]", "/", "self", ".", "totalf", "[", "e", "]" ]
32.883117
0.000767
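The pseudocode in the docstring is the IBM Model 1 EM loop. A compact standalone rendition on a two-sentence toy corpus, with uniform initialization of t(e|f) (which the class above presumably performs elsewhere):

from collections import defaultdict

pairs = [("the house", "das haus"), ("the book", "das buch")]
e_vocab = {e for es, _ in pairs for e in es.split()}
f_vocab = {f for _, fs in pairs for f in fs.split()}

# t[e][f] approximates t(e|f); start uniform.
t = {e: {f: 1.0 / len(f_vocab) for f in f_vocab} for e in e_vocab}

for _ in range(10):
    count = defaultdict(float)  # count(e|f)
    total = defaultdict(float)  # total(f)
    for es, fs in pairs:
        for e in es.split():
            total_s = sum(t[e][f] for f in fs.split())
            for f in fs.split():
                count[e, f] += t[e][f] / total_s
                total[f] += t[e][f] / total_s
    for e in e_vocab:
        for f in f_vocab:
            t[e][f] = count[e, f] / total[f]

# 'the' gets explained by 'das', so t(house|haus) climbs toward 1.
print(round(t['house']['haus'], 3))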
def create(self, friendly_name=values.unset): """ Create a new AccountInstance :param unicode friendly_name: A human readable description of the account :returns: Newly created AccountInstance :rtype: twilio.rest.api.v2010.account.AccountInstance """ data = values.of({'FriendlyName': friendly_name, }) payload = self._version.create( 'POST', self._uri, data=data, ) return AccountInstance(self._version, payload, )
[ "def", "create", "(", "self", ",", "friendly_name", "=", "values", ".", "unset", ")", ":", "data", "=", "values", ".", "of", "(", "{", "'FriendlyName'", ":", "friendly_name", ",", "}", ")", "payload", "=", "self", ".", "_version", ".", "create", "(", "'POST'", ",", "self", ".", "_uri", ",", "data", "=", "data", ",", ")", "return", "AccountInstance", "(", "self", ".", "_version", ",", "payload", ",", ")" ]
28.777778
0.005607
def build_backend(self, backend_node):
    """parse `backend` sections

    Args:
        backend_node (TreeNode): the parsed `backend` section node

    Returns:
        config.Backend: the assembled backend object
    """
    proxy_name = backend_node.backend_header.proxy_name.text
    config_block_lines = self.__build_config_block(
        backend_node.config_block)
    return config.Backend(name=proxy_name, config_block=config_block_lines)
[ "def", "build_backend", "(", "self", ",", "backend_node", ")", ":", "proxy_name", "=", "backend_node", ".", "backend_header", ".", "proxy_name", ".", "text", "config_block_lines", "=", "self", ".", "__build_config_block", "(", "backend_node", ".", "config_block", ")", "return", "config", ".", "Backend", "(", "name", "=", "proxy_name", ",", "config_block", "=", "config_block_lines", ")" ]
33.384615
0.004484
def get_info(pdb_id, url_root='http://www.rcsb.org/pdb/rest/describeMol?structureId='): '''Look up all information about a given PDB ID Parameters ---------- pdb_id : string A 4 character string giving a pdb entry of interest url_root : string The string root of the specific url for the request type Returns ------- out : OrderedDict An ordered dictionary object corresponding to bare xml ''' url = url_root + pdb_id req = urllib.request.Request(url) f = urllib.request.urlopen(req) result = f.read() assert result out = xmltodict.parse(result,process_namespaces=True) return out
[ "def", "get_info", "(", "pdb_id", ",", "url_root", "=", "'http://www.rcsb.org/pdb/rest/describeMol?structureId='", ")", ":", "url", "=", "url_root", "+", "pdb_id", "req", "=", "urllib", ".", "request", ".", "Request", "(", "url", ")", "f", "=", "urllib", ".", "request", ".", "urlopen", "(", "req", ")", "result", "=", "f", ".", "read", "(", ")", "assert", "result", "out", "=", "xmltodict", ".", "parse", "(", "result", ",", "process_namespaces", "=", "True", ")", "return", "out" ]
22.37931
0.004431
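The parsing step in isolation: xmltodict maps tags to nested (ordered) dict keys and attributes to '@'-prefixed keys. The XML below is a hand-written stand-in for a describeMol response (the legacy rcsb.org/pdb/rest endpoints have since been retired), so this runs offline:

import xmltodict

xml = b"""<molDescription>
  <structureId id="4HHB">
    <polymer entityNr="1" length="141" type="protein"/>
  </structureId>
</molDescription>"""

out = xmltodict.parse(xml, process_namespaces=True)
print(out['molDescription']['structureId']['@id'])  # 4HHB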
def sing(a, b, c=False, name='yetone'): """sing a song hehe :param a: I'm a :param b: I'm b :param c: I'm c :param name: I'm name """ print('test0.sing: <a: {}, b: {}, c: {}> by {}'.format(a, b, c, name))
[ "def", "sing", "(", "a", ",", "b", ",", "c", "=", "False", ",", "name", "=", "'yetone'", ")", ":", "print", "(", "'test0.sing: <a: {}, b: {}, c: {}> by {}'", ".", "format", "(", "a", ",", "b", ",", "c", ",", "name", ")", ")" ]
22.8
0.004219
async def add_ssh_key(self, user, key): """Add a public SSH key to this model. :param str user: The username of the user :param str key: The public ssh key """ key_facade = client.KeyManagerFacade.from_connection(self.connection()) return await key_facade.AddKeys([key], user)
[ "async", "def", "add_ssh_key", "(", "self", ",", "user", ",", "key", ")", ":", "key_facade", "=", "client", ".", "KeyManagerFacade", ".", "from_connection", "(", "self", ".", "connection", "(", ")", ")", "return", "await", "key_facade", ".", "AddKeys", "(", "[", "key", "]", ",", "user", ")" ]
35.333333
0.006135
def kfactor(R, k=2, algorithm='COBYLA'):
    """k-Factor Nearest Correlation Matrix Fit

    Parameters:
    -----------
    R : ndarray
        an ill-conditioned <d x d> correlation matrix,
        e.g. oxyba.illcond_corrmat

    k : int
        Number of factors (Default: 2)

    algorithm : str
        'COBYLA' (Default) or 'SLSQP'.
        Use 'COBYLA' because it is faster and
        more likely to converge to a feasible
        solution.

    Returns:
    --------
    C : ndarray
        Fitted <d x d> Correlation Matrix.

    X : ndarray
        The <d x k> Factor-Matrix.

    f : float
        Value of the objective function at the solution.

    g : ndarray
        Gradient of the objective function at the solution.

    results : scipy.optimize.optimize.OptimizeResult
        Scipy's result object.

    Links
    -----
    * Higham, N.J., 2002. Computing the nearest correlation
        matrix -- a problem from finance. IMA Journal of
        Numerical Analysis 22, 329–343.
        https://doi.org/10.1093/imanum/22.3.329
        http://www.maths.manchester.ac.uk/~higham/narep/narep369.pdf
    * Higham, Nick, 2009, presentation
        https://www.nag.com/market/nagquantday2009_ComputingaNearestCorrelationMatrixNickHigham.pdf
    """
    # subfunctions
    def corradjusteigen(C):
        """Reset negative diagonal elements 'D' to +1e-308
        This trick ensures that C is semipositive definite.
        """
        D, V = np.linalg.eig(C)
        Dadj = np.diag(np.maximum(1e-308, D))
        Cadj = np.dot(np.dot(V, Dadj), V.T)
        Cadj = (Cadj + Cadj.T) / 2.
        return Cadj

    def xtocorr(x, d, k):
        """Convert vector 'x' into <d x k> matrix 'X'
        and compute the Correlation Matrix
        """
        X = x.reshape(d, k)
        xx = np.dot(X, X.T)
        C = np.diag(np.diag(1 - xx)) + xx
        Cadj = corradjusteigen(C)
        return Cadj, X

    def objectivefunc(x, R, d, k):
        """Objective Function of the Minimization Problem.
        The Sum of Squared Diff (SSR) between the
        ill-conditioned matrix 'R' and the current
        iteration of 'C'
        """
        C, _ = xtocorr(x, d, k)
        f = np.sum((R - C)**2)
        extra = 2 * d * np.sum(np.abs(np.diag(C) - 1.0))
        return f + extra

    def gradientfunc(x, R, d, k):
        X = np.matrix(x.reshape(d, k))
        G = 4 * (X * (X.T * X) - R * X + X - np.diag(np.diag(X * X.T)) * X)
        return np.array(G.reshape(G.size,))

    def nlcon_ineq(x, d, k):
        """Non-Linear Constraint
        For each of the d variables, the sum of its k squared
        parameter values has to be less than 1.
        """
        X = x.reshape(d, k)
        return np.sum(X**2, axis=1) - 1

    # check k
    if k < 2:
        raise Exception('k<2 is not supported')

    # check eligible algorithm
    if algorithm not in ('COBYLA', 'SLSQP'):
        raise Exception('Optimization Algorithm not supported.')

    # dimension of the correlation matrix
    d = R.shape[0]

    # start values of the optimization are Ones
    x0 = np.ones(shape=(d * k, ))

    # simple lower (-1) and upper (+1) bounds
    bnds = [(-1, +1) for _ in range(d * k)]

    # for each of the d variables, the sum of its k squared
    # parameter values has to be less than 1
    con_ineq = {'type': 'ineq', 'args': (d, k), 'fun': nlcon_ineq}

    # set maxiter
    if algorithm == 'SLSQP':
        opt = {'ftol': 1e-12, 'maxiter': d * k * 1000, 'disp': False}
    else:
        opt = {'tol': 1e-8, 'catol': 2e-4,
               'maxiter': d * k * 200, 'disp': False}

    # run the optimization
    results = scipy.optimize.minimize(
        objectivefunc, x0,
        jac=gradientfunc,
        args=(R, d, k),
        bounds=bnds,
        constraints=[con_ineq],
        method=algorithm,
        options=opt)

    # convert the d*k parameters (of k factors) into the correlation matrix
    C, X = xtocorr(results.x, d, k)

    # for information purposes
    f = objectivefunc(results.x, R, d, k)
    g = gradientfunc(results.x, R, d, k)

    # check solution
    if np.any(np.linalg.eigvals(C) < 0.0):
        warnings.warn("Matrix is not positive definite")

    # done
    return C, X, f, g, results
[ "def", "kfactor", "(", "R", ",", "k", "=", "2", ",", "algorithm", "=", "'COBYLA'", ")", ":", "# subfunctions", "def", "corradjusteigen", "(", "C", ")", ":", "\"\"\"Reset negative diagonal elements 'D' to +1e-308\n This trick ensures that C semipositive definite.\n \"\"\"", "D", ",", "V", "=", "np", ".", "linalg", ".", "eig", "(", "C", ")", "Dadj", "=", "np", ".", "diag", "(", "np", ".", "maximum", "(", "1e-308", ",", "D", ")", ")", "Cadj", "=", "np", ".", "dot", "(", "np", ".", "dot", "(", "V", ",", "Dadj", ")", ",", "V", ".", "T", ")", "Cadj", "=", "(", "Cadj", "+", "Cadj", ".", "T", ")", "/", "2.", "return", "Cadj", "def", "xtocorr", "(", "x", ",", "d", ",", "k", ")", ":", "\"\"\"Convert vector 'x' into <d x k> matrix 'X'\n and compute the Correlation Matrix\n \"\"\"", "X", "=", "x", ".", "reshape", "(", "d", ",", "k", ")", "xx", "=", "np", ".", "dot", "(", "X", ",", "X", ".", "T", ")", "C", "=", "np", ".", "diag", "(", "np", ".", "diag", "(", "1", "-", "xx", ")", ")", "+", "xx", "Cadj", "=", "corradjusteigen", "(", "C", ")", "return", "Cadj", ",", "X", "def", "objectivefunc", "(", "x", ",", "R", ",", "d", ",", "k", ")", ":", "\"\"\"Objective Function of the Minimization Probem.\n The Sum of Squared Diff (SSR) between the\n ill-conditioned matrix 'R' and the current\n iteration of 'C'\n \"\"\"", "C", ",", "_", "=", "xtocorr", "(", "x", ",", "d", ",", "k", ")", "f", "=", "np", ".", "sum", "(", "(", "R", "-", "C", ")", "**", "2", ")", "extra", "=", "2", "*", "d", "*", "np", ".", "sum", "(", "np", ".", "abs", "(", "np", ".", "diag", "(", "C", ")", "-", "1.0", ")", ")", "return", "f", "+", "extra", "def", "gradientfunc", "(", "x", ",", "R", ",", "d", ",", "k", ")", ":", "X", "=", "np", ".", "matrix", "(", "x", ".", "reshape", "(", "d", ",", "k", ")", ")", "G", "=", "4", "*", "(", "X", "*", "(", "X", ".", "T", "*", "X", ")", "-", "R", "*", "X", "+", "X", "-", "np", ".", "diag", "(", "np", ".", "diag", "(", "X", "*", "X", ".", "T", ")", ")", "*", "X", ")", "return", "np", ".", "array", "(", "G", ".", "reshape", "(", "G", ".", "size", ",", ")", ")", "def", "nlcon_ineq", "(", "x", ",", "d", ",", "k", ")", ":", "\"\"\"Non-Linear Constraint\n The sum of the d absolute parameter values\n for each of the k factors have to be less than 1.\n (Sum for each of the k columns is lt 1)\n \"\"\"", "X", "=", "x", ".", "reshape", "(", "d", ",", "k", ")", "return", "np", ".", "sum", "(", "X", "**", "2", ",", "axis", "=", "1", ")", "-", "1", "# check k", "if", "k", "<", "2", ":", "raise", "Exception", "(", "'k<2 is not supported'", ")", "# check eligible algorithm", "if", "algorithm", "not", "in", "(", "'COBYLA'", ",", "'SLSQP'", ")", ":", "raise", "Exception", "(", "'Optimization Algorithm not supported.'", ")", "# dimension of the correlation matrix", "d", "=", "R", ".", "shape", "[", "0", "]", "# start values of the optimization are Ones", "x0", "=", "np", ".", "ones", "(", "shape", "=", "(", "d", "*", "k", ",", ")", ")", "# simple lower (-1) and upper (+1) bounds", "bnds", "=", "[", "(", "-", "1", ",", "+", "1", ")", "for", "_", "in", "range", "(", "d", "*", "k", ")", "]", "# for each of the k factors, the sum of its d absolute params", "# values has to be less than 1", "con_ineq", "=", "{", "'type'", ":", "'ineq'", ",", "'args'", ":", "(", "d", ",", "k", ")", ",", "'fun'", ":", "nlcon_ineq", "}", "# set maxiter", "if", "algorithm", "is", "'SLSQP'", ":", "opt", "=", "{", "'ftol'", ":", "1e-12", ",", "'maxiter'", ":", "d", "*", "k", "*", "1000", ",", "'disp'", ":", "False", "}", "else", ":", "opt", "=", 
"{", "'tol'", ":", "1e-8", ",", "'catol'", ":", "2e-4", ",", "'maxiter'", ":", "d", "*", "k", "*", "200", ",", "'disp'", ":", "False", "}", "# run the optimization", "results", "=", "scipy", ".", "optimize", ".", "minimize", "(", "objectivefunc", ",", "x0", ",", "jac", "=", "gradientfunc", ",", "args", "=", "(", "R", ",", "d", ",", "k", ")", ",", "bounds", "=", "bnds", ",", "constraints", "=", "[", "con_ineq", "]", ",", "method", "=", "algorithm", ",", "options", "=", "opt", ")", "# convert the d*k paramter (of k factors) into the correlation matrix", "C", ",", "X", "=", "xtocorr", "(", "results", ".", "x", ",", "d", ",", "k", ")", "# for information purposes", "f", "=", "objectivefunc", "(", "results", ".", "x", ",", "R", ",", "d", ",", "k", ")", "g", "=", "gradientfunc", "(", "results", ".", "x", ",", "R", ",", "d", ",", "k", ")", "# check solution", "if", "np", ".", "any", "(", "np", ".", "linalg", ".", "eigvals", "(", "C", ")", "<", "0.0", ")", ":", "warnings", ".", "warn", "(", "\"Matrix is not positive definite\"", ")", "# done", "return", "C", ",", "X", ",", "f", ",", "g", ",", "results" ]
29.2
0.000237
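A usage sketch for kfactor, assuming the function and its numpy/scipy/warnings imports are in scope; the input is a small synthetic matrix rather than a genuinely ill-conditioned one:

import numpy as np

R = np.array([[1.0, 0.9, 0.7],
              [0.9, 1.0, 0.3],
              [0.7, 0.3, 1.0]])

C, X, f, g, results = kfactor(R, k=2)
print(np.round(C, 3))      # fitted correlation matrix
print(X.shape)             # (3, 2) factor loadings
# eigvalsh is fine here because C is symmetric by construction
print(np.all(np.linalg.eigvalsh(C) >= 0.0))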
def _is_txn_to_replay(self, txn_id, possible_successor, already_seen): """Decide if possible_successor should be replayed. Args: txn_id (str): Id of txn in failed batch. possible_successor (str): Id of txn to possibly replay. already_seen (list): A list of possible_successors that have been replayed. Returns: (bool): If the possible_successor should be replayed. """ is_successor = self._is_predecessor_of_possible_successor( txn_id, possible_successor) in_different_batch = not self._is_in_same_batch(txn_id, possible_successor) has_not_been_seen = possible_successor not in already_seen return is_successor and in_different_batch and has_not_been_seen
[ "def", "_is_txn_to_replay", "(", "self", ",", "txn_id", ",", "possible_successor", ",", "already_seen", ")", ":", "is_successor", "=", "self", ".", "_is_predecessor_of_possible_successor", "(", "txn_id", ",", "possible_successor", ")", "in_different_batch", "=", "not", "self", ".", "_is_in_same_batch", "(", "txn_id", ",", "possible_successor", ")", "has_not_been_seen", "=", "possible_successor", "not", "in", "already_seen", "return", "is_successor", "and", "in_different_batch", "and", "has_not_been_seen" ]
40.333333
0.002307
def list_commands(self, ctx):
    """Override for showing commands in a particular order"""
    commands = super(LegitGroup, self).list_commands(ctx)
    return [cmd for cmd in order_manually(commands)]
[ "def", "list_commands", "(", "self", ",", "ctx", ")", ":", "commands", "=", "super", "(", "LegitGroup", ",", "self", ")", ".", "list_commands", "(", "ctx", ")", "return", "[", "cmd", "for", "cmd", "in", "order_manually", "(", "commands", ")", "]" ]
52.25
0.009434
def get_sphinx_ref(self, url, label=None):
    """
    Get an internal sphinx cross reference corresponding to `url`
    into the online docs, associated with a link with label
    `label` (if not None).
    """

    # Raise an exception if the initial part of url does not match
    # the base url for this object
    n = len(self.baseurl)
    if url[0:n] != self.baseurl:
        raise KeyError('base of url %s does not match base url %s' %
                       (url, self.baseurl))
    # The reverse lookup key is either the full url or the postfix
    # to the base url, depending on flag addbase
    if self.addbase:
        pstfx = url[n:]
    else:
        pstfx = url
    # Look up the cross-reference role and referenced object
    # name via the postfix to the base url
    role, name = self.revinv[pstfx]
    # If the label string is provided and is shorter than the name
    # string we have looked up, assume it is a partial name for
    # the same object: append a '.' at the front and use it as the
    # object name in the cross-reference
    if label is not None and len(label) < len(name):
        name = '.' + label
    # Construct cross-reference
    ref = ':%s:`%s`' % (role, name)

    return ref
[ "def", "get_sphinx_ref", "(", "self", ",", "url", ",", "label", "=", "None", ")", ":", "# Raise an exception if the initial part of url does not match", "# the base url for this object", "n", "=", "len", "(", "self", ".", "baseurl", ")", "if", "url", "[", "0", ":", "n", "]", "!=", "self", ".", "baseurl", ":", "raise", "KeyError", "(", "'base of url %s does not match base url %s'", "%", "(", "url", ",", "self", ".", "baseurl", ")", ")", "# The reverse lookup key is either the full url or the postfix", "# to the base url, depending on flag addbase", "if", "self", ".", "addbase", ":", "pstfx", "=", "url", "[", "n", ":", "]", "else", ":", "pstfx", "=", "url", "# Look up the cross-reference role and referenced object", "# name via the postfix to the base url", "role", ",", "name", "=", "self", ".", "revinv", "[", "pstfx", "]", "# If the label string is provided and is shorter than the name", "# string we have lookup up, assume it is a partial name for", "# the same object: append a '.' at the front and use it as the", "# object name in the cross-reference", "if", "label", "is", "not", "None", "and", "len", "(", "label", ")", "<", "len", "(", "name", ")", ":", "name", "=", "'.'", "+", "label", "# Construct cross-reference", "ref", "=", "':%s:`%s`'", "%", "(", "role", ",", "name", ")", "return", "ref" ]
38.382353
0.001495
def setDebug(string): """Set the DEBUG string. This controls the log output.""" global _DEBUG global _ENV_VAR_NAME global _categories _DEBUG = string debug('log', "%s set to %s" % (_ENV_VAR_NAME, _DEBUG)) # reparse all already registered category levels for category in _categories: registerCategory(category)
[ "def", "setDebug", "(", "string", ")", ":", "global", "_DEBUG", "global", "_ENV_VAR_NAME", "global", "_categories", "_DEBUG", "=", "string", "debug", "(", "'log'", ",", "\"%s set to %s\"", "%", "(", "_ENV_VAR_NAME", ",", "_DEBUG", ")", ")", "# reparse all already registered category levels", "for", "category", "in", "_categories", ":", "registerCategory", "(", "category", ")" ]
28.416667
0.002841
def _oauth10a_signature( consumer_token: Dict[str, Any], method: str, url: str, parameters: Dict[str, Any] = {}, token: Dict[str, Any] = None, ) -> bytes: """Calculates the HMAC-SHA1 OAuth 1.0a signature for the given request. See http://oauth.net/core/1.0a/#signing_process """ parts = urllib.parse.urlparse(url) scheme, netloc, path = parts[:3] normalized_url = scheme.lower() + "://" + netloc.lower() + path base_elems = [] base_elems.append(method.upper()) base_elems.append(normalized_url) base_elems.append( "&".join( "%s=%s" % (k, _oauth_escape(str(v))) for k, v in sorted(parameters.items()) ) ) base_string = "&".join(_oauth_escape(e) for e in base_elems) key_elems = [escape.utf8(urllib.parse.quote(consumer_token["secret"], safe="~"))] key_elems.append( escape.utf8(urllib.parse.quote(token["secret"], safe="~") if token else "") ) key = b"&".join(key_elems) hash = hmac.new(key, escape.utf8(base_string), hashlib.sha1) return binascii.b2a_base64(hash.digest())[:-1]
[ "def", "_oauth10a_signature", "(", "consumer_token", ":", "Dict", "[", "str", ",", "Any", "]", ",", "method", ":", "str", ",", "url", ":", "str", ",", "parameters", ":", "Dict", "[", "str", ",", "Any", "]", "=", "{", "}", ",", "token", ":", "Dict", "[", "str", ",", "Any", "]", "=", "None", ",", ")", "->", "bytes", ":", "parts", "=", "urllib", ".", "parse", ".", "urlparse", "(", "url", ")", "scheme", ",", "netloc", ",", "path", "=", "parts", "[", ":", "3", "]", "normalized_url", "=", "scheme", ".", "lower", "(", ")", "+", "\"://\"", "+", "netloc", ".", "lower", "(", ")", "+", "path", "base_elems", "=", "[", "]", "base_elems", ".", "append", "(", "method", ".", "upper", "(", ")", ")", "base_elems", ".", "append", "(", "normalized_url", ")", "base_elems", ".", "append", "(", "\"&\"", ".", "join", "(", "\"%s=%s\"", "%", "(", "k", ",", "_oauth_escape", "(", "str", "(", "v", ")", ")", ")", "for", "k", ",", "v", "in", "sorted", "(", "parameters", ".", "items", "(", ")", ")", ")", ")", "base_string", "=", "\"&\"", ".", "join", "(", "_oauth_escape", "(", "e", ")", "for", "e", "in", "base_elems", ")", "key_elems", "=", "[", "escape", ".", "utf8", "(", "urllib", ".", "parse", ".", "quote", "(", "consumer_token", "[", "\"secret\"", "]", ",", "safe", "=", "\"~\"", ")", ")", "]", "key_elems", ".", "append", "(", "escape", ".", "utf8", "(", "urllib", ".", "parse", ".", "quote", "(", "token", "[", "\"secret\"", "]", ",", "safe", "=", "\"~\"", ")", "if", "token", "else", "\"\"", ")", ")", "key", "=", "b\"&\"", ".", "join", "(", "key_elems", ")", "hash", "=", "hmac", ".", "new", "(", "key", ",", "escape", ".", "utf8", "(", "base_string", ")", ",", "hashlib", ".", "sha1", ")", "return", "binascii", ".", "b2a_base64", "(", "hash", ".", "digest", "(", ")", ")", "[", ":", "-", "1", "]" ]
32.636364
0.003607
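A stdlib-only worked sketch of the same signing steps. The escaping helper mirrors what _oauth_escape presumably does (percent-encoding that leaves only the RFC 5849 unreserved characters literal); that, plus the URL, parameters, and secrets, are placeholders:

import base64
import hashlib
import hmac
import urllib.parse

def oauth_escape(s):
    # quote() never encodes letters, digits, or '_.-~', so safe=''
    # yields exactly the OAuth unreserved set.
    return urllib.parse.quote(s, safe='')

params = {'oauth_nonce': 'abc', 'oauth_timestamp': '1400000000'}
base_string = '&'.join([
    'GET',
    oauth_escape('https://api.example.com/resource'),
    oauth_escape('&'.join('%s=%s' % (k, oauth_escape(v))
                          for k, v in sorted(params.items()))),
])
key = b'consumer_secret' + b'&' + b'token_secret'
sig = base64.b64encode(hmac.new(key, base_string.encode(), hashlib.sha1).digest())
print(sig)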
def checkAuthentication(): """ The request will have a parameter 'key' if it came from the command line client, or have a session key of 'key' if it's the browser. If the token is not found, start the login process. If there is no oidcClient, we are running naked and we don't check. If we're being redirected to the oidcCallback we don't check. :returns None if all is ok (and the request handler continues as usual). Otherwise if the key was in the session (therefore we're in a browser) then startLogin() will redirect to the OIDC provider. If the key was in the request arguments, we're using the command line and just raise an exception. """ if app.oidcClient is None: return if flask.request.endpoint == 'oidcCallback': return key = flask.session.get('key') or flask.request.args.get('key') if key is None or not app.cache.get(key): if 'key' in flask.request.args: raise exceptions.NotAuthenticatedException() else: return startLogin()
[ "def", "checkAuthentication", "(", ")", ":", "if", "app", ".", "oidcClient", "is", "None", ":", "return", "if", "flask", ".", "request", ".", "endpoint", "==", "'oidcCallback'", ":", "return", "key", "=", "flask", ".", "session", ".", "get", "(", "'key'", ")", "or", "flask", ".", "request", ".", "args", ".", "get", "(", "'key'", ")", "if", "key", "is", "None", "or", "not", "app", ".", "cache", ".", "get", "(", "key", ")", ":", "if", "'key'", "in", "flask", ".", "request", ".", "args", ":", "raise", "exceptions", ".", "NotAuthenticatedException", "(", ")", "else", ":", "return", "startLogin", "(", ")" ]
41.56
0.000941
def _get_all_taskpaper_files( self, workspaceRoot): """*get a list of all the taskpaper filepaths in the workspace (excluding the sync directory)* **Key Arguments:** - ``workspaceRoot`` -- path to the root folder of a workspace containing taskpaper files **Return:** - ``taskpaperFiles`` -- a list of paths to all the taskpaper files within the workspace """ self.log.info('starting the ``_get_all_taskpaper_files`` method') theseFiles = recursive_directory_listing( log=self.log, baseFolderPath=self.workspaceRoot, whatToList="files" # all | files | dirs ) taskpaperFiles = [] taskpaperFiles[:] = [f for f in theseFiles if os.path.splitext(f)[ 1] == ".taskpaper" and self.syncFolder not in f] self.log.info('completed the ``_get_all_taskpaper_files`` method') return taskpaperFiles
[ "def", "_get_all_taskpaper_files", "(", "self", ",", "workspaceRoot", ")", ":", "self", ".", "log", ".", "info", "(", "'starting the ``_get_all_taskpaper_files`` method'", ")", "theseFiles", "=", "recursive_directory_listing", "(", "log", "=", "self", ".", "log", ",", "baseFolderPath", "=", "self", ".", "workspaceRoot", ",", "whatToList", "=", "\"files\"", "# all | files | dirs", ")", "taskpaperFiles", "=", "[", "]", "taskpaperFiles", "[", ":", "]", "=", "[", "f", "for", "f", "in", "theseFiles", "if", "os", ".", "path", ".", "splitext", "(", "f", ")", "[", "1", "]", "==", "\".taskpaper\"", "and", "self", ".", "syncFolder", "not", "in", "f", "]", "self", ".", "log", ".", "info", "(", "'completed the ``_get_all_taskpaper_files`` method'", ")", "return", "taskpaperFiles" ]
37.92
0.005144
def get(**kwargs): ''' Return system rc configuration variables CLI Example: .. code-block:: bash salt '*' sysrc.get includeDefaults=True ''' cmd = 'sysrc -v' if 'file' in kwargs: cmd += ' -f '+kwargs['file'] if 'jail' in kwargs: cmd += ' -j '+kwargs['jail'] if 'name' in kwargs: cmd += ' '+kwargs['name'] elif kwargs.get('includeDefaults', False): cmd += ' -A' else: cmd += ' -a' sysrcs = __salt__['cmd.run'](cmd) if "sysrc: unknown variable" in sysrcs: # raise CommandExecutionError(sysrcs) return None ret = {} for sysrc in sysrcs.split("\n"): line_components = sysrc.split(': ') rcfile = line_components[0] if len(line_components) > 2: var = line_components[1] val = line_components[2] else: var = line_components[1].rstrip(':') val = '' if rcfile not in ret: ret[rcfile] = {} ret[rcfile][var] = val return ret
[ "def", "get", "(", "*", "*", "kwargs", ")", ":", "cmd", "=", "'sysrc -v'", "if", "'file'", "in", "kwargs", ":", "cmd", "+=", "' -f '", "+", "kwargs", "[", "'file'", "]", "if", "'jail'", "in", "kwargs", ":", "cmd", "+=", "' -j '", "+", "kwargs", "[", "'jail'", "]", "if", "'name'", "in", "kwargs", ":", "cmd", "+=", "' '", "+", "kwargs", "[", "'name'", "]", "elif", "kwargs", ".", "get", "(", "'includeDefaults'", ",", "False", ")", ":", "cmd", "+=", "' -A'", "else", ":", "cmd", "+=", "' -a'", "sysrcs", "=", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ")", "if", "\"sysrc: unknown variable\"", "in", "sysrcs", ":", "# raise CommandExecutionError(sysrcs)", "return", "None", "ret", "=", "{", "}", "for", "sysrc", "in", "sysrcs", ".", "split", "(", "\"\\n\"", ")", ":", "line_components", "=", "sysrc", ".", "split", "(", "': '", ")", "rcfile", "=", "line_components", "[", "0", "]", "if", "len", "(", "line_components", ")", ">", "2", ":", "var", "=", "line_components", "[", "1", "]", "val", "=", "line_components", "[", "2", "]", "else", ":", "var", "=", "line_components", "[", "1", "]", ".", "rstrip", "(", "':'", ")", "val", "=", "''", "if", "rcfile", "not", "in", "ret", ":", "ret", "[", "rcfile", "]", "=", "{", "}", "ret", "[", "rcfile", "]", "[", "var", "]", "=", "val", "return", "ret" ]
22.6
0.000943
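The parsing loop in isolation on canned `sysrc -v` output; the file names and variables are made up, and the second line exercises the no-value branch:

lines = [
    "/etc/rc.conf: sshd_enable: YES",
    "/etc/rc.conf: cloned_interfaces:",
    "/etc/defaults/rc.conf: dumpdev: AUTO",
]
ret = {}
for sysrc in lines:
    line_components = sysrc.split(': ')
    rcfile = line_components[0]
    if len(line_components) > 2:
        var, val = line_components[1], line_components[2]
    else:
        # "var:" with no value: strip the trailing colon
        var, val = line_components[1].rstrip(':'), ''
    ret.setdefault(rcfile, {})[var] = val
print(ret)
# {'/etc/rc.conf': {'sshd_enable': 'YES', 'cloned_interfaces': ''},
#  '/etc/defaults/rc.conf': {'dumpdev': 'AUTO'}}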
def get(self, key, ttl=0, quiet=None, replica=False, no_format=False):
    """Obtain an object stored in Couchbase by given key.

    :param string key: The key to fetch. The type of key is the same
        as mentioned in :meth:`upsert`

    :param int ttl: If specified, indicates that the key's expiration
        time should be *modified* when retrieving the value.

    :param boolean quiet: causes `get` to return None instead of
        raising an exception when the key is not found. It defaults
        to the value set by :attr:`~quiet` on the instance. In
        `quiet` mode, the error may still be obtained by inspecting
        the :attr:`~.Result.rc` attribute of the :class:`.Result`
        object, or checking :attr:`.Result.success`.

        Note that the default value is `None`, which means to use
        the :attr:`quiet`. If it is a boolean (i.e. `True` or
        `False`) it will override the `couchbase.bucket.Bucket`-level
        :attr:`quiet` attribute.

    :param bool replica: Whether to fetch this key from a replica
        rather than querying the master server. This is primarily
        useful when operations with the master fail (possibly due to
        a configuration change). It should normally be used in an
        exception handler like so

        Using the ``replica`` option::

            try:
                res = c.get("key", quiet=True) # suppress not-found errors
            except CouchbaseError:
                res = c.get("key", replica=True, quiet=True)

    :param bool no_format: If set to ``True``, then the value will
        always be delivered in the :class:`~couchbase.result.Result`
        object as being of :data:`~couchbase.FMT_BYTES`. This is an
        item-local equivalent of using the :attr:`data_passthrough`
        option

    :raise: :exc:`.NotFoundError` if the key does not exist
    :raise: :exc:`.CouchbaseNetworkError`
    :raise: :exc:`.ValueFormatError` if the value cannot be
        deserialized with chosen decoder, e.g. if you try to
        retrieve an object stored with an unrecognized format
    :return: A :class:`~.Result` object

    Simple get::

        value = cb.get('key').value

    Get multiple values::

        cb.get_multi(['foo', 'bar'])
        # { 'foo' : <Result(...)>, 'bar' : <Result(...)> }

    Inspect the flags::

        rv = cb.get("key")
        value, flags, cas = rv.value, rv.flags, rv.cas

    Update the expiration time::

        rv = cb.get("key", ttl=10)
        # Expires in ten seconds

    .. seealso:: :meth:`get_multi`
    """

    return _Base.get(self, key, ttl=ttl, quiet=quiet,
                     replica=replica, no_format=no_format)
[ "def", "get", "(", "self", ",", "key", ",", "ttl", "=", "0", ",", "quiet", "=", "None", ",", "replica", "=", "False", ",", "no_format", "=", "False", ")", ":", "return", "_Base", ".", "get", "(", "self", ",", "key", ",", "ttl", "=", "ttl", ",", "quiet", "=", "quiet", ",", "replica", "=", "replica", ",", "no_format", "=", "no_format", ")" ]
39.577465
0.000694
def subtract_params(param_list_left, param_list_right): """Subtract two lists of parameters :param param_list_left: list of numpy arrays :param param_list_right: list of numpy arrays :return: list of numpy arrays """ res = [] for x, y in zip(param_list_left, param_list_right): res.append(x - y) return res
[ "def", "subtract_params", "(", "param_list_left", ",", "param_list_right", ")", ":", "res", "=", "[", "]", "for", "x", ",", "y", "in", "zip", "(", "param_list_left", ",", "param_list_right", ")", ":", "res", ".", "append", "(", "x", "-", "y", ")", "return", "res" ]
30.636364
0.002882
def _on_scan_request(self, sequence, topic, message):
    """Process a request for scanning information

    Args:
        sequence (int): The sequence number of the packet received
        topic (string): The topic this message was received on
        message (dict): The message itself
    """

    if messages.ProbeCommand.matches(message):
        self._logger.debug("Received probe message on topic %s, message=%s", topic, message)
        self._loop.add_callback(self._publish_scan_response, message['client'])
    else:
        self._logger.warn("Invalid message received on topic %s, message=%s", topic, message)
[ "def", "_on_scan_request", "(", "self", ",", "sequence", ",", "topic", ",", "message", ")", ":", "if", "messages", ".", "ProbeCommand", ".", "matches", "(", "message", ")", ":", "self", ".", "_logger", ".", "debug", "(", "\"Received probe message on topic %s, message=%s\"", ",", "topic", ",", "message", ")", "self", ".", "_loop", ".", "add_callback", "(", "self", ".", "_publish_scan_response", ",", "message", "[", "'client'", "]", ")", "else", ":", "self", ".", "_logger", ".", "warn", "(", "\"Invalid message received on topic %s, message=%s\"", ",", "topic", ",", "message", ")" ]
47.8
0.00684
def _disable(name, started, result=True, skip_verify=False, **kwargs): ''' Disable the service ''' ret = {} if not skip_verify: # is service available? try: if not _available(name, ret): ret['result'] = True return ret except CommandExecutionError as exc: ret['result'] = False ret['comment'] = exc.strerror return ret # Set default expected result ret['result'] = result # is enable/disable available? if 'service.disable' not in __salt__ or 'service.disabled' not in __salt__: if started is True: ret['comment'] = ('Disable is not available on this minion,' ' service {0} started').format(name) elif started is None: ret['comment'] = ('Disable is not available on this minion,' ' service {0} is in the desired state' ).format(name) else: ret['comment'] = ('Disable is not available on this minion,' ' service {0} is dead').format(name) return ret # Service can be disabled if salt.utils.platform.is_windows(): # service.disabled in Windows returns True for services that are set to # Manual start, so we need to check specifically for Disabled before_toggle_disable_status = __salt__['service.info'](name)['StartType'] in ['Disabled'] else: before_toggle_disable_status = __salt__['service.disabled'](name) if before_toggle_disable_status: # Service is disabled if started is True: ret['comment'] = ('Service {0} is already disabled,' ' and is running').format(name) elif started is None: # always be sure in this case to reset the changes dict ret['changes'] = {} ret['comment'] = ('Service {0} is already disabled,' ' and is in the desired state').format(name) else: ret['comment'] = ('Service {0} is already disabled,' ' and is dead').format(name) return ret # Service needs to be disabled if __opts__['test']: ret['result'] = None ret['comment'] = 'Service {0} set to be disabled'.format(name) return ret if __salt__['service.disable'](name, **kwargs): # Service has been disabled ret['changes'] = {} after_toggle_disable_status = __salt__['service.disabled'](name) # on upstart, certain services like apparmor will always return # False, even if correctly activated # do not trigger a change if before_toggle_disable_status != after_toggle_disable_status: ret['changes'][name] = True if started is True: ret['comment'] = ('Service {0} has been disabled,' ' and is running').format(name) elif started is None: ret['comment'] = ('Service {0} has been disabled,' ' and is in the desired state').format(name) else: ret['comment'] = ('Service {0} has been disabled,' ' and is dead').format(name) return ret # Service failed to be disabled ret['result'] = False if started is True: ret['comment'] = ('Failed when setting service {0} to not start' ' at boot, and is running').format(name) elif started is None: ret['comment'] = ('Failed when setting service {0} to not start' ' at boot, but the service was already running' ).format(name) else: ret['comment'] = ('Failed when setting service {0} to not start' ' at boot, and the service is dead').format(name) return ret
[ "def", "_disable", "(", "name", ",", "started", ",", "result", "=", "True", ",", "skip_verify", "=", "False", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "{", "}", "if", "not", "skip_verify", ":", "# is service available?", "try", ":", "if", "not", "_available", "(", "name", ",", "ret", ")", ":", "ret", "[", "'result'", "]", "=", "True", "return", "ret", "except", "CommandExecutionError", "as", "exc", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "exc", ".", "strerror", "return", "ret", "# Set default expected result", "ret", "[", "'result'", "]", "=", "result", "# is enable/disable available?", "if", "'service.disable'", "not", "in", "__salt__", "or", "'service.disabled'", "not", "in", "__salt__", ":", "if", "started", "is", "True", ":", "ret", "[", "'comment'", "]", "=", "(", "'Disable is not available on this minion,'", "' service {0} started'", ")", ".", "format", "(", "name", ")", "elif", "started", "is", "None", ":", "ret", "[", "'comment'", "]", "=", "(", "'Disable is not available on this minion,'", "' service {0} is in the desired state'", ")", ".", "format", "(", "name", ")", "else", ":", "ret", "[", "'comment'", "]", "=", "(", "'Disable is not available on this minion,'", "' service {0} is dead'", ")", ".", "format", "(", "name", ")", "return", "ret", "# Service can be disabled", "if", "salt", ".", "utils", ".", "platform", ".", "is_windows", "(", ")", ":", "# service.disabled in Windows returns True for services that are set to", "# Manual start, so we need to check specifically for Disabled", "before_toggle_disable_status", "=", "__salt__", "[", "'service.info'", "]", "(", "name", ")", "[", "'StartType'", "]", "in", "[", "'Disabled'", "]", "else", ":", "before_toggle_disable_status", "=", "__salt__", "[", "'service.disabled'", "]", "(", "name", ")", "if", "before_toggle_disable_status", ":", "# Service is disabled", "if", "started", "is", "True", ":", "ret", "[", "'comment'", "]", "=", "(", "'Service {0} is already disabled,'", "' and is running'", ")", ".", "format", "(", "name", ")", "elif", "started", "is", "None", ":", "# always be sure in this case to reset the changes dict", "ret", "[", "'changes'", "]", "=", "{", "}", "ret", "[", "'comment'", "]", "=", "(", "'Service {0} is already disabled,'", "' and is in the desired state'", ")", ".", "format", "(", "name", ")", "else", ":", "ret", "[", "'comment'", "]", "=", "(", "'Service {0} is already disabled,'", "' and is dead'", ")", ".", "format", "(", "name", ")", "return", "ret", "# Service needs to be disabled", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'result'", "]", "=", "None", "ret", "[", "'comment'", "]", "=", "'Service {0} set to be disabled'", ".", "format", "(", "name", ")", "return", "ret", "if", "__salt__", "[", "'service.disable'", "]", "(", "name", ",", "*", "*", "kwargs", ")", ":", "# Service has been disabled", "ret", "[", "'changes'", "]", "=", "{", "}", "after_toggle_disable_status", "=", "__salt__", "[", "'service.disabled'", "]", "(", "name", ")", "# on upstart, certain services like apparmor will always return", "# False, even if correctly activated", "# do not trigger a change", "if", "before_toggle_disable_status", "!=", "after_toggle_disable_status", ":", "ret", "[", "'changes'", "]", "[", "name", "]", "=", "True", "if", "started", "is", "True", ":", "ret", "[", "'comment'", "]", "=", "(", "'Service {0} has been disabled,'", "' and is running'", ")", ".", "format", "(", "name", ")", "elif", "started", "is", "None", ":", "ret", "[", "'comment'", 
"]", "=", "(", "'Service {0} has been disabled,'", "' and is in the desired state'", ")", ".", "format", "(", "name", ")", "else", ":", "ret", "[", "'comment'", "]", "=", "(", "'Service {0} has been disabled,'", "' and is dead'", ")", ".", "format", "(", "name", ")", "return", "ret", "# Service failed to be disabled", "ret", "[", "'result'", "]", "=", "False", "if", "started", "is", "True", ":", "ret", "[", "'comment'", "]", "=", "(", "'Failed when setting service {0} to not start'", "' at boot, and is running'", ")", ".", "format", "(", "name", ")", "elif", "started", "is", "None", ":", "ret", "[", "'comment'", "]", "=", "(", "'Failed when setting service {0} to not start'", "' at boot, but the service was already running'", ")", ".", "format", "(", "name", ")", "else", ":", "ret", "[", "'comment'", "]", "=", "(", "'Failed when setting service {0} to not start'", "' at boot, and the service is dead'", ")", ".", "format", "(", "name", ")", "return", "ret" ]
40.663158
0.000505
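The cross-platform check in the `_disable` tokens above deserves a note: on Windows, service.disabled also reports True for services set to Manual start, so the code reads StartType directly instead. A minimal standalone sketch of that check, assuming it runs inside a Salt execution/state module where the __salt__ dunder is injected (the helper name is made up):

import salt.utils.platform

def _is_truly_disabled(name):
    # On Windows, service.disabled returns True for Manual-start
    # services too, so look for the literal 'Disabled' StartType.
    if salt.utils.platform.is_windows():
        return __salt__['service.info'](name)['StartType'] in ['Disabled']
    return __salt__['service.disabled'](name)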
def password_change(self, wallet, password): """ Changes the password for **wallet** to **password** .. enable_control required :param wallet: Wallet to change password for :type wallet: str :param password: Password to set :type password: str :raises: :py:exc:`nano.rpc.RPCException` >>> rpc.password_change( ... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F", ... password="test" ... ) True """ wallet = self._process_value(wallet, 'wallet') payload = {"wallet": wallet, "password": password} resp = self.call('password_change', payload) return resp['changed'] == '1'
[ "def", "password_change", "(", "self", ",", "wallet", ",", "password", ")", ":", "wallet", "=", "self", ".", "_process_value", "(", "wallet", ",", "'wallet'", ")", "payload", "=", "{", "\"wallet\"", ":", "wallet", ",", "\"password\"", ":", "password", "}", "resp", "=", "self", ".", "call", "(", "'password_change'", ",", "payload", ")", "return", "resp", "[", "'changed'", "]", "==", "'1'" ]
26.071429
0.003963
def int_global_to_local_start(self, index, axis=0):
    """
    Calculate the local index from a global index, relative to the local
    start index on the given axis

    :param index: global index as integer
    :param axis: current axis to process
    :return: local index as integer; 0 if the global index lies before the
        local range, None if it lies at or beyond the writable stop
    """
    if index >= self.__mask[axis].stop-self.__halos[1][axis]:
        return None
    if index < self.__mask[axis].start:
        return 0
    return index-self.__mask[axis].start
[ "def", "int_global_to_local_start", "(", "self", ",", "index", ",", "axis", "=", "0", ")", ":", "if", "index", ">=", "self", ".", "__mask", "[", "axis", "]", ".", "stop", "-", "self", ".", "__halos", "[", "1", "]", "[", "axis", "]", ":", "return", "None", "if", "index", "<", "self", ".", "__mask", "[", "axis", "]", ".", "start", ":", "return", "0", "return", "index", "-", "self", ".", "__mask", "[", "axis", "]", ".", "start" ]
30.714286
0.004515
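A worked example may make the index arithmetic clearer. The sketch below reimplements the method's logic without the object state; the mask slice and halo value are chosen purely for illustration (the real values live in the private __mask and __halos attributes):

def global_to_local_start(index, mask, stop_halo):
    # Same three-way logic as int_global_to_local_start above.
    if index >= mask.stop - stop_halo:
        return None               # at or beyond the writable stop
    if index < mask.start:
        return 0                  # before the local range: clamp to 0
    return index - mask.start     # inside the local range

# With mask = slice(10, 20) and a stop halo of 2:
# global 5 -> 0, global 12 -> 2, global 19 -> None
print(global_to_local_start(12, slice(10, 20), 2))  # 2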
def post_migrate(cls, sender=None, **kwargs): """ Iterate over fake_proxy_models and add contenttypes and permissions for missing proxy models, if this has not been done by Django yet """ ContentType = apps.get_model('contenttypes', 'ContentType') for model_name, proxy_model in sender.get_proxy_models().items(): ctype, created = ContentType.objects.get_or_create(app_label=sender.label, model=model_name) if created: sender.grant_permissions(proxy_model)
[ "def", "post_migrate", "(", "cls", ",", "sender", "=", "None", ",", "*", "*", "kwargs", ")", ":", "ContentType", "=", "apps", ".", "get_model", "(", "'contenttypes'", ",", "'ContentType'", ")", "for", "model_name", ",", "proxy_model", "in", "sender", ".", "get_proxy_models", "(", ")", ".", "items", "(", ")", ":", "ctype", ",", "created", "=", "ContentType", ".", "objects", ".", "get_or_create", "(", "app_label", "=", "sender", ".", "label", ",", "model", "=", "model_name", ")", "if", "created", ":", "sender", ".", "grant_permissions", "(", "proxy_model", ")" ]
48.636364
0.007339
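One plausible way to wire this handler up, assuming sender is an AppConfig subclass exposing get_proxy_models() and grant_permissions() as the body implies; the app and class names below are hypothetical:

from django.apps import AppConfig
from django.db.models.signals import post_migrate

class MyAppConfig(AppConfig):
    name = 'myapp'

    def ready(self):
        # Run the handler after each `migrate` for this app config, so
        # content types and permissions for proxy models get created.
        # ProxyModelRegistry is a stand-in for the class defining
        # post_migrate above.
        post_migrate.connect(ProxyModelRegistry.post_migrate, sender=self)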
def encode_to_json(history_list): """ Encodes this MarketHistoryList instance to a JSON string. :param MarketHistoryList history_list: The history instance to serialize. :rtype: str """ rowsets = [] for items_in_region_list in history_list._history.values(): region_id = items_in_region_list.region_id type_id = items_in_region_list.type_id generated_at = gen_iso_datetime_str(items_in_region_list.generated_at) rows = [] for entry in items_in_region_list.entries: historical_date = gen_iso_datetime_str(entry.historical_date) # The order in which these values are added is crucial. It must # match STANDARD_ENCODED_COLUMNS. rows.append([ historical_date, entry.num_orders, entry.total_quantity, entry.low_price, entry.high_price, entry.average_price, ]) rowsets.append(dict( generatedAt = generated_at, regionID = region_id, typeID = type_id, rows = rows, )) json_dict = { 'resultType': 'history', 'version': '0.1', 'uploadKeys': history_list.upload_keys, 'generator': history_list.history_generator, 'currentTime': gen_iso_datetime_str(now_dtime_in_utc()), # This must match the order of the values in the row assembling portion # above this. 'columns': STANDARD_ENCODED_COLUMNS, 'rowsets': rowsets, } return json.dumps(json_dict)
[ "def", "encode_to_json", "(", "history_list", ")", ":", "rowsets", "=", "[", "]", "for", "items_in_region_list", "in", "history_list", ".", "_history", ".", "values", "(", ")", ":", "region_id", "=", "items_in_region_list", ".", "region_id", "type_id", "=", "items_in_region_list", ".", "type_id", "generated_at", "=", "gen_iso_datetime_str", "(", "items_in_region_list", ".", "generated_at", ")", "rows", "=", "[", "]", "for", "entry", "in", "items_in_region_list", ".", "entries", ":", "historical_date", "=", "gen_iso_datetime_str", "(", "entry", ".", "historical_date", ")", "# The order in which these values are added is crucial. It must", "# match STANDARD_ENCODED_COLUMNS.", "rows", ".", "append", "(", "[", "historical_date", ",", "entry", ".", "num_orders", ",", "entry", ".", "total_quantity", ",", "entry", ".", "low_price", ",", "entry", ".", "high_price", ",", "entry", ".", "average_price", ",", "]", ")", "rowsets", ".", "append", "(", "dict", "(", "generatedAt", "=", "generated_at", ",", "regionID", "=", "region_id", ",", "typeID", "=", "type_id", ",", "rows", "=", "rows", ",", ")", ")", "json_dict", "=", "{", "'resultType'", ":", "'history'", ",", "'version'", ":", "'0.1'", ",", "'uploadKeys'", ":", "history_list", ".", "upload_keys", ",", "'generator'", ":", "history_list", ".", "history_generator", ",", "'currentTime'", ":", "gen_iso_datetime_str", "(", "now_dtime_in_utc", "(", ")", ")", ",", "# This must match the order of the values in the row assembling portion", "# above this.", "'columns'", ":", "STANDARD_ENCODED_COLUMNS", ",", "'rowsets'", ":", "rowsets", ",", "}", "return", "json", ".", "dumps", "(", "json_dict", ")" ]
32.5625
0.00559
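A quick round-trip check of the encoder, assuming a populated MarketHistoryList instance named history_list is at hand:

import json

payload = json.loads(encode_to_json(history_list))
# These envelope fields are written unconditionally by the encoder.
assert payload['resultType'] == 'history'
assert payload['columns'] == STANDARD_ENCODED_COLUMNS
for rowset in payload['rowsets']:
    # One rowset per (region, type) pair held in the history list.
    print(rowset['regionID'], rowset['typeID'], len(rowset['rows']))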
def transit_decrypt_data(self, name, ciphertext, context=None, nonce=None,
                         batch_input=None, mount_point='transit'):
    """POST /<mount_point>/decrypt/<name>

    :param name: the name of the transit key to decrypt with
    :type name: str
    :param ciphertext: the ciphertext to decrypt, as produced by the
        corresponding encrypt endpoint
    :type ciphertext: str
    :param context: base64 encoded context for key derivation
    :type context: str
    :param nonce: base64 encoded nonce value
    :type nonce: str
    :param batch_input: list of items to be decrypted in a single batch
    :type batch_input: list
    :param mount_point: the "path" the transit backend was mounted on
    :type mount_point: str
    :return: the JSON response of the request
    :rtype: dict
    """
    url = '/v1/{0}/decrypt/{1}'.format(mount_point, name)
    params = {
        'ciphertext': ciphertext
    }
    if context is not None:
        params['context'] = context
    if nonce is not None:
        params['nonce'] = nonce
    if batch_input is not None:
        params['batch_input'] = batch_input

    return self._adapter.post(url, json=params).json()
[ "def", "transit_decrypt_data", "(", "self", ",", "name", ",", "ciphertext", ",", "context", "=", "None", ",", "nonce", "=", "None", ",", "batch_input", "=", "None", ",", "mount_point", "=", "'transit'", ")", ":", "url", "=", "'/v1/{0}/decrypt/{1}'", ".", "format", "(", "mount_point", ",", "name", ")", "params", "=", "{", "'ciphertext'", ":", "ciphertext", "}", "if", "context", "is", "not", "None", ":", "params", "[", "'context'", "]", "=", "context", "if", "nonce", "is", "not", "None", ":", "params", "[", "'nonce'", "]", "=", "nonce", "if", "batch_input", "is", "not", "None", ":", "params", "[", "'batch_input'", "]", "=", "batch_input", "return", "self", ".", "_adapter", ".", "post", "(", "url", ",", "json", "=", "params", ")", ".", "json", "(", ")" ]
29.433333
0.003289
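A hedged usage sketch: client is assumed to be an instance of the class defining this method, and the ciphertext to come from a matching transit encrypt call. Per Vault's transit API, a single-item decrypt returns the plaintext base64 encoded under data:

result = client.transit_decrypt_data(
    name='my-key',
    ciphertext='vault:v1:AbC123...',  # placeholder ciphertext
    mount_point='transit',
)
plaintext_b64 = result['data']['plaintext']  # base64; decode before use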
def exists(name, path=None):
    '''
    Returns whether the named container exists.

    path
        path to the container parent directory (default: /var/lib/lxc)

        .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt '*' lxc.exists name

    '''
    _exists = name in ls_(path=path)
    # the container may have just been created, but we cached the
    # lxc-ls results earlier
    if not _exists:
        _exists = name in ls_(cache=False, path=path)
    return _exists
[ "def", "exists", "(", "name", ",", "path", "=", "None", ")", ":", "_exists", "=", "name", "in", "ls_", "(", "path", "=", "path", ")", "# container may be just created but we did cached earlier the", "# lxc-ls results", "if", "not", "_exists", ":", "_exists", "=", "name", "in", "ls_", "(", "cache", "=", "False", ",", "path", "=", "path", ")", "return", "_exists" ]
21.043478
0.001976
def Replace(self, resource, path, type, id, initial_headers, options=None):
    """Replaces an Azure Cosmos resource and returns it.

    :param dict resource:
    :param str path:
    :param str type:
    :param str id:
    :param dict initial_headers:
    :param dict options:
        The request options for the request.

    :return:
        The new Azure Cosmos resource.
    :rtype:
        dict

    """
    if options is None:
        options = {}

    initial_headers = initial_headers or self.default_headers
    headers = base.GetHeaders(self, initial_headers, 'put', path, id, type, options)
    # Replace will use WriteEndpoint since it uses PUT operation
    request = request_object._RequestObject(type, documents._OperationType.Replace)
    result, self.last_response_headers = self.__Put(path, request, resource, headers)

    # update session for request mutates data on server side
    self._UpdateSessionIfRequired(headers, result, self.last_response_headers)
    return result
[ "def", "Replace", "(", "self", ",", "resource", ",", "path", ",", "type", ",", "id", ",", "initial_headers", ",", "options", "=", "None", ")", ":", "if", "options", "is", "None", ":", "options", "=", "{", "}", "initial_headers", "=", "initial_headers", "or", "self", ".", "default_headers", "headers", "=", "base", ".", "GetHeaders", "(", "self", ",", "initial_headers", ",", "'put'", ",", "path", ",", "id", ",", "type", ",", "options", ")", "# Replace will use WriteEndpoint since it uses PUT operation", "request", "=", "request_object", ".", "_RequestObject", "(", "type", ",", "documents", ".", "_OperationType", ".", "Replace", ")", "result", ",", "self", ".", "last_response_headers", "=", "self", ".", "__Put", "(", "path", ",", "request", ",", "resource", ",", "headers", ")", "# update session for request mutates data on server side", "self", ".", "_UpdateSessionIfRequired", "(", "headers", ",", "result", ",", "self", ".", "last_response_headers", ")", "return", "result" ]
37.657895
0.003406
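For illustration only, a hypothetical direct call; in practice higher-level wrappers assemble these arguments from a resource link. The path and ids below are made up:

path = '/dbs/mydb/colls/mycoll/docs/mydoc'  # hypothetical resource path
updated = client.Replace(
    resource=new_document,  # the full replacement body, not a patch
    path=path,
    type='docs',            # resource type used for headers and routing
    id='mydoc',
    initial_headers=None,   # falls back to client.default_headers
    options={},
)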
def get_available_name(self, name):
    """Returns a filename that's free on the target storage system, and
    available for new content to be written to.

    Found at http://djangosnippets.org/snippets/976/

    This file storage solves the overwrite-on-upload problem. Another
    proposed solution was to override the save method on the model
    like so (from https://code.djangoproject.com/ticket/11663):

    def save(self, *args, **kwargs):
        try:
            this = MyModelName.objects.get(id=self.id)
            if this.MyImageFieldName != self.MyImageFieldName:
                this.MyImageFieldName.delete()
        except:
            pass
        super(MyModelName, self).save(*args, **kwargs)
    """
    # If the filename already exists, remove it as if it was a true file system
    if self.exists(name):
        os.remove(os.path.join(settings.MEDIA_ROOT, name))
    return name
[ "def", "get_available_name", "(", "self", ",", "name", ")", ":", "# If the filename already exists, remove it as if it was a true file system", "if", "self", ".", "exists", "(", "name", ")", ":", "os", ".", "remove", "(", "os", ".", "path", ".", "join", "(", "settings", ".", "MEDIA_ROOT", ",", "name", ")", ")", "return", "name" ]
42.681818
0.003125
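Assuming the class defining this method is a Django storage subclass (called OverwriteStorage here purely for illustration), it would typically be attached to a file field like this, so re-uploads to the same name replace the old file instead of getting a suffixed copy:

from django.db import models

class Document(models.Model):
    # OverwriteStorage is the hypothetical class defining
    # get_available_name above.
    upload = models.FileField(storage=OverwriteStorage(),
                              upload_to='documents/')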
def _deserialize_primitive(data, klass): """ Deserializes to primitive type. :param data: data to deserialize. :param klass: class literal. :return: int, long, float, str, bool. :rtype: int | long | float | str | bool """ try: value = klass(data) except UnicodeEncodeError: value = unicode(data) except TypeError: value = data return value
[ "def", "_deserialize_primitive", "(", "data", ",", "klass", ")", ":", "try", ":", "value", "=", "klass", "(", "data", ")", "except", "UnicodeEncodeError", ":", "value", "=", "unicode", "(", "data", ")", "except", "TypeError", ":", "value", "=", "data", "return", "value" ]
23.117647
0.002445
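The fallback chain is easiest to see with concrete inputs; note the bare unicode call marks this as Python 2 code:

_deserialize_primitive('3', int)    # -> 3        (klass(data) succeeds)
_deserialize_primitive('yes', str)  # -> 'yes'
_deserialize_primitive(None, int)   # TypeError caught -> None returned as-is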
def modularity_louvain_dir(W, gamma=1, hierarchy=False, seed=None):
    '''
    The optimal community structure is a subdivision of the network into
    nonoverlapping groups of nodes in a way that maximizes the number of
    within-group edges, and minimizes the number of between-group edges.
    The modularity is a statistic that quantifies the degree to which the
    network may be subdivided into such clearly delineated groups.

    The Louvain algorithm is a fast and accurate community detection
    algorithm (as of writing). The algorithm may also be used to detect
    hierarchical community structure.

    Parameters
    ----------
    W : NxN np.ndarray
        directed weighted/binary connection matrix
    gamma : float
        resolution parameter. default value=1. Values 0 <= gamma < 1 detect
        larger modules while gamma > 1 detects smaller modules.
    hierarchy : bool
        Enables hierarchical output. Default value=False
    seed : hashable, optional
        If None (default), use the np.random's global random state to
        generate random numbers. Otherwise, use a new np.random.RandomState
        instance seeded with the given value.

    Returns
    -------
    ci : Nx1 np.ndarray
        refined community affiliation vector. If hierarchical output enabled,
        it is an NxH np.ndarray instead with multiple iterations
    Q : float
        optimized modularity metric. If hierarchical output enabled, becomes
        an Hx1 array of floats instead.

    Notes
    -----
    Ci and Q may vary from run to run, due to heuristics in the
    algorithm. Consequently, it may be worthwhile to compare multiple runs.
    '''
    rng = get_rng(seed)

    n = len(W)  # number of nodes
    s = np.sum(W)  # total weight of edges
    h = 0  # hierarchy index
    ci = []
    ci.append(np.arange(n) + 1)  # hierarchical module assignments
    q = []
    q.append(-1)  # hierarchical modularity index
    n0 = n

    while True:
        if h > 300:
            raise BCTParamError('Modularity Infinite Loop Style E. Please '
                                'contact the developer with this error.')
        k_o = np.sum(W, axis=1)  # node in/out degrees
        k_i = np.sum(W, axis=0)
        km_o = k_o.copy()  # module in/out degrees
        km_i = k_i.copy()
        knm_o = W.copy()  # node-to-module in/out degrees
        knm_i = W.copy()

        m = np.arange(n) + 1  # initial module assignments
        flag = True  # flag for within hierarchy search

        it = 0
        while flag:
            it += 1
            if it > 1000:
                raise BCTParamError('Modularity Infinite Loop Style F. 
Please ' 'contact the developer with this error.') flag = False # loop over nodes in random order for u in rng.permutation(n): ma = m[u] - 1 # algorithm condition dq_o = ((knm_o[u, :] - knm_o[u, ma] + W[u, u]) - gamma * k_o[u] * (km_i - km_i[ma] + k_i[u]) / s) dq_i = ((knm_i[u, :] - knm_i[u, ma] + W[u, u]) - gamma * k_i[u] * (km_o - km_o[ma] + k_o[u]) / s) dq = (dq_o + dq_i) / 2 dq[ma] = 0 max_dq = np.max(dq) # find maximal modularity increase if max_dq > 1e-10: # if maximal increase positive mb = np.argmax(dq) # take only one value knm_o[:, mb] += W[u, :].T # change node-to-module degrees knm_o[:, ma] -= W[u, :].T knm_i[:, mb] += W[:, u] knm_i[:, ma] -= W[:, u] km_o[mb] += k_o[u] # change module out-degrees km_o[ma] -= k_o[u] km_i[mb] += k_i[u] km_i[ma] -= k_i[u] m[u] = mb + 1 # reassign module flag = True _, m = np.unique(m, return_inverse=True) m += 1 h += 1 ci.append(np.zeros((n0,))) # for i,mi in enumerate(m): #loop through module assignments for i in range(n): # ci[h][np.where(ci[h-1]==i)]=mi #assign new modules ci[h][np.where(ci[h - 1] == i + 1)] = m[i] n = np.max(m) # new number of modules W1 = np.zeros((n, n)) # new weighted matrix for i in range(n): for j in range(n): # pool weights of nodes in same module W1[i, j] = np.sum(W[np.ix_(m == i + 1, m == j + 1)]) q.append(0) # compute modularity q[h] = np.trace(W1) / s - gamma * np.sum(np.dot(W1 / s, W1 / s)) if q[h] - q[h - 1] < 1e-10: # if modularity does not increase break ci = np.array(ci, dtype=int) if hierarchy: ci = ci[1:-1] q = q[1:-1] return ci, q else: return ci[h - 1], q[h - 1]
[ "def", "modularity_louvain_dir", "(", "W", ",", "gamma", "=", "1", ",", "hierarchy", "=", "False", ",", "seed", "=", "None", ")", ":", "rng", "=", "get_rng", "(", "seed", ")", "n", "=", "len", "(", "W", ")", "# number of nodes", "s", "=", "np", ".", "sum", "(", "W", ")", "# total weight of edges", "h", "=", "0", "# hierarchy index", "ci", "=", "[", "]", "ci", ".", "append", "(", "np", ".", "arange", "(", "n", ")", "+", "1", ")", "# hierarchical module assignments", "q", "=", "[", "]", "q", ".", "append", "(", "-", "1", ")", "# hierarchical modularity index", "n0", "=", "n", "while", "True", ":", "if", "h", ">", "300", ":", "raise", "BCTParamError", "(", "'Modularity Infinite Loop Style E. Please '", "'contact the developer with this error.'", ")", "k_o", "=", "np", ".", "sum", "(", "W", ",", "axis", "=", "1", ")", "# node in/out degrees", "k_i", "=", "np", ".", "sum", "(", "W", ",", "axis", "=", "0", ")", "km_o", "=", "k_o", ".", "copy", "(", ")", "# module in/out degrees", "km_i", "=", "k_i", ".", "copy", "(", ")", "knm_o", "=", "W", ".", "copy", "(", ")", "# node-to-module in/out degrees", "knm_i", "=", "W", ".", "copy", "(", ")", "m", "=", "np", ".", "arange", "(", "n", ")", "+", "1", "# initial module assignments", "flag", "=", "True", "# flag for within hierarchy search", "it", "=", "0", "while", "flag", ":", "it", "+=", "1", "if", "it", ">", "1000", ":", "raise", "BCTParamError", "(", "'Modularity Infinite Loop Style F. Please '", "'contact the developer with this error.'", ")", "flag", "=", "False", "# loop over nodes in random order", "for", "u", "in", "rng", ".", "permutation", "(", "n", ")", ":", "ma", "=", "m", "[", "u", "]", "-", "1", "# algorithm condition", "dq_o", "=", "(", "(", "knm_o", "[", "u", ",", ":", "]", "-", "knm_o", "[", "u", ",", "ma", "]", "+", "W", "[", "u", ",", "u", "]", ")", "-", "gamma", "*", "k_o", "[", "u", "]", "*", "(", "km_i", "-", "km_i", "[", "ma", "]", "+", "k_i", "[", "u", "]", ")", "/", "s", ")", "dq_i", "=", "(", "(", "knm_i", "[", "u", ",", ":", "]", "-", "knm_i", "[", "u", ",", "ma", "]", "+", "W", "[", "u", ",", "u", "]", ")", "-", "gamma", "*", "k_i", "[", "u", "]", "*", "(", "km_o", "-", "km_o", "[", "ma", "]", "+", "k_o", "[", "u", "]", ")", "/", "s", ")", "dq", "=", "(", "dq_o", "+", "dq_i", ")", "/", "2", "dq", "[", "ma", "]", "=", "0", "max_dq", "=", "np", ".", "max", "(", "dq", ")", "# find maximal modularity increase", "if", "max_dq", ">", "1e-10", ":", "# if maximal increase positive", "mb", "=", "np", ".", "argmax", "(", "dq", ")", "# take only one value", "knm_o", "[", ":", ",", "mb", "]", "+=", "W", "[", "u", ",", ":", "]", ".", "T", "# change node-to-module degrees", "knm_o", "[", ":", ",", "ma", "]", "-=", "W", "[", "u", ",", ":", "]", ".", "T", "knm_i", "[", ":", ",", "mb", "]", "+=", "W", "[", ":", ",", "u", "]", "knm_i", "[", ":", ",", "ma", "]", "-=", "W", "[", ":", ",", "u", "]", "km_o", "[", "mb", "]", "+=", "k_o", "[", "u", "]", "# change module out-degrees", "km_o", "[", "ma", "]", "-=", "k_o", "[", "u", "]", "km_i", "[", "mb", "]", "+=", "k_i", "[", "u", "]", "km_i", "[", "ma", "]", "-=", "k_i", "[", "u", "]", "m", "[", "u", "]", "=", "mb", "+", "1", "# reassign module", "flag", "=", "True", "_", ",", "m", "=", "np", ".", "unique", "(", "m", ",", "return_inverse", "=", "True", ")", "m", "+=", "1", "h", "+=", "1", "ci", ".", "append", "(", "np", ".", "zeros", "(", "(", "n0", ",", ")", ")", ")", "# for i,mi in enumerate(m):\t\t#loop through module assignments", "for", "i", "in", "range", "(", "n", ")", ":", "# 
ci[h][np.where(ci[h-1]==i)]=mi\t#assign new modules", "ci", "[", "h", "]", "[", "np", ".", "where", "(", "ci", "[", "h", "-", "1", "]", "==", "i", "+", "1", ")", "]", "=", "m", "[", "i", "]", "n", "=", "np", ".", "max", "(", "m", ")", "# new number of modules", "W1", "=", "np", ".", "zeros", "(", "(", "n", ",", "n", ")", ")", "# new weighted matrix", "for", "i", "in", "range", "(", "n", ")", ":", "for", "j", "in", "range", "(", "n", ")", ":", "# pool weights of nodes in same module", "W1", "[", "i", ",", "j", "]", "=", "np", ".", "sum", "(", "W", "[", "np", ".", "ix_", "(", "m", "==", "i", "+", "1", ",", "m", "==", "j", "+", "1", ")", "]", ")", "q", ".", "append", "(", "0", ")", "# compute modularity", "q", "[", "h", "]", "=", "np", ".", "trace", "(", "W1", ")", "/", "s", "-", "gamma", "*", "np", ".", "sum", "(", "np", ".", "dot", "(", "W1", "/", "s", ",", "W1", "/", "s", ")", ")", "if", "q", "[", "h", "]", "-", "q", "[", "h", "-", "1", "]", "<", "1e-10", ":", "# if modularity does not increase", "break", "ci", "=", "np", ".", "array", "(", "ci", ",", "dtype", "=", "int", ")", "if", "hierarchy", ":", "ci", "=", "ci", "[", "1", ":", "-", "1", "]", "q", "=", "q", "[", "1", ":", "-", "1", "]", "return", "ci", ",", "q", "else", ":", "return", "ci", "[", "h", "-", "1", "]", ",", "q", "[", "h", "-", "1", "]" ]
37.507813
0.000812
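A hedged usage sketch, generating a sparse random directed weight matrix just for demonstration; as the docstring's Notes warn, ci and Q can differ across runs and seeds:

import numpy as np

rng = np.random.default_rng(0)
# 30-node directed graph: keep ~20% of possible edges, random weights.
W = rng.random((30, 30)) * (rng.random((30, 30)) < 0.2)

ci, q = modularity_louvain_dir(W, gamma=1, seed=42)
print('number of modules:', int(np.max(ci)), ' Q =', q)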