text: string (lengths 75 to 104k)
code_tokens: sequence
avg_line_len: float64 (7.91 to 980)
score: float64 (0 to 0.18)
def get_metadata(audio_filepaths):
    """ Return a tuple of album, artist, has_embedded_album_art from a list of audio files. """
    artist, album, has_embedded_album_art = None, None, None
    for audio_filepath in audio_filepaths:
        try:
            mf = mutagen.File(audio_filepath)
        except Exception:
            continue
        if mf is None:
            continue
        # artist
        for key in ("albumartist", "artist",  # ogg
                    "TPE1", "TPE2",  # mp3
                    "aART", "\xa9ART"):  # mp4
            try:
                val = mf.get(key, None)
            except ValueError:
                val = None
            if val is not None:
                artist = val[-1]
                break
        # album
        for key in ("_album", "album",  # ogg
                    "TALB",  # mp3
                    "\xa9alb"):  # mp4
            try:
                val = mf.get(key, None)
            except ValueError:
                val = None
            if val is not None:
                album = val[-1]
                break
        if artist and album:
            # album art
            if isinstance(mf, mutagen.ogg.OggFileType):
                has_embedded_album_art = "metadata_block_picture" in mf
            elif isinstance(mf, mutagen.mp3.MP3):
                has_embedded_album_art = any(map(operator.methodcaller("startswith", "APIC:"),
                                                 mf.keys()))
            elif isinstance(mf, mutagen.mp4.MP4):
                has_embedded_album_art = "covr" in mf
            # stop at the first file that succeeds (for performance)
            break
    return artist, album, has_embedded_album_art
[ "def", "get_metadata", "(", "audio_filepaths", ")", ":", "artist", ",", "album", ",", "has_embedded_album_art", "=", "None", ",", "None", ",", "None", "for", "audio_filepath", "in", "audio_filepaths", ":", "try", ":", "mf", "=", "mutagen", ".", "File", "(", "audio_filepath", ")", "except", "Exception", ":", "continue", "if", "mf", "is", "None", ":", "continue", "# artist", "for", "key", "in", "(", "\"albumartist\"", ",", "\"artist\"", ",", "# ogg", "\"TPE1\"", ",", "\"TPE2\"", ",", "# mp3", "\"aART\"", ",", "\"\\xa9ART\"", ")", ":", "# mp4", "try", ":", "val", "=", "mf", ".", "get", "(", "key", ",", "None", ")", "except", "ValueError", ":", "val", "=", "None", "if", "val", "is", "not", "None", ":", "artist", "=", "val", "[", "-", "1", "]", "break", "# album", "for", "key", "in", "(", "\"_album\"", ",", "\"album\"", ",", "# ogg", "\"TALB\"", ",", "# mp3", "\"\\xa9alb\"", ")", ":", "# mp4", "try", ":", "val", "=", "mf", ".", "get", "(", "key", ",", "None", ")", "except", "ValueError", ":", "val", "=", "None", "if", "val", "is", "not", "None", ":", "album", "=", "val", "[", "-", "1", "]", "break", "if", "artist", "and", "album", ":", "# album art", "if", "isinstance", "(", "mf", ",", "mutagen", ".", "ogg", ".", "OggFileType", ")", ":", "has_embedded_album_art", "=", "\"metadata_block_picture\"", "in", "mf", "elif", "isinstance", "(", "mf", ",", "mutagen", ".", "mp3", ".", "MP3", ")", ":", "has_embedded_album_art", "=", "any", "(", "map", "(", "operator", ".", "methodcaller", "(", "\"startswith\"", ",", "\"APIC:\"", ")", ",", "mf", ".", "keys", "(", ")", ")", ")", "elif", "isinstance", "(", "mf", ",", "mutagen", ".", "mp4", ".", "MP4", ")", ":", "has_embedded_album_art", "=", "\"covr\"", "in", "mf", "# stop at the first file that succeeds (for performance)", "break", "return", "artist", ",", "album", ",", "has_embedded_album_art" ]
28.833333
0.015374
def order(self, piecewise=False):
    """
    Returns the total number of nodes in the :class:`.GraphCollection`\.
    """
    if piecewise:
        return {k: v.order() for k, v in self.items()}
    return self.master_graph.order()
[ "def", "order", "(", "self", ",", "piecewise", "=", "False", ")", ":", "if", "piecewise", ":", "return", "{", "k", ":", "v", ".", "order", "(", ")", "for", "k", ",", "v", "in", "self", ".", "items", "(", ")", "}", "return", "self", ".", "master_graph", ".", "order", "(", ")" ]
35.714286
0.011719
def generate_wave(message, wpm=WPM, framerate=FRAMERATE, skip_frame=0,
                  amplitude=AMPLITUDE, frequency=FREQUENCY, word_ref=WORD):
    """
    Generate binary Morse code of message at a given code speed wpm and framerate

    Parameters
    ----------
    word : string
    wpm : int or float - word per minute
    framerate : nb of samples / seconds
    word_spaced : bool - calculate with spaces between 2 words (default is False)
    skip_frame : int - nb of frame to skip

    Returns
    -------
    value : float
    """
    lst_bin = _encode_binary(message)
    if amplitude > 1.0:
        amplitude = 1.0
    if amplitude < 0.0:
        amplitude = 0.0
    seconds_per_dot = _seconds_per_dot(word_ref)  # =1.2
    for i in count(skip_frame):
        bit = morse_bin(i=i, lst_bin=lst_bin, wpm=wpm, framerate=framerate,
                        default_value=0.0, seconds_per_dot=seconds_per_dot)
        sine = sine_wave(i=i, frequency=frequency, framerate=framerate, amplitude=amplitude)
        yield sine * bit
[ "def", "generate_wave", "(", "message", ",", "wpm", "=", "WPM", ",", "framerate", "=", "FRAMERATE", ",", "skip_frame", "=", "0", ",", "amplitude", "=", "AMPLITUDE", ",", "frequency", "=", "FREQUENCY", ",", "word_ref", "=", "WORD", ")", ":", "lst_bin", "=", "_encode_binary", "(", "message", ")", "if", "amplitude", ">", "1.0", ":", "amplitude", "=", "1.0", "if", "amplitude", "<", "0.0", ":", "amplitude", "=", "0.0", "seconds_per_dot", "=", "_seconds_per_dot", "(", "word_ref", ")", "# =1.2", "for", "i", "in", "count", "(", "skip_frame", ")", ":", "bit", "=", "morse_bin", "(", "i", "=", "i", ",", "lst_bin", "=", "lst_bin", ",", "wpm", "=", "wpm", ",", "framerate", "=", "framerate", ",", "default_value", "=", "0.0", ",", "seconds_per_dot", "=", "seconds_per_dot", ")", "sine", "=", "sine_wave", "(", "i", "=", "i", ",", "frequency", "=", "frequency", ",", "framerate", "=", "framerate", ",", "amplitude", "=", "amplitude", ")", "yield", "sine", "*", "bit" ]
35.851852
0.006036
def rotate(self, theta, around):
    """Rotate Compound around an arbitrary vector.

    Parameters
    ----------
    theta : float
        The angle by which to rotate the Compound, in radians.
    around : np.ndarray, shape=(3,), dtype=float
        The vector about which to rotate the Compound.

    """
    new_positions = _rotate(self.xyz_with_ports, theta, around)
    self.xyz_with_ports = new_positions
[ "def", "rotate", "(", "self", ",", "theta", ",", "around", ")", ":", "new_positions", "=", "_rotate", "(", "self", ".", "xyz_with_ports", ",", "theta", ",", "around", ")", "self", ".", "xyz_with_ports", "=", "new_positions" ]
33.846154
0.004425
def get_document_value(document, key):
    '''
    Returns the display value of a field for a particular MongoDB document.
    '''
    value = getattr(document, key)
    if isinstance(value, ObjectId):
        return value
    if isinstance(document._fields.get(key), URLField):
        return mark_safe("""<a href="{0}">{1}</a>""".format(value, value))
    if isinstance(value, Document):
        app_label = value.__module__.replace(".models", "")
        document_name = value._class_name
        url = reverse(
            "document_detail",
            kwargs={'app_label': app_label,
                    'document_name': document_name,
                    'id': value.id})
        return mark_safe("""<a href="{0}">{1}</a>""".format(url, value))
    return value
[ "def", "get_document_value", "(", "document", ",", "key", ")", ":", "value", "=", "getattr", "(", "document", ",", "key", ")", "if", "isinstance", "(", "value", ",", "ObjectId", ")", ":", "return", "value", "if", "isinstance", "(", "document", ".", "_fields", ".", "get", "(", "key", ")", ",", "URLField", ")", ":", "return", "mark_safe", "(", "\"\"\"<a href=\"{0}\">{1}</a>\"\"\"", ".", "format", "(", "value", ",", "value", ")", ")", "if", "isinstance", "(", "value", ",", "Document", ")", ":", "app_label", "=", "value", ".", "__module__", ".", "replace", "(", "\".models\"", ",", "\"\"", ")", "document_name", "=", "value", ".", "_class_name", "url", "=", "reverse", "(", "\"document_detail\"", ",", "kwargs", "=", "{", "'app_label'", ":", "app_label", ",", "'document_name'", ":", "document_name", ",", "'id'", ":", "value", ".", "id", "}", ")", "return", "mark_safe", "(", "\"\"\"<a href=\"{0}\">{1}</a>\"\"\"", ".", "format", "(", "url", ",", "value", ")", ")", "return", "value" ]
34.809524
0.001332
def process(self, raster):
    """
    Applies the morphological operation to the mask object
    """
    dim = len(raster.shape)
    if dim == 3:
        for dim in range(raster.shape[2]):
            raster[:, :, dim] = self.morph_operation(raster[:, :, dim], self.struct_elem)
    elif dim == 4:
        for time, dim in it.product(range(raster.shape[0]), range(raster.shape[3])):
            raster[time, :, :, dim] = self.morph_operation(raster[time, :, :, dim],
                                                           self.struct_elem)
    else:
        raise ValueError('Invalid number of dimensions: {}'.format(dim))
    return raster
[ "def", "process", "(", "self", ",", "raster", ")", ":", "dim", "=", "len", "(", "raster", ".", "shape", ")", "if", "dim", "==", "3", ":", "for", "dim", "in", "range", "(", "raster", ".", "shape", "[", "2", "]", ")", ":", "raster", "[", ":", ",", ":", ",", "dim", "]", "=", "self", ".", "morph_operation", "(", "raster", "[", ":", ",", ":", ",", "dim", "]", ",", "self", ".", "struct_elem", ")", "elif", "dim", "==", "4", ":", "for", "time", ",", "dim", "in", "it", ".", "product", "(", "range", "(", "raster", ".", "shape", "[", "0", "]", ")", ",", "range", "(", "raster", ".", "shape", "[", "3", "]", ")", ")", ":", "raster", "[", "time", ",", ":", ",", ":", ",", "dim", "]", "=", "self", ".", "morph_operation", "(", "raster", "[", "time", ",", ":", ",", ":", ",", "dim", "]", ",", "self", ".", "struct_elem", ")", "else", ":", "raise", "ValueError", "(", "'Invalid number of dimensions: {}'", ".", "format", "(", "dim", ")", ")", "return", "raster" ]
45.071429
0.007764
def cooked(self, raw_args):
    """Return interpreted / typed list of args."""
    args = []
    for arg in raw_args:
        # TODO: use the xmlrpc-c type indicators instead / additionally
        if arg and arg[0] in "+-":
            try:
                arg = int(arg, 10)
            except (ValueError, TypeError), exc:
                self.LOG.warn("Not a valid number: %r (%s)" % (arg, exc))
        elif arg.startswith('[['):  # escaping, not a list
            arg = arg[1:]
        elif arg == '[]':
            arg = []
        elif arg.startswith('['):
            arg = arg[1:].split(',')
            if all(i.isdigit() for i in arg):
                arg = [int(i, 10) for i in arg]
        elif arg.startswith('@'):
            arg = xmlrpclib.Binary(read_blob(arg))
        args.append(arg)
    return args
[ "def", "cooked", "(", "self", ",", "raw_args", ")", ":", "args", "=", "[", "]", "for", "arg", "in", "raw_args", ":", "# TODO: use the xmlrpc-c type indicators instead / additionally", "if", "arg", "and", "arg", "[", "0", "]", "in", "\"+-\"", ":", "try", ":", "arg", "=", "int", "(", "arg", ",", "10", ")", "except", "(", "ValueError", ",", "TypeError", ")", ",", "exc", ":", "self", ".", "LOG", ".", "warn", "(", "\"Not a valid number: %r (%s)\"", "%", "(", "arg", ",", "exc", ")", ")", "elif", "arg", ".", "startswith", "(", "'[['", ")", ":", "# escaping, not a list", "arg", "=", "arg", "[", "1", ":", "]", "elif", "arg", "==", "'[]'", ":", "arg", "=", "[", "]", "elif", "arg", ".", "startswith", "(", "'['", ")", ":", "arg", "=", "arg", "[", "1", ":", "]", ".", "split", "(", "','", ")", "if", "all", "(", "i", ".", "isdigit", "(", ")", "for", "i", "in", "arg", ")", ":", "arg", "=", "[", "int", "(", "i", ",", "10", ")", "for", "i", "in", "arg", "]", "elif", "arg", ".", "startswith", "(", "'@'", ")", ":", "arg", "=", "xmlrpclib", ".", "Binary", "(", "read_blob", "(", "arg", ")", ")", "args", ".", "append", "(", "arg", ")", "return", "args" ]
38.478261
0.002205
def get_datacenter(self, datacenter_id, depth=1):
    """
    Retrieves a data center by its ID.

    :param datacenter_id: The unique ID of the data center.
    :type datacenter_id: ``str``

    :param depth: The depth of the response data.
    :type depth: ``int``

    """
    response = self._perform_request(
        '/datacenters/%s?depth=%s' % (datacenter_id, str(depth)))
    return response
[ "def", "get_datacenter", "(", "self", ",", "datacenter_id", ",", "depth", "=", "1", ")", ":", "response", "=", "self", ".", "_perform_request", "(", "'/datacenters/%s?depth=%s'", "%", "(", "datacenter_id", ",", "str", "(", "depth", ")", ")", ")", "return", "response" ]
29.866667
0.004329
def register_target(self, target: Target):
    """Register a `target` instance in this build context.

    A registered target is saved in the `targets` map and in the
    `targets_by_module` map, but is not added to the target graph until
    target extraction is completed (thread safety considerations).
    """
    if target.name in self.targets:
        first = self.targets[target.name]
        raise NameError(
            'Target with name "{0.name}" ({0.builder_name} from module '
            '"{1}") already exists - defined first as '
            '{2.builder_name} in module "{3}"'.format(
                target, split_build_module(target.name),
                first, split_build_module(first.name)))
    self.targets[target.name] = target
    self.targets_by_module[split_build_module(target.name)].add(
        target.name)
[ "def", "register_target", "(", "self", ",", "target", ":", "Target", ")", ":", "if", "target", ".", "name", "in", "self", ".", "targets", ":", "first", "=", "self", ".", "targets", "[", "target", ".", "name", "]", "raise", "NameError", "(", "'Target with name \"{0.name}\" ({0.builder_name} from module '", "'\"{1}\") already exists - defined first as '", "'{2.builder_name} in module \"{3}\"'", ".", "format", "(", "target", ",", "split_build_module", "(", "target", ".", "name", ")", ",", "first", ",", "split_build_module", "(", "first", ".", "name", ")", ")", ")", "self", ".", "targets", "[", "target", ".", "name", "]", "=", "target", "self", ".", "targets_by_module", "[", "split_build_module", "(", "target", ".", "name", ")", "]", ".", "add", "(", "target", ".", "name", ")" ]
49.222222
0.002215
def prt_qualifiers(self, prt=sys.stdout):
    """Print Qualifiers: 1,462 colocalizes_with; 1,454 contributes_to; 1,157 not"""
    # 13 not colocalizes_with (TBD: CHK - Seen in gene2go, but not gafs)
    # 4 not contributes_to (TBD: CHK - Seen in gene2go, but not gafs)
    self._prt_qualifiers(self.associations, prt)
[ "def", "prt_qualifiers", "(", "self", ",", "prt", "=", "sys", ".", "stdout", ")", ":", "# 13 not colocalizes_with (TBD: CHK - Seen in gene2go, but not gafs)", "# 4 not contributes_to (TBD: CHK - Seen in gene2go, but not gafs)", "self", ".", "_prt_qualifiers", "(", "self", ".", "associations", ",", "prt", ")" ]
67.2
0.008824
def authorize(self, authentication_request,  # type: oic.oic.message.AuthorizationRequest
              user_id,  # type: str
              extra_id_token_claims=None  # type: Optional[Union[Mapping[str, Union[str, List[str]]], Callable[[str, str], Mapping[str, Union[str, List[str]]]]]
              ):
    # type: (...) -> oic.oic.message.AuthorizationResponse
    """
    Creates an Authentication Response for the specified authentication request and
    local identifier of the authenticated user.
    """
    custom_sub = self.userinfo[user_id].get('sub')
    if custom_sub:
        self.authz_state.subject_identifiers[user_id] = {'public': custom_sub}
        sub = custom_sub
    else:
        sub = self._create_subject_identifier(user_id, authentication_request['client_id'],
                                              authentication_request['redirect_uri'])

    self._check_subject_identifier_matches_requested(authentication_request, sub)

    response = AuthorizationResponse()

    authz_code = None
    if 'code' in authentication_request['response_type']:
        authz_code = self.authz_state.create_authorization_code(authentication_request, sub)
        response['code'] = authz_code

    access_token_value = None
    if 'token' in authentication_request['response_type']:
        access_token = self.authz_state.create_access_token(authentication_request, sub)
        access_token_value = access_token.value
        self._add_access_token_to_response(response, access_token)

    if 'id_token' in authentication_request['response_type']:
        if extra_id_token_claims is None:
            extra_id_token_claims = {}
        elif callable(extra_id_token_claims):
            extra_id_token_claims = extra_id_token_claims(user_id, authentication_request['client_id'])

        requested_claims = self._get_requested_claims_in(authentication_request, 'id_token')
        if len(authentication_request['response_type']) == 1:
            # only id token is issued -> no way of doing userinfo request, so include all claims in ID Token,
            # even those requested by the scope parameter
            requested_claims.update(
                scope2claims(
                    authentication_request['scope'], extra_scope_dict=self.extra_scopes
                )
            )

        user_claims = self.userinfo.get_claims_for(user_id, requested_claims)
        response['id_token'] = self._create_signed_id_token(authentication_request['client_id'], sub,
                                                            user_claims,
                                                            authentication_request.get('nonce'),
                                                            authz_code, access_token_value,
                                                            extra_id_token_claims)
        logger.debug('issued id_token=%s from requested_claims=%s userinfo=%s extra_claims=%s',
                     response['id_token'], requested_claims, user_claims, extra_id_token_claims)

    if 'state' in authentication_request:
        response['state'] = authentication_request['state']
    return response
[ "def", "authorize", "(", "self", ",", "authentication_request", ",", "# type: oic.oic.message.AuthorizationRequest", "user_id", ",", "# type: str", "extra_id_token_claims", "=", "None", "# type: Optional[Union[Mapping[str, Union[str, List[str]]], Callable[[str, str], Mapping[str, Union[str, List[str]]]]]", ")", ":", "# type: (...) -> oic.oic.message.AuthorizationResponse", "custom_sub", "=", "self", ".", "userinfo", "[", "user_id", "]", ".", "get", "(", "'sub'", ")", "if", "custom_sub", ":", "self", ".", "authz_state", ".", "subject_identifiers", "[", "user_id", "]", "=", "{", "'public'", ":", "custom_sub", "}", "sub", "=", "custom_sub", "else", ":", "sub", "=", "self", ".", "_create_subject_identifier", "(", "user_id", ",", "authentication_request", "[", "'client_id'", "]", ",", "authentication_request", "[", "'redirect_uri'", "]", ")", "self", ".", "_check_subject_identifier_matches_requested", "(", "authentication_request", ",", "sub", ")", "response", "=", "AuthorizationResponse", "(", ")", "authz_code", "=", "None", "if", "'code'", "in", "authentication_request", "[", "'response_type'", "]", ":", "authz_code", "=", "self", ".", "authz_state", ".", "create_authorization_code", "(", "authentication_request", ",", "sub", ")", "response", "[", "'code'", "]", "=", "authz_code", "access_token_value", "=", "None", "if", "'token'", "in", "authentication_request", "[", "'response_type'", "]", ":", "access_token", "=", "self", ".", "authz_state", ".", "create_access_token", "(", "authentication_request", ",", "sub", ")", "access_token_value", "=", "access_token", ".", "value", "self", ".", "_add_access_token_to_response", "(", "response", ",", "access_token", ")", "if", "'id_token'", "in", "authentication_request", "[", "'response_type'", "]", ":", "if", "extra_id_token_claims", "is", "None", ":", "extra_id_token_claims", "=", "{", "}", "elif", "callable", "(", "extra_id_token_claims", ")", ":", "extra_id_token_claims", "=", "extra_id_token_claims", "(", "user_id", ",", "authentication_request", "[", "'client_id'", "]", ")", "requested_claims", "=", "self", ".", "_get_requested_claims_in", "(", "authentication_request", ",", "'id_token'", ")", "if", "len", "(", "authentication_request", "[", "'response_type'", "]", ")", "==", "1", ":", "# only id token is issued -> no way of doing userinfo request, so include all claims in ID Token,", "# even those requested by the scope parameter", "requested_claims", ".", "update", "(", "scope2claims", "(", "authentication_request", "[", "'scope'", "]", ",", "extra_scope_dict", "=", "self", ".", "extra_scopes", ")", ")", "user_claims", "=", "self", ".", "userinfo", ".", "get_claims_for", "(", "user_id", ",", "requested_claims", ")", "response", "[", "'id_token'", "]", "=", "self", ".", "_create_signed_id_token", "(", "authentication_request", "[", "'client_id'", "]", ",", "sub", ",", "user_claims", ",", "authentication_request", ".", "get", "(", "'nonce'", ")", ",", "authz_code", ",", "access_token_value", ",", "extra_id_token_claims", ")", "logger", ".", "debug", "(", "'issued id_token=%s from requested_claims=%s userinfo=%s extra_claims=%s'", ",", "response", "[", "'id_token'", "]", ",", "requested_claims", ",", "user_claims", ",", "extra_id_token_claims", ")", "if", "'state'", "in", "authentication_request", ":", "response", "[", "'state'", "]", "=", "authentication_request", "[", "'state'", "]", "return", "response" ]
55.016949
0.007869
def _register_servicer(self, servicer):
    """register serviser

    :param servicer: servicer
    """
    name = servicer.__name__
    if name in self._servicers:
        raise exceptions.ConfigException(
            'servicer duplicated: {}'.format(name))
    add_func = self._get_servicer_add_func(servicer)
    self._servicers[name] = (add_func, servicer)
[ "def", "_register_servicer", "(", "self", ",", "servicer", ")", ":", "name", "=", "servicer", ".", "__name__", "if", "name", "in", "self", ".", "_servicers", ":", "raise", "exceptions", ".", "ConfigException", "(", "'servicer duplicated: {}'", ".", "format", "(", "name", ")", ")", "add_func", "=", "self", ".", "_get_servicer_add_func", "(", "servicer", ")", "self", ".", "_servicers", "[", "name", "]", "=", "(", "add_func", ",", "servicer", ")" ]
35.090909
0.005051
def get_subject(self, data):
    """ Try to get a unique ID from the object. By default, this will be
    the 'id' field of any given object, or a field specified by the
    'rdfSubject' property. If no other option is available, a UUID will be
    generated. """
    if not isinstance(data, Mapping):
        return None
    if data.get(self.subject):
        return data.get(self.subject)
    return uuid.uuid4().urn
[ "def", "get_subject", "(", "self", ",", "data", ")", ":", "if", "not", "isinstance", "(", "data", ",", "Mapping", ")", ":", "return", "None", "if", "data", ".", "get", "(", "self", ".", "subject", ")", ":", "return", "data", ".", "get", "(", "self", ".", "subject", ")", "return", "uuid", ".", "uuid4", "(", ")", ".", "urn" ]
44.5
0.004405
def should_update(self, requirement, requirement_file):
    """
    Determines if a requirement can be updated
    :param requirement: Requirement
    :param requirement_file: RequirementFile
    :return: bool
    """
    path = requirement_file.path
    if self.config.can_update_all(path) or \
            (self.config.can_update_insecure(path) and requirement.is_insecure):
        # handle unpinned requirements only if pin is set
        if not requirement.is_pinned:
            return self.config.can_pin(path)
        return True
    return False
[ "def", "should_update", "(", "self", ",", "requirement", ",", "requirement_file", ")", ":", "path", "=", "requirement_file", ".", "path", "if", "self", ".", "config", ".", "can_update_all", "(", "path", ")", "or", "(", "self", ".", "config", ".", "can_update_insecure", "(", "path", ")", "and", "requirement", ".", "is_insecure", ")", ":", "# handle unpinned requirements only if pin is set", "if", "not", "requirement", ".", "is_pinned", ":", "return", "self", ".", "config", ".", "can_pin", "(", "path", ")", "return", "True", "return", "False" ]
39.733333
0.004918
def delay_response(delay):
    """Returns a delayed response (max of 10 seconds).
    ---
    tags:
      - Dynamic data
    parameters:
      - in: path
        name: delay
        type: int
    produces:
      - application/json
    responses:
      200:
        description: A delayed response.
    """
    delay = min(float(delay), 10)

    time.sleep(delay)

    return jsonify(
        get_dict("url", "args", "form", "data", "origin", "headers", "files")
    )
[ "def", "delay_response", "(", "delay", ")", ":", "delay", "=", "min", "(", "float", "(", "delay", ")", ",", "10", ")", "time", ".", "sleep", "(", "delay", ")", "return", "jsonify", "(", "get_dict", "(", "\"url\"", ",", "\"args\"", ",", "\"form\"", ",", "\"data\"", ",", "\"origin\"", ",", "\"headers\"", ",", "\"files\"", ")", ")" ]
20.272727
0.002141
async def start(self):
    """Starts receiving messages on the underlying socket and passes them
    to the message router.
    """
    self._is_running = True
    while self._is_running:
        try:
            zmq_msg = await self._socket.recv_multipart()
            message = Message()
            message.ParseFromString(zmq_msg[-1])
            await self._msg_router.route_msg(message)
        except DecodeError as e:
            LOGGER.warning('Unable to decode: %s', e)
        except zmq.ZMQError as e:
            LOGGER.warning('Unable to receive: %s', e)
            return
        except asyncio.CancelledError:
            self._is_running = False
[ "async", "def", "start", "(", "self", ")", ":", "self", ".", "_is_running", "=", "True", "while", "self", ".", "_is_running", ":", "try", ":", "zmq_msg", "=", "await", "self", ".", "_socket", ".", "recv_multipart", "(", ")", "message", "=", "Message", "(", ")", "message", ".", "ParseFromString", "(", "zmq_msg", "[", "-", "1", "]", ")", "await", "self", ".", "_msg_router", ".", "route_msg", "(", "message", ")", "except", "DecodeError", "as", "e", ":", "LOGGER", ".", "warning", "(", "'Unable to decode: %s'", ",", "e", ")", "except", "zmq", ".", "ZMQError", "as", "e", ":", "LOGGER", ".", "warning", "(", "'Unable to receive: %s'", ",", "e", ")", "return", "except", "asyncio", ".", "CancelledError", ":", "self", ".", "_is_running", "=", "False" ]
34.047619
0.002721
def stats_totals(self, kind='R', summary=False):
    """Returns a DataFrame of total box score statistics by season."""
    return self._get_stats_table('totals', kind=kind, summary=summary)
[ "def", "stats_totals", "(", "self", ",", "kind", "=", "'R'", ",", "summary", "=", "False", ")", ":", "return", "self", ".", "_get_stats_table", "(", "'totals'", ",", "kind", "=", "kind", ",", "summary", "=", "summary", ")" ]
65.333333
0.010101
def delete_row_range(self, format_str, start_game, end_game):
    """Delete rows related to the given game range.

    Args:
      format_str: a string to `.format()` by the game numbers
        in order to create the row prefixes.
      start_game: the starting game number of the deletion.
      end_game: the ending game number of the deletion.
    """
    row_keys = make_single_array(
        self.tf_table.keys_by_range_dataset(
            format_str.format(start_game),
            format_str.format(end_game)))
    row_keys = list(row_keys)

    if not row_keys:
        utils.dbg('No rows left for games %d..%d' % (
            start_game, end_game))
        return
    utils.dbg('Deleting %d rows: %s..%s' % (
        len(row_keys), row_keys[0], row_keys[-1]))

    # Reverse the keys so that the queue is left in a more
    # sensible end state if you change your mind (say, due to a
    # mistake in the timestamp) and abort the process: there will
    # be a bit trimmed from the end, rather than a bit
    # trimmed out of the middle.
    row_keys.reverse()
    total_keys = len(row_keys)
    utils.dbg('Deleting total of %d keys' % total_keys)

    concurrency = min(MAX_BT_CONCURRENCY,
                      multiprocessing.cpu_count() * 2)
    with multiprocessing.Pool(processes=concurrency) as pool:
        batches = []
        with tqdm(desc='Keys', unit_scale=2, total=total_keys) as pbar:
            for b in utils.iter_chunks(bigtable.row.MAX_MUTATIONS, row_keys):
                pbar.update(len(b))
                batches.append((self.btspec, b))
                if len(batches) >= concurrency:
                    pool.map(_delete_rows, batches)
                    batches = []
            pool.map(_delete_rows, batches)
            batches = []
[ "def", "delete_row_range", "(", "self", ",", "format_str", ",", "start_game", ",", "end_game", ")", ":", "row_keys", "=", "make_single_array", "(", "self", ".", "tf_table", ".", "keys_by_range_dataset", "(", "format_str", ".", "format", "(", "start_game", ")", ",", "format_str", ".", "format", "(", "end_game", ")", ")", ")", "row_keys", "=", "list", "(", "row_keys", ")", "if", "not", "row_keys", ":", "utils", ".", "dbg", "(", "'No rows left for games %d..%d'", "%", "(", "start_game", ",", "end_game", ")", ")", "return", "utils", ".", "dbg", "(", "'Deleting %d rows: %s..%s'", "%", "(", "len", "(", "row_keys", ")", ",", "row_keys", "[", "0", "]", ",", "row_keys", "[", "-", "1", "]", ")", ")", "# Reverse the keys so that the queue is left in a more", "# sensible end state if you change your mind (say, due to a", "# mistake in the timestamp) and abort the process: there will", "# be a bit trimmed from the end, rather than a bit", "# trimmed out of the middle.", "row_keys", ".", "reverse", "(", ")", "total_keys", "=", "len", "(", "row_keys", ")", "utils", ".", "dbg", "(", "'Deleting total of %d keys'", "%", "total_keys", ")", "concurrency", "=", "min", "(", "MAX_BT_CONCURRENCY", ",", "multiprocessing", ".", "cpu_count", "(", ")", "*", "2", ")", "with", "multiprocessing", ".", "Pool", "(", "processes", "=", "concurrency", ")", "as", "pool", ":", "batches", "=", "[", "]", "with", "tqdm", "(", "desc", "=", "'Keys'", ",", "unit_scale", "=", "2", ",", "total", "=", "total_keys", ")", "as", "pbar", ":", "for", "b", "in", "utils", ".", "iter_chunks", "(", "bigtable", ".", "row", ".", "MAX_MUTATIONS", ",", "row_keys", ")", ":", "pbar", ".", "update", "(", "len", "(", "b", ")", ")", "batches", ".", "append", "(", "(", "self", ".", "btspec", ",", "b", ")", ")", "if", "len", "(", "batches", ")", ">=", "concurrency", ":", "pool", ".", "map", "(", "_delete_rows", ",", "batches", ")", "batches", "=", "[", "]", "pool", ".", "map", "(", "_delete_rows", ",", "batches", ")", "batches", "=", "[", "]" ]
45.023256
0.001011
def read_input_registers(slave_id, starting_address, quantity):
    """ Return ADU for Modbus function code 04: Read Input Registers.

    :param slave_id: Number of slave.
    :return: Byte array with ADU.
    """
    function = ReadInputRegisters()
    function.starting_address = starting_address
    function.quantity = quantity

    return _create_request_adu(slave_id, function.request_pdu)
[ "def", "read_input_registers", "(", "slave_id", ",", "starting_address", ",", "quantity", ")", ":", "function", "=", "ReadInputRegisters", "(", ")", "function", ".", "starting_address", "=", "starting_address", "function", ".", "quantity", "=", "quantity", "return", "_create_request_adu", "(", "slave_id", ",", "function", ".", "request_pdu", ")" ]
35.090909
0.002525
def measure_states(states, measurement_matrix, measurement_covariance):
    """
    Measure a list of states with a measurement matrix in the presence of
    measurement noise.

    Args:
        states (array): states to measure. Shape is NxSTATE_DIM.
        measurement_matrix (array): Each state in *states* is measured with this
            matrix. Should be MEAS_DIMxSTATE_DIM in shape.
        measurement_covariance (array): Measurement noise covariance. Should be
            MEAS_DIMxMEAS_DIM.

    Returns:
        (array): NxMEAS_DIM array of measurements.

    """
    # Sanitise input
    measurement_matrix = np.atleast_2d(measurement_matrix)
    measurement_covariance = np.atleast_2d(measurement_covariance)
    measurement_dim = measurement_matrix.shape[0]
    if measurement_covariance.shape != (measurement_dim, measurement_dim):
        raise ValueError(("Measurement matrix and covariance have inconsistent "
                          "shapes {} and {}").format(measurement_matrix.shape,
                                                     measurement_covariance.shape))
    states = np.atleast_2d(states)

    # Special case: no output
    if states.shape[0] == 0:
        return np.zeros((0, measurement_dim))

    # Measure states
    measurement_means = measurement_matrix.dot(states.T).T
    measurement_noises = np.random.multivariate_normal(
        mean=np.zeros(measurement_dim), cov=measurement_covariance,
        size=states.shape[0]
    )

    return measurement_means + measurement_noises
[ "def", "measure_states", "(", "states", ",", "measurement_matrix", ",", "measurement_covariance", ")", ":", "# Sanitise input", "measurement_matrix", "=", "np", ".", "atleast_2d", "(", "measurement_matrix", ")", "measurement_covariance", "=", "np", ".", "atleast_2d", "(", "measurement_covariance", ")", "measurement_dim", "=", "measurement_matrix", ".", "shape", "[", "0", "]", "if", "measurement_covariance", ".", "shape", "!=", "(", "measurement_dim", ",", "measurement_dim", ")", ":", "raise", "ValueError", "(", "(", "\"Measurement matrix and covariance have inconsistent \"", "\"shapes {} and {}\"", ")", ".", "format", "(", "measurement_matrix", ".", "shape", ",", "measurement_covariance", ".", "shape", ")", ")", "states", "=", "np", ".", "atleast_2d", "(", "states", ")", "# Special case: no output", "if", "states", ".", "shape", "[", "0", "]", "==", "0", ":", "return", "np", ".", "zeros", "(", "(", "0", ",", "measurement_dim", ")", ")", "# Measure states", "measurement_means", "=", "measurement_matrix", ".", "dot", "(", "states", ".", "T", ")", ".", "T", "measurement_noises", "=", "np", ".", "random", ".", "multivariate_normal", "(", "mean", "=", "np", ".", "zeros", "(", "measurement_dim", ")", ",", "cov", "=", "measurement_covariance", ",", "size", "=", "states", ".", "shape", "[", "0", "]", ")", "return", "measurement_means", "+", "measurement_noises" ]
39.157895
0.002623
def retry(self, func, partition_id, retry_message, final_failure_message, max_retries, host_id):
    """
    Make attempt_renew_lease async call sync.
    """
    loop = asyncio.new_event_loop()
    loop.run_until_complete(self.retry_async(func, partition_id, retry_message,
                                             final_failure_message, max_retries, host_id))
[ "def", "retry", "(", "self", ",", "func", ",", "partition_id", ",", "retry_message", ",", "final_failure_message", ",", "max_retries", ",", "host_id", ")", ":", "loop", "=", "asyncio", ".", "new_event_loop", "(", ")", "loop", ".", "run_until_complete", "(", "self", ".", "retry_async", "(", "func", ",", "partition_id", ",", "retry_message", ",", "final_failure_message", ",", "max_retries", ",", "host_id", ")", ")" ]
54.714286
0.012853
def _setup_tunnel( self, tunnelParameters): """ *setup a ssh tunnel for a database connection to port through* **Key Arguments:** - ``tunnelParameters`` -- the tunnel parameters found associated with the database settings **Return:** - ``sshPort`` -- the port the ssh tunnel is connected via .. todo :: - update key arguments values and definitions with defaults - update return values and definitions - update usage examples and text - update docstring text - check sublime snippet exists - clip any useful text to docs mindmap - regenerate the docs and check redendering of this docstring """ self.log.debug('starting the ``_setup_tunnel`` method') # TEST TUNNEL DOES NOT ALREADY EXIST sshPort = tunnelParameters["port"] connected = self._checkServer( "127.0.0.1", sshPort) if connected: self.log.debug('ssh tunnel already exists - moving on') else: # GRAB TUNNEL SETTINGS FROM SETTINGS FILE ru = tunnelParameters["remote user"] rip = tunnelParameters["remote ip"] rh = tunnelParameters["remote datbase host"] cmd = "ssh -fnN %(ru)s@%(rip)s -L %(sshPort)s:%(rh)s:3306" % locals() p = Popen(cmd, shell=True, close_fds=True) output = p.communicate()[0] self.log.debug('output: %(output)s' % locals()) # TEST CONNECTION - QUIT AFTER SO MANY TRIES connected = False count = 0 while not connected: connected = self._checkServer( "127.0.0.1", sshPort) time.sleep(1) count += 1 if count == 5: self.log.error( 'cound not setup tunnel to remote datbase' % locals()) sys.exit(0) return sshPort
[ "def", "_setup_tunnel", "(", "self", ",", "tunnelParameters", ")", ":", "self", ".", "log", ".", "debug", "(", "'starting the ``_setup_tunnel`` method'", ")", "# TEST TUNNEL DOES NOT ALREADY EXIST", "sshPort", "=", "tunnelParameters", "[", "\"port\"", "]", "connected", "=", "self", ".", "_checkServer", "(", "\"127.0.0.1\"", ",", "sshPort", ")", "if", "connected", ":", "self", ".", "log", ".", "debug", "(", "'ssh tunnel already exists - moving on'", ")", "else", ":", "# GRAB TUNNEL SETTINGS FROM SETTINGS FILE", "ru", "=", "tunnelParameters", "[", "\"remote user\"", "]", "rip", "=", "tunnelParameters", "[", "\"remote ip\"", "]", "rh", "=", "tunnelParameters", "[", "\"remote datbase host\"", "]", "cmd", "=", "\"ssh -fnN %(ru)s@%(rip)s -L %(sshPort)s:%(rh)s:3306\"", "%", "locals", "(", ")", "p", "=", "Popen", "(", "cmd", ",", "shell", "=", "True", ",", "close_fds", "=", "True", ")", "output", "=", "p", ".", "communicate", "(", ")", "[", "0", "]", "self", ".", "log", ".", "debug", "(", "'output: %(output)s'", "%", "locals", "(", ")", ")", "# TEST CONNECTION - QUIT AFTER SO MANY TRIES", "connected", "=", "False", "count", "=", "0", "while", "not", "connected", ":", "connected", "=", "self", ".", "_checkServer", "(", "\"127.0.0.1\"", ",", "sshPort", ")", "time", ".", "sleep", "(", "1", ")", "count", "+=", "1", "if", "count", "==", "5", ":", "self", ".", "log", ".", "error", "(", "'cound not setup tunnel to remote datbase'", "%", "locals", "(", ")", ")", "sys", ".", "exit", "(", "0", ")", "return", "sshPort" ]
36.87037
0.001957
def get_tasks(self):
    """Returns an ordered dictionary {task_name: task} of all tasks within this workflow.

    :return: Ordered dictionary with key being task_name (str) and an instance of a
        corresponding task from this workflow
    :rtype: OrderedDict
    """
    tasks = collections.OrderedDict()
    for dep in self.ordered_dependencies:
        tasks[dep.name] = dep.task
    return tasks
[ "def", "get_tasks", "(", "self", ")", ":", "tasks", "=", "collections", ".", "OrderedDict", "(", ")", "for", "dep", "in", "self", ".", "ordered_dependencies", ":", "tasks", "[", "dep", ".", "name", "]", "=", "dep", ".", "task", "return", "tasks" ]
36.5
0.008909
def import_(self, conn):
    """Do the actual import. Copy data and store in connection object.

    This function:
    - Creates the tables
    - Imports data (using self.gen_rows)
    - Run any post_import hooks.
    - Creates any indexs
    - Does *not* run self.make_views - those must be done after all tables
      are loaded.
    """
    if self.print_progress:
        print('Beginning', self.__class__.__name__)
    # what is this mystical self._conn ?
    self._conn = conn
    self.create_table(conn)
    # This does insertions
    if self.mode in ('all', 'import') and self.fname and self.exists() and self.table not in ignore_tables:
        self.insert_data(conn)
    # This makes indexes in the DB.
    if self.mode in ('all', 'index') and hasattr(self, 'index'):
        self.create_index(conn)
    # Any post-processing to be done after the full import.
    if self.mode in ('all', 'import') and hasattr(self, 'post_import'):
        self.run_post_import(conn)
    # Commit it all
    conn.commit()
[ "def", "import_", "(", "self", ",", "conn", ")", ":", "if", "self", ".", "print_progress", ":", "print", "(", "'Beginning'", ",", "self", ".", "__class__", ".", "__name__", ")", "# what is this mystical self._conn ?", "self", ".", "_conn", "=", "conn", "self", ".", "create_table", "(", "conn", ")", "# This does insertions", "if", "self", ".", "mode", "in", "(", "'all'", ",", "'import'", ")", "and", "self", ".", "fname", "and", "self", ".", "exists", "(", ")", "and", "self", ".", "table", "not", "in", "ignore_tables", ":", "self", ".", "insert_data", "(", "conn", ")", "# This makes indexes in the DB.", "if", "self", ".", "mode", "in", "(", "'all'", ",", "'index'", ")", "and", "hasattr", "(", "self", ",", "'index'", ")", ":", "self", ".", "create_index", "(", "conn", ")", "# Any post-processing to be done after the full import.", "if", "self", ".", "mode", "in", "(", "'all'", ",", "'import'", ")", "and", "hasattr", "(", "self", ",", "'post_import'", ")", ":", "self", ".", "run_post_import", "(", "conn", ")", "# Commit it all", "conn", ".", "commit", "(", ")" ]
38.892857
0.002688
def CargarFormatoPDF(self, archivo="liquidacion_form_c1116b_wslpg.csv"): "Cargo el formato de campos a generar desde una planilla CSV" # si no encuentro archivo, lo busco en el directorio predeterminado: if not os.path.exists(archivo): archivo = os.path.join(self.InstallDir, "plantillas", os.path.basename(archivo)) if DEBUG: print "abriendo archivo ", archivo # inicializo la lista de los elementos: self.elements = [] for lno, linea in enumerate(open(archivo.encode('latin1')).readlines()): if DEBUG: print "procesando linea ", lno, linea args = [] for i,v in enumerate(linea.split(";")): if not v.startswith("'"): v = v.replace(",",".") else: v = v#.decode('latin1') if v.strip()=='': v = None else: v = eval(v.strip()) args.append(v) # corrijo path relativo para las imágenes: if args[1] == 'I': if not os.path.exists(args[14]): args[14] = os.path.join(self.InstallDir, "plantillas", os.path.basename(args[14])) if DEBUG: print "NUEVO PATH:", args[14] self.AgregarCampoPDF(*args) self.AgregarCampoPDF("anulado", 'T', 150, 250, 0, 0, size=70, rotate=45, foreground=0x808080, priority=-1) if HOMO: self.AgregarCampoPDF("homo", 'T', 100, 250, 0, 0, size=70, rotate=45, foreground=0x808080, priority=-1) # cargo los elementos en la plantilla self.template.load_elements(self.elements) return True
[ "def", "CargarFormatoPDF", "(", "self", ",", "archivo", "=", "\"liquidacion_form_c1116b_wslpg.csv\"", ")", ":", "# si no encuentro archivo, lo busco en el directorio predeterminado:", "if", "not", "os", ".", "path", ".", "exists", "(", "archivo", ")", ":", "archivo", "=", "os", ".", "path", ".", "join", "(", "self", ".", "InstallDir", ",", "\"plantillas\"", ",", "os", ".", "path", ".", "basename", "(", "archivo", ")", ")", "if", "DEBUG", ":", "print", "\"abriendo archivo \"", ",", "archivo", "# inicializo la lista de los elementos:", "self", ".", "elements", "=", "[", "]", "for", "lno", ",", "linea", "in", "enumerate", "(", "open", "(", "archivo", ".", "encode", "(", "'latin1'", ")", ")", ".", "readlines", "(", ")", ")", ":", "if", "DEBUG", ":", "print", "\"procesando linea \"", ",", "lno", ",", "linea", "args", "=", "[", "]", "for", "i", ",", "v", "in", "enumerate", "(", "linea", ".", "split", "(", "\";\"", ")", ")", ":", "if", "not", "v", ".", "startswith", "(", "\"'\"", ")", ":", "v", "=", "v", ".", "replace", "(", "\",\"", ",", "\".\"", ")", "else", ":", "v", "=", "v", "#.decode('latin1')", "if", "v", ".", "strip", "(", ")", "==", "''", ":", "v", "=", "None", "else", ":", "v", "=", "eval", "(", "v", ".", "strip", "(", ")", ")", "args", ".", "append", "(", "v", ")", "# corrijo path relativo para las imágenes:", "if", "args", "[", "1", "]", "==", "'I'", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "args", "[", "14", "]", ")", ":", "args", "[", "14", "]", "=", "os", ".", "path", ".", "join", "(", "self", ".", "InstallDir", ",", "\"plantillas\"", ",", "os", ".", "path", ".", "basename", "(", "args", "[", "14", "]", ")", ")", "if", "DEBUG", ":", "print", "\"NUEVO PATH:\"", ",", "args", "[", "14", "]", "self", ".", "AgregarCampoPDF", "(", "*", "args", ")", "self", ".", "AgregarCampoPDF", "(", "\"anulado\"", ",", "'T'", ",", "150", ",", "250", ",", "0", ",", "0", ",", "size", "=", "70", ",", "rotate", "=", "45", ",", "foreground", "=", "0x808080", ",", "priority", "=", "-", "1", ")", "if", "HOMO", ":", "self", ".", "AgregarCampoPDF", "(", "\"homo\"", ",", "'T'", ",", "100", ",", "250", ",", "0", ",", "0", ",", "size", "=", "70", ",", "rotate", "=", "45", ",", "foreground", "=", "0x808080", ",", "priority", "=", "-", "1", ")", "# cargo los elementos en la plantilla", "self", ".", "template", ".", "load_elements", "(", "self", ".", "elements", ")", "return", "True" ]
38.76087
0.013676
def propinquity_fn_to_study_tree(inp_fn, strip_extension=True):
    """This should only be called by propinquity - other code should be treating theses
    filenames (and the keys that are based on them) as opaque strings.

    Takes a filename (or key if strip_extension is False), returns (study_id, tree_id)
    propinquity provides a map to look up the study ID and tree ID (and git SHA)
    from these strings.
    """
    if strip_extension:
        study_tree = '.'.join(inp_fn.split('.')[:-1])  # strip extension
    else:
        study_tree = inp_fn
    x = study_tree.split('@')
    if len(x) != 2:
        msg = 'Currently we are expecting studyID@treeID.<file extension> format. Expected exactly 1 @ in the filename. Got "{}"'
        msg = msg.format(study_tree)
        raise ValueError(msg)
    return x
[ "def", "propinquity_fn_to_study_tree", "(", "inp_fn", ",", "strip_extension", "=", "True", ")", ":", "if", "strip_extension", ":", "study_tree", "=", "'.'", ".", "join", "(", "inp_fn", ".", "split", "(", "'.'", ")", "[", ":", "-", "1", "]", ")", "# strip extension", "else", ":", "study_tree", "=", "inp_fn", "x", "=", "study_tree", ".", "split", "(", "'@'", ")", "if", "len", "(", "x", ")", "!=", "2", ":", "msg", "=", "'Currently we are expecting studyID@treeID.<file extension> format. Expected exactly 1 @ in the filename. Got \"{}\"'", "msg", "=", "msg", ".", "format", "(", "study_tree", ")", "raise", "ValueError", "(", "msg", ")", "return", "x" ]
42.157895
0.006105
def _add_singles_to_buffer(self, results, ifos):
    """Add single detector triggers to the internal buffer

    Parameters
    ----------
    results: dict of arrays
        Dictionary of dictionaries indexed by ifo and keys such as 'snr',
        'chisq', etc. The specific format it determined by the
        LiveBatchMatchedFilter class.

    Returns
    -------
    updated_singles: dict of numpy.ndarrays
        Array of indices that have been just updated in the internal
        buffers of single detector triggers.
    """
    if len(self.singles.keys()) == 0:
        self.set_singles_buffer(results)

    # convert to single detector trigger values
    # FIXME Currently configured to use pycbc live output
    # where chisq is the reduced chisq and chisq_dof is the actual DOF
    logging.info("adding singles to the background estimate...")
    updated_indices = {}
    for ifo in ifos:
        trigs = results[ifo]

        if len(trigs['snr'] > 0):
            trigsc = copy.copy(trigs)
            trigsc['chisq'] = trigs['chisq'] * trigs['chisq_dof']
            trigsc['chisq_dof'] = (trigs['chisq_dof'] + 2) / 2
            single_stat = self.stat_calculator.single(trigsc)
        else:
            single_stat = numpy.array([], ndmin=1,
                                      dtype=self.stat_calculator.single_dtype)
        trigs['stat'] = single_stat

        # add each single detector trigger to the and advance the buffer
        data = numpy.zeros(len(single_stat), dtype=self.singles_dtype)
        for key, value in trigs.items():
            data[key] = value

        self.singles[ifo].add(trigs['template_id'], data)
        updated_indices[ifo] = trigs['template_id']
    return updated_indices
[ "def", "_add_singles_to_buffer", "(", "self", ",", "results", ",", "ifos", ")", ":", "if", "len", "(", "self", ".", "singles", ".", "keys", "(", ")", ")", "==", "0", ":", "self", ".", "set_singles_buffer", "(", "results", ")", "# convert to single detector trigger values", "# FIXME Currently configured to use pycbc live output", "# where chisq is the reduced chisq and chisq_dof is the actual DOF", "logging", ".", "info", "(", "\"adding singles to the background estimate...\"", ")", "updated_indices", "=", "{", "}", "for", "ifo", "in", "ifos", ":", "trigs", "=", "results", "[", "ifo", "]", "if", "len", "(", "trigs", "[", "'snr'", "]", ">", "0", ")", ":", "trigsc", "=", "copy", ".", "copy", "(", "trigs", ")", "trigsc", "[", "'chisq'", "]", "=", "trigs", "[", "'chisq'", "]", "*", "trigs", "[", "'chisq_dof'", "]", "trigsc", "[", "'chisq_dof'", "]", "=", "(", "trigs", "[", "'chisq_dof'", "]", "+", "2", ")", "/", "2", "single_stat", "=", "self", ".", "stat_calculator", ".", "single", "(", "trigsc", ")", "else", ":", "single_stat", "=", "numpy", ".", "array", "(", "[", "]", ",", "ndmin", "=", "1", ",", "dtype", "=", "self", ".", "stat_calculator", ".", "single_dtype", ")", "trigs", "[", "'stat'", "]", "=", "single_stat", "# add each single detector trigger to the and advance the buffer", "data", "=", "numpy", ".", "zeros", "(", "len", "(", "single_stat", ")", ",", "dtype", "=", "self", ".", "singles_dtype", ")", "for", "key", ",", "value", "in", "trigs", ".", "items", "(", ")", ":", "data", "[", "key", "]", "=", "value", "self", ".", "singles", "[", "ifo", "]", ".", "add", "(", "trigs", "[", "'template_id'", "]", ",", "data", ")", "updated_indices", "[", "ifo", "]", "=", "trigs", "[", "'template_id'", "]", "return", "updated_indices" ]
40.511111
0.001607
def get_parameters(self, grad_only=True):
    """Get parameters.

    Args:
        grad_only (bool, optional): Return parameters with `need_grad` option as `True`.
            If you set this option as `False`, All parameters are returned. Default is `True`.

    Returns:
        dict: The dictionary of parameter name (`str`) to Variable (:obj:`~nnabla.Variable`).

    """
    params = OrderedDict()
    for v in self.get_modules():
        if not isinstance(v, tuple):
            continue
        prefix, module = v
        for k, v in module.__dict__.items():
            if not isinstance(v, nn.Variable):
                continue
            pname = k
            name = "{}/{}".format(prefix, pname)
            if grad_only and v.need_grad == False:
                continue
            params[name] = v
    return params
[ "def", "get_parameters", "(", "self", ",", "grad_only", "=", "True", ")", ":", "params", "=", "OrderedDict", "(", ")", "for", "v", "in", "self", ".", "get_modules", "(", ")", ":", "if", "not", "isinstance", "(", "v", ",", "tuple", ")", ":", "continue", "prefix", ",", "module", "=", "v", "for", "k", ",", "v", "in", "module", ".", "__dict__", ".", "items", "(", ")", ":", "if", "not", "isinstance", "(", "v", ",", "nn", ".", "Variable", ")", ":", "continue", "pname", "=", "k", "name", "=", "\"{}/{}\"", ".", "format", "(", "prefix", ",", "pname", ")", "if", "grad_only", "and", "v", ".", "need_grad", "==", "False", ":", "continue", "params", "[", "name", "]", "=", "v", "return", "params" ]
38.652174
0.007684
def usn_v4_record(header, record):
    """Extracts USN V4 record information."""
    length, major_version, minor_version = header
    fields = V4_RECORD.unpack_from(record, RECORD_HEADER.size)

    raise NotImplementedError('Not implemented')
[ "def", "usn_v4_record", "(", "header", ",", "record", ")", ":", "length", ",", "major_version", ",", "minor_version", "=", "header", "fields", "=", "V4_RECORD", ".", "unpack_from", "(", "record", ",", "RECORD_HEADER", ".", "size", ")", "raise", "NotImplementedError", "(", "'Not implemented'", ")" ]
39.666667
0.004115
def _get_tagged_content_from_taskpaper_files( self, taskpaperFiles, tagSet, editorial=False, workflowTagSet=False, includeFileTags=True): """*get all tasks tagged with a sync-tag from taskpaper files* **Key Arguments:** - ``taskpaperFiles`` -- paths to all taskpaper files in workspace - ``tagSet`` -- the tagset to extract from the taskpaper files. - ``editorial`` -- format links for editorial ios apps - ``workflowTagSet`` -- does the tag set contain workflow tags (if not skip the non-live project lists) - ``includeFileTags`` -- if the tag is in the filepath (e.g. /@due/mytasks.taskpaper) include all items the file in that tag set **Return:** - ``content`` -- the given tagged content of all taskpaper files in a workspace (string) """ self.log.info( 'starting the ``_get_tagged_content_from_taskpaper_files`` method') content = "" for tp in taskpaperFiles: done = False if not workflowTagSet: for tag in ["@next", "@hold", "@done", "@someday"]: if "/" + tag + "/" in tp: done = True if done: continue # OPEN TASKPAPER FILE doc = document(tp) basename = os.path.basename(tp).replace("-", " ").upper() archive = doc.get_project("Archive") if archive: archive.delete() fileTagged = False done = False for tag in tagSet: if includeFileTags == True: tag = "@" + tag.replace("@", "") if "/%(tag)s/" % locals() in tp: fileTagged = True if "/@done/" in tp: done = True if done: continue # GENERATE THE EDITORIAL FILE LINK if self.editorialRootPath: tp = urllib.quote(tp) tp = tp.replace( self.editorialRootPath, "editorial://open") + "?root=dropbox" for tag in tagSet: tag = "@" + tag.replace("@", "") etag = "%40" + tag.replace("@", "") # DETERMINE THE SUBORDINATE/HIGH LEVEL TAGS lesserTags = [] greaterTags = [] if workflowTagSet: trumped = False else: trumped = True for t in self.workflowTags: if t == tag: trumped = True if t != tag: if trumped: lesserTags.append(t) else: greaterTags.append(t) # FOR DOCUMENT WITH THIS SYNC TAG filteredTasks = [] if ("/%(tag)s/" % locals() in tp or "/%(etag)s/" % locals() in tp) and includeFileTags == True: filteredTasks = doc.all_tasks() for ft in filteredTasks: trumped = False for t in ft.tags: if t in " ".join(greaterTags): trumped = True if not trumped: for t in lesserTags: ft.del_tag(t) ft.add_tag(tag) elif not fileTagged: filteredProjects = doc.tagged_projects(tag) for p in filteredProjects: allTasks = p.all_tasks() for ft in filteredTasks: trumped = False for t in ft.tags: if t in " ".join(greaterTags): trumped = True if not trumped: for t in lesserTags: ft.del_tag(t) ft.add_tag(tag) filteredTasks = doc.tagged_tasks(tag) for ft in filteredTasks: if "done" not in "".join(ft.tags): if "Project" in ft.parent.__repr__(): thisNote = tp + " > " + ft.parent.title[:-1] else: thisNote = tp ft.add_note(thisNote) content += ft.to_string() + "\n" self.log.info( 'completed the ``_get_tagged_content_from_taskpaper_files`` method') return content
[ "def", "_get_tagged_content_from_taskpaper_files", "(", "self", ",", "taskpaperFiles", ",", "tagSet", ",", "editorial", "=", "False", ",", "workflowTagSet", "=", "False", ",", "includeFileTags", "=", "True", ")", ":", "self", ".", "log", ".", "info", "(", "'starting the ``_get_tagged_content_from_taskpaper_files`` method'", ")", "content", "=", "\"\"", "for", "tp", "in", "taskpaperFiles", ":", "done", "=", "False", "if", "not", "workflowTagSet", ":", "for", "tag", "in", "[", "\"@next\"", ",", "\"@hold\"", ",", "\"@done\"", ",", "\"@someday\"", "]", ":", "if", "\"/\"", "+", "tag", "+", "\"/\"", "in", "tp", ":", "done", "=", "True", "if", "done", ":", "continue", "# OPEN TASKPAPER FILE", "doc", "=", "document", "(", "tp", ")", "basename", "=", "os", ".", "path", ".", "basename", "(", "tp", ")", ".", "replace", "(", "\"-\"", ",", "\" \"", ")", ".", "upper", "(", ")", "archive", "=", "doc", ".", "get_project", "(", "\"Archive\"", ")", "if", "archive", ":", "archive", ".", "delete", "(", ")", "fileTagged", "=", "False", "done", "=", "False", "for", "tag", "in", "tagSet", ":", "if", "includeFileTags", "==", "True", ":", "tag", "=", "\"@\"", "+", "tag", ".", "replace", "(", "\"@\"", ",", "\"\"", ")", "if", "\"/%(tag)s/\"", "%", "locals", "(", ")", "in", "tp", ":", "fileTagged", "=", "True", "if", "\"/@done/\"", "in", "tp", ":", "done", "=", "True", "if", "done", ":", "continue", "# GENERATE THE EDITORIAL FILE LINK", "if", "self", ".", "editorialRootPath", ":", "tp", "=", "urllib", ".", "quote", "(", "tp", ")", "tp", "=", "tp", ".", "replace", "(", "self", ".", "editorialRootPath", ",", "\"editorial://open\"", ")", "+", "\"?root=dropbox\"", "for", "tag", "in", "tagSet", ":", "tag", "=", "\"@\"", "+", "tag", ".", "replace", "(", "\"@\"", ",", "\"\"", ")", "etag", "=", "\"%40\"", "+", "tag", ".", "replace", "(", "\"@\"", ",", "\"\"", ")", "# DETERMINE THE SUBORDINATE/HIGH LEVEL TAGS", "lesserTags", "=", "[", "]", "greaterTags", "=", "[", "]", "if", "workflowTagSet", ":", "trumped", "=", "False", "else", ":", "trumped", "=", "True", "for", "t", "in", "self", ".", "workflowTags", ":", "if", "t", "==", "tag", ":", "trumped", "=", "True", "if", "t", "!=", "tag", ":", "if", "trumped", ":", "lesserTags", ".", "append", "(", "t", ")", "else", ":", "greaterTags", ".", "append", "(", "t", ")", "# FOR DOCUMENT WITH THIS SYNC TAG", "filteredTasks", "=", "[", "]", "if", "(", "\"/%(tag)s/\"", "%", "locals", "(", ")", "in", "tp", "or", "\"/%(etag)s/\"", "%", "locals", "(", ")", "in", "tp", ")", "and", "includeFileTags", "==", "True", ":", "filteredTasks", "=", "doc", ".", "all_tasks", "(", ")", "for", "ft", "in", "filteredTasks", ":", "trumped", "=", "False", "for", "t", "in", "ft", ".", "tags", ":", "if", "t", "in", "\" \"", ".", "join", "(", "greaterTags", ")", ":", "trumped", "=", "True", "if", "not", "trumped", ":", "for", "t", "in", "lesserTags", ":", "ft", ".", "del_tag", "(", "t", ")", "ft", ".", "add_tag", "(", "tag", ")", "elif", "not", "fileTagged", ":", "filteredProjects", "=", "doc", ".", "tagged_projects", "(", "tag", ")", "for", "p", "in", "filteredProjects", ":", "allTasks", "=", "p", ".", "all_tasks", "(", ")", "for", "ft", "in", "filteredTasks", ":", "trumped", "=", "False", "for", "t", "in", "ft", ".", "tags", ":", "if", "t", "in", "\" \"", ".", "join", "(", "greaterTags", ")", ":", "trumped", "=", "True", "if", "not", "trumped", ":", "for", "t", "in", "lesserTags", ":", "ft", ".", "del_tag", "(", "t", ")", "ft", ".", "add_tag", "(", "tag", ")", "filteredTasks", "=", "doc", ".", 
"tagged_tasks", "(", "tag", ")", "for", "ft", "in", "filteredTasks", ":", "if", "\"done\"", "not", "in", "\"\"", ".", "join", "(", "ft", ".", "tags", ")", ":", "if", "\"Project\"", "in", "ft", ".", "parent", ".", "__repr__", "(", ")", ":", "thisNote", "=", "tp", "+", "\" > \"", "+", "ft", ".", "parent", ".", "title", "[", ":", "-", "1", "]", "else", ":", "thisNote", "=", "tp", "ft", ".", "add_note", "(", "thisNote", ")", "content", "+=", "ft", ".", "to_string", "(", ")", "+", "\"\\n\"", "self", ".", "log", ".", "info", "(", "'completed the ``_get_tagged_content_from_taskpaper_files`` method'", ")", "return", "content" ]
39.133333
0.002077
def declare(full_table_name, definition, context): """ Parse declaration and create new SQL table accordingly. :param full_table_name: full name of the table :param definition: DataJoint table definition :param context: dictionary of objects that might be referred to in the table. """ table_name = full_table_name.strip('`').split('.')[1] if len(table_name) > MAX_TABLE_NAME_LENGTH: raise DataJointError( 'Table name `{name}` exceeds the max length of {max_length}'.format( name=table_name, max_length=MAX_TABLE_NAME_LENGTH)) # split definition into lines definition = re.split(r'\s*\n\s*', definition.strip()) # check for optional table comment table_comment = definition.pop(0)[1:].strip() if definition[0].startswith('#') else '' in_key = True # parse primary keys primary_key = [] attributes = [] attribute_sql = [] foreign_key_sql = [] index_sql = [] uses_external = False for line in definition: if line.startswith('#'): # additional comments are ignored pass elif line.startswith('---') or line.startswith('___'): in_key = False # start parsing dependent attributes elif is_foreign_key(line): compile_foreign_key(line, context, attributes, primary_key if in_key else None, attribute_sql, foreign_key_sql, index_sql) elif re.match(r'^(unique\s+)?index[^:]*$', line, re.I): # index compile_index(line, index_sql) else: name, sql, is_external = compile_attribute(line, in_key, foreign_key_sql) uses_external = uses_external or is_external if in_key and name not in primary_key: primary_key.append(name) if name not in attributes: attributes.append(name) attribute_sql.append(sql) # compile SQL if not primary_key: raise DataJointError('Table must have a primary key') return ( 'CREATE TABLE IF NOT EXISTS %s (\n' % full_table_name + ',\n'.join(attribute_sql + ['PRIMARY KEY (`' + '`,`'.join(primary_key) + '`)'] + foreign_key_sql + index_sql) + '\n) ENGINE=InnoDB, COMMENT "%s"' % table_comment), uses_external
[ "def", "declare", "(", "full_table_name", ",", "definition", ",", "context", ")", ":", "table_name", "=", "full_table_name", ".", "strip", "(", "'`'", ")", ".", "split", "(", "'.'", ")", "[", "1", "]", "if", "len", "(", "table_name", ")", ">", "MAX_TABLE_NAME_LENGTH", ":", "raise", "DataJointError", "(", "'Table name `{name}` exceeds the max length of {max_length}'", ".", "format", "(", "name", "=", "table_name", ",", "max_length", "=", "MAX_TABLE_NAME_LENGTH", ")", ")", "# split definition into lines", "definition", "=", "re", ".", "split", "(", "r'\\s*\\n\\s*'", ",", "definition", ".", "strip", "(", ")", ")", "# check for optional table comment", "table_comment", "=", "definition", ".", "pop", "(", "0", ")", "[", "1", ":", "]", ".", "strip", "(", ")", "if", "definition", "[", "0", "]", ".", "startswith", "(", "'#'", ")", "else", "''", "in_key", "=", "True", "# parse primary keys", "primary_key", "=", "[", "]", "attributes", "=", "[", "]", "attribute_sql", "=", "[", "]", "foreign_key_sql", "=", "[", "]", "index_sql", "=", "[", "]", "uses_external", "=", "False", "for", "line", "in", "definition", ":", "if", "line", ".", "startswith", "(", "'#'", ")", ":", "# additional comments are ignored", "pass", "elif", "line", ".", "startswith", "(", "'---'", ")", "or", "line", ".", "startswith", "(", "'___'", ")", ":", "in_key", "=", "False", "# start parsing dependent attributes", "elif", "is_foreign_key", "(", "line", ")", ":", "compile_foreign_key", "(", "line", ",", "context", ",", "attributes", ",", "primary_key", "if", "in_key", "else", "None", ",", "attribute_sql", ",", "foreign_key_sql", ",", "index_sql", ")", "elif", "re", ".", "match", "(", "r'^(unique\\s+)?index[^:]*$'", ",", "line", ",", "re", ".", "I", ")", ":", "# index", "compile_index", "(", "line", ",", "index_sql", ")", "else", ":", "name", ",", "sql", ",", "is_external", "=", "compile_attribute", "(", "line", ",", "in_key", ",", "foreign_key_sql", ")", "uses_external", "=", "uses_external", "or", "is_external", "if", "in_key", "and", "name", "not", "in", "primary_key", ":", "primary_key", ".", "append", "(", "name", ")", "if", "name", "not", "in", "attributes", ":", "attributes", ".", "append", "(", "name", ")", "attribute_sql", ".", "append", "(", "sql", ")", "# compile SQL", "if", "not", "primary_key", ":", "raise", "DataJointError", "(", "'Table must have a primary key'", ")", "return", "(", "'CREATE TABLE IF NOT EXISTS %s (\\n'", "%", "full_table_name", "+", "',\\n'", ".", "join", "(", "attribute_sql", "+", "[", "'PRIMARY KEY (`'", "+", "'`,`'", ".", "join", "(", "primary_key", ")", "+", "'`)'", "]", "+", "foreign_key_sql", "+", "index_sql", ")", "+", "'\\n) ENGINE=InnoDB, COMMENT \"%s\"'", "%", "table_comment", ")", ",", "uses_external" ]
42.5
0.002555
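For reference, a hypothetical DataJoint-style definition string of the kind this parser accepts (the table comment, attribute names and types below are invented for illustration; `declare` also relies on module-level helpers such as `compile_attribute` and on the `context` dict, which is only needed for foreign keys):

example_definition = """
# mouse experimental sessions
subject_id   : int          # subject identifier
session_date : date         # date of the recording
---
experimenter : varchar(64)  # who ran the session
"""
# sql, uses_external = declare('`mydb`.`session`', example_definition, context={})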
def possible_forms(self): """ Generate a list of possible forms for the current lemma :returns: List of possible forms for the current lemma :rtype: [str] """ forms = [] for morph in self.modele().morphos(): for desinence in self.modele().desinences(morph): radicaux = self.radical(desinence.numRad()) if isinstance(radicaux, Radical): forms.append(radicaux.gr() + desinence.gr()) else: for rad in radicaux: forms.append(rad.gr() + desinence.gr()) return list(set(forms))
[ "def", "possible_forms", "(", "self", ")", ":", "forms", "=", "[", "]", "for", "morph", "in", "self", ".", "modele", "(", ")", ".", "morphos", "(", ")", ":", "for", "desinence", "in", "self", ".", "modele", "(", ")", ".", "desinences", "(", "morph", ")", ":", "radicaux", "=", "self", ".", "radical", "(", "desinence", ".", "numRad", "(", ")", ")", "if", "isinstance", "(", "radicaux", ",", "Radical", ")", ":", "forms", ".", "append", "(", "radicaux", ".", "gr", "(", ")", "+", "desinence", ".", "gr", "(", ")", ")", "else", ":", "for", "rad", "in", "radicaux", ":", "forms", ".", "append", "(", "rad", ".", "gr", "(", ")", "+", "desinence", ".", "gr", "(", ")", ")", "return", "list", "(", "set", "(", "forms", ")", ")" ]
39.8125
0.003067
def populate_link(self, finder, upgrade, require_hashes):
    # type: (PackageFinder, bool, bool) -> None
    """Ensure that if a link can be found for this, that it is found.

    Note that self.link may still be None - if upgrade is False and the
    requirement is already installed.

    If require_hashes is True, don't use the wheel cache, because cached
    wheels, always built locally, have different hashes than the files
    downloaded from the index server and thus throw false hash mismatches.
    Furthermore, cached wheels at present have nondeterministic contents due
    to file modification times.
    """
    if self.link is None:
        self.link = finder.find_requirement(self, upgrade)
    if self._wheel_cache is not None and not require_hashes:
        old_link = self.link
        self.link = self._wheel_cache.get(self.link, self.name)
        if old_link != self.link:
            logger.debug('Using cached wheel link: %s', self.link)
[ "def", "populate_link", "(", "self", ",", "finder", ",", "upgrade", ",", "require_hashes", ")", ":", "# type: (PackageFinder, bool, bool) -> None", "if", "self", ".", "link", "is", "None", ":", "self", ".", "link", "=", "finder", ".", "find_requirement", "(", "self", ",", "upgrade", ")", "if", "self", ".", "_wheel_cache", "is", "not", "None", "and", "not", "require_hashes", ":", "old_link", "=", "self", ".", "link", "self", ".", "link", "=", "self", ".", "_wheel_cache", ".", "get", "(", "self", ".", "link", ",", "self", ".", "name", ")", "if", "old_link", "!=", "self", ".", "link", ":", "logger", ".", "debug", "(", "'Using cached wheel link: %s'", ",", "self", ".", "link", ")" ]
50.55
0.002913
def fit(self, X, y, sample_weight=None, monitor=None): """Fit the gradient boosting model. Parameters ---------- X : array-like, shape = (n_samples, n_features) Data matrix y : structured array, shape = (n_samples,) A structured array containing the binary event indicator as first field, and time of event or time of censoring as second field. sample_weight : array-like, shape = (n_samples,), optional Weights given to each sample. If omitted, all samples have weight 1. monitor : callable, optional The monitor is called after each iteration with the current iteration, a reference to the estimator and the local variables of ``_fit_stages`` as keyword arguments ``callable(i, self, locals())``. If the callable returns ``True`` the fitting procedure is stopped. The monitor can be used for various things such as computing held-out estimates, early stopping, model introspect, and snapshoting. Returns ------- self : object Returns self. """ random_state = check_random_state(self.random_state) X, event, time = check_arrays_survival(X, y, accept_sparse=['csr', 'csc', 'coo'], dtype=DTYPE) n_samples, self.n_features_ = X.shape X = X.astype(DTYPE) if sample_weight is None: sample_weight = numpy.ones(n_samples, dtype=numpy.float32) else: sample_weight = column_or_1d(sample_weight, warn=True) check_consistent_length(X, sample_weight) self._check_params() self.loss_ = LOSS_FUNCTIONS[self.loss](1) if isinstance(self.loss_, (CensoredSquaredLoss, IPCWLeastSquaresError)): time = numpy.log(time) self._init_state() self.init_.fit(X, (event, time), sample_weight) y_pred = self.init_.predict(X) begin_at_stage = 0 if self.presort is True and issparse(X): raise ValueError( "Presorting is not supported for sparse matrices.") presort = self.presort # Allow presort to be 'auto', which means True if the dataset is dense, # otherwise it will be False. if presort == 'auto': presort = not issparse(X) X_idx_sorted = None if presort: X_idx_sorted = numpy.asfortranarray(numpy.argsort(X, axis=0), dtype=numpy.int32) # fit the boosting stages y = numpy.fromiter(zip(event, time), dtype=[('event', numpy.bool), ('time', numpy.float64)]) n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state, begin_at_stage, monitor, X_idx_sorted) # change shape of arrays after fit (early-stopping or additional tests) if n_stages != self.estimators_.shape[0]: self.estimators_ = self.estimators_[:n_stages] self.train_score_ = self.train_score_[:n_stages] if hasattr(self, 'oob_improvement_'): self.oob_improvement_ = self.oob_improvement_[:n_stages] self.n_estimators_ = n_stages return self
[ "def", "fit", "(", "self", ",", "X", ",", "y", ",", "sample_weight", "=", "None", ",", "monitor", "=", "None", ")", ":", "random_state", "=", "check_random_state", "(", "self", ".", "random_state", ")", "X", ",", "event", ",", "time", "=", "check_arrays_survival", "(", "X", ",", "y", ",", "accept_sparse", "=", "[", "'csr'", ",", "'csc'", ",", "'coo'", "]", ",", "dtype", "=", "DTYPE", ")", "n_samples", ",", "self", ".", "n_features_", "=", "X", ".", "shape", "X", "=", "X", ".", "astype", "(", "DTYPE", ")", "if", "sample_weight", "is", "None", ":", "sample_weight", "=", "numpy", ".", "ones", "(", "n_samples", ",", "dtype", "=", "numpy", ".", "float32", ")", "else", ":", "sample_weight", "=", "column_or_1d", "(", "sample_weight", ",", "warn", "=", "True", ")", "check_consistent_length", "(", "X", ",", "sample_weight", ")", "self", ".", "_check_params", "(", ")", "self", ".", "loss_", "=", "LOSS_FUNCTIONS", "[", "self", ".", "loss", "]", "(", "1", ")", "if", "isinstance", "(", "self", ".", "loss_", ",", "(", "CensoredSquaredLoss", ",", "IPCWLeastSquaresError", ")", ")", ":", "time", "=", "numpy", ".", "log", "(", "time", ")", "self", ".", "_init_state", "(", ")", "self", ".", "init_", ".", "fit", "(", "X", ",", "(", "event", ",", "time", ")", ",", "sample_weight", ")", "y_pred", "=", "self", ".", "init_", ".", "predict", "(", "X", ")", "begin_at_stage", "=", "0", "if", "self", ".", "presort", "is", "True", "and", "issparse", "(", "X", ")", ":", "raise", "ValueError", "(", "\"Presorting is not supported for sparse matrices.\"", ")", "presort", "=", "self", ".", "presort", "# Allow presort to be 'auto', which means True if the dataset is dense,", "# otherwise it will be False.", "if", "presort", "==", "'auto'", ":", "presort", "=", "not", "issparse", "(", "X", ")", "X_idx_sorted", "=", "None", "if", "presort", ":", "X_idx_sorted", "=", "numpy", ".", "asfortranarray", "(", "numpy", ".", "argsort", "(", "X", ",", "axis", "=", "0", ")", ",", "dtype", "=", "numpy", ".", "int32", ")", "# fit the boosting stages", "y", "=", "numpy", ".", "fromiter", "(", "zip", "(", "event", ",", "time", ")", ",", "dtype", "=", "[", "(", "'event'", ",", "numpy", ".", "bool", ")", ",", "(", "'time'", ",", "numpy", ".", "float64", ")", "]", ")", "n_stages", "=", "self", ".", "_fit_stages", "(", "X", ",", "y", ",", "y_pred", ",", "sample_weight", ",", "random_state", ",", "begin_at_stage", ",", "monitor", ",", "X_idx_sorted", ")", "# change shape of arrays after fit (early-stopping or additional tests)", "if", "n_stages", "!=", "self", ".", "estimators_", ".", "shape", "[", "0", "]", ":", "self", ".", "estimators_", "=", "self", ".", "estimators_", "[", ":", "n_stages", "]", "self", ".", "train_score_", "=", "self", ".", "train_score_", "[", ":", "n_stages", "]", "if", "hasattr", "(", "self", ",", "'oob_improvement_'", ")", ":", "self", ".", "oob_improvement_", "=", "self", ".", "oob_improvement_", "[", ":", "n_stages", "]", "self", ".", "n_estimators_", "=", "n_stages", "return", "self" ]
39.740741
0.001819
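A minimal sketch of the survival-target format this `fit` expects: the structured array carries the binary event indicator first and the observed time second, as described in the docstring (the estimator class name below is an assumption, not taken from this row):

import numpy
# binary event indicator first, observed time second
y = numpy.array([(True, 12.0), (False, 30.5), (True, 7.2)],
                dtype=[('event', bool), ('time', float)])
X = numpy.random.rand(3, 5)
# model = GradientBoostingSurvivalAnalysis()   # hypothetical estimator exposing the fit() above
# model.fit(X, y)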
def compile_binary(source): """ Prepare chkrootkit binary $ tar xzvf chkrootkit.tar.gz $ cd chkrootkit-0.52 $ make sense sudo mv chkrootkit-0.52 /usr/local/chkrootkit sudo ln -s """ cmd = 'make sense' src = '/usr/local/bin/chkrootkit' dst = '/usr/local/chkrootkit/chkrootkit' # Tar Extraction t = tarfile.open(source, 'r') t.extractall(TMPDIR) if isinstance(t.getnames(), list): extract_dir = t.getnames()[0].split('/')[0] os.chdir(TMPDIR + '/' + extract_dir) logger.info('make output: \n%s' % subprocess.getoutput(cmd)) # move directory in place mv_cmd = 'sudo mv %s /usr/local/chkrootkit' % (TMPDIR + '/' + extract_dir) subprocess.getoutput(mv_cmd) # create symlink to binary in directory os.symlink(dst, src) return True return False
[ "def", "compile_binary", "(", "source", ")", ":", "cmd", "=", "'make sense'", "src", "=", "'/usr/local/bin/chkrootkit'", "dst", "=", "'/usr/local/chkrootkit/chkrootkit'", "# Tar Extraction", "t", "=", "tarfile", ".", "open", "(", "source", ",", "'r'", ")", "t", ".", "extractall", "(", "TMPDIR", ")", "if", "isinstance", "(", "t", ".", "getnames", "(", ")", ",", "list", ")", ":", "extract_dir", "=", "t", ".", "getnames", "(", ")", "[", "0", "]", ".", "split", "(", "'/'", ")", "[", "0", "]", "os", ".", "chdir", "(", "TMPDIR", "+", "'/'", "+", "extract_dir", ")", "logger", ".", "info", "(", "'make output: \\n%s'", "%", "subprocess", ".", "getoutput", "(", "cmd", ")", ")", "# move directory in place", "mv_cmd", "=", "'sudo mv %s /usr/local/chkrootkit'", "%", "(", "TMPDIR", "+", "'/'", "+", "extract_dir", ")", "subprocess", ".", "getoutput", "(", "mv_cmd", ")", "# create symlink to binary in directory", "os", ".", "symlink", "(", "dst", ",", "src", ")", "return", "True", "return", "False" ]
32.576923
0.002294
def set_pscale(self): """ Compute the pixel scale based on active WCS values. """ if self.new: self.pscale = 1.0 else: self.pscale = self.compute_pscale(self.cd11,self.cd21)
[ "def", "set_pscale", "(", "self", ")", ":", "if", "self", ".", "new", ":", "self", ".", "pscale", "=", "1.0", "else", ":", "self", ".", "pscale", "=", "self", ".", "compute_pscale", "(", "self", ".", "cd11", ",", "self", ".", "cd21", ")" ]
36
0.013575
def get_job_errors(self, job_id): """GetJobErrors https://apidocs.joyent.com/manta/api.html#GetJobErrors with the added sugar that it will retrieve the archived job if it has been archived, per: https://apidocs.joyent.com/manta/jobs-reference.html#job-completion-and-archival """ try: return RawMantaClient.get_job_errors(self, job_id) except errors.MantaAPIError as ex: if ex.res.status != 404: raise # Job was archived, try to retrieve the archived data. mpath = "/%s/jobs/%s/err.txt" % (self.account, job_id) content = self.get_object(mpath) return self._job_errors_from_content(content)
[ "def", "get_job_errors", "(", "self", ",", "job_id", ")", ":", "try", ":", "return", "RawMantaClient", ".", "get_job_errors", "(", "self", ",", "job_id", ")", "except", "errors", ".", "MantaAPIError", "as", "ex", ":", "if", "ex", ".", "res", ".", "status", "!=", "404", ":", "raise", "# Job was archived, try to retrieve the archived data.", "mpath", "=", "\"/%s/jobs/%s/err.txt\"", "%", "(", "self", ".", "account", ",", "job_id", ")", "content", "=", "self", ".", "get_object", "(", "mpath", ")", "return", "self", ".", "_job_errors_from_content", "(", "content", ")" ]
45.9375
0.002667
def sometimes(fn): """ They've done studies, you know. 50% of the time, it works every time. """ def wrapped(*args, **kwargs): wrapped.x += 1 if wrapped.x % 2 == 1: return fn(*args, **kwargs) wrapped.x = 0 return wrapped
[ "def", "sometimes", "(", "fn", ")", ":", "def", "wrapped", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "wrapped", ".", "x", "+=", "1", "if", "wrapped", ".", "x", "%", "2", "==", "1", ":", "return", "fn", "(", "*", "args", ",", "*", "*", "kwargs", ")", "wrapped", ".", "x", "=", "0", "return", "wrapped" ]
20.461538
0.003597
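A small usage sketch of the decorator above; with the counter logic shown, the wrapped function actually runs on every odd-numbered call:

@sometimes
def greet(name):
    return "hello, " + name

greet("ada")   # call 1: wrapped.x == 1 (odd)  -> returns "hello, ada"
greet("ada")   # call 2: wrapped.x == 2 (even) -> returns None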
def init(*, output_dir=FS_DEFAULT_OUTPUT_DIR, dry_run=False, **kwargs): """ Set up output directory :param output_dir(str, optional): Output dir for holding temporary files :param \*\*kwargs: arbitrary keyword arguments """ # Output directory global _output_dir _output_dir = output_dir # Dry run mode global _dry_run _dry_run = dry_run # Type checks utils.chkstr(_output_dir, 'output_dir') # log the thing log.msg("Output directory will be: {output_dir}" .format(output_dir=_output_dir)) # Create output directory if it does not exist if not os.path.exists(_output_dir): log.msg_warn("Output path '{out_dir}' does not exist, creating it ..." .format(out_dir=_output_dir)) if not _dry_run: # create the actual root output directory os.makedirs(_output_dir) # set folder permissions to 0770 os.chmod(_output_dir, stat.S_IRWXU | stat.S_IRWXG)
[ "def", "init", "(", "*", ",", "output_dir", "=", "FS_DEFAULT_OUTPUT_DIR", ",", "dry_run", "=", "False", ",", "*", "*", "kwargs", ")", ":", "# Output directory", "global", "_output_dir", "_output_dir", "=", "output_dir", "# Dry run mode", "global", "_dry_run", "_dry_run", "=", "dry_run", "# Type checks", "utils", ".", "chkstr", "(", "_output_dir", ",", "'output_dir'", ")", "# log the thing", "log", ".", "msg", "(", "\"Output directory will be: {output_dir}\"", ".", "format", "(", "output_dir", "=", "_output_dir", ")", ")", "# Create output directory if it does not exist", "if", "not", "os", ".", "path", ".", "exists", "(", "_output_dir", ")", ":", "log", ".", "msg_warn", "(", "\"Output path '{out_dir}' does not exist, creating it ...\"", ".", "format", "(", "out_dir", "=", "_output_dir", ")", ")", "if", "not", "_dry_run", ":", "# create the actual root output directory", "os", ".", "makedirs", "(", "_output_dir", ")", "# set folder permissions to 0770", "os", ".", "chmod", "(", "_output_dir", ",", "stat", ".", "S_IRWXU", "|", "stat", ".", "S_IRWXG", ")" ]
31.612903
0.00297
def get_range_values_per_column(df):
    """
    Retrieves the finite max, min and median values per column in the DataFrame `df` and stores them in three
    dictionaries. Those dictionaries `col_to_max`, `col_to_min`, `col_to_median` map the column name to the maximal,
    minimal or median value of that column.

    If a column does not contain any finite values at all, a 0 is stored instead.

    :param df: the Dataframe to get column-wise max, min and median from
    :type df: pandas.DataFrame
    :return: Dictionaries mapping column names to max, min, median values
    :rtype: (dict, dict, dict)
    """
    data = df.get_values()
    masked = np.ma.masked_invalid(data)
    columns = df.columns

    is_col_non_finite = masked.mask.sum(axis=0) == masked.data.shape[0]

    if np.any(is_col_non_finite):
        # We have columns that do not contain any finite value at all, so we will store 0 instead.
        _logger.warning("The columns {} did not have any finite values. Filling with zeros.".format(
            df.iloc[:, np.where(is_col_non_finite)[0]].columns.values))

        masked.data[:, is_col_non_finite] = 0  # Set the values of the columns to 0
        masked.mask[:, is_col_non_finite] = False  # Remove the mask for these columns

    # fetch max, min and median for all columns
    col_to_max = dict(zip(columns, np.max(masked, axis=0)))
    col_to_min = dict(zip(columns, np.min(masked, axis=0)))
    col_to_median = dict(zip(columns, np.ma.median(masked, axis=0)))

    return col_to_max, col_to_min, col_to_median
[ "def", "get_range_values_per_column", "(", "df", ")", ":", "data", "=", "df", ".", "get_values", "(", ")", "masked", "=", "np", ".", "ma", ".", "masked_invalid", "(", "data", ")", "columns", "=", "df", ".", "columns", "is_col_non_finite", "=", "masked", ".", "mask", ".", "sum", "(", "axis", "=", "0", ")", "==", "masked", ".", "data", ".", "shape", "[", "0", "]", "if", "np", ".", "any", "(", "is_col_non_finite", ")", ":", "# We have columns that does not contain any finite value at all, so we will store 0 instead.", "_logger", ".", "warning", "(", "\"The columns {} did not have any finite values. Filling with zeros.\"", ".", "format", "(", "df", ".", "iloc", "[", ":", ",", "np", ".", "where", "(", "is_col_non_finite", ")", "[", "0", "]", "]", ".", "columns", ".", "values", ")", ")", "masked", ".", "data", "[", ":", ",", "is_col_non_finite", "]", "=", "0", "# Set the values of the columns to 0", "masked", ".", "mask", "[", ":", ",", "is_col_non_finite", "]", "=", "False", "# Remove the mask for this column", "# fetch max, min and median for all columns", "col_to_max", "=", "dict", "(", "zip", "(", "columns", ",", "np", ".", "max", "(", "masked", ",", "axis", "=", "0", ")", ")", ")", "col_to_min", "=", "dict", "(", "zip", "(", "columns", ",", "np", ".", "min", "(", "masked", ",", "axis", "=", "0", ")", ")", ")", "col_to_median", "=", "dict", "(", "zip", "(", "columns", ",", "np", ".", "ma", ".", "median", "(", "masked", ",", "axis", "=", "0", ")", ")", ")", "return", "col_to_max", ",", "col_to_min", ",", "col_to_median" ]
44.441176
0.005181
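A usage sketch, assuming the function above is importable and an older pandas where `DataFrame.get_values()` still exists; the column containing only non-finite values falls back to 0:

import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1.0, 2.0, np.nan],
                   "b": [np.inf, np.nan, np.inf]})
col_to_max, col_to_min, col_to_median = get_range_values_per_column(df)
# col_to_max    -> {'a': 2.0, 'b': 0.0}
# col_to_min    -> {'a': 1.0, 'b': 0.0}
# col_to_median -> {'a': 1.5, 'b': 0.0}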
def _get_netengine_arguments(self, required=False): """ returns list of available config params returns list of required config params if required is True for internal use only """ # inspect netengine class backend_class = self._get_netengine_backend() argspec = inspect.getargspec(backend_class.__init__) # store args args = argspec.args # remove known arguments for argument_name in ['self', 'host', 'port']: args.remove(argument_name) if required: # list of default values default_values = list(argspec.defaults) # always remove last default value, which is port number default_values = default_values[0:-1] # remove an amount of arguments equals to number of default values, starting from right args = args[0:len(args)-len(default_values)] return args
[ "def", "_get_netengine_arguments", "(", "self", ",", "required", "=", "False", ")", ":", "# inspect netengine class", "backend_class", "=", "self", ".", "_get_netengine_backend", "(", ")", "argspec", "=", "inspect", ".", "getargspec", "(", "backend_class", ".", "__init__", ")", "# store args", "args", "=", "argspec", ".", "args", "# remove known arguments", "for", "argument_name", "in", "[", "'self'", ",", "'host'", ",", "'port'", "]", ":", "args", ".", "remove", "(", "argument_name", ")", "if", "required", ":", "# list of default values", "default_values", "=", "list", "(", "argspec", ".", "defaults", ")", "# always remove last default value, which is port number", "default_values", "=", "default_values", "[", "0", ":", "-", "1", "]", "# remove an amount of arguments equals to number of default values, starting from right", "args", "=", "args", "[", "0", ":", "len", "(", "args", ")", "-", "len", "(", "default_values", ")", "]", "return", "args" ]
37.2
0.003145
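The required-argument arithmetic above (after discarding the port default, drop as many trailing arguments as there are remaining defaults) can be illustrated with a made-up backend signature; note that `inspect.getargspec` is deprecated and removed in recent Python versions and is used here only to mirror the code above:

import inspect

class FakeBackend(object):
    # mimics a netengine backend: the last defaulted parameter is the port
    def __init__(self, host, username, password='', port=22):
        pass

spec = inspect.getargspec(FakeBackend.__init__)
args = [a for a in spec.args if a not in ('self', 'host', 'port')]
# args == ['username', 'password']
defaults = list(spec.defaults)[0:-1]            # drop the port default
required = args[0:len(args) - len(defaults)]    # ['username']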
def _add_replace_pair(self, name, value, quote):
    """
    Adds a replace pair to the map of replace pairs.

    :param name: The name of the replace pair.
    :param value: The value of the replace pair.
    :param quote: Whether string values must be quoted.
    """
    key = '@' + name + '@'
    key = key.lower()

    class_name = value.__class__.__name__

    if class_name in ['int', 'float']:
        value = str(value)
    elif class_name in ['bool']:
        value = '1' if value else '0'
    elif class_name in ['str']:
        if quote:
            value = "'" + value + "'"
    else:
        self._io.log_verbose("Ignoring constant {} which is an instance of {}".format(name, class_name))

    self._replace_pairs[key] = value
[ "def", "_add_replace_pair", "(", "self", ",", "name", ",", "value", ",", "quote", ")", ":", "key", "=", "'@'", "+", "name", "+", "'@'", "key", "=", "key", ".", "lower", "(", ")", "class_name", "=", "value", ".", "__class__", ".", "__name__", "if", "class_name", "in", "[", "'int'", ",", "'float'", "]", ":", "value", "=", "str", "(", "value", ")", "elif", "class_name", "in", "[", "'bool'", "]", ":", "value", "=", "'1'", "if", "value", "else", "'0'", "elif", "class_name", "in", "[", "'str'", "]", ":", "if", "quote", ":", "value", "=", "\"'\"", "+", "value", "+", "\"'\"", "else", ":", "self", ".", "_io", ".", "log_verbose", "(", "\"Ignoring constant {} which is an instance of {}\"", ".", "format", "(", "name", ",", "class_name", ")", ")", "self", ".", "_replace_pairs", "[", "key", "]", "=", "value" ]
32.347826
0.003916
def is_carrying_vespene(self) -> bool: """ Checks if a worker is carrying vespene. """ return any( buff.value in self._proto.buff_ids for buff in { BuffId.CARRYHARVESTABLEVESPENEGEYSERGAS, BuffId.CARRYHARVESTABLEVESPENEGEYSERGASPROTOSS, BuffId.CARRYHARVESTABLEVESPENEGEYSERGASZERG, } )
[ "def", "is_carrying_vespene", "(", "self", ")", "->", "bool", ":", "return", "any", "(", "buff", ".", "value", "in", "self", ".", "_proto", ".", "buff_ids", "for", "buff", "in", "{", "BuffId", ".", "CARRYHARVESTABLEVESPENEGEYSERGAS", ",", "BuffId", ".", "CARRYHARVESTABLEVESPENEGEYSERGASPROTOSS", ",", "BuffId", ".", "CARRYHARVESTABLEVESPENEGEYSERGASZERG", ",", "}", ")" ]
38.4
0.005089
def mean(series): """ Returns the mean of a series. Args: series (pandas.Series): column to summarize. """ if np.issubdtype(series.dtype, np.number): return series.mean() else: return np.nan
[ "def", "mean", "(", "series", ")", ":", "if", "np", ".", "issubdtype", "(", "series", ".", "dtype", ",", "np", ".", "number", ")", ":", "return", "series", ".", "mean", "(", ")", "else", ":", "return", "np", ".", "nan" ]
19.083333
0.004167
def remove(self, id_option_pool):
    """Remove an Option Pool by identifier, along with all related Environment associations.

    :param id_option_pool: Identifier of the Option Pool. Integer value and greater than zero.

    :return: None

    :raise InvalidParameterError: Option Pool identifier is null or invalid.
    :raise optionpoolNotFoundError: Option Pool not registered.
    :raise optionpoolError: Option Pool associated with Pool.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """

    if not is_valid_int_param(id_option_pool):
        raise InvalidParameterError(
            u'The identifier of Option Pool is invalid or was not informed.')

    url = 'api/pools/options/' + str(id_option_pool) + '/'

    return self.delete(url)
[ "def", "remove", "(", "self", ",", "id_option_pool", ")", ":", "if", "not", "is_valid_int_param", "(", "id_option_pool", ")", ":", "raise", "InvalidParameterError", "(", "u'The identifier of Option Pool is invalid or was not informed.'", ")", "url", "=", "'api/pools/options/'", "+", "str", "(", "id_option_pool", ")", "+", "'/'", "return", "self", ".", "delete", "(", "url", ")" ]
40.857143
0.005695
def from_session(cls, session): """Create a new Nvim instance for a Session instance. This method must be called to create the first Nvim instance, since it queries Nvim metadata for type information and sets a SessionHook for creating specialized objects from Nvim remote handles. """ session.error_wrapper = lambda e: NvimError(e[1]) channel_id, metadata = session.request(b'vim_get_api_info') if IS_PYTHON3: # decode all metadata strings for python3 metadata = walk(decode_if_bytes, metadata) types = { metadata['types']['Buffer']['id']: Buffer, metadata['types']['Window']['id']: Window, metadata['types']['Tabpage']['id']: Tabpage, } return cls(session, channel_id, metadata, types)
[ "def", "from_session", "(", "cls", ",", "session", ")", ":", "session", ".", "error_wrapper", "=", "lambda", "e", ":", "NvimError", "(", "e", "[", "1", "]", ")", "channel_id", ",", "metadata", "=", "session", ".", "request", "(", "b'vim_get_api_info'", ")", "if", "IS_PYTHON3", ":", "# decode all metadata strings for python3", "metadata", "=", "walk", "(", "decode_if_bytes", ",", "metadata", ")", "types", "=", "{", "metadata", "[", "'types'", "]", "[", "'Buffer'", "]", "[", "'id'", "]", ":", "Buffer", ",", "metadata", "[", "'types'", "]", "[", "'Window'", "]", "[", "'id'", "]", ":", "Window", ",", "metadata", "[", "'types'", "]", "[", "'Tabpage'", "]", "[", "'id'", "]", ":", "Tabpage", ",", "}", "return", "cls", "(", "session", ",", "channel_id", ",", "metadata", ",", "types", ")" ]
39
0.002384
def color_square(self, x, y, unit_coords=False): """ Handles actually coloring the squares """ # Calculate column and row number if unit_coords: col = x row = y else: col = x//self.col_width row = y//self.row_height # If the tile is not filled, create a rectangle if not self.tiles[row][col]: self.tiles[row][col] = \ self.c.create_rectangle(col*self.col_width, row*self.row_height, (col+1)*self.col_width, (row+1)*self.row_height, fill="black") self.cells.append(row*self.cols + col) # If the tile is filled, delete the rectangle and clear the reference else: self.c.delete(self.tiles[row][col]) self.tiles[row][col] = None self.cells.remove(row*self.cols + col)
[ "def", "color_square", "(", "self", ",", "x", ",", "y", ",", "unit_coords", "=", "False", ")", ":", "# Calculate column and row number", "if", "unit_coords", ":", "col", "=", "x", "row", "=", "y", "else", ":", "col", "=", "x", "//", "self", ".", "col_width", "row", "=", "y", "//", "self", ".", "row_height", "# If the tile is not filled, create a rectangle", "if", "not", "self", ".", "tiles", "[", "row", "]", "[", "col", "]", ":", "self", ".", "tiles", "[", "row", "]", "[", "col", "]", "=", "self", ".", "c", ".", "create_rectangle", "(", "col", "*", "self", ".", "col_width", ",", "row", "*", "self", ".", "row_height", ",", "(", "col", "+", "1", ")", "*", "self", ".", "col_width", ",", "(", "row", "+", "1", ")", "*", "self", ".", "row_height", ",", "fill", "=", "\"black\"", ")", "self", ".", "cells", ".", "append", "(", "row", "*", "self", ".", "cols", "+", "col", ")", "# If the tile is filled, delete the rectangle and clear the reference", "else", ":", "self", ".", "c", ".", "delete", "(", "self", ".", "tiles", "[", "row", "]", "[", "col", "]", ")", "self", ".", "tiles", "[", "row", "]", "[", "col", "]", "=", "None", "self", ".", "cells", ".", "remove", "(", "row", "*", "self", ".", "cols", "+", "col", ")" ]
37.148148
0.001944
def record_sets_list_by_type(zone_name, resource_group, record_type, top=None, recordsetnamesuffix=None, **kwargs): ''' .. versionadded:: Fluorine Lists the record sets of a specified type in a DNS zone. :param zone_name: The name of the DNS zone (without a terminating dot). :param resource_group: The name of the resource group. :param record_type: The type of record sets to enumerate. Possible values include: 'A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR', 'SOA', 'SRV', 'TXT' :param top: The maximum number of record sets to return. If not specified, returns up to 100 record sets. :param recordsetnamesuffix: The suffix label of the record set name that has to be used to filter the record set enumerations. CLI Example: .. code-block:: bash salt-call azurearm_dns.record_sets_list_by_type myzone testgroup SOA ''' result = {} dnsconn = __utils__['azurearm.get_client']('dns', **kwargs) try: record_sets = __utils__['azurearm.paged_object_to_list']( dnsconn.record_sets.list_by_type( zone_name=zone_name, resource_group_name=resource_group, record_type=record_type, top=top, recordsetnamesuffix=recordsetnamesuffix ) ) for record_set in record_sets: result[record_set['name']] = record_set except CloudError as exc: __utils__['azurearm.log_cloud_error']('dns', str(exc), **kwargs) result = {'error': str(exc)} return result
[ "def", "record_sets_list_by_type", "(", "zone_name", ",", "resource_group", ",", "record_type", ",", "top", "=", "None", ",", "recordsetnamesuffix", "=", "None", ",", "*", "*", "kwargs", ")", ":", "result", "=", "{", "}", "dnsconn", "=", "__utils__", "[", "'azurearm.get_client'", "]", "(", "'dns'", ",", "*", "*", "kwargs", ")", "try", ":", "record_sets", "=", "__utils__", "[", "'azurearm.paged_object_to_list'", "]", "(", "dnsconn", ".", "record_sets", ".", "list_by_type", "(", "zone_name", "=", "zone_name", ",", "resource_group_name", "=", "resource_group", ",", "record_type", "=", "record_type", ",", "top", "=", "top", ",", "recordsetnamesuffix", "=", "recordsetnamesuffix", ")", ")", "for", "record_set", "in", "record_sets", ":", "result", "[", "record_set", "[", "'name'", "]", "]", "=", "record_set", "except", "CloudError", "as", "exc", ":", "__utils__", "[", "'azurearm.log_cloud_error'", "]", "(", "'dns'", ",", "str", "(", "exc", ")", ",", "*", "*", "kwargs", ")", "result", "=", "{", "'error'", ":", "str", "(", "exc", ")", "}", "return", "result" ]
33.413043
0.002528
def get_assignable_log_ids(self, log_id): """Gets a list of log including and under the given log node in which any log entry can be assigned. arg: log_id (osid.id.Id): the ``Id`` of the ``Log`` return: (osid.id.IdList) - list of assignable log ``Ids`` raise: NullArgument - ``log_id`` is ``null`` raise: OperationFailed - unable to complete request *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceBinAssignmentSession.get_assignable_bin_ids # This will likely be overridden by an authorization adapter mgr = self._get_provider_manager('LOGGING', local=True) lookup_session = mgr.get_log_lookup_session(proxy=self._proxy) logs = lookup_session.get_logs() id_list = [] for log in logs: id_list.append(log.get_id()) return IdList(id_list)
[ "def", "get_assignable_log_ids", "(", "self", ",", "log_id", ")", ":", "# Implemented from template for", "# osid.resource.ResourceBinAssignmentSession.get_assignable_bin_ids", "# This will likely be overridden by an authorization adapter", "mgr", "=", "self", ".", "_get_provider_manager", "(", "'LOGGING'", ",", "local", "=", "True", ")", "lookup_session", "=", "mgr", ".", "get_log_lookup_session", "(", "proxy", "=", "self", ".", "_proxy", ")", "logs", "=", "lookup_session", ".", "get_logs", "(", ")", "id_list", "=", "[", "]", "for", "log", "in", "logs", ":", "id_list", ".", "append", "(", "log", ".", "get_id", "(", ")", ")", "return", "IdList", "(", "id_list", ")" ]
46.8
0.003141
def login(self, template='login'): ''' This property will return component which will handle login requests. auth.login(template='login.html') ''' def _login(env, data): form = self._login_form(env) next = env.request.GET.get('next', '/') login_failed = False if env.request.method == 'POST': if form.accept(env.request.POST): user_identity = self.get_user_identity( env, **form.python_data) if user_identity is not None: response = HTTPSeeOther(location=next) return self.login_identity(user_identity, response) login_failed = True data.form = form data.login_failed = login_failed data.login_url = env.root.login.as_url.qs_set(next=next) return env.template.render_to_response(template, data.as_dict()) return web.match('/login', 'login') | _login
[ "def", "login", "(", "self", ",", "template", "=", "'login'", ")", ":", "def", "_login", "(", "env", ",", "data", ")", ":", "form", "=", "self", ".", "_login_form", "(", "env", ")", "next", "=", "env", ".", "request", ".", "GET", ".", "get", "(", "'next'", ",", "'/'", ")", "login_failed", "=", "False", "if", "env", ".", "request", ".", "method", "==", "'POST'", ":", "if", "form", ".", "accept", "(", "env", ".", "request", ".", "POST", ")", ":", "user_identity", "=", "self", ".", "get_user_identity", "(", "env", ",", "*", "*", "form", ".", "python_data", ")", "if", "user_identity", "is", "not", "None", ":", "response", "=", "HTTPSeeOther", "(", "location", "=", "next", ")", "return", "self", ".", "login_identity", "(", "user_identity", ",", "response", ")", "login_failed", "=", "True", "data", ".", "form", "=", "form", "data", ".", "login_failed", "=", "login_failed", "data", ".", "login_url", "=", "env", ".", "root", ".", "login", ".", "as_url", ".", "qs_set", "(", "next", "=", "next", ")", "return", "env", ".", "template", ".", "render_to_response", "(", "template", ",", "data", ".", "as_dict", "(", ")", ")", "return", "web", ".", "match", "(", "'/login'", ",", "'login'", ")", "|", "_login" ]
45.565217
0.001869
async def check_response(response, valid_response_codes): """Check the response for correctness.""" if response.status == 204: return True if response.status in valid_response_codes: _js = await response.json() return _js else: raise PvApiResponseStatusError(response.status)
[ "async", "def", "check_response", "(", "response", ",", "valid_response_codes", ")", ":", "if", "response", ".", "status", "==", "204", ":", "return", "True", "if", "response", ".", "status", "in", "valid_response_codes", ":", "_js", "=", "await", "response", ".", "json", "(", ")", "return", "_js", "else", ":", "raise", "PvApiResponseStatusError", "(", "response", ".", "status", ")" ]
35
0.003096
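A usage sketch inside an aiohttp request, assuming an existing `aiohttp.ClientSession` named `session`:

async def fetch_data(session, url):
    async with session.get(url) as resp:
        # returns True on 204, parsed JSON on an accepted code, raises otherwise
        return await check_response(resp, [200, 201])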
def getobject_use_prevfield(idf, idfobject, fieldname): """field=object_name, prev_field=object_type. Return the object""" if not fieldname.endswith("Name"): return None # test if prevfieldname ends with "Object_Type" fdnames = idfobject.fieldnames ifieldname = fdnames.index(fieldname) prevfdname = fdnames[ifieldname - 1] if not prevfdname.endswith("Object_Type"): return None objkey = idfobject[prevfdname].upper() objname = idfobject[fieldname] try: foundobj = idf.getobject(objkey, objname) except KeyError as e: return None return foundobj
[ "def", "getobject_use_prevfield", "(", "idf", ",", "idfobject", ",", "fieldname", ")", ":", "if", "not", "fieldname", ".", "endswith", "(", "\"Name\"", ")", ":", "return", "None", "# test if prevfieldname ends with \"Object_Type\"", "fdnames", "=", "idfobject", ".", "fieldnames", "ifieldname", "=", "fdnames", ".", "index", "(", "fieldname", ")", "prevfdname", "=", "fdnames", "[", "ifieldname", "-", "1", "]", "if", "not", "prevfdname", ".", "endswith", "(", "\"Object_Type\"", ")", ":", "return", "None", "objkey", "=", "idfobject", "[", "prevfdname", "]", ".", "upper", "(", ")", "objname", "=", "idfobject", "[", "fieldname", "]", "try", ":", "foundobj", "=", "idf", ".", "getobject", "(", "objkey", ",", "objname", ")", "except", "KeyError", "as", "e", ":", "return", "None", "return", "foundobj" ]
35.823529
0.0016
async def membership(client: Client, membership_signed_raw: str) -> ClientResponse: """ POST a Membership document :param client: Client to connect to the api :param membership_signed_raw: Membership signed raw document :return: """ return await client.post(MODULE + '/membership', {'membership': membership_signed_raw}, rtype=RESPONSE_AIOHTTP)
[ "async", "def", "membership", "(", "client", ":", "Client", ",", "membership_signed_raw", ":", "str", ")", "->", "ClientResponse", ":", "return", "await", "client", ".", "post", "(", "MODULE", "+", "'/membership'", ",", "{", "'membership'", ":", "membership_signed_raw", "}", ",", "rtype", "=", "RESPONSE_AIOHTTP", ")" ]
40.555556
0.008043
def _ws_on_close(self, ws: websocket.WebSocketApp): """Callback for closing the websocket connection Args: ws: websocket connection (now closed) """ self.connected = False self.logger.error('Websocket closed') self._reconnect_websocket()
[ "def", "_ws_on_close", "(", "self", ",", "ws", ":", "websocket", ".", "WebSocketApp", ")", ":", "self", ".", "connected", "=", "False", "self", ".", "logger", ".", "error", "(", "'Websocket closed'", ")", "self", ".", "_reconnect_websocket", "(", ")" ]
32.222222
0.006711
def index_metrics( config, start, end, incremental=False, concurrency=5, accounts=None, period=3600, tag=None, index='policy-metrics', verbose=False): """index policy metrics""" logging.basicConfig(level=(verbose and logging.DEBUG or logging.INFO)) logging.getLogger('botocore').setLevel(logging.WARNING) logging.getLogger('elasticsearch').setLevel(logging.WARNING) logging.getLogger('urllib3').setLevel(logging.WARNING) logging.getLogger('requests').setLevel(logging.WARNING) logging.getLogger('c7n.worker').setLevel(logging.INFO) with open(config) as fh: config = yaml.safe_load(fh.read()) jsonschema.validate(config, CONFIG_SCHEMA) start, end = get_date_range(start, end) p_accounts = set() p_account_stats = {} i_time = i_points = 0 t = time.time() with ProcessPoolExecutor(max_workers=concurrency) as w: futures = {} jobs = [] # Filter for account in config.get('accounts'): if accounts and account['name'] not in accounts: continue if tag: found = False for t in account['tags'].values(): if tag == t: found = True break if not found: continue p_accounts.add((account['name'])) for region in account.get('regions'): for (p_start, p_end) in get_periods(start, end, period): p = (config, index, region, account, p_start, p_end, period) jobs.append(p) # by default we'll be effectively processing in order, but thats bumps # our concurrency into rate limits on metrics retrieval in a given account # region, go ahead and shuffle, at least with lucene, the non ordering # should have minimal impact on query perf (inverted index). random.shuffle(jobs) for j in jobs: log.debug("submit account:%s region:%s start:%s end:%s" % ( j[3]['name'], j[2], j[4], j[5])) futures[w.submit(index_account_metrics, *j)] = j # Process completed for f in as_completed(futures): config, index, region, account, p_start, p_end, period = futures[f] if f.exception(): log.warning("error account:%s region:%s error:%s", account['name'], region, f.exception()) continue rtime, rpoints = f.result() rstat = p_account_stats.setdefault( account['name'], {}).setdefault(region, {'points': 0}) rstat['points'] += rpoints # log.info("complete account:%s, region:%s points:%s time:%0.2f", # account['name'], region, rpoints, rtime) i_time += rtime i_points += rpoints log.info("complete accounts:%d points:%d itime:%0.2f time:%0.2f", len(p_accounts), i_points, i_time, time.time() - t)
[ "def", "index_metrics", "(", "config", ",", "start", ",", "end", ",", "incremental", "=", "False", ",", "concurrency", "=", "5", ",", "accounts", "=", "None", ",", "period", "=", "3600", ",", "tag", "=", "None", ",", "index", "=", "'policy-metrics'", ",", "verbose", "=", "False", ")", ":", "logging", ".", "basicConfig", "(", "level", "=", "(", "verbose", "and", "logging", ".", "DEBUG", "or", "logging", ".", "INFO", ")", ")", "logging", ".", "getLogger", "(", "'botocore'", ")", ".", "setLevel", "(", "logging", ".", "WARNING", ")", "logging", ".", "getLogger", "(", "'elasticsearch'", ")", ".", "setLevel", "(", "logging", ".", "WARNING", ")", "logging", ".", "getLogger", "(", "'urllib3'", ")", ".", "setLevel", "(", "logging", ".", "WARNING", ")", "logging", ".", "getLogger", "(", "'requests'", ")", ".", "setLevel", "(", "logging", ".", "WARNING", ")", "logging", ".", "getLogger", "(", "'c7n.worker'", ")", ".", "setLevel", "(", "logging", ".", "INFO", ")", "with", "open", "(", "config", ")", "as", "fh", ":", "config", "=", "yaml", ".", "safe_load", "(", "fh", ".", "read", "(", ")", ")", "jsonschema", ".", "validate", "(", "config", ",", "CONFIG_SCHEMA", ")", "start", ",", "end", "=", "get_date_range", "(", "start", ",", "end", ")", "p_accounts", "=", "set", "(", ")", "p_account_stats", "=", "{", "}", "i_time", "=", "i_points", "=", "0", "t", "=", "time", ".", "time", "(", ")", "with", "ProcessPoolExecutor", "(", "max_workers", "=", "concurrency", ")", "as", "w", ":", "futures", "=", "{", "}", "jobs", "=", "[", "]", "# Filter", "for", "account", "in", "config", ".", "get", "(", "'accounts'", ")", ":", "if", "accounts", "and", "account", "[", "'name'", "]", "not", "in", "accounts", ":", "continue", "if", "tag", ":", "found", "=", "False", "for", "t", "in", "account", "[", "'tags'", "]", ".", "values", "(", ")", ":", "if", "tag", "==", "t", ":", "found", "=", "True", "break", "if", "not", "found", ":", "continue", "p_accounts", ".", "add", "(", "(", "account", "[", "'name'", "]", ")", ")", "for", "region", "in", "account", ".", "get", "(", "'regions'", ")", ":", "for", "(", "p_start", ",", "p_end", ")", "in", "get_periods", "(", "start", ",", "end", ",", "period", ")", ":", "p", "=", "(", "config", ",", "index", ",", "region", ",", "account", ",", "p_start", ",", "p_end", ",", "period", ")", "jobs", ".", "append", "(", "p", ")", "# by default we'll be effectively processing in order, but thats bumps", "# our concurrency into rate limits on metrics retrieval in a given account", "# region, go ahead and shuffle, at least with lucene, the non ordering", "# should have minimal impact on query perf (inverted index).", "random", ".", "shuffle", "(", "jobs", ")", "for", "j", "in", "jobs", ":", "log", ".", "debug", "(", "\"submit account:%s region:%s start:%s end:%s\"", "%", "(", "j", "[", "3", "]", "[", "'name'", "]", ",", "j", "[", "2", "]", ",", "j", "[", "4", "]", ",", "j", "[", "5", "]", ")", ")", "futures", "[", "w", ".", "submit", "(", "index_account_metrics", ",", "*", "j", ")", "]", "=", "j", "# Process completed", "for", "f", "in", "as_completed", "(", "futures", ")", ":", "config", ",", "index", ",", "region", ",", "account", ",", "p_start", ",", "p_end", ",", "period", "=", "futures", "[", "f", "]", "if", "f", ".", "exception", "(", ")", ":", "log", ".", "warning", "(", "\"error account:%s region:%s error:%s\"", ",", "account", "[", "'name'", "]", ",", "region", ",", "f", ".", "exception", "(", ")", ")", "continue", "rtime", ",", "rpoints", "=", "f", ".", "result", "(", ")", "rstat", "=", 
"p_account_stats", ".", "setdefault", "(", "account", "[", "'name'", "]", ",", "{", "}", ")", ".", "setdefault", "(", "region", ",", "{", "'points'", ":", "0", "}", ")", "rstat", "[", "'points'", "]", "+=", "rpoints", "# log.info(\"complete account:%s, region:%s points:%s time:%0.2f\",", "# account['name'], region, rpoints, rtime)", "i_time", "+=", "rtime", "i_points", "+=", "rpoints", "log", ".", "info", "(", "\"complete accounts:%d points:%d itime:%0.2f time:%0.2f\"", ",", "len", "(", "p_accounts", ")", ",", "i_points", ",", "i_time", ",", "time", ".", "time", "(", ")", "-", "t", ")" ]
39.826667
0.00098
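`get_periods` and `index_account_metrics` are helpers that are not part of this row; a minimal sketch of what the period splitter might look like, assuming datetime bounds and a period given in seconds:

from datetime import timedelta

def get_periods(start, end, period):
    # split [start, end) into consecutive windows of at most `period` seconds
    step = timedelta(seconds=period)
    periods = []
    cursor = start
    while cursor < end:
        periods.append((cursor, min(cursor + step, end)))
        cursor += step
    return periods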
def __calculate_farthest_distance(self, index_cluster1, index_cluster2):
    """!
    @brief Finds the two farthest objects between two specified clusters and returns the distance between them.

    @param[in] (uint) Index of the first cluster.
    @param[in] (uint) Index of the second cluster.

    @return The farthest squared Euclidean distance between the two clusters.

    """
    candidate_maximum_distance = 0.0;
    for index_object1 in self.__clusters[index_cluster1]:
        for index_object2 in self.__clusters[index_cluster2]:
            distance = euclidean_distance_square(self.__pointer_data[index_object1], self.__pointer_data[index_object2]);

            if (distance > candidate_maximum_distance):
                candidate_maximum_distance = distance;

    return candidate_maximum_distance;
[ "def", "__calculate_farthest_distance", "(", "self", ",", "index_cluster1", ",", "index_cluster2", ")", ":", "candidate_maximum_distance", "=", "0.0", "for", "index_object1", "in", "self", ".", "__clusters", "[", "index_cluster1", "]", ":", "for", "index_object2", "in", "self", ".", "__clusters", "[", "index_cluster2", "]", ":", "distance", "=", "euclidean_distance_square", "(", "self", ".", "__pointer_data", "[", "index_object1", "]", ",", "self", ".", "__pointer_data", "[", "index_object2", "]", ")", "if", "(", "distance", ">", "candidate_maximum_distance", ")", ":", "candidate_maximum_distance", "=", "distance", "return", "candidate_maximum_distance" ]
46.157895
0.014525
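The complete-linkage idea above (take the maximum pairwise squared Euclidean distance between the two clusters) in a standalone form, independent of the class internals:

def farthest_squared_distance(points_a, points_b):
    # maximum squared Euclidean distance over all cross-cluster pairs
    return max(
        sum((x - y) ** 2 for x, y in zip(p, q))
        for p in points_a
        for q in points_b
    )

farthest_squared_distance([(0, 0), (1, 0)], [(3, 0), (5, 0)])   # 25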
def cli(variant_file, vep, split): """Parses a vcf file.\n \n Usage:\n parser infile.vcf\n If pipe:\n parser - """ from datetime import datetime from pprint import pprint as pp if variant_file == '-': my_parser = VCFParser(fsock=sys.stdin, split_variants=split) else: my_parser = VCFParser(infile = variant_file, split_variants=split) start = datetime.now() nr_of_variants = 0 for line in my_parser.metadata.print_header(): print(line) for variant in my_parser: pp(variant) nr_of_variants += 1 print('Number of variants: %s' % nr_of_variants)
[ "def", "cli", "(", "variant_file", ",", "vep", ",", "split", ")", ":", "from", "datetime", "import", "datetime", "from", "pprint", "import", "pprint", "as", "pp", "if", "variant_file", "==", "'-'", ":", "my_parser", "=", "VCFParser", "(", "fsock", "=", "sys", ".", "stdin", ",", "split_variants", "=", "split", ")", "else", ":", "my_parser", "=", "VCFParser", "(", "infile", "=", "variant_file", ",", "split_variants", "=", "split", ")", "start", "=", "datetime", ".", "now", "(", ")", "nr_of_variants", "=", "0", "for", "line", "in", "my_parser", ".", "metadata", ".", "print_header", "(", ")", ":", "print", "(", "line", ")", "for", "variant", "in", "my_parser", ":", "pp", "(", "variant", ")", "nr_of_variants", "+=", "1", "print", "(", "'Number of variants: %s'", "%", "nr_of_variants", ")" ]
29.727273
0.005926
def new_dxfile(mode=None, write_buffer_size=dxfile.DEFAULT_BUFFER_SIZE, expected_file_size=None, file_is_mmapd=False, **kwargs): ''' :param mode: One of "w" or "a" for write and append modes, respectively :type mode: string :rtype: :class:`~dxpy.bindings.dxfile.DXFile` Additional optional parameters not listed: all those under :func:`dxpy.bindings.DXDataObject.new`. Creates a new remote file object that is ready to be written to; returns a :class:`~dxpy.bindings.dxfile.DXFile` object that is a writable file-like object. Example:: with new_dxfile(media_type="application/json") as fd: fd.write("foo\\n") Note that this is shorthand for:: dxFile = DXFile() dxFile.new(**kwargs) ''' dx_file = DXFile(mode=mode, write_buffer_size=write_buffer_size, expected_file_size=expected_file_size, file_is_mmapd=file_is_mmapd) dx_file.new(**kwargs) return dx_file
[ "def", "new_dxfile", "(", "mode", "=", "None", ",", "write_buffer_size", "=", "dxfile", ".", "DEFAULT_BUFFER_SIZE", ",", "expected_file_size", "=", "None", ",", "file_is_mmapd", "=", "False", ",", "*", "*", "kwargs", ")", ":", "dx_file", "=", "DXFile", "(", "mode", "=", "mode", ",", "write_buffer_size", "=", "write_buffer_size", ",", "expected_file_size", "=", "expected_file_size", ",", "file_is_mmapd", "=", "file_is_mmapd", ")", "dx_file", ".", "new", "(", "*", "*", "kwargs", ")", "return", "dx_file" ]
33.310345
0.003018
def load_tagged_classes(cls, tag, fail_silently=True):
    """
    Produce a sequence of all XBlock classes tagged with `tag`.

    fail_silently causes the code to simply log warnings if a plugin cannot
    import. The goal is to be able to use part of libraries from an XBlock
    (and thus have it installed), even if the overall XBlock cannot be used
    (e.g. depends on Django in a non-Django application).

    There is disagreement about whether this is a good idea, or whether we
    should see failures early (e.g. on startup or first page load), and in
    what contexts. Hence, the flag.
    """
    # Allow this method to access the `_class_tags`
    # pylint: disable=W0212
    for name, class_ in cls.load_classes(fail_silently):
        if tag in class_._class_tags:
            yield name, class_
[ "def", "load_tagged_classes", "(", "cls", ",", "tag", ",", "fail_silently", "=", "True", ")", ":", "# Allow this method to access the `_class_tags`", "# pylint: disable=W0212", "for", "name", ",", "class_", "in", "cls", ".", "load_classes", "(", "fail_silently", ")", ":", "if", "tag", "in", "class_", ".", "_class_tags", ":", "yield", "name", ",", "class_" ]
47.888889
0.002275
def become(cls, session_token):
    """
    Retrieve the user object associated with a session token.

    :param session_token: the user's session token
    :return: leancloud.User
    """
    response = client.get('/users/me', params={'session_token': session_token})
    content = response.json()
    user = cls()
    user._update_data(content)
    user._handle_save_result(True)
    if 'smsCode' not in content:
        user._attributes.pop('smsCode', None)
    return user
[ "def", "become", "(", "cls", ",", "session_token", ")", ":", "response", "=", "client", ".", "get", "(", "'/users/me'", ",", "params", "=", "{", "'session_token'", ":", "session_token", "}", ")", "content", "=", "response", ".", "json", "(", ")", "user", "=", "cls", "(", ")", "user", ".", "_update_data", "(", "content", ")", "user", ".", "_handle_save_result", "(", "True", ")", "if", "'smsCode'", "not", "in", "content", ":", "user", ".", "_attributes", ".", "pop", "(", "'smsCode'", ",", "None", ")", "return", "user" ]
31.6
0.006148
def _prepare_uri(self, path, query_params={}): """ Prepares a full URI with the selected information. ``path``: Path can be in one of two formats: - If :attr:`server` was defined, the ``path`` will be appended to the existing host, or - an absolute URL ``query_params``: Used to generate a query string, which will be appended to the end of the absolute URL. Returns an absolute URL. """ query_str = urllib.urlencode(query_params) # If we have a relative path (as opposed to a full URL), build it of # the connection info if path.startswith('/') and self.server: protocol = self.protocol server = self.server else: protocol, server, path, _, _, _ = urlparse.urlparse(path) assert server, "%s is not a valid URL" % path return urlparse.urlunparse(( protocol, server, path, None, query_str, None))
[ "def", "_prepare_uri", "(", "self", ",", "path", ",", "query_params", "=", "{", "}", ")", ":", "query_str", "=", "urllib", ".", "urlencode", "(", "query_params", ")", "# If we have a relative path (as opposed to a full URL), build it of", "# the connection info", "if", "path", ".", "startswith", "(", "'/'", ")", "and", "self", ".", "server", ":", "protocol", "=", "self", ".", "protocol", "server", "=", "self", ".", "server", "else", ":", "protocol", ",", "server", ",", "path", ",", "_", ",", "_", ",", "_", "=", "urlparse", ".", "urlparse", "(", "path", ")", "assert", "server", ",", "\"%s is not a valid URL\"", "%", "path", "return", "urlparse", ".", "urlunparse", "(", "(", "protocol", ",", "server", ",", "path", ",", "None", ",", "query_str", ",", "None", ")", ")" ]
34.965517
0.001919
def get_dimension_from_db_by_name(dimension_name): """ Gets a dimension from the DB table. """ try: dimension = db.DBSession.query(Dimension).filter(Dimension.name==dimension_name).one() return JSONObject(dimension) except NoResultFound: raise ResourceNotFoundError("Dimension %s not found"%(dimension_name))
[ "def", "get_dimension_from_db_by_name", "(", "dimension_name", ")", ":", "try", ":", "dimension", "=", "db", ".", "DBSession", ".", "query", "(", "Dimension", ")", ".", "filter", "(", "Dimension", ".", "name", "==", "dimension_name", ")", ".", "one", "(", ")", "return", "JSONObject", "(", "dimension", ")", "except", "NoResultFound", ":", "raise", "ResourceNotFoundError", "(", "\"Dimension %s not found\"", "%", "(", "dimension_name", ")", ")" ]
38.666667
0.011236
def _find_countour_yaml(start, checked, names=None): """Traverse the directory tree identified by start until a directory already in checked is encountered or the path of countour.yaml is found. Checked is present both to make the loop termination easy to reason about and so the same directories do not get rechecked Args: start: the path to start looking in and work upward from checked: the set of already checked directories Returns: the path of the countour.yaml file or None if it is not found """ extensions = [] if names: for name in names: if not os.path.splitext(name)[1]: extensions.append(name + ".yaml") extensions.append(name + ".yml") yaml_names = (names or []) + CONTOUR_YAML_NAMES + extensions directory = start while directory not in checked: checked.add(directory) for fs_yaml_name in yaml_names: yaml_path = os.path.join(directory, fs_yaml_name) if os.path.exists(yaml_path): return yaml_path directory = os.path.dirname(directory) return
[ "def", "_find_countour_yaml", "(", "start", ",", "checked", ",", "names", "=", "None", ")", ":", "extensions", "=", "[", "]", "if", "names", ":", "for", "name", "in", "names", ":", "if", "not", "os", ".", "path", ".", "splitext", "(", "name", ")", "[", "1", "]", ":", "extensions", ".", "append", "(", "name", "+", "\".yaml\"", ")", "extensions", ".", "append", "(", "name", "+", "\".yml\"", ")", "yaml_names", "=", "(", "names", "or", "[", "]", ")", "+", "CONTOUR_YAML_NAMES", "+", "extensions", "directory", "=", "start", "while", "directory", "not", "in", "checked", ":", "checked", ".", "add", "(", "directory", ")", "for", "fs_yaml_name", "in", "yaml_names", ":", "yaml_path", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "fs_yaml_name", ")", "if", "os", ".", "path", ".", "exists", "(", "yaml_path", ")", ":", "return", "yaml_path", "directory", "=", "os", ".", "path", ".", "dirname", "(", "directory", ")", "return" ]
28.974359
0.000856
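A usage sketch of the upward search, assuming it runs in the module that defines `CONTOUR_YAML_NAMES`:

import os

checked = set()
path = _find_countour_yaml(os.getcwd(), checked, names=['contour'])
# looks for contour.yaml / contour.yml (and the module's default names) in the
# current directory and every parent; the walk terminates because
# os.path.dirname('/') is '/', which ends up in `checked`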
def unders_to_dashes_in_keys(self) -> None: """Replaces underscores with dashes in key names. For each attribute in a mapping, this replaces any underscores \ in its keys with dashes. Handy because Python does not \ accept dashes in identifiers, while some YAML-based formats use \ dashes in their keys. """ for key_node, _ in self.yaml_node.value: key_node.value = key_node.value.replace('_', '-')
[ "def", "unders_to_dashes_in_keys", "(", "self", ")", "->", "None", ":", "for", "key_node", ",", "_", "in", "self", ".", "yaml_node", ".", "value", ":", "key_node", ".", "value", "=", "key_node", ".", "value", ".", "replace", "(", "'_'", ",", "'-'", ")" ]
45.8
0.004283
def parse_phones(self): """Parse TextGrid phone intervals. This method parses the phone intervals in a TextGrid to extract each phone and each phone's start and end times in the audio recording. For each phone, it instantiates the class Phone(), with the phone and its start and end times as attributes of that class instance. """ phones = [] for i in self.phone_intervals: start = float(i[i.index('xmin = ')+7: i.index('xmin = ')+12].strip('\t').strip('\n')) end = float(i[i.index('xmax = ')+7: i.index('xmax = ')+12].strip('\t').strip('\n')) phone = i[i.index('\"')+1:i.index("$")] phones.append(Phone(phone, start, end)) return phones
[ "def", "parse_phones", "(", "self", ")", ":", "phones", "=", "[", "]", "for", "i", "in", "self", ".", "phone_intervals", ":", "start", "=", "float", "(", "i", "[", "i", ".", "index", "(", "'xmin = '", ")", "+", "7", ":", "i", ".", "index", "(", "'xmin = '", ")", "+", "12", "]", ".", "strip", "(", "'\\t'", ")", ".", "strip", "(", "'\\n'", ")", ")", "end", "=", "float", "(", "i", "[", "i", ".", "index", "(", "'xmax = '", ")", "+", "7", ":", "i", ".", "index", "(", "'xmax = '", ")", "+", "12", "]", ".", "strip", "(", "'\\t'", ")", ".", "strip", "(", "'\\n'", ")", ")", "phone", "=", "i", "[", "i", ".", "index", "(", "'\\\"'", ")", "+", "1", ":", "i", ".", "index", "(", "\"$\"", ")", "]", "phones", ".", "append", "(", "Phone", "(", "phone", ",", "start", ",", "end", ")", ")", "return", "phones" ]
41.6
0.007051
def write_payload(payload=None, objectInput=None): """ This function writes a base64 payload or file object on disk. Args: payload (string): payload in base64 objectInput (object): file object/standard input to analyze Returns: Path of file """ temp = tempfile.mkstemp()[1] log.debug("Write payload in temp file {!r}".format(temp)) with open(temp, 'wb') as f: if payload: payload = base64.b64decode(payload) elif objectInput: if six.PY3: payload = objectInput.buffer.read() elif six.PY2: payload = objectInput.read() f.write(payload) return temp
[ "def", "write_payload", "(", "payload", "=", "None", ",", "objectInput", "=", "None", ")", ":", "temp", "=", "tempfile", ".", "mkstemp", "(", ")", "[", "1", "]", "log", ".", "debug", "(", "\"Write payload in temp file {!r}\"", ".", "format", "(", "temp", ")", ")", "with", "open", "(", "temp", ",", "'wb'", ")", "as", "f", ":", "if", "payload", ":", "payload", "=", "base64", ".", "b64decode", "(", "payload", ")", "elif", "objectInput", ":", "if", "six", ".", "PY3", ":", "payload", "=", "objectInput", ".", "buffer", ".", "read", "(", ")", "elif", "six", ".", "PY2", ":", "payload", "=", "objectInput", ".", "read", "(", ")", "f", ".", "write", "(", "payload", ")", "return", "temp" ]
25.074074
0.001422
def get_contents_entry(node): """Fetch the contents of the entry. Returns the exact binary contents of the file.""" try: node = node.disambiguate(must_exist=1) except SCons.Errors.UserError: # There was nothing on disk with which to disambiguate # this entry. Leave it as an Entry, but return a null # string so calls to get_contents() in emitters and the # like (e.g. in qt.py) don't have to disambiguate by hand # or catch the exception. return '' else: return _get_contents_map[node._func_get_contents](node)
[ "def", "get_contents_entry", "(", "node", ")", ":", "try", ":", "node", "=", "node", ".", "disambiguate", "(", "must_exist", "=", "1", ")", "except", "SCons", ".", "Errors", ".", "UserError", ":", "# There was nothing on disk with which to disambiguate", "# this entry. Leave it as an Entry, but return a null", "# string so calls to get_contents() in emitters and the", "# like (e.g. in qt.py) don't have to disambiguate by hand", "# or catch the exception.", "return", "''", "else", ":", "return", "_get_contents_map", "[", "node", ".", "_func_get_contents", "]", "(", "node", ")" ]
41.714286
0.001675
def get_or_add_video_media_part(self, video): """Return rIds for media and video relationships to media part. A new |MediaPart| object is created if it does not already exist (such as would occur if the same video appeared more than once in a presentation). Two relationships to the media part are created, one each with MEDIA and VIDEO relationship types. The need for two appears to be for legacy support for an earlier (pre-Office 2010) PowerPoint media embedding strategy. """ media_part = self._package.get_or_add_media_part(video) media_rId = self.relate_to(media_part, RT.MEDIA) video_rId = self.relate_to(media_part, RT.VIDEO) return media_rId, video_rId
[ "def", "get_or_add_video_media_part", "(", "self", ",", "video", ")", ":", "media_part", "=", "self", ".", "_package", ".", "get_or_add_media_part", "(", "video", ")", "media_rId", "=", "self", ".", "relate_to", "(", "media_part", ",", "RT", ".", "MEDIA", ")", "video_rId", "=", "self", ".", "relate_to", "(", "media_part", ",", "RT", ".", "VIDEO", ")", "return", "media_rId", ",", "video_rId" ]
53.357143
0.002632
def add_transition (self, input_symbol, state, action=None, next_state=None): '''This adds a transition that associates: (input_symbol, current_state) --> (action, next_state) The action may be set to None in which case the process() method will ignore the action and only set the next_state. The next_state may be set to None in which case the current state will be unchanged. You can also set transitions for a list of symbols by using add_transition_list(). ''' if next_state is None: next_state = state self.state_transitions[(input_symbol, state)] = (action, next_state)
[ "def", "add_transition", "(", "self", ",", "input_symbol", ",", "state", ",", "action", "=", "None", ",", "next_state", "=", "None", ")", ":", "if", "next_state", "is", "None", ":", "next_state", "=", "state", "self", ".", "state_transitions", "[", "(", "input_symbol", ",", "state", ")", "]", "=", "(", "action", ",", "next_state", ")" ]
41.0625
0.004464
def configure(self): """ Configure SPI controller with the SPI mode and operating frequency """ # Convert standard SPI sheme to USBISS scheme lookup_table = [0, 2, 1, 3] mode = lookup_table[self._mode] # Add signal for SPI switch iss_mode = self._usbiss.SPI_MODE + mode # Configure USB-ISS self._usbiss.mode = [iss_mode, self.sck_divisor]
[ "def", "configure", "(", "self", ")", ":", "# Convert standard SPI sheme to USBISS scheme", "lookup_table", "=", "[", "0", ",", "2", ",", "1", ",", "3", "]", "mode", "=", "lookup_table", "[", "self", ".", "_mode", "]", "# Add signal for SPI switch", "iss_mode", "=", "self", ".", "_usbiss", ".", "SPI_MODE", "+", "mode", "# Configure USB-ISS", "self", ".", "_usbiss", ".", "mode", "=", "[", "iss_mode", ",", "self", ".", "sck_divisor", "]" ]
29.142857
0.004751
def apply_effect(layer, image): """Apply effect to the image. ..note: Correct effect order is the following. All the effects are first applied to the original image then blended together. * dropshadow * outerglow * (original) * patternoverlay * gradientoverlay * coloroverlay * innershadow * innerglow * bevelemboss * satin * stroke """ for effect in layer.effects: if effect.__class__.__name__ == 'PatternOverlay': draw_pattern_fill(image, layer._psd, effect.value) for effect in layer.effects: if effect.__class__.__name__ == 'GradientOverlay': draw_gradient_fill(image, effect.value) for effect in layer.effects: if effect.__class__.__name__ == 'ColorOverlay': draw_solid_color_fill(image, effect.value)
[ "def", "apply_effect", "(", "layer", ",", "image", ")", ":", "for", "effect", "in", "layer", ".", "effects", ":", "if", "effect", ".", "__class__", ".", "__name__", "==", "'PatternOverlay'", ":", "draw_pattern_fill", "(", "image", ",", "layer", ".", "_psd", ",", "effect", ".", "value", ")", "for", "effect", "in", "layer", ".", "effects", ":", "if", "effect", ".", "__class__", ".", "__name__", "==", "'GradientOverlay'", ":", "draw_gradient_fill", "(", "image", ",", "effect", ".", "value", ")", "for", "effect", "in", "layer", ".", "effects", ":", "if", "effect", ".", "__class__", ".", "__name__", "==", "'ColorOverlay'", ":", "draw_solid_color_fill", "(", "image", ",", "effect", ".", "value", ")" ]
29.724138
0.001124
def bsp_traverse_pre_order( node: tcod.bsp.BSP, callback: Callable[[tcod.bsp.BSP, Any], None], userData: Any = 0, ) -> None: """Traverse this nodes hierarchy with a callback. .. deprecated:: 2.0 Use :any:`BSP.pre_order` instead. """ _bsp_traverse(node.pre_order(), callback, userData)
[ "def", "bsp_traverse_pre_order", "(", "node", ":", "tcod", ".", "bsp", ".", "BSP", ",", "callback", ":", "Callable", "[", "[", "tcod", ".", "bsp", ".", "BSP", ",", "Any", "]", ",", "None", "]", ",", "userData", ":", "Any", "=", "0", ",", ")", "->", "None", ":", "_bsp_traverse", "(", "node", ".", "pre_order", "(", ")", ",", "callback", ",", "userData", ")" ]
28.181818
0.003125
def to_protobuf(self): """Return a protobuf corresponding to the key. :rtype: :class:`.entity_pb2.Key` :returns: The protobuf representing the key. """ key = _entity_pb2.Key() key.partition_id.project_id = self.project if self.namespace: key.partition_id.namespace_id = self.namespace for item in self.path: element = key.path.add() if "kind" in item: element.kind = item["kind"] if "id" in item: element.id = item["id"] if "name" in item: element.name = item["name"] return key
[ "def", "to_protobuf", "(", "self", ")", ":", "key", "=", "_entity_pb2", ".", "Key", "(", ")", "key", ".", "partition_id", ".", "project_id", "=", "self", ".", "project", "if", "self", ".", "namespace", ":", "key", ".", "partition_id", ".", "namespace_id", "=", "self", ".", "namespace", "for", "item", "in", "self", ".", "path", ":", "element", "=", "key", ".", "path", ".", "add", "(", ")", "if", "\"kind\"", "in", "item", ":", "element", ".", "kind", "=", "item", "[", "\"kind\"", "]", "if", "\"id\"", "in", "item", ":", "element", ".", "id", "=", "item", "[", "\"id\"", "]", "if", "\"name\"", "in", "item", ":", "element", ".", "name", "=", "item", "[", "\"name\"", "]", "return", "key" ]
29.136364
0.003021
def goto_time(timeval): '''Go to a specific time (in nanoseconds) in the current trajectory. ''' i = bisect.bisect(viewer.frame_times, timeval * 1000) goto_frame(i)
[ "def", "goto_time", "(", "timeval", ")", ":", "i", "=", "bisect", ".", "bisect", "(", "viewer", ".", "frame_times", ",", "timeval", "*", "1000", ")", "goto_frame", "(", "i", ")" ]
25.571429
0.005405
def update(domain): """ Update language-specific translations files with new keys discovered by ``flask babel extract``. """ translations_dir = _get_translations_dir() domain = _get_translations_domain(domain) pot = os.path.join(translations_dir, f'{domain}.pot') return _run(f'update -i {pot} -d {translations_dir} --domain={domain}')
[ "def", "update", "(", "domain", ")", ":", "translations_dir", "=", "_get_translations_dir", "(", ")", "domain", "=", "_get_translations_domain", "(", "domain", ")", "pot", "=", "os", ".", "path", ".", "join", "(", "translations_dir", ",", "f'{domain}.pot'", ")", "return", "_run", "(", "f'update -i {pot} -d {translations_dir} --domain={domain}'", ")" ]
39.888889
0.002725
def get_mass(self): '''Returns mass''' mass = parsers.get_mass(self.__chebi_id) if math.isnan(mass): mass = parsers.get_mass(self.get_parent_id()) if math.isnan(mass): for parent_or_child_id in self.__get_all_ids(): mass = parsers.get_mass(parent_or_child_id) if not math.isnan(mass): break return mass
[ "def", "get_mass", "(", "self", ")", ":", "mass", "=", "parsers", ".", "get_mass", "(", "self", ".", "__chebi_id", ")", "if", "math", ".", "isnan", "(", "mass", ")", ":", "mass", "=", "parsers", ".", "get_mass", "(", "self", ".", "get_parent_id", "(", ")", ")", "if", "math", ".", "isnan", "(", "mass", ")", ":", "for", "parent_or_child_id", "in", "self", ".", "__get_all_ids", "(", ")", ":", "mass", "=", "parsers", ".", "get_mass", "(", "parent_or_child_id", ")", "if", "not", "math", ".", "isnan", "(", "mass", ")", ":", "break", "return", "mass" ]
27.2
0.004739
def load_data(self, filename, *args, **kwargs): """ load data from text file. :param filename: name of text file to read :type filename: str :returns: data read from file using :func:`numpy.loadtxt` :rtype: dict """ # header keys header_param = self.parameters.get('header') # default is None # data keys data_param = self.parameters['data'] # raises KeyError if no 'data' dtype = data_param['dtype'] # raises KeyError if no 'dtype' # convert to tuple and normal ASCII _utf8_list_to_ascii_tuple(dtype) if dtype else None # -> tuple of str delimiter = data_param.get('delimiter') # default is None skiprows = data_param.get('skiprows') # default is None data_units = data_param.get('units', {}) # default is an empty dict data = {} # a dictionary for data # open file for reading with open(filename, 'r') as fid: # read header if header_param: data.update(_read_header(fid, header_param)) fid.seek(0) # move cursor back to beginning # read data data_data = np.loadtxt(fid, dtype, delimiter=delimiter, skiprows=skiprows) # apply units data.update(_apply_units(data_data, data_units, fid.name)) return data
[ "def", "load_data", "(", "self", ",", "filename", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# header keys", "header_param", "=", "self", ".", "parameters", ".", "get", "(", "'header'", ")", "# default is None", "# data keys", "data_param", "=", "self", ".", "parameters", "[", "'data'", "]", "# raises KeyError if no 'data'", "dtype", "=", "data_param", "[", "'dtype'", "]", "# raises KeyError if no 'dtype'", "# convert to tuple and normal ASCII", "_utf8_list_to_ascii_tuple", "(", "dtype", ")", "if", "dtype", "else", "None", "# -> tuple of str", "delimiter", "=", "data_param", ".", "get", "(", "'delimiter'", ")", "# default is None", "skiprows", "=", "data_param", ".", "get", "(", "'skiprows'", ")", "# default is None", "data_units", "=", "data_param", ".", "get", "(", "'units'", ",", "{", "}", ")", "# default is an empty dict", "data", "=", "{", "}", "# a dictionary for data", "# open file for reading", "with", "open", "(", "filename", ",", "'r'", ")", "as", "fid", ":", "# read header", "if", "header_param", ":", "data", ".", "update", "(", "_read_header", "(", "fid", ",", "header_param", ")", ")", "fid", ".", "seek", "(", "0", ")", "# move cursor back to beginning", "# read data", "data_data", "=", "np", ".", "loadtxt", "(", "fid", ",", "dtype", ",", "delimiter", "=", "delimiter", ",", "skiprows", "=", "skiprows", ")", "# apply units", "data", ".", "update", "(", "_apply_units", "(", "data_data", ",", "data_units", ",", "fid", ".", "name", ")", ")", "return", "data" ]
43.15625
0.001416
def set(self, key, value): """Sets the value for the key in the override register. Will be used instead of values obtained via args, config file, env, defaults or key/value store. """ k = self._real_key(key.lower()) self._override[k] = value
[ "def", "set", "(", "self", ",", "key", ",", "value", ")", ":", "k", "=", "self", ".", "_real_key", "(", "key", ".", "lower", "(", ")", ")", "self", ".", "_override", "[", "k", "]", "=", "value" ]
40.428571
0.00692
def ROC_AUC_analysis(adata,groupby,group=None, n_genes=100): """Calculate correlation matrix. Calculate a correlation matrix for genes strored in sample annotation using rank_genes_groups.py Parameters ---------- adata : :class:`~anndata.AnnData` Annotated data matrix. groupby : `str` The key of the sample grouping to consider. group : `str`, int, optional (default: None) Group name or index for which the correlation matrix for top_ranked genes should be calculated. If no parameter is passed, ROC/AUC is calculated for all groups n_genes : `int`, optional (default: 100) For how many genes to calculate ROC and AUC. If no parameter is passed, calculation is done for all stored top ranked genes. """ if group is None: pass # TODO: Loop over all groups instead of just taking one. # Assume group takes an int value for one group for the moment. name_list = list() for j, k in enumerate(adata.uns['rank_genes_groups_gene_names']): if j >= n_genes: break name_list.append(adata.uns['rank_genes_groups_gene_names'][j][group]) # TODO: For the moment, see that everything works for comparison against the rest. Resolve issues later. groups = 'all' groups_order, groups_masks = utils.select_groups( adata, groups, groupby) # Use usual convention, better for looping later. imask = group mask = groups_masks[group] # TODO: Allow for sample weighting requires better mask access... later # We store calculated data in dict, access it via dict to dict. Check if this is the best way. fpr={} tpr={} thresholds={} roc_auc={} y_true=mask for i, j in enumerate(name_list): vec=adata[:,[j]].X if issparse(vec): y_score = vec.todense() else: y_score = vec fpr[name_list[i]], tpr[name_list[i]], thresholds[name_list[i]] = metrics.roc_curve(y_true, y_score, pos_label=None, sample_weight=None, drop_intermediate=False) roc_auc[name_list[i]]=metrics.auc(fpr[name_list[i]],tpr[name_list[i]]) adata.uns['ROCfpr' +groupby+ str(group)] = fpr adata.uns['ROCtpr' +groupby+ str(group)] = tpr adata.uns['ROCthresholds' +groupby+ str(group)] = thresholds adata.uns['ROC_AUC' + groupby + str(group)] = roc_auc
[ "def", "ROC_AUC_analysis", "(", "adata", ",", "groupby", ",", "group", "=", "None", ",", "n_genes", "=", "100", ")", ":", "if", "group", "is", "None", ":", "pass", "# TODO: Loop over all groups instead of just taking one.", "# Assume group takes an int value for one group for the moment.", "name_list", "=", "list", "(", ")", "for", "j", ",", "k", "in", "enumerate", "(", "adata", ".", "uns", "[", "'rank_genes_groups_gene_names'", "]", ")", ":", "if", "j", ">=", "n_genes", ":", "break", "name_list", ".", "append", "(", "adata", ".", "uns", "[", "'rank_genes_groups_gene_names'", "]", "[", "j", "]", "[", "group", "]", ")", "# TODO: For the moment, see that everything works for comparison against the rest. Resolve issues later.", "groups", "=", "'all'", "groups_order", ",", "groups_masks", "=", "utils", ".", "select_groups", "(", "adata", ",", "groups", ",", "groupby", ")", "# Use usual convention, better for looping later.", "imask", "=", "group", "mask", "=", "groups_masks", "[", "group", "]", "# TODO: Allow for sample weighting requires better mask access... later", "# We store calculated data in dict, access it via dict to dict. Check if this is the best way.", "fpr", "=", "{", "}", "tpr", "=", "{", "}", "thresholds", "=", "{", "}", "roc_auc", "=", "{", "}", "y_true", "=", "mask", "for", "i", ",", "j", "in", "enumerate", "(", "name_list", ")", ":", "vec", "=", "adata", "[", ":", ",", "[", "j", "]", "]", ".", "X", "if", "issparse", "(", "vec", ")", ":", "y_score", "=", "vec", ".", "todense", "(", ")", "else", ":", "y_score", "=", "vec", "fpr", "[", "name_list", "[", "i", "]", "]", ",", "tpr", "[", "name_list", "[", "i", "]", "]", ",", "thresholds", "[", "name_list", "[", "i", "]", "]", "=", "metrics", ".", "roc_curve", "(", "y_true", ",", "y_score", ",", "pos_label", "=", "None", ",", "sample_weight", "=", "None", ",", "drop_intermediate", "=", "False", ")", "roc_auc", "[", "name_list", "[", "i", "]", "]", "=", "metrics", ".", "auc", "(", "fpr", "[", "name_list", "[", "i", "]", "]", ",", "tpr", "[", "name_list", "[", "i", "]", "]", ")", "adata", ".", "uns", "[", "'ROCfpr'", "+", "groupby", "+", "str", "(", "group", ")", "]", "=", "fpr", "adata", ".", "uns", "[", "'ROCtpr'", "+", "groupby", "+", "str", "(", "group", ")", "]", "=", "tpr", "adata", ".", "uns", "[", "'ROCthresholds'", "+", "groupby", "+", "str", "(", "group", ")", "]", "=", "thresholds", "adata", ".", "uns", "[", "'ROC_AUC'", "+", "groupby", "+", "str", "(", "group", ")", "]", "=", "roc_auc" ]
39.852459
0.010036
def get_user_timer(self, index): """Get the current value of a user timer.""" err, tick = self.clock_manager.get_tick(index) return [err, tick]
[ "def", "get_user_timer", "(", "self", ",", "index", ")", ":", "err", ",", "tick", "=", "self", ".", "clock_manager", ".", "get_tick", "(", "index", ")", "return", "[", "err", ",", "tick", "]" ]
32.8
0.011905
def map(self, func: Callable[[Tuple[Any, Log]], Tuple[Any, Log]]) -> 'Writer': """Map a function func over the Writer value. Haskell: fmap f m = Writer $ let (a, w) = runWriter m in (f a, w) Keyword arguments: func -- Mapper function: """ a, w = self.run() b, _w = func((a, w)) return Writer(b, _w)
[ "def", "map", "(", "self", ",", "func", ":", "Callable", "[", "[", "Tuple", "[", "Any", ",", "Log", "]", "]", ",", "Tuple", "[", "Any", ",", "Log", "]", "]", ")", "->", "'Writer'", ":", "a", ",", "w", "=", "self", ".", "run", "(", ")", "b", ",", "_w", "=", "func", "(", "(", "a", ",", "w", ")", ")", "return", "Writer", "(", "b", ",", "_w", ")" ]
30.083333
0.005376
def set_guess_to_fit_result(self): """ If you have a fit result, set the guess parameters to the fit parameters. """ if self.results is None: print("No fit results to use! Run fit() first.") return # loop over the results and set the guess values for n in range(len(self._pguess)): self._pguess[n] = self.results[0][n] if self['autoplot']: self.plot() return self
[ "def", "set_guess_to_fit_result", "(", "self", ")", ":", "if", "self", ".", "results", "is", "None", ":", "print", "(", "\"No fit results to use! Run fit() first.\"", ")", "return", "# loop over the results and set the guess values", "for", "n", "in", "range", "(", "len", "(", "self", ".", "_pguess", ")", ")", ":", "self", ".", "_pguess", "[", "n", "]", "=", "self", ".", "results", "[", "0", "]", "[", "n", "]", "if", "self", "[", "'autoplot'", "]", ":", "self", ".", "plot", "(", ")", "return", "self" ]
29.866667
0.008658
def proxy_manager_for(self, proxy, **proxy_kwargs): """Called to initialize the HTTPAdapter when a proxy is used.""" try: proxy_kwargs['ssl_version'] = ssl.PROTOCOL_TLS except AttributeError: proxy_kwargs['ssl_version'] = ssl.PROTOCOL_SSLv23 return super(SSLAdapter, self).proxy_manager_for(proxy, **proxy_kwargs)
[ "def", "proxy_manager_for", "(", "self", ",", "proxy", ",", "*", "*", "proxy_kwargs", ")", ":", "try", ":", "proxy_kwargs", "[", "'ssl_version'", "]", "=", "ssl", ".", "PROTOCOL_TLS", "except", "AttributeError", ":", "proxy_kwargs", "[", "'ssl_version'", "]", "=", "ssl", ".", "PROTOCOL_SSLv23", "return", "super", "(", "SSLAdapter", ",", "self", ")", ".", "proxy_manager_for", "(", "proxy", ",", "*", "*", "proxy_kwargs", ")" ]
51.857143
0.00542
def get_routing_attributes(obj, modify_doc=False, keys=None): """ Loops through the provided object (using the dir() function) and finds any callables which match the name signature (e.g. get_foo()) AND has a docstring beginning with a path-like char string. This does process things in alphabetical order (rather than than the unpredictable __dict__ attribute) so take this into consideration if certain routes should be checked before others. Unfortunately, this is a problem because the 'all' method will always come before others, so there is no capturing one type followed by a catch-all 'all'. Until a solution is found, just make a router by hand. """ if keys is None: keys = dir(obj) for val, method_str in _find_routeable_attributes(obj, keys): path, *doc = val.__doc__.split(maxsplit=1) or ('', '') if not path: continue if modify_doc: val.__doc__ = ''.join(doc) method = HTTPMethod[method_str] yield method, path, val
[ "def", "get_routing_attributes", "(", "obj", ",", "modify_doc", "=", "False", ",", "keys", "=", "None", ")", ":", "if", "keys", "is", "None", ":", "keys", "=", "dir", "(", "obj", ")", "for", "val", ",", "method_str", "in", "_find_routeable_attributes", "(", "obj", ",", "keys", ")", ":", "path", ",", "", "*", "doc", "=", "val", ".", "__doc__", ".", "split", "(", "maxsplit", "=", "1", ")", "or", "(", "''", ",", "''", ")", "if", "not", "path", ":", "continue", "if", "modify_doc", ":", "val", ".", "__doc__", "=", "''", ".", "join", "(", "doc", ")", "method", "=", "HTTPMethod", "[", "method_str", "]", "yield", "method", ",", "path", ",", "val" ]
34.566667
0.000938
def get_number_footer_lines(docbody, page_break_posns): """Try to guess the number of footer lines each page of a document has. The positions of the page breaks in the document are used to try to guess the number of footer lines. @param docbody: (list) of strings - each string being a line in the document @param page_break_posns: (list) of integers - each integer is the position of a page break in the document. @return: (int) the number of lines that make up the footer of each page. """ num_breaks = len(page_break_posns) num_footer_lines = 0 empty_line = 0 keep_checking = 1 p_wordSearch = re.compile(unicode(r'([A-Za-z0-9-]+)'), re.UNICODE) if num_breaks > 2: while keep_checking: cur_break = 1 if page_break_posns[cur_break] - num_footer_lines - 1 < 0 or \ page_break_posns[cur_break] - num_footer_lines - 1 > \ len(docbody) - 1: # Be sure that the docbody list boundary wasn't overstepped: break if docbody[(page_break_posns[cur_break] - num_footer_lines - 1)].isspace(): empty_line = 1 grps_headLineWords = \ p_wordSearch.findall(docbody[(page_break_posns[cur_break] - num_footer_lines - 1)]) cur_break = cur_break + 1 while (cur_break < num_breaks) and keep_checking: grps_thisLineWords = \ p_wordSearch.findall(docbody[(page_break_posns[cur_break] - num_footer_lines - 1)]) if empty_line: if len(grps_thisLineWords) != 0: # this line should be empty, but isn't keep_checking = 0 else: if (len(grps_thisLineWords) == 0) or \ (len(grps_headLineWords) != len(grps_thisLineWords)): # Not same num 'words' as equivilent line # in 1st footer: keep_checking = 0 else: keep_checking = \ check_boundary_lines_similar(grps_headLineWords, grps_thisLineWords) # Update cur_break for nxt line to check cur_break = cur_break + 1 if keep_checking: # Line is a footer line: check next num_footer_lines = num_footer_lines + 1 empty_line = 0 return num_footer_lines
[ "def", "get_number_footer_lines", "(", "docbody", ",", "page_break_posns", ")", ":", "num_breaks", "=", "len", "(", "page_break_posns", ")", "num_footer_lines", "=", "0", "empty_line", "=", "0", "keep_checking", "=", "1", "p_wordSearch", "=", "re", ".", "compile", "(", "unicode", "(", "r'([A-Za-z0-9-]+)'", ")", ",", "re", ".", "UNICODE", ")", "if", "num_breaks", ">", "2", ":", "while", "keep_checking", ":", "cur_break", "=", "1", "if", "page_break_posns", "[", "cur_break", "]", "-", "num_footer_lines", "-", "1", "<", "0", "or", "page_break_posns", "[", "cur_break", "]", "-", "num_footer_lines", "-", "1", ">", "len", "(", "docbody", ")", "-", "1", ":", "# Be sure that the docbody list boundary wasn't overstepped:", "break", "if", "docbody", "[", "(", "page_break_posns", "[", "cur_break", "]", "-", "num_footer_lines", "-", "1", ")", "]", ".", "isspace", "(", ")", ":", "empty_line", "=", "1", "grps_headLineWords", "=", "p_wordSearch", ".", "findall", "(", "docbody", "[", "(", "page_break_posns", "[", "cur_break", "]", "-", "num_footer_lines", "-", "1", ")", "]", ")", "cur_break", "=", "cur_break", "+", "1", "while", "(", "cur_break", "<", "num_breaks", ")", "and", "keep_checking", ":", "grps_thisLineWords", "=", "p_wordSearch", ".", "findall", "(", "docbody", "[", "(", "page_break_posns", "[", "cur_break", "]", "-", "num_footer_lines", "-", "1", ")", "]", ")", "if", "empty_line", ":", "if", "len", "(", "grps_thisLineWords", ")", "!=", "0", ":", "# this line should be empty, but isn't", "keep_checking", "=", "0", "else", ":", "if", "(", "len", "(", "grps_thisLineWords", ")", "==", "0", ")", "or", "(", "len", "(", "grps_headLineWords", ")", "!=", "len", "(", "grps_thisLineWords", ")", ")", ":", "# Not same num 'words' as equivilent line", "# in 1st footer:", "keep_checking", "=", "0", "else", ":", "keep_checking", "=", "check_boundary_lines_similar", "(", "grps_headLineWords", ",", "grps_thisLineWords", ")", "# Update cur_break for nxt line to check", "cur_break", "=", "cur_break", "+", "1", "if", "keep_checking", ":", "# Line is a footer line: check next", "num_footer_lines", "=", "num_footer_lines", "+", "1", "empty_line", "=", "0", "return", "num_footer_lines" ]
48.345455
0.001106
def face_midpoints(self, simplices=None): """ Identify the centroid of every simplex in the triangulation. If an array of simplices is given then the centroids of only those simplices is returned. """ if type(simplices) == type(None): simplices = self.simplices mids = self.points[simplices].mean(axis=1) mid_xpt, mid_ypt = mids[:,0], mids[:,1] return mid_xpt, mid_ypt
[ "def", "face_midpoints", "(", "self", ",", "simplices", "=", "None", ")", ":", "if", "type", "(", "simplices", ")", "==", "type", "(", "None", ")", ":", "simplices", "=", "self", ".", "simplices", "mids", "=", "self", ".", "points", "[", "simplices", "]", ".", "mean", "(", "axis", "=", "1", ")", "mid_xpt", ",", "mid_ypt", "=", "mids", "[", ":", ",", "0", "]", ",", "mids", "[", ":", ",", "1", "]", "return", "mid_xpt", ",", "mid_ypt" ]
33.461538
0.01566
def find(pattern, path=os.path.curdir, recursive=False): """ Find absolute file/folder paths with the given ``re`` pattern. Args: * pattern: search pattern, support both string (exact match) and `re` pattern. * path: root path to start searching, default is current working directory. * recursive: whether to recursively find the matched items from `path`, False by default Returns: Generator of the matched items of Files/Folders. """ root = realpath(path) Finder = lambda item: regex.is_regex(pattern) \ and pattern.match(item) or (pattern == item) if recursive: for base, dirs, files in os.walk(root, topdown=True): for segment in itertools.chain(filter(Finder, files), filter(Finder, dirs)): yield FS(os.path.join(base, segment)) else: for segment in filter(Finder, os.listdir(root)): yield(os.path.join(root, segment))
[ "def", "find", "(", "pattern", ",", "path", "=", "os", ".", "path", ".", "curdir", ",", "recursive", "=", "False", ")", ":", "root", "=", "realpath", "(", "path", ")", "Finder", "=", "lambda", "item", ":", "regex", ".", "is_regex", "(", "pattern", ")", "and", "pattern", ".", "match", "(", "item", ")", "or", "(", "pattern", "==", "item", ")", "if", "recursive", ":", "for", "base", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "root", ",", "topdown", "=", "True", ")", ":", "for", "segment", "in", "itertools", ".", "chain", "(", "filter", "(", "Finder", ",", "files", ")", ",", "filter", "(", "Finder", ",", "dirs", ")", ")", ":", "yield", "FS", "(", "os", ".", "path", ".", "join", "(", "base", ",", "segment", ")", ")", "else", ":", "for", "segment", "in", "filter", "(", "Finder", ",", "os", ".", "listdir", "(", "root", ")", ")", ":", "yield", "(", "os", ".", "path", ".", "join", "(", "root", ",", "segment", ")", ")" ]
37.92
0.007202
def delete(self, table, identifier): """Deletes a table row by given identifier. :param table: the expression of the table to update quoted or unquoted :param identifier: the delete criteria; a dictionary containing column-value pairs :return: the number of affected rows :rtype: int """ assert isinstance(identifier, dict) sb = self.sql_builder().delete(table) for column, value in identifier.iteritems(): func = self._expr.in_ if isinstance(value, (list, tuple)) else self._expr.eq sb.and_where(func(column, sb.create_positional_parameter(value))) return sb.execute()
[ "def", "delete", "(", "self", ",", "table", ",", "identifier", ")", ":", "assert", "isinstance", "(", "identifier", ",", "dict", ")", "sb", "=", "self", ".", "sql_builder", "(", ")", ".", "delete", "(", "table", ")", "for", "column", ",", "value", "in", "identifier", ".", "iteritems", "(", ")", ":", "func", "=", "self", ".", "_expr", ".", "in_", "if", "isinstance", "(", "value", ",", "(", "list", ",", "tuple", ")", ")", "else", "self", ".", "_expr", ".", "eq", "sb", ".", "and_where", "(", "func", "(", "column", ",", "sb", ".", "create_positional_parameter", "(", "value", ")", ")", ")", "return", "sb", ".", "execute", "(", ")" ]
44.066667
0.005926
def gen_modules(self, initial_load=False): ''' Tell the minion to reload the execution modules CLI Example: .. code-block:: bash salt '*' sys.reload_modules ''' self.opts['grains'] = salt.loader.grains(self.opts) self.opts['pillar'] = salt.pillar.get_pillar( self.opts, self.opts['grains'], self.opts['id'], saltenv=self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ).compile_pillar() if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts: errmsg = ( 'No "proxy" configuration key found in pillar or opts ' 'dictionaries for id {id}. Check your pillar/options ' 'configuration and contents. Salt-proxy aborted.' ).format(id=self.opts['id']) log.error(errmsg) self._running = False raise SaltSystemExit(code=salt.defaults.exitcodes.EX_GENERIC, msg=errmsg) if 'proxy' not in self.opts: self.opts['proxy'] = self.opts['pillar']['proxy'] # Then load the proxy module self.proxy = salt.loader.proxy(self.opts) self.utils = salt.loader.utils(self.opts, proxy=self.proxy) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=False, proxy=self.proxy) self.returners = salt.loader.returners(self.opts, self.functions, proxy=self.proxy) self.matchers = salt.loader.matchers(self.opts) self.functions['sys.reload_modules'] = self.gen_modules self.executors = salt.loader.executors(self.opts, self.functions, proxy=self.proxy) fq_proxyname = self.opts['proxy']['proxytype'] # we can then sync any proxymodules down from the master # we do a sync_all here in case proxy code was installed by # SPM or was manually placed in /srv/salt/_modules etc. self.functions['saltutil.sync_all'](saltenv=self.opts['saltenv']) self.functions.pack['__proxy__'] = self.proxy self.proxy.pack['__salt__'] = self.functions self.proxy.pack['__ret__'] = self.returners self.proxy.pack['__pillar__'] = self.opts['pillar'] # Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__ self.utils = salt.loader.utils(self.opts, proxy=self.proxy) self.proxy.pack['__utils__'] = self.utils # Reload all modules so all dunder variables are injected self.proxy.reload_modules() if ('{0}.init'.format(fq_proxyname) not in self.proxy or '{0}.shutdown'.format(fq_proxyname) not in self.proxy): errmsg = 'Proxymodule {0} is missing an init() or a shutdown() or both. '.format(fq_proxyname) + \ 'Check your proxymodule. Salt-proxy aborted.' log.error(errmsg) self._running = False raise SaltSystemExit(code=salt.defaults.exitcodes.EX_GENERIC, msg=errmsg) self.module_executors = self.proxy.get('{0}.module_executors'.format(fq_proxyname), lambda: [])() proxy_init_fn = self.proxy[fq_proxyname + '.init'] proxy_init_fn(self.opts) self.opts['grains'] = salt.loader.grains(self.opts, proxy=self.proxy) # Sync the grains here so the proxy can communicate them to the master self.functions['saltutil.sync_grains'](saltenv='base') self.grains_cache = self.opts['grains'] self.ready = True
[ "def", "gen_modules", "(", "self", ",", "initial_load", "=", "False", ")", ":", "self", ".", "opts", "[", "'grains'", "]", "=", "salt", ".", "loader", ".", "grains", "(", "self", ".", "opts", ")", "self", ".", "opts", "[", "'pillar'", "]", "=", "salt", ".", "pillar", ".", "get_pillar", "(", "self", ".", "opts", ",", "self", ".", "opts", "[", "'grains'", "]", ",", "self", ".", "opts", "[", "'id'", "]", ",", "saltenv", "=", "self", ".", "opts", "[", "'saltenv'", "]", ",", "pillarenv", "=", "self", ".", "opts", ".", "get", "(", "'pillarenv'", ")", ",", ")", ".", "compile_pillar", "(", ")", "if", "'proxy'", "not", "in", "self", ".", "opts", "[", "'pillar'", "]", "and", "'proxy'", "not", "in", "self", ".", "opts", ":", "errmsg", "=", "(", "'No \"proxy\" configuration key found in pillar or opts '", "'dictionaries for id {id}. Check your pillar/options '", "'configuration and contents. Salt-proxy aborted.'", ")", ".", "format", "(", "id", "=", "self", ".", "opts", "[", "'id'", "]", ")", "log", ".", "error", "(", "errmsg", ")", "self", ".", "_running", "=", "False", "raise", "SaltSystemExit", "(", "code", "=", "salt", ".", "defaults", ".", "exitcodes", ".", "EX_GENERIC", ",", "msg", "=", "errmsg", ")", "if", "'proxy'", "not", "in", "self", ".", "opts", ":", "self", ".", "opts", "[", "'proxy'", "]", "=", "self", ".", "opts", "[", "'pillar'", "]", "[", "'proxy'", "]", "# Then load the proxy module", "self", ".", "proxy", "=", "salt", ".", "loader", ".", "proxy", "(", "self", ".", "opts", ")", "self", ".", "utils", "=", "salt", ".", "loader", ".", "utils", "(", "self", ".", "opts", ",", "proxy", "=", "self", ".", "proxy", ")", "self", ".", "functions", "=", "salt", ".", "loader", ".", "minion_mods", "(", "self", ".", "opts", ",", "utils", "=", "self", ".", "utils", ",", "notify", "=", "False", ",", "proxy", "=", "self", ".", "proxy", ")", "self", ".", "returners", "=", "salt", ".", "loader", ".", "returners", "(", "self", ".", "opts", ",", "self", ".", "functions", ",", "proxy", "=", "self", ".", "proxy", ")", "self", ".", "matchers", "=", "salt", ".", "loader", ".", "matchers", "(", "self", ".", "opts", ")", "self", ".", "functions", "[", "'sys.reload_modules'", "]", "=", "self", ".", "gen_modules", "self", ".", "executors", "=", "salt", ".", "loader", ".", "executors", "(", "self", ".", "opts", ",", "self", ".", "functions", ",", "proxy", "=", "self", ".", "proxy", ")", "fq_proxyname", "=", "self", ".", "opts", "[", "'proxy'", "]", "[", "'proxytype'", "]", "# we can then sync any proxymodules down from the master", "# we do a sync_all here in case proxy code was installed by", "# SPM or was manually placed in /srv/salt/_modules etc.", "self", ".", "functions", "[", "'saltutil.sync_all'", "]", "(", "saltenv", "=", "self", ".", "opts", "[", "'saltenv'", "]", ")", "self", ".", "functions", ".", "pack", "[", "'__proxy__'", "]", "=", "self", ".", "proxy", "self", ".", "proxy", ".", "pack", "[", "'__salt__'", "]", "=", "self", ".", "functions", "self", ".", "proxy", ".", "pack", "[", "'__ret__'", "]", "=", "self", ".", "returners", "self", ".", "proxy", ".", "pack", "[", "'__pillar__'", "]", "=", "self", ".", "opts", "[", "'pillar'", "]", "# Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__", "self", ".", "utils", "=", "salt", ".", "loader", ".", "utils", "(", "self", ".", "opts", ",", "proxy", "=", "self", ".", "proxy", ")", "self", ".", "proxy", ".", "pack", "[", "'__utils__'", "]", "=", "self", ".", "utils", "# Reload all modules so all dunder variables 
are injected", "self", ".", "proxy", ".", "reload_modules", "(", ")", "if", "(", "'{0}.init'", ".", "format", "(", "fq_proxyname", ")", "not", "in", "self", ".", "proxy", "or", "'{0}.shutdown'", ".", "format", "(", "fq_proxyname", ")", "not", "in", "self", ".", "proxy", ")", ":", "errmsg", "=", "'Proxymodule {0} is missing an init() or a shutdown() or both. '", ".", "format", "(", "fq_proxyname", ")", "+", "'Check your proxymodule. Salt-proxy aborted.'", "log", ".", "error", "(", "errmsg", ")", "self", ".", "_running", "=", "False", "raise", "SaltSystemExit", "(", "code", "=", "salt", ".", "defaults", ".", "exitcodes", ".", "EX_GENERIC", ",", "msg", "=", "errmsg", ")", "self", ".", "module_executors", "=", "self", ".", "proxy", ".", "get", "(", "'{0}.module_executors'", ".", "format", "(", "fq_proxyname", ")", ",", "lambda", ":", "[", "]", ")", "(", ")", "proxy_init_fn", "=", "self", ".", "proxy", "[", "fq_proxyname", "+", "'.init'", "]", "proxy_init_fn", "(", "self", ".", "opts", ")", "self", ".", "opts", "[", "'grains'", "]", "=", "salt", ".", "loader", ".", "grains", "(", "self", ".", "opts", ",", "proxy", "=", "self", ".", "proxy", ")", "# Sync the grains here so the proxy can communicate them to the master", "self", ".", "functions", "[", "'saltutil.sync_grains'", "]", "(", "saltenv", "=", "'base'", ")", "self", ".", "grains_cache", "=", "self", ".", "opts", "[", "'grains'", "]", "self", ".", "ready", "=", "True" ]
43.3
0.002822
def differentForks(self, lnode, rnode): """True, if lnode and rnode are located on different forks of IF/TRY""" ancestor = self.getCommonAncestor(lnode, rnode, self.root) parts = getAlternatives(ancestor) if parts: for items in parts: if self.descendantOf(lnode, items, ancestor) ^ \ self.descendantOf(rnode, items, ancestor): return True return False
[ "def", "differentForks", "(", "self", ",", "lnode", ",", "rnode", ")", ":", "ancestor", "=", "self", ".", "getCommonAncestor", "(", "lnode", ",", "rnode", ",", "self", ".", "root", ")", "parts", "=", "getAlternatives", "(", "ancestor", ")", "if", "parts", ":", "for", "items", "in", "parts", ":", "if", "self", ".", "descendantOf", "(", "lnode", ",", "items", ",", "ancestor", ")", "^", "self", ".", "descendantOf", "(", "rnode", ",", "items", ",", "ancestor", ")", ":", "return", "True", "return", "False" ]
44.9
0.004367
def process_sums(self): """ A redefined version of :func:`RC2.process_sums`. The only modification affects the clauses whose weight after splitting becomes less than the weight of the current optimization level. Such clauses are deactivated and to be reactivated at a later stage. """ # sums that should be deactivated (but not removed completely) to_deactivate = set([]) for l in self.core_sums: if self.wght[l] == self.minw: # marking variable as being a part of the core # so that next time it is not used as an assump self.garbage.add(l) else: # do not remove this variable from assumps # since it has a remaining non-zero weight self.wght[l] -= self.minw # deactivate this assumption and put at a lower level if self.wght[l] < self.blop[self.levl]: self.wstr[self.wght[l]].append(l) to_deactivate.add(l) # increase bound for the sum t, b = self.update_sum(l) # updating bounds and weights if b < len(t.rhs): lnew = -t.rhs[b] if lnew in self.garbage: self.garbage.remove(lnew) self.wght[lnew] = 0 if lnew not in self.wght: self.set_bound(t, b) else: self.wght[lnew] += self.minw # put this assumption to relaxation vars self.rels.append(-l) # deactivating unnecessary sums self.sums = list(filter(lambda x: x not in to_deactivate, self.sums))
[ "def", "process_sums", "(", "self", ")", ":", "# sums that should be deactivated (but not removed completely)", "to_deactivate", "=", "set", "(", "[", "]", ")", "for", "l", "in", "self", ".", "core_sums", ":", "if", "self", ".", "wght", "[", "l", "]", "==", "self", ".", "minw", ":", "# marking variable as being a part of the core", "# so that next time it is not used as an assump", "self", ".", "garbage", ".", "add", "(", "l", ")", "else", ":", "# do not remove this variable from assumps", "# since it has a remaining non-zero weight", "self", ".", "wght", "[", "l", "]", "-=", "self", ".", "minw", "# deactivate this assumption and put at a lower level", "if", "self", ".", "wght", "[", "l", "]", "<", "self", ".", "blop", "[", "self", ".", "levl", "]", ":", "self", ".", "wstr", "[", "self", ".", "wght", "[", "l", "]", "]", ".", "append", "(", "l", ")", "to_deactivate", ".", "add", "(", "l", ")", "# increase bound for the sum", "t", ",", "b", "=", "self", ".", "update_sum", "(", "l", ")", "# updating bounds and weights", "if", "b", "<", "len", "(", "t", ".", "rhs", ")", ":", "lnew", "=", "-", "t", ".", "rhs", "[", "b", "]", "if", "lnew", "in", "self", ".", "garbage", ":", "self", ".", "garbage", ".", "remove", "(", "lnew", ")", "self", ".", "wght", "[", "lnew", "]", "=", "0", "if", "lnew", "not", "in", "self", ".", "wght", ":", "self", ".", "set_bound", "(", "t", ",", "b", ")", "else", ":", "self", ".", "wght", "[", "lnew", "]", "+=", "self", ".", "minw", "# put this assumption to relaxation vars", "self", ".", "rels", ".", "append", "(", "-", "l", ")", "# deactivating unnecessary sums", "self", ".", "sums", "=", "list", "(", "filter", "(", "lambda", "x", ":", "x", "not", "in", "to_deactivate", ",", "self", ".", "sums", ")", ")" ]
36.851064
0.001687
def get_default_config_help(self): """ Return help text """ config_help = super(PassengerCollector, self).get_default_config_help() config_help.update({ "bin": "The path to the binary", "use_sudo": "Use sudo?", "sudo_cmd": "Path to sudo", "passenger_status_bin": "The path to the binary passenger-status", "passenger_memory_stats_bin": "The path to the binary passenger-memory-stats", }) return config_help
[ "def", "get_default_config_help", "(", "self", ")", ":", "config_help", "=", "super", "(", "PassengerCollector", ",", "self", ")", ".", "get_default_config_help", "(", ")", "config_help", ".", "update", "(", "{", "\"bin\"", ":", "\"The path to the binary\"", ",", "\"use_sudo\"", ":", "\"Use sudo?\"", ",", "\"sudo_cmd\"", ":", "\"Path to sudo\"", ",", "\"passenger_status_bin\"", ":", "\"The path to the binary passenger-status\"", ",", "\"passenger_memory_stats_bin\"", ":", "\"The path to the binary passenger-memory-stats\"", ",", "}", ")", "return", "config_help" ]
38.4
0.00339
def cleanup_sweep_threads(): ''' Not used. Keeping this function in case we decide not to use daemonized threads and it becomes necessary to clean up the running threads upon exit. ''' for dict_name, obj in globals().items(): if isinstance(obj, (TimedDict,)): logging.info( 'Stopping thread for TimedDict {dict_name}'.format( dict_name=dict_name)) obj.stop_sweep()
[ "def", "cleanup_sweep_threads", "(", ")", ":", "for", "dict_name", ",", "obj", "in", "globals", "(", ")", ".", "items", "(", ")", ":", "if", "isinstance", "(", "obj", ",", "(", "TimedDict", ",", ")", ")", ":", "logging", ".", "info", "(", "'Stopping thread for TimedDict {dict_name}'", ".", "format", "(", "dict_name", "=", "dict_name", ")", ")", "obj", ".", "stop_sweep", "(", ")" ]
34.230769
0.002188
def rm_gos(self, rm_goids): """Remove any edges that contain user-specified edges.""" self.edges = self._rm_gos_edges(rm_goids, self.edges) self.edges_rel = self._rm_gos_edges_rel(rm_goids, self.edges_rel)
[ "def", "rm_gos", "(", "self", ",", "rm_goids", ")", ":", "self", ".", "edges", "=", "self", ".", "_rm_gos_edges", "(", "rm_goids", ",", "self", ".", "edges", ")", "self", ".", "edges_rel", "=", "self", ".", "_rm_gos_edges_rel", "(", "rm_goids", ",", "self", ".", "edges_rel", ")" ]
56.5
0.008734
def to_outgoing_transaction(self, using, created=None, deleted=None): """ Serialize the model instance to an AES encrypted json object and saves the json object to the OutgoingTransaction model. """ OutgoingTransaction = django_apps.get_model( "django_collect_offline", "OutgoingTransaction" ) created = True if created is None else created action = INSERT if created else UPDATE timestamp_datetime = ( self.instance.created if created else self.instance.modified ) if not timestamp_datetime: timestamp_datetime = get_utcnow() if deleted: timestamp_datetime = get_utcnow() action = DELETE outgoing_transaction = None if self.is_serialized: hostname = socket.gethostname() outgoing_transaction = OutgoingTransaction.objects.using(using).create( tx_name=self.instance._meta.label_lower, tx_pk=getattr(self.instance, self.primary_key_field.name), tx=self.encrypted_json(), timestamp=timestamp_datetime.strftime("%Y%m%d%H%M%S%f"), producer=f"{hostname}-{using}", action=action, using=using, ) return outgoing_transaction
[ "def", "to_outgoing_transaction", "(", "self", ",", "using", ",", "created", "=", "None", ",", "deleted", "=", "None", ")", ":", "OutgoingTransaction", "=", "django_apps", ".", "get_model", "(", "\"django_collect_offline\"", ",", "\"OutgoingTransaction\"", ")", "created", "=", "True", "if", "created", "is", "None", "else", "created", "action", "=", "INSERT", "if", "created", "else", "UPDATE", "timestamp_datetime", "=", "(", "self", ".", "instance", ".", "created", "if", "created", "else", "self", ".", "instance", ".", "modified", ")", "if", "not", "timestamp_datetime", ":", "timestamp_datetime", "=", "get_utcnow", "(", ")", "if", "deleted", ":", "timestamp_datetime", "=", "get_utcnow", "(", ")", "action", "=", "DELETE", "outgoing_transaction", "=", "None", "if", "self", ".", "is_serialized", ":", "hostname", "=", "socket", ".", "gethostname", "(", ")", "outgoing_transaction", "=", "OutgoingTransaction", ".", "objects", ".", "using", "(", "using", ")", ".", "create", "(", "tx_name", "=", "self", ".", "instance", ".", "_meta", ".", "label_lower", ",", "tx_pk", "=", "getattr", "(", "self", ".", "instance", ",", "self", ".", "primary_key_field", ".", "name", ")", ",", "tx", "=", "self", ".", "encrypted_json", "(", ")", ",", "timestamp", "=", "timestamp_datetime", ".", "strftime", "(", "\"%Y%m%d%H%M%S%f\"", ")", ",", "producer", "=", "f\"{hostname}-{using}\"", ",", "action", "=", "action", ",", "using", "=", "using", ",", ")", "return", "outgoing_transaction" ]
43.566667
0.002246
def image_resolution(self) -> Optional[Tuple[int, int]]: r"""(:class:`~typing.Optional`\ [:class:`~typing.Tuple`\ [:class:`int`, :class:`int`]]) The (width, height) pair of the image. It may be :const:`None` if it's not an image. """ images = self.attributes.get('imageinfo', []) if images and isinstance(images, collections.abc.Sequence): img = images[0] return img['width'], img['height'] return None
[ "def", "image_resolution", "(", "self", ")", "->", "Optional", "[", "Tuple", "[", "int", ",", "int", "]", "]", ":", "images", "=", "self", ".", "attributes", ".", "get", "(", "'imageinfo'", ",", "[", "]", ")", "if", "images", "and", "isinstance", "(", "images", ",", "collections", ".", "abc", ".", "Sequence", ")", ":", "img", "=", "images", "[", "0", "]", "return", "img", "[", "'width'", "]", ",", "img", "[", "'height'", "]", "return", "None" ]
43
0.004141
def _get_bandfile(self, **options): """Get the VIIRS rsr filename""" # Need to understand why there are A&B files for band M16. FIXME! # Anyway, the absolute response differences are small, below 0.05 # LOG.debug("paths = %s", str(self.bandfilenames)) path = self.bandfilenames[self.bandname] if not os.path.exists(path): raise IOError("Couldn't find an existing file for this band ({band}): {path}".format( band=self.bandname, path=path)) self.filename = path return
[ "def", "_get_bandfile", "(", "self", ",", "*", "*", "options", ")", ":", "# Need to understand why there are A&B files for band M16. FIXME!", "# Anyway, the absolute response differences are small, below 0.05", "# LOG.debug(\"paths = %s\", str(self.bandfilenames))", "path", "=", "self", ".", "bandfilenames", "[", "self", ".", "bandname", "]", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "raise", "IOError", "(", "\"Couldn't find an existing file for this band ({band}): {path}\"", ".", "format", "(", "band", "=", "self", ".", "bandname", ",", "path", "=", "path", ")", ")", "self", ".", "filename", "=", "path", "return" ]
36.6
0.005329
def place_visual(self): """Places visual objects randomly until no collisions or max iterations hit.""" index = 0 bin_pos = string_to_array(self.bin2_body.get("pos")) bin_size = self.bin_size for _, obj_mjcf in self.visual_objects: bin_x_low = bin_pos[0] bin_y_low = bin_pos[1] if index == 0 or index == 2: bin_x_low -= bin_size[0] / 2 if index < 2: bin_y_low -= bin_size[1] / 2 bin_x_high = bin_x_low + bin_size[0] / 2 bin_y_high = bin_y_low + bin_size[1] / 2 bottom_offset = obj_mjcf.get_bottom_offset() bin_range = [bin_x_low + bin_x_high, bin_y_low + bin_y_high, 2 * bin_pos[2]] bin_center = np.array(bin_range) / 2.0 pos = bin_center - bottom_offset self.visual_obj_mjcf[index].set("pos", array_to_string(pos)) index += 1
[ "def", "place_visual", "(", "self", ")", ":", "index", "=", "0", "bin_pos", "=", "string_to_array", "(", "self", ".", "bin2_body", ".", "get", "(", "\"pos\"", ")", ")", "bin_size", "=", "self", ".", "bin_size", "for", "_", ",", "obj_mjcf", "in", "self", ".", "visual_objects", ":", "bin_x_low", "=", "bin_pos", "[", "0", "]", "bin_y_low", "=", "bin_pos", "[", "1", "]", "if", "index", "==", "0", "or", "index", "==", "2", ":", "bin_x_low", "-=", "bin_size", "[", "0", "]", "/", "2", "if", "index", "<", "2", ":", "bin_y_low", "-=", "bin_size", "[", "1", "]", "/", "2", "bin_x_high", "=", "bin_x_low", "+", "bin_size", "[", "0", "]", "/", "2", "bin_y_high", "=", "bin_y_low", "+", "bin_size", "[", "1", "]", "/", "2", "bottom_offset", "=", "obj_mjcf", ".", "get_bottom_offset", "(", ")", "bin_range", "=", "[", "bin_x_low", "+", "bin_x_high", ",", "bin_y_low", "+", "bin_y_high", ",", "2", "*", "bin_pos", "[", "2", "]", "]", "bin_center", "=", "np", ".", "array", "(", "bin_range", ")", "/", "2.0", "pos", "=", "bin_center", "-", "bottom_offset", "self", ".", "visual_obj_mjcf", "[", "index", "]", ".", "set", "(", "\"pos\"", ",", "array_to_string", "(", "pos", ")", ")", "index", "+=", "1" ]
36.92
0.004224
def all(self, fields=None, include_fields=True, page=None, per_page=None, extra_params=None): """Retrieves a list of all the applications. Important: The client_secret and encryption_key attributes can only be retrieved with the read:client_keys scope. Args: fields (list of str, optional): A list of fields to include or exclude from the result (depending on include_fields). Empty to retrieve all fields. include_fields (bool, optional): True if the fields specified are to be included in the result, False otherwise. page (int): The result's page number (zero based). per_page (int, optional): The amount of entries per page. extra_params (dictionary, optional): The extra parameters to add to the request. The fields, include_fields, page and per_page values specified as parameters take precedence over the ones defined here. See: https://auth0.com/docs/api/management/v2#!/Clients/get_clients """ params = extra_params or {} params['fields'] = fields and ','.join(fields) or None params['include_fields'] = str(include_fields).lower() params['page'] = page params['per_page'] = per_page return self.client.get(self._url(), params=params)
[ "def", "all", "(", "self", ",", "fields", "=", "None", ",", "include_fields", "=", "True", ",", "page", "=", "None", ",", "per_page", "=", "None", ",", "extra_params", "=", "None", ")", ":", "params", "=", "extra_params", "or", "{", "}", "params", "[", "'fields'", "]", "=", "fields", "and", "','", ".", "join", "(", "fields", ")", "or", "None", "params", "[", "'include_fields'", "]", "=", "str", "(", "include_fields", ")", ".", "lower", "(", ")", "params", "[", "'page'", "]", "=", "page", "params", "[", "'per_page'", "]", "=", "per_page", "return", "self", ".", "client", ".", "get", "(", "self", ".", "_url", "(", ")", ",", "params", "=", "params", ")" ]
41.875
0.002918
def write(self, data): ''' This could be a bit less clumsy. ''' if data == '\n': # print does this return self.stream.write(data) else: bytes_ = 0 for line in data.splitlines(True): nl = '' if line.endswith('\n'): # mv nl to end: line = line[:-1] nl = '\n' bytes_ += self.stream.write( f'{self.start}{line}{self.default}{nl}' ) or 0 # in case None returned (on Windows) return bytes_
[ "def", "write", "(", "self", ",", "data", ")", ":", "if", "data", "==", "'\\n'", ":", "# print does this", "return", "self", ".", "stream", ".", "write", "(", "data", ")", "else", ":", "bytes_", "=", "0", "for", "line", "in", "data", ".", "splitlines", "(", "True", ")", ":", "nl", "=", "''", "if", "line", ".", "endswith", "(", "'\\n'", ")", ":", "# mv nl to end:", "line", "=", "line", "[", ":", "-", "1", "]", "nl", "=", "'\\n'", "bytes_", "+=", "self", ".", "stream", ".", "write", "(", "f'{self.start}{line}{self.default}{nl}'", ")", "or", "0", "# in case None returned (on Windows)", "return", "bytes_" ]
39.333333
0.003311