def listMethods(self, pid, sdefpid=None):
    # NOTE: the path literal was lost in extraction; 'objects/%(pid)s/methods'
    # is the standard Fedora REST API endpoint and is assumed here.
    uri = 'objects/%(pid)s/methods' % {'pid': pid}
    if sdefpid:
        uri += '/' + sdefpid
    return self.get(uri, params=self.format_xml)
List available service methods. :param pid: object pid :param sdefpid: service definition pid :rtype: :class:`requests.models.Response`
def permission_required(perms, login_url=None, raise_exception=False, redirect_field_name=REDIRECT_FIELD_NAME):
    def decorator(view_func):
        @wraps(view_func, assigned=available_attrs(view_func))
        def _wrapped_view(request, *args, **kwargs):
            # per-slug check; the kwargs key literal was elided and 'slug' is assumed
            if check_perms(perms, request.user, kwargs['slug'], raise_exception=raise_exception):
                return view_func(request, *args, **kwargs)
            if is_authenticated(request.user):
                if WALIKI_RENDER_403:
                    # template name assumed; the literal was lost in extraction
                    return render(request, 'waliki/403.html', kwargs, status=403)
                else:
                    raise PermissionDenied
            path = request.build_absolute_uri()
            resolved_login_url = force_str(resolve_url(login_url or settings.LOGIN_URL))
            # if the login url is the same scheme and net location, use the path
            login_scheme, login_netloc = urlparse(resolved_login_url)[:2]
            current_scheme, current_netloc = urlparse(path)[:2]
            if ((not login_scheme or login_scheme == current_scheme) and
                    (not login_netloc or login_netloc == current_netloc)):
                path = request.get_full_path()
            from django.contrib.auth.views import redirect_to_login
            return redirect_to_login(path, resolved_login_url, redirect_field_name)
        return _wrapped_view
    return decorator
This is analogous to Django's built-in ``permission_required`` decorator, but improved to check per-slug ACL rules and default permissions for anonymous and logged-in users. If there is a rule affecting a slug, the user needs to be part of the rule's allowed users. If there isn't a matching rule, default permissions apply.
def str2listtuple(self, string_message):
    "Convert a string that is ready to be sent to graphite into a tuple"
    if type(string_message).__name__ not in ('str', 'unicode'):
        raise TypeError("Must provide a string or unicode")
    if not string_message.endswith('\n'):
        string_message += "\n"
    tpl_list = []
    for line in string_message.split('\n'):
        line = line.strip()
        if not line:
            continue
        path, metric, timestamp = (None, None, None)
        try:
            (path, metric, timestamp) = line.split()
        except ValueError:
            raise ValueError(
                "message must contain - metric_name, value and timestamp '%s'" % line)
        try:
            timestamp = float(timestamp)
        except ValueError:
            raise ValueError("Timestamp must be float or int")
        tpl_list.append((path, (timestamp, metric)))
    if len(tpl_list) == 0:
        raise GraphiteSendException("No messages to send")
    payload = pickle.dumps(tpl_list)
    header = struct.pack("!L", len(payload))
    message = header + payload
    return message
Convert a string that is ready to be sent to graphite into a tuple
def calculate_heartbeats(shb, chb):
    (sx, sy) = shb
    (cx, cy) = chb
    x = 0
    y = 0
    if cx != 0 and sy != '0':
        x = max(cx, int(sy))
    if cy != 0 and sx != '0':
        y = max(cy, int(sx))
    return x, y
Given a heartbeat string from the server, and a heartbeat tuple from the client, calculate what the actual heartbeat settings should be. :param (str,str) shb: server heartbeat numbers :param (int,int) chb: client heartbeat numbers :rtype: (int,int)
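A quick worked example of the negotiation (values are illustrative; server values arrive as strings and client values as ints, per the docstring):

# Server advertises ('5000', '10000'): it can send every 5s, wants data every 10s.
# Client asked for (4000, 7000): send every 4s, receive every 7s.
server_hb = ('5000', '10000')
client_hb = (4000, 7000)
x, y = calculate_heartbeats(server_hb, client_hb)
# x = max(4000, 10000) = 10000 -> client will send a heartbeat every 10s
# y = max(7000, 5000)  = 7000  -> client expects one at least every 7s
print(x, y)  # 10000 7000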
def get_unique_figname(dirname, root, ext):
    # the numbering format literal was elided in the source; '_%d' is assumed
    i = 1
    figname = root + '_%d' % i + ext
    while True:
        if osp.exists(osp.join(dirname, figname)):
            i += 1
            figname = root + '_%d' % i + ext
        else:
            return osp.join(dirname, figname)
Append a number to "root" to form a filename that does not already exist in "dirname".
def virtualchain_set_opfields(op, **fields):
    # warn about unrecognized fields (message format assumed)
    for f in fields.keys():
        if f not in indexer.RESERVED_KEYS:
            log.warning("Unsupported virtualchain field '%s'" % f)
    # pass along the reserved fields
    for f in fields.keys():
        if f in indexer.RESERVED_KEYS:
            op[f] = fields[f]
    return op
Pass along virtualchain-reserved fields to a virtualchain operation. This layer of indirection is meant to help with future compatibility, so virtualchain implementations do not try to set operation fields directly.
def file(cls, uri_or_path):
    uri = urlparse(uri_or_path)
    if not uri.scheme:
        # binary read mode assumed; the mode literal was elided
        return open(uri_or_path, 'rb')
    else:
        it = cls(uri_or_path).download_iter(uri.path.lstrip('/'), skip_hash=True)
        if not it:
            # message literal elided; a descriptive one is assumed
            raise ValueError('Could not download {}'.format(uri_or_path))
        tmp = tempfile.TemporaryFile()
        for chunk in it:
            tmp.write(chunk)
        tmp.seek(0, 0)
        return tmp
Given either a URI like s3://bucket/path.txt or a path like /path.txt, return a file object for it.
def start(self, rootJob):
    self._assertContextManagerUsed()
    self.writePIDFile()
    if self.config.restart:
        # message assumed; the literal was elided in the source
        raise ToilRestartException(
            'A Toil workflow can only be started once. Use Toil.restart() to resume it.')
    self._batchSystem = self.createBatchSystem(self.config)
    self._setupAutoDeployment(rootJob.getUserScript())
    try:
        self._setBatchSystemEnvVars()
        self._serialiseEnv()
        self._cacheAllJobs()
        # shared-file name assumed; the literal was elided
        with self._jobStore.writeSharedFileStream('rootJobReturnValue') as fH:
            rootJob.prepareForPromiseRegistration(self._jobStore)
            promise = rootJob.rv()
            pickle.dump(promise, fH, protocol=pickle.HIGHEST_PROTOCOL)
        rootJobGraph = rootJob._serialiseFirstJob(self._jobStore)
        self._cacheJob(rootJobGraph)
        self._setProvisioner()
        return self._runMainLoop(rootJobGraph)
    finally:
        self._shutdownBatchSystem()
Invoke a Toil workflow with the given job as the root for an initial run. This method must be called in the body of a ``with Toil(...) as toil:`` statement. This method should not be called more than once for a workflow that has not finished. :param toil.job.Job rootJob: The root job of the workflow :return: The root job's return value
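A hedged sketch of typical usage; the job construction and option names vary across Toil versions and are illustrative only:

from toil.common import Toil
from toil.job import Job

if __name__ == '__main__':
    options = Job.Runner.getDefaultOptions('./jobstore')  # local job store path
    root = Job.wrapFn(print, 'hello from the root job')   # any Job instance
    with Toil(options) as toil:    # the context manager is mandatory (see above)
        result = toil.start(root)  # returns the root job's rv()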
def get_last_version(skip_tags=None) -> Optional[str]:
    # debug message format assumed; the literal was elided
    debug('get_last_version skip_tags=%s', skip_tags)
    check_repo()
    skip_tags = skip_tags or []

    def version_finder(tag):
        if isinstance(tag.commit, TagObject):
            return tag.tag.tagged_date
        return tag.commit.committed_date

    for i in sorted(repo.tags, reverse=True, key=version_finder):
        # the regex literal was elided; a 'v'-prefixed semver pattern is assumed
        if re.match(r'v\d+\.\d+\.\d+', i.name):
            if i.name in skip_tags:
                continue
            return i.name[1:]
    return None
Return last version from repo tags. :return: A string containing the version number, or None if no version tag is found.
def t_escaped_CARRIAGE_RETURN_CHAR(self, t):
    r'\x72'  # PLY token regex: matches the escaped character 'r'
    t.lexer.pop_state()
    t.value = unichr(0x000d)
    return t
r'\x72'
def call(self, request=None, *args, **kwargs):
    if request is not None:
        self.request = request

    retry = self.request.configuration.retry
    if not isinstance(retry, SimpleRetry):
        # message literal elided; a descriptive one is assumed
        raise Error('Unsupported retry strategy')

    last_exception = None
    for i in range(0, retry.max_retry):
        try:
            if i > 0:
                retry.sleep_jitter()
            self.call_once()
            return self.response
        except Exception as ex:
            last_exception = RequestFailed(message='Request failed', cause=ex)
            logger.debug("Request %d failed, exception: %s" % (i, ex))
            if i + 1 == retry.max_retry:
                raise last_exception
    raise last_exception
Calls multiple times, with retry. :param request: request to execute :return: response
def read(self, domain, type_name, search_command, body=None):
    # the HTTP method literal was elided; 'GET' is assumed for a read
    return self._request(domain, type_name, search_command, 'GET', body)
Read entry in ThreatConnect Data Store Args: domain (string): One of 'local', 'organization', or 'system'. type_name (string): This is a free form index type name. The ThreatConnect API will use this resource verbatim. search_command (string): Search command to pass to ES. body (str): JSON body
def validate_complex_list(prop, value, xpath_map=None):
    if value is not None:
        validate_type(prop, value, (dict, list))

        if prop in _complex_definitions:
            complex_keys = _complex_definitions[prop]
        else:
            complex_keys = {} if xpath_map is None else xpath_map

        for idx, complex_struct in enumerate(wrap_value(value)):
            # index-notation literals elided; '[idx]' style is assumed
            cs_idx = prop + '[' + str(idx) + ']'
            validate_type(cs_idx, complex_struct, dict)

            for cs_prop, cs_val in iteritems(complex_struct):
                cs_key = '.'.join((cs_idx, cs_prop))

                if cs_prop not in complex_keys:
                    # error-message format elided; a descriptive one is assumed
                    _validation_error(prop, None, value,
                                      'keys: {0}'.format(','.join(complex_keys)))

                if not isinstance(cs_val, list):
                    validate_type(cs_key, cs_val, (string_types, list))
                else:
                    for list_idx, list_val in enumerate(cs_val):
                        list_prop = cs_key + '[' + str(list_idx) + ']'
                        validate_type(list_prop, list_val, string_types)
Default validation for Attribute Details data structure
def add_predicate(self, predicate):
    _predicate = predicate
    if isinstance(predicate, partial):
        _predicate = '%s(%s, %s)' % (predicate.func, predicate.args, predicate.keywords)
    # the LOG_OPTS key and log-message format were elided; names assumed
    if LOG_OPTS['register']:
        hookenv.log('Adding predicate for %s: %s' % (self.id(), _predicate),
                    level=hookenv.DEBUG)
    self._predicates.append(predicate)
Add a new predicate callback to this handler.
def ping(daemon, channel, data=None):
    if not channel:
        # no channel to respond to
        return

    # configuration key assumed; the literal was elided
    node_name = daemon.config['control'].get('node_name')

    # build the reply; the docstring says it starts with the 'pong' command,
    # whose string literal was stripped from `reply = ['pong']`
    reply = ['pong']
    if node_name or data:
        reply.append(node_name or '')
    if data:
        reply.append(data)

    with utils.ignore_except():
        daemon.db.publish(channel, ':'.join(reply))
Process the 'ping' control message. :param daemon: The control daemon; used to get at the configuration and the database. :param channel: The publish channel to which to send the response. :param data: Optional extra data. Will be returned as the second argument of the response. Responds to the named channel with a command of 'pong' and with the node_name (if configured) and provided data as arguments.
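For illustration, assuming the elided join separator is ':' and node_name is configured as 'node1', the published replies would look like this (all values hypothetical):

# ping(daemon, 'control')                     -> publish('control', 'pong:node1')
# ping(daemon, 'control', data='1382367589')  -> publish('control', 'pong:node1:1382367589')
# with no node_name and no data               -> publish('control', 'pong')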
def _dump_date(d, delim):
    if d is None:
        d = gmtime()
    elif isinstance(d, datetime):
        d = d.utctimetuple()
    elif isinstance(d, (integer_types, float)):
        d = gmtime(d)
    return "%s, %02d%s%s%s%s %02d:%02d:%02d GMT" % (
        ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")[d.tm_wday],
        d.tm_mday, delim,
        ("Jan", "Feb", "Mar", "Apr", "May", "Jun",
         "Jul", "Aug", "Sep", "Oct", "Nov", "Dec")[d.tm_mon - 1],
        delim, str(d.tm_year), d.tm_hour, d.tm_min, d.tm_sec,
    )
Used for `http_date` and `cookie_date`.
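A quick check of the two delimiter styles (gmtime(0) is the Unix epoch, a Thursday):

print(_dump_date(0, ' '))  # Thu, 01 Jan 1970 00:00:00 GMT   (http_date style)
print(_dump_date(0, '-'))  # Thu, 01-Jan-1970 00:00:00 GMT   (cookie_date style)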
def index_to_time_seg(time_seg_idx, slide_step):
    assert time_seg_idx * slide_step < const.MINUTES_IN_A_DAY
    return time_util.minutes_to_time_str(time_seg_idx * slide_step)
Convert a time-segment index into its time string. :param time_seg_idx: index of the time segment :param slide_step: sliding-step length, in minutes :return: the time string for the segment
def process(self):
    random.seed()

    for _ in range(0, self.__numlocal):
        # pick random medoids as the current configuration
        self.__current = random.sample(range(0, len(self.__pointer_data)),
                                       self.__number_clusters)

        # update clusters in line with the randomly allocated medoids
        self.__update_clusters(self.__current)

        # optimize the configuration by swapping medoids with neighbors
        self.__optimize_configuration()

        # keep the configuration if it beats the best estimation so far
        estimation = self.__calculate_estimation()
        if estimation < self.__optimal_estimation:
            self.__optimal_medoids = self.__current[:]
            self.__optimal_estimation = estimation

    self.__update_clusters(self.__optimal_medoids)
! @brief Performs cluster analysis in line with rules of CLARANS algorithm. @see get_clusters() @see get_medoids()
def ebic_select(self, gamma=0):
    if not isinstance(self.precision_, list):
        raise ValueError("EBIC requires multiple models to select from.")

    if not self.is_fitted_:
        return

    ebic_scores = self.ebic(gamma=gamma)
    min_indices = np.where(np.abs(ebic_scores - ebic_scores.min()) < 1e-10)
    return np.max(min_indices)
Uses Extended Bayesian Information Criteria for model selection. Can only be used in path mode (doesn't really make sense otherwise). See: Extended Bayesian Information Criteria for Gaussian Graphical Models R. Foygel and M. Drton NIPS 2010 Parameters ---------- gamma : (float) \in (0, 1) Choice of gamma=0 leads to classical BIC Positive gamma leads to stronger penalization of large graphs. Returns ------- Lambda index with best ebic score. When multiple ebic scores are the same, returns the smallest lambda (largest index) with minimum score.
def all_distinct(l, idx):
    # `cp` is the copy module (import copy as cp)
    out = []
    deja_vu = [False for _ in l]
    for i in idx:
        to_add = cp.deepcopy(l[i]) if deja_vu[i] else l[i]
        out.append(to_add)
        deja_vu[i] = True
    return out
Returns the list [l[i] for i in idx]. When needed, objects l[i] are replaced by a copy, to make sure that the elements of the list are all distinct. Parameters ---------- l: iterable idx: iterable that generates ints (e.g. ndarray of ints) Returns ------- a list
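A small example of the copy-on-repeat behaviour (assuming `cp` is the copy module):

src = [{'a': 1}, {'a': 2}]
out = all_distinct(src, [0, 0, 1])
print(out[0] is src[0])   # True: first occurrence is the original object
out[0]['a'] = 99          # mutates src[0] too...
print(out[1]['a'])        # 1: ...but the repeated index produced a deep copy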
def _decode_alt_names(self, alt_names):
    for alt_name in alt_names:
        tname = alt_name.getName()
        comp = alt_name.getComponent()
        if tname == "dNSName":
            key = "DNS"
            value = _decode_asn1_string(comp)
        elif tname == "uniformResourceIdentifier":
            key = "URI"
            value = _decode_asn1_string(comp)
        elif tname == "otherName":
            oid = comp.getComponentByName("type-id")
            value = comp.getComponentByName("value")
            if oid == XMPPADDR_OID:
                key = "XmppAddr"
                value = der_decoder.decode(value, asn1Spec=UTF8String())[0]
                value = _decode_asn1_string(value)
            elif oid == SRVNAME_OID:
                key = "SRVName"
                value = der_decoder.decode(value, asn1Spec=IA5String())[0]
                value = _decode_asn1_string(value)
            else:
                logger.debug("Unknown other name: {0}".format(oid))
                continue
        else:
            logger.debug("Unsupported general name: {0}".format(tname))
            continue
        self.alt_names[key].append(value)
Load SubjectAltName from an ASN.1 GeneralNames value. :Values: - `alt_names`: the SubjectAltName extension value :Types: - `alt_names`: `GeneralNames`
def delete(self, record):
    index, doc_type = self.record_to_index(record)
    return self.client.delete(
        id=str(record.id),
        index=index,
        doc_type=doc_type,
    )
Delete a record. :param record: Record instance.
def types(self, *args):
    # the format literal was elided; '{}({})' matches the documented
    # 'str(A) str(B) ... int(10)' output
    return ' '.join(['{}({})'.format(type(arg).__name__, arg) for arg in args])
Used for debugging, returns type of each arg. TYPES,ARG_1,...,ARG_N %{TYPES:A,...,10} -> 'str(A) str(B) ... int(10)'
def _build_provider_list():
    # source-name literals elided; 'basic', 'embedly', 'noembed' and 'list'
    # are assumed from the micawber bootstrap functions used below
    registry = None
    if appsettings.FLUENT_OEMBED_SOURCE == 'basic':
        registry = bootstrap_basic()
    elif appsettings.FLUENT_OEMBED_SOURCE == 'embedly':
        params = {}
        if appsettings.MICAWBER_EMBEDLY_KEY:
            params['key'] = appsettings.MICAWBER_EMBEDLY_KEY
        registry = bootstrap_embedly(**params)
    elif appsettings.FLUENT_OEMBED_SOURCE == 'noembed':
        registry = bootstrap_noembed(nowrap=1)
    elif appsettings.FLUENT_OEMBED_SOURCE == 'list':
        registry = ProviderRegistry()
        for regex, provider in appsettings.FLUENT_OEMBED_PROVIDER_LIST:
            registry.register(regex, Provider(provider))
    else:
        raise ImproperlyConfigured(
            "Invalid value of FLUENT_OEMBED_SOURCE, only "
            "'basic', 'embedly', 'noembed' or 'list' is supported.")
    # always allow extra providers on top of the configured source
    for regex, provider in appsettings.FLUENT_OEMBED_EXTRA_PROVIDERS:
        registry.register(regex, Provider(provider))
    return registry
Construct the provider registry, using the app settings.
def system_config_files(self):
    return [os.path.join(f, self.filename)
            for f in get_system_config_dirs(self.app_name, self.app_author)]
Get a list of absolute paths to the system config files.
def dew_point(self, db):
    # humidity-type literals were elided; the names below are assumed from
    # the dew_point_from_db_* helper functions
    if self._hum_type == 'Dewpoint':
        return self._hum_value
    elif self._hum_type == 'Wetbulb':
        return dew_point_from_db_wb(db, self._hum_value, self._barometric_pressure)
    elif self._hum_type == 'HumidityRatio':
        return dew_point_from_db_hr(db, self._hum_value, self._barometric_pressure)
    elif self._hum_type == 'Enthalpy':
        return dew_point_from_db_enth(db, self._hum_value / 1000, self._barometric_pressure)
Get the dew point (C), which is constant throughout the day (except at saturation). args: db: The maximum dry bulb temperature over the day.
def update(self, of):
    # the tuple of nine attribute-name literals was elided in the source;
    # COPIED_FILE_ATTRS is a placeholder marking the lost names
    for p in COPIED_FILE_ATTRS:
        setattr(self, p, getattr(of, p))
    return self
Update a file from another file, for copying
def aggregations(self):
    prev_month_start = get_prev_month(self.end, self.query.interval_)
    self.query.since(prev_month_start)
    self.query.get_terms("author_name")
    return self.query.get_list(dataframe=True)
Override parent method. Obtain list of the terms and their corresponding values using "terms" aggregations for the previous time period. :returns: a data frame containing terms and their corresponding values
def TryLink(self, text, extension):
    return self.TryBuild(self.env.Program, text, extension)
Compiles the program given in text to an executable env.Program, using extension as file extension (e.g. '.c'). Returns 1, if compilation was successful, 0 otherwise. The target is saved in self.lastTarget (for further processing).
def forms_valid(self, inlines):
    for formset in inlines:
        formset.save()
    return HttpResponseRedirect(self.get_success_url())
If the form and formsets are valid, save the associated models.
def _build_url(self, endpoint):
    if not issubclass(type(self), ChatApiBase) and not self.subdomain:
        raise ZenpyException("subdomain is required when accessing the Zendesk API!")
    if self.subdomain:
        # netloc format elided; '<subdomain>.<domain>' is assumed
        endpoint.netloc = '{}.{}'.format(self.subdomain, self.domain)
    else:
        endpoint.netloc = self.domain
    endpoint.prefix_path(self.api_prefix)
    return endpoint.build()
Build complete URL
def parse_args():
    # NOTE: every option string, help text, and env-var name in this parser
    # was lost in extraction; Ellipsis placeholders mark the elided literals.
    parser = argparse.ArgumentParser(prog=..., usage=...)
    parser.add_argument(..., ..., action=..., dest=..., help=..., default=False)
    parser.add_argument(..., ..., action=..., dest=..., default=os.getenv(...), help=..., metavar=...)
    parser.add_argument(..., ..., action=..., dest=..., default=os.getenv(...), help=..., metavar=...)
    parser.add_argument(..., ..., action=..., dest=..., default=os.getenv(...), help=..., metavar=...)
    parser.add_argument(..., ..., action=..., dest=..., help=..., default=False)
    parser.add_argument(..., ..., action=..., dest=..., help=...)
    parser.add_argument(..., ..., action=..., dest=..., metavar=..., default=None, help=...)
    opts = parser.parse_args()
    return opts
Parse the arguments
def strptime_fuzzy(s):
    import dateutil.parser
    dt = dateutil.parser.parse(str(s), fuzzy=True)
    return dt
Fuzzy date string parsing. Note: this returns the current date if none is found. If only a year is provided, the current month and day are used.
def compress_and_sort_index(self):
    # NOTE: the filename template, csv dialect name, gzip suffix, and the nine
    # header column names were elided in the source; the uppercase names below
    # are placeholders marking the lost literals.
    idx_fname = INDEX_FNAME_TEMPLATE.format(**self.__dict__)
    try:
        reader = csv.reader(open(idx_fname), dialect=CSV_DIALECT)
    except IOError:
        return False
    index = [x for x in reader if x]
    sorted_index = sorted(index, key=itemgetter(0))

    gzip_idx_fname = idx_fname + GZIP_SUFFIX
    header = INDEX_HEADER_COLUMNS

    s = cStringIO.StringIO()
    writer = csv.writer(s, dialect=CSV_DIALECT)
    writer.writerow(header)
    for line in sorted_index:
        writer.writerow(line)

    compressed_index = inline_compress_chunk(s.getvalue())
    s.close()
    with open(gzip_idx_fname, 'wb') as fp:
        fp.write(compressed_index)
    os.remove(idx_fname)
    return True
Sort index, add header, and compress. :rtype: bool :returns: True
def _parse(self, stream, context, path):
    num_players = context._._._.replay.num_players
    start = stream.tell()
    read_bytes = stream.read()

    # look for the UserPatch 1.4/1.5 sync markers
    marker_up14 = read_bytes.find(b"\x16\xc6\x00\x00\x00\x21")
    marker_up15 = read_bytes.find(b"\x16\xf0\x00\x00\x00\x21")
    marker = -1
    if marker_up14 > 0 and marker_up15 < 0:
        marker = marker_up14
    elif marker_up15 > 0 and marker_up14 < 0:
        marker = marker_up15

    if marker > 0:
        # walk back until the 16-bit counter before the marker matches
        count = 0
        while struct.unpack("<H", read_bytes[marker - 2:marker])[0] != count:
            marker -= 1
            count += 1
        backtrack = 43 + num_players
    else:
        marker = read_bytes.find(b"\xf6\x28\x9c\x3f")
        backtrack = ((1817 * (num_players - 1)) + 4 + 19)

    end = start + marker - backtrack
    stream.seek(end)
    return end
Parse until the end of objects data.
def a_torispherical(D, f, k):
    alpha = asin((1 - 2*k)/(2*(f - k)))
    a1 = f*D*(1 - cos(alpha))
    a2 = k*D*cos(alpha)
    return a1 + a2
r'''Calculates depth of a torispherical head according to [1]_. .. math:: a = a_1 + a_2 .. math:: \alpha = \sin^{-1}\frac{1-2k}{2(f-k)} .. math:: a_1 = fD(1-\cos\alpha) .. math:: a_2 = kD\cos\alpha Parameters ---------- D : float Diameter of the main cylindrical section, [m] f : float Dish-radius parameter; fD = dish radius [1/m] k : float knuckle-radius parameter ; kD = knuckle radius [1/m] Returns ------- a : float Depth of head [m] Examples -------- Example from [1]_. >>> a_torispherical(D=96., f=0.9, k=0.2) 25.684268924767125 References ---------- .. [1] Jones, D. "Calculating Tank Volume." Text. Accessed December 22, 2015. http://www.webcalc.com.br/blog/Tank_Volume.PDF
def _update_object_map(self, obj_map):
    # NOTE: large parts of this method were lost in extraction -- the config
    # parameter id, the markup-rewriting body of replace_url_in_display_text
    # (its BeautifulSoup handling of media tags), and several dict keys. The
    # structure below preserves what survives and marks the gaps.
    try:
        super(FilesRecord, self)._update_object_map(obj_map)
    except AttributeError:
        pass

    bypass_asset_content_authorization = False
    acls = None
    try:
        config = self.my_osid_object._runtime.get_configuration()
        parameter_id = Id()  # parameter id string elided in the source
        bypass_asset_content_authorization = \
            config.get_value_by_parameter(parameter_id).get_boolean_value()
    except (AttributeError, KeyError, NotFound):
        pass

    def replace_url_in_display_text(potential_display_text, dict_files_map):
        # ... body largely elided in the source: it parses the display text,
        # resolves "AssetContent:<label>" references through dict_files_map to
        # asset/asset-content ids (via acls.get_asset_content when bypassing
        # authorization, self._get_asset_content otherwise), substitutes
        # asset-content URLs and alt text, and recurses into list values via
        # check_list_children ...
        return potential_display_text

    def check_list_children(potential_text_list, list_files_map):
        updated_list = []
        for child in potential_text_list:
            if isinstance(child, dict):
                files_map = list_files_map
                if 'fileIds' in child:  # key assumed; the literal was elided
                    files_map = child['fileIds']
                updated_list.append(replace_url_in_display_text(child, files_map))
            elif isinstance(child, list):
                updated_list.append(check_list_children(child, list_files_map))
            else:
                updated_list.append(child)
        return updated_list

    if bypass_asset_content_authorization:
        for key, data in obj_map.items():
            if isinstance(data, dict):
                obj_map[key] = replace_url_in_display_text(data, original_files_map)
            elif isinstance(data, list):
                obj_map[key] = check_list_children(data, original_files_map)
Loop through all the keys in self.my_osid_object._my_map and see if any of them contain text like "AssetContent:<label>". If so, assume it is markup and replace the string with asset_content.get_url().
def AppConfigFlagHandler(feature=None):
    if not current_app:
        log.warn(u"Got a request to check for {feature} but we're outside the request context. Returning False".format(feature=feature))
        return False
    try:
        return current_app.config[FEATURE_FLAGS_CONFIG][feature]
    except (AttributeError, KeyError):
        raise NoFeatureFlagFound()
This is the default handler. It checks for feature flags in the current app's configuration. For example, to have 'unfinished_feature' hidden in production but active in development: config.py class ProductionConfig(Config): FEATURE_FLAGS = { 'unfinished_feature' : False, } class DevelopmentConfig(Config): FEATURE_FLAGS = { 'unfinished_feature' : True, }
def _read_mode_ts(self, size, kind):
    # NOTE: the f-string error messages and the result dict keys were emptied
    # during extraction; descriptive placeholders are assumed below.
    if size > 40 or size < 4:
        raise ProtocolError(f'[Optno {kind}] invalid format')
    _tptr = self._read_unpack(1)
    _oflg = self._read_binary(1)
    _oflw = int(_oflg[:4], base=2)
    _flag = int(_oflg[4:], base=2)
    if _tptr < 5:
        raise ProtocolError(f'[Optno {kind}] invalid format')

    data = dict(
        kind=kind,
        type=self._read_opt_type(kind),
        length=size,
        pointer=_tptr,
        overflow=_oflw,
        flag=_flag,
    )

    endpoint = min(_tptr, size)
    if _flag == 0:  # timestamps only
        if (size - 4) % 4 != 0:
            raise ProtocolError(f'[Optno {kind}] invalid format')
        counter = 5
        timestamp = list()
        while counter < endpoint:
            counter += 4
            time = self._read_unpack(4, lilendian=True)
            timestamp.append(datetime.datetime.fromtimestamp(time))
        data['timestamp'] = timestamp or None
    elif _flag == 1 or _flag == 3:  # address/timestamp pairs
        if (size - 4) % 8 != 0:
            raise ProtocolError(f'[Optno {kind}] invalid format')
        counter = 5
        ipaddress = list()
        timestamp = list()
        while counter < endpoint:
            counter += 8
            ipaddress.append(self._read_ipv4_addr())
            time = self._read_unpack(4, lilendian=True)
            timestamp.append(datetime.datetime.fromtimestamp(time))
        data['ip'] = ipaddress or None
        data['timestamp'] = timestamp or None
    else:
        data['data'] = self._read_fileng(size - 4) or None

    return data
Read Time Stamp option. Positional arguments: * size - int, length of option * kind - int, 68 (TS) Returns: * dict -- extracted Time Stamp (TS) option Structure of Timestamp (TS) option [RFC 791]: +--------+--------+--------+--------+ |01000100| length | pointer|oflw|flg| +--------+--------+--------+--------+ | internet address | +--------+--------+--------+--------+ | timestamp | +--------+--------+--------+--------+ | . | . . Octets Bits Name Description 0 0 ip.ts.kind Kind (25) 0 0 ip.ts.type.copy Copied Flag (0) 0 1 ip.ts.type.class Option Class (0) 0 3 ip.ts.type.number Option Number (25) 1 8 ip.ts.length Length (≤40) 2 16 ip.ts.pointer Pointer (≥5) 3 24 ip.ts.overflow Overflow Octets 3 28 ip.ts.flag Flag 4 32 ip.ts.ip Internet Address 8 64 ip.ts.timestamp Timestamp
def _infer_shape(self, dimensions):
    n = np.prod(dimensions)
    m = np.prod(abs(np.array(self._shape)))
    v = np.array(self._shape)
    v[v == -1] = n // m
    return tuple(v)
Replaces the -1 wildcard in the output shape vector. This function infers the correct output shape given the input dimensions. Args: dimensions: List of input non-batch dimensions. Returns: Tuple of non-batch output dimensions.
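A standalone re-implementation to make the wildcard arithmetic concrete (the function name here is illustrative, not part of the original module):

import numpy as np

def infer_shape(dimensions, shape):
    n = np.prod(dimensions)                # total input elements, e.g. 3*4 = 12
    m = np.prod(np.abs(np.array(shape)))   # |-1| counts as 1, so m = 6 for (-1, 6)
    v = np.array(shape)
    v[v == -1] = n // m                    # wildcard becomes 12 // 6 = 2
    return tuple(v)

print(infer_shape([3, 4], (-1, 6)))  # -> (2, 6)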
def _show_annotation_box(self, event):
    ax = event.artist.axes
    # display-mode literals were elided; 'multiple' and 'single' are assumed
    if self.display != 'multiple':
        annotation = self.annotations[ax]
    elif event.mouseevent in self.annotations:
        # avoid creating several boxes for the same click event
        annotation = self.annotations[event.mouseevent]
    else:
        annotation = self.annotate(ax, **self._annotation_kwargs)
        self.annotations[event.mouseevent] = annotation
    if self.display == 'single':
        # hide any other annotation boxes so only one is visible at a time
        for ann in self.annotations.values():
            ann.set_visible(False)
    self.update(event, annotation)
Update an existing box or create an annotation box for an event.
def post_freeze_hook(self):
    if os.path.isdir(self._metadata_fragments_abspath):
        shutil.rmtree(self._metadata_fragments_abspath)
Post :meth:`dtoolcore.ProtoDataSet.freeze` cleanup actions. This method is called at the end of the :meth:`dtoolcore.ProtoDataSet.freeze` method. In the :class:`dtoolcore.storage_broker.DiskStorageBroker` it removes the temporary directory for storing item metadata fragment files.
def com_google_fonts_check_whitespace_ink(ttFont):
    from fontbakery.utils import get_glyph_name, glyph_has_ink

    WHITESPACE_CHARACTERS = [
        0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x0020, 0x0085, 0x00A0,
        0x1680, 0x2000, 0x2001, 0x2002, 0x2003, 0x2004, 0x2005, 0x2006,
        0x2007, 0x2008, 0x2009, 0x200A, 0x2028, 0x2029, 0x202F, 0x205F,
        0x3000, 0x180E, 0x200B, 0x2060, 0xFEFF
    ]

    failed = False
    for codepoint in WHITESPACE_CHARACTERS:
        g = get_glyph_name(ttFont, codepoint)
        if g is not None and glyph_has_ink(ttFont, g):
            failed = True
            yield FAIL, ("Glyph \"{}\" has ink."
                         " It needs to be replaced by"
                         " an empty glyph.").format(g)
    if not failed:
        yield PASS, "There is no whitespace glyph with ink."
Whitespace glyphs have ink?
def get_html_attrs(kwargs=None):
    # elided literals reconstructed as assumptions: the 'classes' key, the
    # whitespace regex, and the attribute format strings follow the
    # docstring's example output
    kwargs = kwargs or {}
    attrs = []
    props = []

    classes = kwargs.get('classes', '').strip()
    if classes:
        classes = ' '.join(re.split(r'\s+', classes))
        classes = to_unicode(quoteattr(classes))
        attrs.append('class=%s' % classes)
    try:
        del kwargs['classes']
    except KeyError:
        pass

    for key, value in iteritems(kwargs):
        key = key.replace('_', '-')
        key = to_unicode(key)
        if isinstance(value, bool):
            if value is True:
                props.append(key)
        else:
            value = quoteattr(Markup(value))
            attrs.append(u'%s=%s' % (key, value))

    attrs.sort()
    props.sort()
    attrs.extend(props)
    return u' '.join(attrs)
Generate HTML attributes from the provided keyword arguments. The output value is sorted by the passed keys, to provide consistent output. Because of the frequent use of the normally reserved keyword `class`, `classes` is used instead. Also, all underscores are translated to regular dashes. Set any property with a `True` value. >>> get_html_attrs({'id': 'text1', 'classes': 'myclass', 'data_id': 1, 'checked': True}) u'class="myclass" data-id="1" id="text1" checked'
def get_dataset():
    # NOTE: the pickle filename and the record/point dict keys were elided in
    # the source; 'datasets.pickle', 'segmentation', 'data' and 'time' are
    # assumed from the surrounding logic.
    seg_data = "segmentation-X.npy"
    seg_labels = "segmentation-y.npy"
    if os.path.isfile(seg_data) and os.path.isfile(seg_labels):
        X = numpy.load(seg_data)
        y = numpy.load(seg_labels)
        with open('datasets.pickle', 'rb') as f:
            datasets = pickle.load(f)
        return (X, y, datasets)
    datasets = get_segmented_raw_data()
    X, y = [], []
    for i, data in enumerate(datasets):
        if i % 10 == 0:
            logging.info("[Create Dataset] i=%i/%i", i, len(datasets))
        segmentation = json.loads(data['segmentation'])
        recording = json.loads(data['data'])
        X_symbol = [get_median_stroke_distance(recording)]
        if len([p for s in recording for p in s if p['time'] is None]) > 0:
            continue
        combis = itertools.combinations(list(range(len(recording))), 2)
        for strokeid1, strokeid2 in combis:
            stroke1 = recording[strokeid1]
            stroke2 = recording[strokeid2]
            if len(stroke1) == 0 or len(stroke2) == 0:
                logging.debug("stroke len 0. Skip.")
                continue
            X.append(get_stroke_features(recording, strokeid1, strokeid2) + X_symbol)
            same_symbol = (_get_symbol_index(strokeid1, segmentation) ==
                           _get_symbol_index(strokeid2, segmentation))
            y.append(int(same_symbol))
    X = numpy.array(X, dtype=numpy.float32)
    y = numpy.array(y, dtype=numpy.int32)
    numpy.save(seg_data, X)
    numpy.save(seg_labels, y)
    datasets = filter_recordings(datasets)
    with open('datasets.pickle', 'wb') as f:
        pickle.dump(datasets, f, protocol=pickle.HIGHEST_PROTOCOL)
    return (X, y, datasets)
Create a dataset for machine learning of segmentations. Returns ------- tuple : (X, y, datasets) where X is a list of feature tuples, y is a list of labels (0 for 'not in one symbol' and 1 for 'in symbol'), and datasets are the filtered raw recordings.
def enable_support_autoupload(self, **kwargs):
    # kwarg names and XML literals were elided in the source; the names
    # below are assumptions following the usual pynos conventions
    enabled = kwargs.pop('enabled')
    callback = kwargs.pop('callback', self._callback)
    if not isinstance(enabled, bool):
        raise ValueError('%s must be a `bool`.' % repr(enabled))
    state_args = dict()
    autoupload_state = getattr(self._ras, 'support_autoupload_enable')
    config = autoupload_state(**state_args)
    if not enabled:
        shutdown = config.find('.//*enable')
        shutdown.set('operation', 'delete')
    return callback(config)
Set support autoupload state. Args: enabled (bool): Is Autoupload enabled? (True, False). callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Examples: >>> import pynos.device >>> switches = ['10.24.39.211', '10.24.39.203'] >>> auth = ('admin', 'password') >>> for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... enabled = True ... output = dev.ras.enable_support_autoupload( ... enabled=enabled) ... enabled = False ... output = dev.ras.enable_support_autoupload( ... enabled=enabled)
def retryable_http_error(e):
    # the tuple of retryable status codes was elided in the source;
    # RETRYABLE_HTTP_CODES is a placeholder marking the lost literals
    if isinstance(e, urllib.error.HTTPError) and e.code in RETRYABLE_HTTP_CODES:
        return True
    if isinstance(e, BadStatusLine):
        return True
    return False
Determine if an error encountered during an HTTP download is likely to go away if we try again.
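A sketch of the retry loop this predicate is meant for; the backoff policy and attempt count are illustrative assumptions:

import time
import urllib.request

def download_with_retries(url, attempts=3):
    for attempt in range(attempts):
        try:
            return urllib.request.urlopen(url).read()
        except Exception as e:
            # only sleep and retry for transient server-side failures
            if attempt + 1 < attempts and retryable_http_error(e):
                time.sleep(2 ** attempt)
                continue
            raise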
def insert(self, name, index, value): return self._sequence[name].insert(index, value)
Insert a value at the passed index in the named header.
def close(self):
    self.stopped.set()
    for event in self.to_be_stopped:
        event.set()
    if self._receiver_thread is not None:
        self._receiver_thread.join()
    self._socket.close()
Stop the client.
def merge(s, t):
    for k, v in t.items():
        if isinstance(v, dict):
            if k not in s:
                s[k] = v
                continue
            s[k] = merge(s[k], v)
            continue
        s[k] = v
    return s
Merge dictionary t into s.
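Note that merge mutates and returns its first argument; nested dicts are merged recursively and scalars in t win:

s = {'a': 1, 'b': {'x': 1, 'y': 2}}
t = {'b': {'y': 20, 'z': 30}, 'c': 3}
print(merge(s, t))  # {'a': 1, 'b': {'x': 1, 'y': 20, 'z': 30}, 'c': 3}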
def create_node(args):
    # NOTE: the query method names, response keys, and the boot command/args
    # literals were elided in the source; the uppercase names below are
    # placeholders marking the lost values.
    node = query(method=CREATE_METHOD, args=args, http_method=HTTP_POST)
    action = query(
        method=ACTION_METHOD,
        server_id=node[RESPONSE_KEY][ID_KEY],
        command=BOOT_COMMAND,
        args=BOOT_ARGS,
        http_method=HTTP_POST,
    )
    return node
Create a node.
def _validate_image_orientation(image_orientation):
    row_cosine, column_cosine, slice_cosine = _extract_cosines(image_orientation)

    if not _almost_zero(np.dot(row_cosine, column_cosine), 1e-4):
        raise DicomImportException("Non-orthogonal direction cosines: {}, {}".format(row_cosine, column_cosine))
    elif not _almost_zero(np.dot(row_cosine, column_cosine), 1e-8):
        logger.warning("Direction cosines aren't quite orthogonal: {}, {}".format(row_cosine, column_cosine))

    if not _almost_one(np.linalg.norm(row_cosine), 1e-8):
        logger.warning("The row direction cosine's magnitude is not quite 1: {}".format(row_cosine))
    if not _almost_one(np.linalg.norm(column_cosine), 1e-8):
        logger.warning("The column direction cosine's magnitude is not quite 1: {}".format(column_cosine))
Ensure that the image orientation is supported - The direction cosines have magnitudes of 1 (just in case) - The direction cosines are perpendicular
def register_filter(self, filter_name, filter_ref, force=False):
    if not force and (filter_name in self.filters_list()):
        # the original message referenced an undefined `ext_in`; the
        # filter name is used instead
        self.log_warning("Filter %s already exists, ignoring redefinition." % filter_name)
        return
    self.__jinja2_environment.filters[filter_name] = filter_ref
Add/register one filter. Args: filter_name (str): Filter name used inside :program:`Jinja2` tags. filter_ref: Reference to the filter itself, i.e. the corresponding :program:`Python` function. force (bool): If set to ``True``, forces the registration of a filter no matter if it already exists or not. Note: The list of user added/registered filters can be retrieved with :meth:`registered_filters_list`
def append_overhead_costs(costs, new_id, overhead_percentage=0.15):
    # dict keys elided in the source; 'id', 'task' and 'time' are assumed
    total_time = 0
    for item in costs:
        total_time += item['time']
    costs.append({
        'id': new_id,
        'task': 'Overhead',
        'time': total_time * overhead_percentage,
    })
    return costs
Adds overhead costs (15% by default) to the list of costs. Usage:: from rapid_prototyping.context.utils import append_overhead_costs costs = [ .... ] costs = append_overhead_costs(costs, MAIN_ID + get_counter(counter)[0]) :param costs: Your final list of costs. :param new_id: The id that this new item should get.
def tournament_name2number(self, name):
    # dict keys elided; 'name' and 'tournament' are assumed from the API schema
    tournaments = self.get_tournaments()
    d = {t['name']: t['tournament'] for t in tournaments}
    return d.get(name, None)
Translate tournament name to tournament number. Args: name (str): tournament name to translate Returns: number (int): number of the tournament or `None` if unknown. Examples: >>> NumerAPI().tournament_name2number('delta') 4 >>> NumerAPI().tournament_name2number('foo') None
def _find_package(self, root_package):
    package = self.path.replace(root_package, "")
    if package.endswith(".py"):
        package = package[:-3]
    package = package.replace(os.path.sep, MODULE_SEP)
    root_package = get_folder_name(root_package)
    package = root_package + package
    return package
Finds package name of file :param root_package: root package :return: package name
def stem(self):
    def s(tokens):
        return [PorterStemmer().stem(t) for t in tokens]
    self.stems = list(map(s, self.tokens))
Stem tokens with Porter Stemmer.
def rank(self, dte):
    timestamp = self.pickler.dumps(dte)
    return self.backend_structure().rank(timestamp)
The rank of a given *dte* in the timeseries
def pre_save_title(instance, **kwargs):
    # separator literals elided; a comma-separated language list is assumed
    if instance.article.languages:
        languages = instance.article.languages.split(',')
    else:
        languages = []
    if instance.language not in languages:
        languages.append(instance.language)
        instance.article.languages = ','.join(languages)
        instance.article._publisher_keep_state = True
        instance.article.save(no_signals=True)
Update article.languages
def set_window_user_pointer(window, pointer):
    data = (False, pointer)
    if not isinstance(pointer, ctypes.c_void_p):
        data = (True, pointer)
        # wrap the plain Python object in a void pointer
        pointer = ctypes.cast(ctypes.pointer(ctypes.py_object(pointer)), ctypes.c_void_p)

    window_addr = ctypes.cast(ctypes.pointer(window),
                              ctypes.POINTER(ctypes.c_long)).contents.value
    # keep a reference so the object outlives the C call
    _window_user_data_repository[window_addr] = data
    _glfw.glfwSetWindowUserPointer(window, pointer)
Sets the user pointer of the specified window. You may pass a normal python object into this function and it will be wrapped automatically. The object will be kept in existence until the pointer is set to something else or until the window is destroyed. Wrapper for: void glfwSetWindowUserPointer(GLFWwindow* window, void* pointer);
def has_causal_out_edges(graph: BELGraph, node: BaseEntity) -> bool: return any( data[RELATION] in CAUSAL_RELATIONS for _, _, data in graph.out_edges(node, data=True) )
Return true if the node contains any out_edges that are causal.
def create_observations(params: Dict[str, Dict[str, Any]], access_token: str) -> List[Dict[str, Any]]:
    response = requests.post(url="{base_url}/observations.json".format(base_url=INAT_BASE_URL),
                             json=params,
                             headers=_build_auth_header(access_token))
    response.raise_for_status()
    return response.json()
Create a single observation, or several if passed an array. :param params: :param access_token: the access token, as returned by :func:`get_access_token()` :return: iNaturalist's JSON response, as a Python object :raise: requests.HTTPError, if the call is not successful. iNaturalist returns an error 422 (unprocessable entity) if it rejects the observation data (for example an observation date in the future or a latitude > 90). In that case the exception's `response` attribute gives details about the errors. allowed params: see https://www.inaturalist.org/pages/api+reference#post-observations Example: params = {'observation': {'species_guess': 'Pieris rapae'}, } TODO investigate: according to the doc, we should be able to pass multiple observations (in an array, renaming observation to observations), but as far as I saw they are not created (while a status of 200 is returned)
def create_repo(self, repo_name=None, feed=None, envs=[], checksum_type="sha256", query='/repositories/'):
    # NOTE: the Pulp field names and path templates below were elided in the
    # source; they are reconstructed from Pulp's yum importer/distributor
    # conventions and should be treated as assumptions.
    data = {'display_name': repo_name,
            'notes': {'_repo-type': 'rpm-repo'}}

    juicer.utils.Log.log_debug("Create Repo: %s", repo_name)

    for env in envs:
        if juicer.utils.repo_exists_p(repo_name, self.connectors[env], env):
            juicer.utils.Log.log_info("repo `%s` already exists in %s... skipping!",
                                      (repo_name, env))
            continue
        else:
            data['relative_path'] = '/%s/%s/' % (env, repo_name)
            data['id'] = '-'.join([repo_name, env])
            _r = self.connectors[env].post(query, data)
            if _r.status_code == Constants.PULP_POST_CREATED:
                imp_query = '/repositories/%s/importers/' % data['id']
                imp_data = {'importer_id': 'yum_importer',
                            'importer_type_id': 'yum_importer',
                            'importer_config': {}}
                if feed:
                    imp_data['importer_config']['feed_url'] = feed
                _r = self.connectors[env].post(imp_query, imp_data)

                dist_query = '/repositories/%s/distributors/' % data['id']
                dist_data = {'distributor_id': 'yum_distributor',
                             'distributor_type_id': 'yum_distributor',
                             'distributor_config': {
                                 'relative_url': '/%s/%s/' % (env, repo_name),
                                 'http': True,
                                 'https': True,
                                 'checksum_type': checksum_type},
                             'auto_publish': True,
                             'relative_path': '/%s/%s/' % (env, repo_name)}
                _r = self.connectors[env].post(dist_query, dist_data)
                if _r.status_code == Constants.PULP_POST_CREATED:
                    pub_query = '/repositories/%s/actions/publish/' % data['id']
                    pub_data = {'id': 'yum_distributor'}
                    _r = self.connectors[env].post(pub_query, pub_data)
                    if _r.status_code == Constants.PULP_POST_ACCEPTED:
                        juicer.utils.Log.log_info("created repo `%s` in %s", repo_name, env)
                else:
                    _r.raise_for_status()
            else:
                _r.raise_for_status()
    return True
`repo_name` - Name of repository to create `feed` - Repo URL to feed from `checksum_type` - Used for generating meta-data Create repository in specified environments, associate the yum_distributor with it and publish the repo
def start(self, container, **kwargs):
    # log-message format elided; a descriptive one is assumed
    self.push_log("Starting container '{0}'.".format(container))
    super(DockerFabricClient, self).start(container, **kwargs)
Identical to :meth:`docker.api.container.ContainerApiMixin.start` with additional logging.
def new():
    form = CommunityForm(formdata=request.values)

    ctx = mycommunities_ctx()
    # dict keys, messages, and template/config names below were elided in the
    # source and are reconstructed as assumptions
    ctx.update({
        'form': form,
        'is_new': True,
        'community': None,
    })

    if form.validate_on_submit():
        data = copy.deepcopy(form.data)
        community_id = data.pop('identifier')
        del data['logo']

        community = Community.create(community_id, current_user.get_id(), **data)

        file = request.files.get('logo', None)
        if file:
            if not community.save_logo(file.stream, file.filename):
                form.logo.errors.append(_('Cannot add this file as a logo.'))
                db.session.rollback()
                community = None

        if community:
            db.session.commit()
            flash("Community was successfully created.", category='success')
            return redirect(url_for('.edit', community_id=community.id))

    return render_template(
        current_app.config['COMMUNITIES_NEW_TEMPLATE'],
        community_form=form,
        **ctx
    )
Create a new community.
def write(self):
    with open(self.path, 'w') as f:
        self.config.write(f)
Write the configuration to :attr:`path`
def _RegisterProcess(self, process):
    if process is None:
        raise ValueError('Missing process.')
    if process.pid in self._processes_per_pid:
        # message format elided; a descriptive one is assumed
        raise KeyError('Already managing process: {0:s} (PID: {1:d})'.format(
            process.name, process.pid))
    self._processes_per_pid[process.pid] = process
Registers a process with the engine. Args: process (MultiProcessBaseProcess): process. Raises: KeyError: if the process is already registered with the engine. ValueError: if the process is missing.
def legacy_events_view(request):
    events = TeacherEvent.objects.all()
    event_count = events.count()
    paginator = Paginator(events, 100)
    page = request.GET.get('page')
    try:
        events = paginator.page(page)
    except PageNotAnInteger:
        events = paginator.page(1)
    except EmptyPage:
        events = paginator.page(paginator.num_pages)
    # template name and context keys were elided; the names below are assumed
    return render_to_response(
        'legacy_events.html',
        {'title': "Legacy Events",
         'events': events,
         'event_count': event_count},
        context_instance=RequestContext(request)
    )
View to see legacy events.
def _uses_db(func, self, *args, **kwargs):
    if not self.session:
        _logger.debug('initializing DB session')  # message assumed
        self._init_db_session()
    try:
        ret = func(self, *args, **kwargs)
        self.session.commit()
    except:
        self.session.rollback()
        tb = traceback.format_exc()
        _logger.debug(tb)
        raise
    finally:
        _logger.debug('closing DB session')  # message assumed
        self.session.close()
    return ret
Use as a decorator for operations on the database, to ensure connection setup and teardown. Can only be used on methods on objects with a `self.session` attribute.
def orient_undirected_graph(self, data, umg, alg='HC'):  # default elided; 'HC' assumed
    warnings.warn("The pairwise GNN model is computed on each edge of the UMG "
                  "to initialize the model and start CGNN with a DAG")
    gnn = GNN(nh=self.nh, lr=self.lr)
    og = gnn.orient_graph(data, umg, nb_runs=self.nb_runs,
                          nb_max_runs=self.nb_runs, nb_jobs=self.nb_jobs,
                          train_epochs=self.train_epochs,
                          test_epochs=self.test_epochs,
                          verbose=self.verbose, gpu=self.gpu)
    dag = dagify_min_edge(og)
    return self.orient_directed_graph(data, dag, alg=alg)
Orient the undirected graph using GNN and apply CGNN to improve the graph. Args: data (pandas.DataFrame): Observational data on which causal discovery has to be performed. umg (nx.Graph): Graph that provides the skeleton, on which the GNN then the CGNN algorithm will be applied. alg (str): Exploration heuristic to use, among ["HC", "HCr", "tabu", "EHC"] Returns: networkx.DiGraph: Solution given by CGNN. .. note:: GNN (``cdt.causality.pairwise.GNN``) is first used to orient the undirected graph and output a DAG before applying CGNN.
def gradient(self, P, Q, Y, i):
    return 4 * sum([
        (P[i, j] - Q[i, j]) * (Y[i] - Y[j]) *
        (1 + np.linalg.norm(Y[i] - Y[j]) ** 2) ** -1
        for j in range(Y.shape[0])
    ])
Computes the gradient of KL divergence with respect to the i'th example of Y
def read_file(input_filename):
    with open(input_filename) as f:
        g = GreenGenesTaxonomy.read(f)
    return g
Like read() except uses a file rather than an IO stream, for convenience
def _color_attr(self, ground, attr):
    attr = colors[ground][attr]
    attrs = self.cursor_attributes
    if ground == "foreground":
        self.cursor_attributes = (attrs[0], attr, attrs[2])
    elif ground == "background":
        self.cursor_attributes = (attrs[0], attrs[1], attr)
Given a color attribute, set the current cursor appropriately.
def query(self, msg_type, query, *parameters, **options):
    # a doctest fragment that leaked into the body has been removed;
    # the exception-message literals were elided and are assumed
    if not self._connection:
        raise QConnectionException('Connection is not established.')

    if parameters and len(parameters) > 8:
        raise QWriterException('Too many parameters.')

    if not parameters or len(parameters) == 0:
        self._writer.write(query, msg_type, **self._options.union_dict(**options))
    else:
        self._writer.write([query] + list(parameters), msg_type,
                           **self._options.union_dict(**options))
Performs a query against a q service. In typical use case, `query` is the name of the function to call and `parameters` are its parameters. When `parameters` list is empty, the query can be an arbitrary q expression (e.g. ``0 +/ til 100``). Calls a anonymous function with a single parameter: >>> q.query(qconnection.MessageType.SYNC,'{til x}', 10) Executes a q expression: >>> q.query(qconnection.MessageType.SYNC,'til 10') :Parameters: - `msg_type` (one of the constants defined in :class:`.MessageType`) - type of the query to be executed - `query` (`string`) - query to be executed - `parameters` (`list` or `None`) - parameters for the query :Options: - `single_char_strings` (`boolean`) - if ``True`` single char Python strings are encoded as q strings instead of chars, **Default**: ``False`` :raises: :class:`.QConnectionException`, :class:`.QWriterException`
def _add_getter(self):
    property_ = property(self._getter, None, None)
    setattr(self._element_cls, self._prop_name, property_)
Add a read-only ``{prop_name}`` property to the element class for this child element.
def decorate(self, function_or_name):
    # a doctest fragment that leaked into the body has been removed
    if callable(function_or_name):
        return self._decorate(function_or_name.__name__, function_or_name)
    else:
        return partial(self._decorate, function_or_name)
Decorate a function to time the execution The method can be called with or without a name. If no name is given the function defaults to the name of the function. :keyword function_or_name: The name to post to or the function to wrap >>> from statsd import Timer >>> timer = Timer('application_name') >>> >>> @timer.decorate ... def some_function(): ... # resulting timer name: application_name.some_function ... pass >>> >>> @timer.decorate('my_timer') ... def some_other_function(): ... # resulting timer name: application_name.my_timer ... pass
def setComponentByPosition(self, idx, value=noValue,
                           verifyConstraints=True,
                           matchTags=True,
                           matchConstraints=True):
    oldIdx = self._currentIdx
    Set.setComponentByPosition(self, idx, value, verifyConstraints, matchTags, matchConstraints)
    self._currentIdx = idx
    if oldIdx is not None and oldIdx != idx:
        self._componentValues[oldIdx] = noValue
    return self
Assign |ASN.1| type component by position. Equivalent to Python sequence item assignment operation (e.g. `[]`). Parameters ---------- idx: :class:`int` Component index (zero-based). Must either refer to existing component or to N+1 component. In the latter case a new component type gets instantiated (if *componentType* is set, or given ASN.1 object is taken otherwise) and appended to the |ASN.1| sequence. Keyword Args ------------ value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative A Python value to initialize |ASN.1| component with (if *componentType* is set) or ASN.1 value object to assign to |ASN.1| component. Once a new value is set to *idx* component, previous value is dropped. verifyConstraints : :class:`bool` If `False`, skip constraints validation matchTags: :class:`bool` If `False`, skip component tags matching matchConstraints: :class:`bool` If `False`, skip component constraints matching Returns ------- self
def update_user(self, user_id, **kwargs):
    api = self._get_api(iam.AccountAdminApi)
    user = User._create_request_map(kwargs)
    body = iam.UserUpdateReq(**user)
    return User(api.update_user(user_id, body))
Update user properties of specified user. :param str user_id: The ID of the user to update (Required) :param str username: The unique username of the user :param str email: The unique email of the user :param str full_name: The full name of the user :param str password: The password string of the user. :param str phone_number: Phone number of the user :param bool terms_accepted: Is 'General Terms & Conditions' accepted :param bool marketing_accepted: Is receiving marketing information accepted? :returns: the updated user object :rtype: User
def write_file_chunk(
        file_path: str,
        packed_chunk: str,
        append: bool = True,
        offset: int = -1
):
    # mode literals elided; append/write modes are assumed
    mode = 'a' if append else 'w'
    contents = unpack_chunk(packed_chunk)
    writer.write_file(file_path, contents, mode=mode, offset=offset)
Write or append the specified chunk data to the given file path, unpacking the chunk before writing. If the file does not yet exist, it will be created. Set the append argument to False if you do not want the chunk to be appended to an existing file. :param file_path: The file where the chunk will be written or appended :param packed_chunk: The packed chunk data to write to the file. It will be unpacked before the file is written. :param append: Whether or not the chunk should be appended to the existing file. If False the chunk data will overwrite the existing file. :param offset: The byte offset in the file where the chunk should be written. If the value is less than zero, the chunk will be written or appended based on the `append` argument. Note that if you indicate an append write mode and an offset, the mode will be forced to write instead of append.
def update_region_to_controller(self, region):
    region_position = 1
    if self.ignore_region:
        api_logger.debug("IGNORE_REGION set, not updating controller region.")
        return

    api_logger.debug("Updating Controller Region")
    api_logger.debug("CONTROLLER = %s", self.controller)
    api_logger.debug("CONTROLLER_ORIG = %s", self.controller_orig)
    api_logger.debug("CONTROLLER_REGION = %s", self.controller_region)

    if self.controller_orig:
        controller_base = self.controller_orig
    else:
        controller_base = self.controller
        self.controller_orig = self.controller

    # separator literal elided; '.' is assumed from the
    # `<name[-env]>.<region>.cloudgenix.com` format in the docstring
    controller_full_part_list = controller_base.split('.')

    for idx, part in enumerate(controller_full_part_list):
        if region == part:
            api_logger.debug("REGION %s ALREADY IN CONTROLLER AT INDEX = %s", region, idx)
            if self.controller_region != region:
                self.controller_region = region
                api_logger.debug("UPDATED_CONTROLLER_REGION = %s", self.controller_region)
            return

    controller_part_count = len(controller_full_part_list)
    if controller_part_count > 1:
        controller_full_part_list[region_position] = region
        self.controller = ".".join(controller_full_part_list)
    else:
        self.controller = ".".join(controller_full_part_list) + '.' + region

    self.controller_orig = controller_base
    self.controller_region = region
    api_logger.debug("UPDATED_CONTROLLER = %s", self.controller)
    api_logger.debug("UPDATED_CONTROLLER_ORIG = %s", self.controller_orig)
    api_logger.debug("UPDATED_CONTROLLER_REGION = %s", self.controller_region)
    return
Update the controller string with dynamic region info. Controller string should end up as `<name[-env]>.<region>.cloudgenix.com` **Parameters:** - **region:** region string. **Returns:** No return value, mutates the controller in the class namespace
def clone(srcpath, destpath, vcs=None):
    vcs = vcs or probe(srcpath)
    cls = _get_repo_class(vcs)
    return cls.clone(srcpath, destpath)
Clone an existing repository. :param str srcpath: Path to an existing repository :param str destpath: Desired path of new repository :param str vcs: Either ``git``, ``hg``, or ``svn`` :returns VCSRepo: The newly cloned repository If ``vcs`` is not given, then the repository type is discovered from ``srcpath`` via :func:`probe`.
def select(self):
    self.selected_option = self.current_option
    self.selected_item.set_up()
    self.selected_item.action()
    self.selected_item.clean_up()
    self.returned_value = self.selected_item.get_return()
    self.should_exit = self.selected_item.should_exit
    if not self.should_exit:
        self.draw()
Select the current item and run it
def record_set_create_or_update(name, zone_name, resource_group, record_type, **kwargs):
    # utility keys and message formats below were elided in the source; they
    # are reconstructed following the conventions of the other azurearm
    # execution modules and should be treated as assumptions
    dnsconn = __utils__['azurearm.get_client']('dns', **kwargs)

    try:
        record_set_model = __utils__['azurearm.create_object_model']('dns', 'RecordSet', **kwargs)
    except TypeError as exc:
        result = {'error': 'The object model could not be built. ({0})'.format(str(exc))}
        return result

    try:
        record_set = dnsconn.record_sets.create_or_update(
            relative_record_set_name=name,
            zone_name=zone_name,
            resource_group_name=resource_group,
            record_type=record_type,
            parameters=record_set_model,
            if_match=kwargs.get('if_match'),
            if_none_match=kwargs.get('if_none_match')
        )
        result = record_set.as_dict()
    except CloudError as exc:
        __utils__['azurearm.log_cloud_error']('dns', str(exc), **kwargs)
        result = {'error': str(exc)}
    except SerializationError as exc:
        result = {'error': 'The object model could not be parsed. ({0})'.format(str(exc))}

    return result
.. versionadded:: Fluorine Creates or updates a record set within a DNS zone. :param name: The name of the record set, relative to the name of the zone. :param zone_name: The name of the DNS zone (without a terminating dot). :param resource_group: The name of the resource group. :param record_type: The type of DNS record in this record set. Record sets of type SOA can be updated but not created (they are created when the DNS zone is created). Possible values include: 'A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR', 'SOA', 'SRV', 'TXT' CLI Example: .. code-block:: bash salt-call azurearm_dns.record_set_create_or_update myhost myzone testgroup A arecords='[{ipv4_address: 10.0.0.1}]' ttl=300
def smart_truncate(string, max_length=0, word_boundary=False, separator=' ', save_order=False):
    string = string.strip(separator)

    if not max_length:
        return string

    if len(string) < max_length:
        return string

    if not word_boundary:
        return string[:max_length].strip(separator)

    if separator not in string:
        return string[:max_length]

    truncated = ''
    for word in string.split(separator):
        if word:
            next_len = len(truncated) + len(word)
            if next_len < max_length:
                truncated += '{}{}'.format(word, separator)
            elif next_len == max_length:
                truncated += '{}'.format(word)
                break
            else:
                if save_order:
                    break
    if not truncated:
        truncated = string[:max_length]
    return truncated.strip(separator)
Truncate a string. :param string (str): string for modification :param max_length (int): output string length :param word_boundary (bool): :param save_order (bool): if True then word order of output string is like input string :param separator (str): separator between words :return:
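Worked examples, assuming the elided separator default is a single space:

print(smart_truncate('one two three', max_length=8))                      # 'one two'
print(smart_truncate('one two three', max_length=8, word_boundary=True))  # 'one two'
print(smart_truncate('alpha beta', max_length=5, word_boundary=True))     # 'alpha'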
def grant_winsta_and_desktop(th):
    # a docstring fragment that leaked into the body has been removed
    current_sid = win32security.GetTokenInformation(th, win32security.TokenUser)[0]
    winsta = win32process.GetProcessWindowStation()
    set_user_perm(winsta, WINSTA_ALL, current_sid)
    desktop = win32service.GetThreadDesktop(win32api.GetCurrentThreadId())
    set_user_perm(desktop, DESKTOP_ALL, current_sid)
Grant the token's user access to the current process's window station and desktop.
def _load(formula):
    # extension, path, and context-key literals were elided; the standard
    # salt defaults.(json|yaml) values are assumed
    _mk_client()
    paths = []
    for ext in ('yaml', 'json'):
        source_url = salt.utils.url.create(formula + '/defaults.' + ext)
        paths.append(source_url)
    defaults_files = __context__['cp.fileclient'].cache_files(paths)
    for file_ in defaults_files:
        if not file_:
            continue
        suffix = file_.rsplit('.', 1)[-1]
        if suffix == 'yaml':
            loader = salt.utils.yaml.safe_load
        elif suffix == 'json':
            loader = salt.utils.json.load
        else:
            log.debug("Failed to determine loader for %r", file_)
            continue
        if os.path.exists(file_):
            log.debug("Reading defaults from %r", file_)
            with salt.utils.files.fopen(file_) as fhr:
                defaults = loader(fhr)
                log.debug("Read defaults %r", defaults)
            return defaults or {}
Generates a list of salt://<formula>/defaults.(json|yaml) files and fetches them from the Salt master. Returns first defaults file as python dict.
def grab_filt(self, filt, analyte=None):
    if isinstance(filt, str):
        if filt in self.components:
            if analyte is None:
                return self.components[filt]
            else:
                if self.switches[analyte][filt]:
                    return self.components[filt]
        else:
            try:
                ind = self.make_fromkey(filt)
            except KeyError:
                print(("\n\n***Filter key invalid. Please consult "
                       "manual and try again."))
    elif isinstance(filt, dict):
        try:
            ind = self.make_fromkey(filt[analyte])
        except ValueError:
            print(("\n\n***Filter key invalid. Please consult manual "
                   "and try again.\nOR\nAnalyte missing from filter "
                   "key dict."))
    elif filt:
        ind = self.make(analyte)
    else:
        ind = ~np.zeros(self.size, dtype=bool)
    return ind
Flexible access to specific filter using any key format. Parameters ---------- filt : str, dict or bool either logical filter expression, dict of expressions, or a boolean analyte : str name of analyte the filter is for. Returns ------- array_like boolean filter
def parse_deltat_data(fileobj):
    array = np.loadtxt(fileobj)
    year, month, day = array[-1, :3].astype(int)
    expiration_date = date(year + 1, month, day)
    year, month, day, delta_t = array.T
    data = np.array((julian_date(year, month, day), delta_t))
    return expiration_date, data
Parse the United States Naval Observatory ``deltat.data`` file. Each line file gives the date and the value of Delta T:: 2016 2 1 68.1577 This function returns a 2xN array of raw Julian dates and matching Delta T values.
def lu_solve_ATAI(A, rho, b, lu, piv, check_finite=True):
    N, M = A.shape
    if N >= M:
        x = linalg.lu_solve((lu, piv), b, check_finite=check_finite)
    else:
        x = (b - A.T.dot(linalg.lu_solve((lu, piv), A.dot(b), 1,
                                         check_finite=check_finite))) / rho
    return x
r""" Solve the linear system :math:`(A^T A + \rho I)\mathbf{x} = \mathbf{b}` or :math:`(A^T A + \rho I)X = B` using :func:`scipy.linalg.lu_solve`. Parameters ---------- A : array_like Matrix :math:`A` rho : float Scalar :math:`\rho` b : array_like Vector :math:`\mathbf{b}` or matrix :math:`B` lu : array_like Matrix containing U in its upper triangle, and L in its lower triangle, as returned by :func:`scipy.linalg.lu_factor` piv : array_like Pivot indices representing the permutation matrix P, as returned by :func:`scipy.linalg.lu_factor` check_finite : bool, optional (default False) Flag indicating whether the input array should be checked for Inf and NaN values Returns ------- x : ndarray Solution to the linear system
def remove(self, item, count=0): self._client.lrem(self.key_prefix, count, self._dumps(item))
Removes @item from the list for @count number of occurrences
def expandf(m, format):
    _assert_expandable(format, True)
    return _apply_replace_backrefs(m, format, flags=FORMAT)
Expand the string using the format replace pattern or function.
def smart_insert(cls, engine_or_session, data, minimal_size=5, op_counter=0):
    ses, auto_close = ensure_session(engine_or_session)

    if isinstance(data, list):
        # try to insert everything in a single commit first
        try:
            ses.add_all(data)
            ses.commit()
            op_counter += 1
        except (IntegrityError, FlushError):
            ses.rollback()
            n = len(data)
            if n >= minimal_size ** 2:
                # split into sqrt(n)-sized chunks and recurse
                n_chunk = math.floor(math.sqrt(n))
                for chunk in grouper_list(data, n_chunk):
                    op_counter = cls.smart_insert(
                        ses, chunk, minimal_size, op_counter)
            else:
                # small enough: fall back to row-by-row insert
                for obj in data:
                    try:
                        ses.add(obj)
                        ses.commit()
                        op_counter += 1
                    except (IntegrityError, FlushError):
                        ses.rollback()
    else:
        try:
            ses.add(data)
            ses.commit()
        except (IntegrityError, FlushError):
            ses.rollback()

    if auto_close:
        ses.close()
    return op_counter
An optimized insert strategy. :return: number of insert operations executed; usually much smaller than ``len(data)``. If we know in advance that no IntegrityError can occur, a bulk insert is far faster than inserting row by row. When we cannot know, the following strategy is used: 1. Try a bulk insert; since it commits only once at the end, it is fast. 2. If it fails, split the data into chunks of about the square root of its size and apply the same logic to each chunk. 3. If a chunk still fails, keep splitting; once a chunk is small enough, fall back to row-by-row insert until everything succeeds. This strategy needs roughly sqrt(nbytes) of extra memory, tiny compared with the original data, and is on average the fastest across scenarios.
def get_documents(self, doc_format='dict'):
    # doc_format literals were elided; 'dict', 'etree' and 'string' come from
    # the docstring, while the two list-returning branches and the
    # etree_to_dict key are assumptions
    def get_doc_id(root, rel_path):
        if not rel_path:
            return root.text
        else:
            child = root.find(rel_path[0])
            if child is None:
                return None
            return get_doc_id(child, rel_path[1:])

    if doc_format == 'dict':
        return dict([(get_doc_id(document, self._id_xpath),
                      etree_to_dict(document)['document'])
                     for document in self._get_doc_list()])
    elif doc_format == 'etree':
        return dict([(get_doc_id(document, self._id_xpath), document)
                     for document in self._get_doc_list()])
    elif doc_format == 'etree_list':
        return self._get_doc_list()
    elif doc_format == 'string_list':
        return list([(ET.tostring(document)) for document in self._get_doc_list()])
    elif doc_format in ('string', None, ''):
        return dict([(get_doc_id(document, self._id_xpath), ET.tostring(document))
                     for document in self._get_doc_list()])
    else:
        raise ParameterError("doc_format=" + doc_format)
Get the documents returned from Storage in this response. Keyword args: doc_format -- Specifies the doc_format for the returned documents. Can be 'dict', 'etree' or 'string'. Default is 'dict'. Returns: A dict where keys are document ids and values depending of the required doc_format: A dict representation of documents (see etree_to_dict()); An etree Element representing the document; A raw XML document string. Raises: ParameterError -- The doc_format value is not allowed.
def dist_string(self, val_meters):
    # unit literals elided; 'nm' and 'miles' are assumed from the formats
    if self.settings.dist_unit == 'nm':
        return "%.1fnm" % (val_meters * 0.000539957)
    if self.settings.dist_unit == 'miles':
        return "%.1fmiles" % (val_meters * 0.000621371)
    return "%um" % val_meters
return a distance as a string
def branches(self):
    if self._branches is None:
        # the command literal was elided; `git branch --contains` matches the
        # documented behaviour and the .format(self.sha1) call
        cmd = 'git branch --contains {}'.format(self.sha1)
        out = shell.run(
            cmd,
            capture=True,
            never_pretend=True
        ).stdout.strip()
        self._branches = [x.strip() for x in out.splitlines()]
    return self._branches
List of all branches this commit is a part of.
def _vax_to_ieee_single_float(data):
    f = []
    nfloat = int(len(data) / 4)
    for i in range(nfloat):
        byte2 = data[0 + i*4]
        byte1 = data[1 + i*4]
        byte4 = data[2 + i*4]
        byte3 = data[3 + i*4]

        # mask out the sign, exponent, and fraction bits
        sign = (byte1 & 0x80) >> 7
        expon = ((byte1 & 0x7f) << 1) + ((byte2 & 0x80) >> 7)
        fract = ((byte2 & 0x7f) << 16) + (byte3 << 8) + byte4

        if sign == 0:
            sign_mult = 1.0
        else:
            sign_mult = -1.0

        if 0 < expon:
            # the 0.5 term and the (expon - 128) bias convert the VAX
            # 0.1f fraction convention to an actual value
            val = sign_mult * (0.5 + (fract / 16777216.0)) * pow(2.0, expon - 128.0)
            f.append(val)
        elif expon == 0 and sign == 0:
            f.append(0)
        else:
            f.append(0)
    return f
Converts a float in Vax format to IEEE format. data should be a single string of chars that have been read in from a binary file. These will be processed 4 at a time into float values. Thus the total number of byte/chars in the string should be divisible by 4. Based on VAX data organization in a byte file, we need to do a bunch of bitwise operations to separate out the numbers that correspond to the sign, the exponent and the fraction portions of this floating point number role : S EEEEEEEE FFFFFFF FFFFFFFF FFFFFFFF bits : 1 2 9 10 32 bytes : byte2 byte1 byte4 byte3
def process_full_position(data, header, var_only=False):
    # NOTE: the header-key and filter literals were elided in the source; the
    # names below follow Complete Genomics var-file conventions and are
    # assumptions.
    feature_type = data[header['varType']]
    # skip unreferenced and pseudoautosomal-in-X positions
    if (feature_type == 'no-ref' or feature_type.startswith('PAR-called-in-X')):
        return None
    if var_only and feature_type in ['no-call', 'ref']:
        return None

    filters = []
    if feature_type == 'no-call':
        filters.append('NOCALL')
    if 'varQuality' in header:
        if 'VQLOW' in data[header['varQuality']]:
            filters.append('VQLOW')
    else:
        var_filter = data[header['varFilter']]
        if var_filter and not var_filter == "PASS":
            filters = filters + var_filter.split(';')

    chrom = data[header['chromosome']]
    start = data[header['begin']]
    ref_allele = data[header['reference']]
    alleles = [data[header['alleleSeq']]]
    dbsnp_data = []
    dbsnp_data = data[header['xRef']].split(';')
    assert data[header['ploidy']] in ['1', '2']
    if feature_type == 'ref' or feature_type == 'no-call':
        return [{'chrom': chrom,
                 'start': start,
                 'dbsnp_data': dbsnp_data,
                 'ref_seq': ref_allele,
                 'alleles': alleles,
                 'allele_count': data[header['ploidy']],
                 'filters': filters,
                 'end': data[header['end']]}]
    else:
        return [{'chrom': chrom,
                 'start': start,
                 'dbsnp_data': dbsnp_data,
                 'ref_seq': ref_allele,
                 'alleles': alleles,
                 'allele_count': data[header['ploidy']],
                 'filters': filters}]
Return genetic data when all alleles called on same line. Returns an array containing one item, a tuple of five items: (string) chromosome (string) start position (1-based) (array of strings) matching dbSNP entries (string) reference allele sequence (array of strings) the genome's allele sequences
def get_rotation_and_scale(header, skew_threshold=0.001):
    ((xrot, yrot), (cdelt1, cdelt2)) = get_xy_rotation_and_scale(header)

    if math.fabs(xrot) - math.fabs(yrot) > skew_threshold:
        raise ValueError("Skew detected: xrot=%.4f yrot=%.4f" % (xrot, yrot))
    rot = yrot

    # the keyword literal was elided; 'LONPOLE' is the standard FITS keyword
    lonpole = float(header.get('LONPOLE', 180.0))
    if lonpole != 180.0:
        rot += 180.0 - lonpole

    return (rot, cdelt1, cdelt2)
Calculate rotation and CDELT.
def read_network_file(self, path_to_network_file):
    network_df = pd.read_table(path_to_network_file)
    network_edges = {}
    for _, row in network_df.iterrows():
        vertex0_id = self.add_pathway(row["pw0"])
        vertex1_id = self.add_pathway(row["pw1"])
        edge_id = self.edge_tuple(vertex0_id, vertex1_id)
        if "features" in row:
            network_edges[edge_id] = \
                [int(float(f)) for f in row["features"].split(" ")]
        else:
            network_edges[edge_id] = []
    self._augment_network(network_edges)
Parameters ----------- path_to_network_file : str Expects a network file with columns "pw0" and "pw1." A "features" column that specifies the features where the (pw0, pw1) edge is present will assign a weight to the edge, though it is not required (edge will have weight 1 if no "features" column exists). "features" format: space-separated feature numbers, e.g. 0.0 1.0 2.0
def sample_from_largest_budget(self, info_dict):
    # NOTE: the KDE dict keys and logger format strings were elided in the
    # source, and the block tracking the best vector was truncated; the
    # 'good'/'bad' keys and the best-value update are reconstructed here as
    # assumptions.
    best = np.inf
    best_vector = None

    budget = max(self.kde_models.keys())
    l = self.kde_models[budget]['good'].pdf
    g = self.kde_models[budget]['bad'].pdf

    minimize_me = lambda x: max(1e-32, g(x)) / max(l(x), 1e-32)

    kde_good = self.kde_models[budget]['good']
    kde_bad = self.kde_models[budget]['bad']

    for i in range(self.num_samples):
        idx = np.random.randint(0, len(kde_good.data))
        datum = kde_good.data[idx]
        vector = []

        for m, bw, t in zip(datum, kde_good.bw, self.vartypes):
            bw = max(bw, self.min_bandwidth)
            if t == 0:  # continuous parameter
                bw = self.bw_factor * bw
                vector.append(sps.truncnorm.rvs(-m/bw, (1-m)/bw, loc=m, scale=bw))
            else:       # categorical parameter
                if np.random.rand() < (1 - bw):
                    vector.append(int(m))
                else:
                    vector.append(np.random.randint(t))

        val = minimize_me(vector)
        if not np.isfinite(val):
            logger.warning('sampled vector: %s has EI value %s' % (vector, val))
            logger.warning("data in the KDEs:\n%s\n%s" % (kde_good.data, kde_bad.data))
            logger.warning("bandwidth of the KDEs:\n%s\n%s" % (kde_good.bw, kde_bad.bw))
            logger.warning("l(x) = %s" % (l(vector)))
            logger.warning("g(x) = %s" % (g(vector)))

        if val < best:
            best = val
            best_vector = vector

    sample = best_vector
    return sample, info_dict
We opted for a single multidimensional KDE, compared to the hierarchy of one-dimensional KDEs used in TPE. The dimensions are separated by budget. This function samples a configuration from the largest budget: first we draw "num_samples" configurations, then prefer the one with the largest l(x)/g(x). Parameters: ----------- info_dict: dict records the information of this configuration Returns ------- dict: new configuration named sample dict: info_dict, recording the information of this configuration