def create(att_name: str, parsed_att: S, attribute_type: Type[T],
           caught_exec: Dict[Converter[S, T], Exception]):
    # NOTE: the message literals below were lost in extraction; they are
    # plausible reconstructions, not the library's exact wording.
    base_msg = "Error while trying to convert value for attribute '{a}' to type <{t}>:\n" \
               " - parsed value is : '{v}' of type <{tv}>\n" \
               "".format(a=str(att_name), t=get_pretty_type_str(attribute_type),
                         v=parsed_att, tv=get_pretty_type_str(type(parsed_att)))
    msg = StringIO()
    if len(list(caught_exec.keys())) > 0:
        msg.writelines(' - converters tried are : \n')
        msg.writelines('\n'.join([str(converter) for converter in caught_exec.keys()]))
        msg.writelines('\n\nDetails of caught exceptions: \n')
        for converter, err in caught_exec.items():
            msg.writelines('--------------- From ' + str(converter) + ' caught: \n')
            print_error_to_io_stream(err, msg)
            msg.write('\n')
    return AttrConversionException(base_msg + msg.getvalue())
Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param att_name: :param parsed_att: :param attribute_type: :param caught_exec: :return:
def get_my_hostname(self, split_hostname_on_first_period=False):
    hostname = self.init_config.get("os_host") or self.hostname
    if split_hostname_on_first_period:
        hostname = hostname.split('.')[0]
    return hostname
Returns a best guess for the hostname registered with OpenStack for this host
def get(self, event):
    # NOTE: key and field names below were lost in extraction and are
    # reconstructed from context.
    try:
        comp = event.data['uuid']
    except KeyError:
        comp = None
    if not comp:
        self.log('Invalid get request without component uuid', lvl=error)
        return
    self.log("Config data get request for", event.data, "from", event.user)
    component = model_factory(Schema).find_one({'uuid': comp})
    response = {
        'component': 'hfos.ui.configurator',
        'action': 'get',
        'data': component.serializablefields()
    }
    self.fireEvent(send(event.client.uuid, response))
Get a stored configuration
def plotFCM(data, channel_names, kind='histogram', ax=None,
            autolabel=True, xlabel_kwargs={}, ylabel_kwargs={},
            colorbar=False, grid=False, **kwargs):
    # NOTE: several literals and the 2D-plotting branch were garbled in
    # extraction; this is a plausible reconstruction.
    if ax is None:
        ax = pl.gca()
    xlabel_kwargs.setdefault('size', 16)
    ylabel_kwargs.setdefault('size', 16)
    channel_names = to_list(channel_names)
    if len(channel_names) == 1:
        # 1D data -> histogram plot
        kwargs.setdefault('color', 'gray')
        kwargs.setdefault('histtype', 'stepfilled')
        kwargs.setdefault('bins', 200)
        x = data[channel_names[0]].values
        if len(x) >= 1:
            if (len(x) == 1) and isinstance(kwargs['bins'], int):
                warnings.warn("One of the data sets only has a single event. "
                              "This event won't draw a plot if the number of "
                              "bins is an integer.")
            plot_output = ax.hist(x, **kwargs)
        else:
            plot_output = None
    elif len(channel_names) == 2:
        x = data[channel_names[0]].values
        y = data[channel_names[1]].values
        if kind == 'scatter':
            kwargs.setdefault('edgecolor', 'none')
            plot_output = ax.scatter(x, y, **kwargs)
        elif kind == 'histogram':
            kwargs.setdefault('bins', 200)
            kwargs.setdefault('cmin', 1)
            kwargs.setdefault('cmap', pl.cm.copper)
            kwargs.setdefault('norm', matplotlib.colors.LogNorm())
            plot_output = ax.hist2d(x, y, **kwargs)
        else:
            raise ValueError('kind must be "scatter" or "histogram"')
    else:
        raise Exception('Received an unexpected number of channels: "{}"'
                        .format(channel_names))
    if autolabel:
        y_label_text = 'Counts' if len(channel_names) == 1 else channel_names[1]
        ax.set_xlabel(channel_names[0], **xlabel_kwargs)
        ax.set_ylabel(y_label_text, **ylabel_kwargs)
    return plot_output
Plots the sample on the current axis. Follow with a call to matplotlib's show() in order to see the plot. Parameters ---------- data : DataFrame {graph_plotFCM_pars} {common_plot_ax} Returns ------- The output of the plot command used
def print_png(o):
    s = latex(o, mode='inline')
    # mathtext cannot render these LaTeX commands
    s = s.replace('\\operatorname', '')
    s = s.replace('\\overline', '\\bar')
    png = latex_to_png(s)
    return png
A function to display sympy expression using inline style LaTeX in PNG.
def from_sky(cls, distancelimit=15, magnitudelimit=18):
    # NOTE: the query fragments below were lost in extraction and are
    # reconstructed assuming a Gaia-style parallax/magnitude ADQL query.
    criteria = []
    if distancelimit is not None:
        criteria.append('parallax >= {}'.format(1000.0/distancelimit))
    if magnitudelimit is not None:
        criteria.append('phot_g_mean_mag <= {}'.format(magnitudelimit))
    allskyquery = '{} WHERE {}'.format(cls.basequery, ' AND '.join(criteria))
    print(allskyquery)
    print('searching whole sky, distance < {} pc, magnitude < {}'
          .format(distancelimit, magnitudelimit))
    table = query(allskyquery)
    c = cls(cls.standardize_table(table))
    c.standardized.meta['query'] = allskyquery
    c.standardized.meta['magnitudelimit'] = magnitudelimit
    c.standardized.meta['distancelimit'] = distancelimit
    return c
Create a Constellation from a criteria search of the whole sky. Parameters ---------- distancelimit : float Maximum distance (parsecs). magnitudelimit : float Maximum magnitude (for Gaia G).
def analyzeParameters(expName, suite):
    print("\n================", expName, "=====================")
    try:
        expParams = suite.get_params(expName)
        pprint.pprint(expParams)
        for p in ["boost_strength", "k", "learning_rate", "weight_sparsity",
                  "k_inference_factor", "boost_strength_factor",
                  "c1_out_channels", "c1_k", "learning_rate_factor",
                  "batches_in_epoch",
                  ]:
            if p in expParams and type(expParams[p]) == list:
                print("\n", p)
                for v1 in expParams[p]:
                    # Retrieve the last test error over all runs with p == v1
                    values, params = suite.get_values_fix_params(
                        expName, 0, "testerror", "last", **{p: v1})
                    v = np.array(values)
                    print("Average/min/max for", p, v1, "=",
                          v.mean(), v.min(), v.max())
    except:
        print("Can't load experiment", expName)
Analyze the impact of each list parameter in this experiment
def uuid_from_kronos_time(time, _type=UUIDType.RANDOM):
    return timeuuid_from_time(int(time) + UUID_TIME_OFFSET, type=_type)
Generate a UUID with the specified time. If `_type` is `UUIDType.LOWEST`, return the lexicographically first UUID for the specified time.
def show_firmware_version_output_show_firmware_version_node_info_firmware_version_info_application_name(self, **kwargs):
    config = ET.Element("config")
    show_firmware_version = ET.Element("show_firmware_version")
    config = show_firmware_version
    output = ET.SubElement(show_firmware_version, "output")
    show_firmware_version = ET.SubElement(output, "show-firmware-version")
    node_info = ET.SubElement(show_firmware_version, "node-info")
    firmware_version_info = ET.SubElement(node_info, "firmware-version-info")
    application_name = ET.SubElement(firmware_version_info, "application-name")
    application_name.text = kwargs.pop('application_name')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def send(self, test=None):
    # NOTE: endpoint paths, header names, and message strings below were lost
    # in extraction; they are reconstructed from the Postmark API conventions.
    self._check_values()
    json_message = self.to_json_message()
    if test is None:
        try:
            from django.conf import settings as django_settings
            test = getattr(django_settings, "POSTMARK_TEST_MODE", None)
        except ImportError:
            pass
    if test:
        print('JSON message is:\n%s' % json.dumps(json_message, cls=PMJSONEncoder))
        return
    if self.__template_id:
        endpoint_url = __POSTMARK_URL__ + 'email/withTemplate/'
    else:
        endpoint_url = __POSTMARK_URL__ + 'email'
    req = Request(
        endpoint_url,
        json.dumps(json_message, cls=PMJSONEncoder).encode('utf8'),
        {
            'Accept': 'application/json',
            'Content-Type': 'application/json',
            'X-Postmark-Server-Token': self.__api_key,
            'User-agent': self.__user_agent
        }
    )
    try:
        result = urlopen(req)
        jsontxt = result.read().decode('utf8')
        result.close()
        if result.code == 200:
            self.message_id = json.loads(jsontxt).get('MessageID', None)
            return True
        else:
            raise PMMailSendException('Return code %d: %s' % (result.code, result.msg))
    except HTTPError as err:
        if err.code == 401:
            raise PMMailUnauthorizedException('Sending Unauthorized - incorrect API key.', err)
        elif err.code == 422:
            try:
                jsontxt = err.read().decode('utf8')
                jsonobj = json.loads(jsontxt)
                desc = jsonobj['Message']
                error_code = jsonobj['ErrorCode']
            except KeyError:
                raise PMMailUnprocessableEntityException('Unprocessable Entity: Description not given')
            if error_code == 406:
                raise PMMailInactiveRecipientException('Inactive recipient')
            raise PMMailUnprocessableEntityException('Unprocessable Entity: %s' % desc)
        elif err.code == 500:
            raise PMMailServerErrorException('Internal server error at Postmark. Admins have been alerted.', err)
    except URLError as err:
        if hasattr(err, 'reason'):
            raise PMMailURLException('URLError: Failed to reach the server: %s (See "inner_exception" for details)' % err.reason, err)
        elif hasattr(err, 'code'):
            raise PMMailURLException('URLError: The server couldn\'t fulfill the request. (See "inner_exception" for details)', err)
Send the email through the Postmark system. Pass test=True to just print out the resulting JSON message being sent to Postmark
def _list_files(self, args):
    # NOTE: the pkgdb function name and messages are reconstructions of
    # literals lost in extraction.
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')
    package = args[-1]
    files = self._pkgdb_fun('list_files', package, self.db_conn)
    if files is None:
        raise SPMPackageError('package {0} not installed'.format(package))
    else:
        for file_ in files:
            if self.opts['verbose']:
                status_msg = ','.join(file_)
            else:
                status_msg = file_[0]
            self.ui.status(status_msg)
List files for an installed package
def to_json(df, x, y, timeseries=False):
    values = {k: [] for k in y}
    for i, row in df.iterrows():
        for yy in y:
            values[yy].append({"x": row[x], "y": row[yy]})
    return {"result": [values[k] for k in y], "date": timeseries}
Format output for json response.
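A minimal usage sketch (hypothetical column names; row values may come back as numpy scalars, so the exact repr can differ slightly):
>>> import pandas as pd
>>> df = pd.DataFrame({"day": [1, 2], "sales": [10, 20]})
>>> to_json(df, x="day", y=["sales"])
{'result': [[{'x': 1, 'y': 10}, {'x': 2, 'y': 20}]], 'date': False}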
def heightmap_get_normal(
    hm: np.ndarray, x: float, y: float, waterLevel: float
) -> Tuple[float, float, float]:
    cn = ffi.new("float[3]")
    lib.TCOD_heightmap_get_normal(_heightmap_cdata(hm), x, y, cn, waterLevel)
    return tuple(cn)
Return the map normal at given coordinates. Args: hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions. x (float): The x coordinate. y (float): The y coordinate. waterLevel (float): The heightmap is considered flat below this value. Returns: Tuple[float, float, float]: An (x, y, z) vector normal.
def cipher(self):
    cipher = Cipher(*self.mode().aes_args(), **self.mode().aes_kwargs())
    return WAES.WAESCipher(cipher)
Generate AES-cipher :return: Crypto.Cipher.AES.AESCipher
async def vcx_agent_provision(config: str) -> None:
    logger = logging.getLogger(__name__)
    if not hasattr(vcx_agent_provision, "cb"):
        logger.debug("vcx_agent_provision: Creating callback")
        vcx_agent_provision.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, c_char_p))
    c_config = c_char_p(config.encode('utf-8'))
    result = await do_call('vcx_agent_provision_async',  # FFI entry point name reconstructed
                           c_config,
                           vcx_agent_provision.cb)
    logger.debug("vcx_agent_provision completed")
    return result.decode()
Provision an agent in the agency, populate configuration and wallet for this agent. Example: import json enterprise_config = { 'agency_url': 'http://localhost:8080', 'agency_did': 'VsKV7grR1BUE29mG2Fm2kX', 'agency_verkey': "Hezce2UWMZ3wUhVkh2LfKSs8nDzWwzs2Win7EzNN3YaR", 'wallet_name': 'LIBVCX_SDK_WALLET', 'agent_seed': '00000000000000000000000001234561', 'enterprise_seed': '000000000000000000000000Trustee1', 'wallet_key': '1234' } vcx_config = await vcx_agent_provision(json.dumps(enterprise_config)) :param config: JSON configuration :return: Configuration for vcx_init call.
def exists(hdfs_path, user=None):
    hostname, port, path = split(hdfs_path, user=user)
    fs = hdfs_fs.hdfs(hostname, port)
    retval = fs.exists(path)
    fs.close()
    return retval
Return :obj:`True` if ``hdfs_path`` exists in the default HDFS.
def parser_functions(self) -> List['ParserFunction']:
    _lststr = self._lststr
    _type_to_spans = self._type_to_spans
    return [
        ParserFunction(_lststr, _type_to_spans, span, 'ParserFunction')
        for span in self._subspans('ParserFunction')]
Return a list of parser function objects.
def loudest_time(self, start=0, duration=0):
    if duration == 0:
        duration = self.sound.nframes
    self.current_frame = start
    arr = self.read_frames(duration)
    # halve the flat argmax index, presumably because frames are interleaved
    max_amp_sample = int(np.floor(arr.argmax() / 2)) + start
    return max_amp_sample
Find the loudest time in the window given by start and duration Returns frame number in context of entire track, not just the window. :param integer start: Start frame :param integer duration: Number of frames to consider from start :returns: Frame number of loudest frame :rtype: integer
def parse(self, rrstr):
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('PX record already initialized!')
    (su_len, su_entry_version_unused, posix_file_mode_le, posix_file_mode_be,
     posix_file_links_le, posix_file_links_be, posix_file_user_id_le,
     posix_file_user_id_be, posix_file_group_id_le,
     posix_file_group_id_be) = struct.unpack_from('=BBLLLLLLLL', rrstr[:38], 2)
    self.posix_file_mode = posix_file_mode_le
    self.posix_file_links = posix_file_links_le
    self.posix_user_id = posix_file_user_id_le
    self.posix_group_id = posix_file_group_id_le
    # The serial number field only exists in Rock Ridge 1.12 PX records and is
    # not unpacked above; the original referenced an undefined name here.
    self.posix_serial_number = 0
    self._initialized = True
    return su_len
Parse a Rock Ridge POSIX File Attributes record out of a string. Parameters: rrstr - The string to parse the record out of. Returns: The length of this record in bytes (su_len).
def observe(self, event_name, func):
    if isinstance(event_name, (list, tuple)):
        for name in event_name:
            self.observe(name, func)
        return
    self.log(func.__name__, "attached to", event_name)
    self._modify_event(event_name, 'add', func)  # action name reconstructed
event_name := {'created', 'modified', 'deleted'}, or a list/tuple of these. Attaches a function to run on a particular event. The function must be unique to be removed cleanly. Alternatively, event_name can be a list/tuple of any of the string possibilities to attach the function to multiple events.
def is_longitudinal(self):
    return len(self.events) > 0 and \
        len(self.arm_nums) > 0 and \
        len(self.arm_names) > 0
Returns ------- boolean : longitudinal status of this project
def get_features(model_description_features):
    return utils.get_objectlist(model_description_features,
                                config_key='features',
                                module=sys.modules[__name__])
Get features from a list of dictionaries Parameters ---------- model_description_features : list of dictionaries Examples -------- >>> l = [{'StrokeCount': None}, \ {'ConstantPointCoordinates': \ [{'strokes': 4}, \ {'points_per_stroke': 81}, \ {'fill_empty_with': 0}, \ {'pen_down': False}] \ } \ ] >>> get_features(l) [StrokeCount, ConstantPointCoordinates - strokes: 4 - points per stroke: 81 - fill empty with: 0 - pen down feature: False ]
def _make_sampling_sequence(n):
    seq = list(range(5))
    i = 50
    while len(seq) < n:
        seq.append(i)
        i += 50
    return seq
Return a list containing the proposed call event sampling sequence. Return events are paired with call events and not counted separately. This is 0, 1, 2, ..., 4 plus 50, 100, 150, 200, etc. The total list size is n.
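For instance, the first few values of the sequence (derived directly from the code above):
>>> _make_sampling_sequence(8)
[0, 1, 2, 3, 4, 50, 100, 150]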
def minute(self):
    self.magnification = 60
    self._update(self.baseNumber, self.magnification)
    return self
set unit to minute
def run(self):
    if self.update_time_value():
        self.i3status.py3_wrapper.notify_update(self.module_name)
    due_time = self.py3.time_in(sync_to=self.time_delta)
    self.i3status.py3_wrapper.timeout_queue_add(self, due_time)
Updates the module's output. Currently only time and tztime need to do this.
def getObjectsInHouse(self, house):
    res = [obj for obj in self if house.hasObject(obj)]
    return ObjectList(res)
Returns a list with all objects in a house.
def result(self, s, a):
    if s[2] == 0:
        return (s[0] - a[1][0], s[1] - a[1][1], 1)
    else:
        return (s[0] + a[1][0], s[1] + a[1][1], 0)
Result of applying an action to a state.
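A short illustration, assuming states are (x, y, turn) triples and an action carries its (dx, dy) offset in a[1] (the `game` instance here is hypothetical):
>>> game.result((5, 3, 0), ('move', (2, 1)))   # turn 0 subtracts the offset
(3, 2, 1)
>>> game.result((5, 3, 1), ('move', (2, 1)))   # turn 1 adds it
(7, 4, 0)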
def _check_inputs(self, operators, weights):
    # NOTE: attribute names and error messages reconstructed from the
    # docstring; the original literals were lost in extraction.
    operators = self._check_type(operators)
    for operator in operators:
        if not hasattr(operator, 'op'):
            raise ValueError('Operators must contain an "op" method.')
        if not hasattr(operator, 'adj_op'):
            raise ValueError('Operators must contain an "adj_op" method.')
        operator.op = check_callable(operator.op)
        operator.adj_op = check_callable(operator.adj_op)
    if not isinstance(weights, type(None)):
        weights = self._check_type(weights)
        if weights.size != operators.size:
            raise ValueError(
                'The number of weights must match the number of operators.')
        if not np.issubdtype(weights.dtype, np.floating):
            raise TypeError('The weights must be floats.')
    return operators, weights
Check Inputs This method checks that the input operators and weights are correctly formatted Parameters ---------- operators : list, tuple or np.ndarray List of linear operator class instances weights : list, tuple or np.ndarray List of weights for combining the linear adjoint operator results Returns ------- tuple operators and weights Raises ------ ValueError If the number of weights does not match the number of operators TypeError If the individual weight values are not floats
def create_dataset(self, name, x_img_size, y_img_size, z_img_size,
                   x_vox_res, y_vox_res, z_vox_res,
                   x_offset=0, y_offset=0, z_offset=0,
                   scaling_levels=0, scaling_option=0,
                   dataset_description="", is_public=0):
    return self.resources.create_dataset(name, x_img_size, y_img_size,
                                         z_img_size, x_vox_res, y_vox_res,
                                         z_vox_res, x_offset, y_offset,
                                         z_offset, scaling_levels,
                                         scaling_option,
                                         dataset_description, is_public)
Creates a dataset. Arguments: name (str): Name of dataset x_img_size (int): max x coordinate of image size y_img_size (int): max y coordinate of image size z_img_size (int): max z coordinate of image size x_vox_res (float): x voxel resolution y_vox_res (float): y voxel resolution z_vox_res (float): z voxel resolution x_offset (int): x offset amount y_offset (int): y offset amount z_offset (int): z offset amount scaling_levels (int): Level of resolution scaling scaling_option (int): Z slices is 0 or Isotropic is 1 dataset_description (str): Your description of the dataset is_public (int): 1 'true' or 0 'false' for viewability of data set in public Returns: bool: True if dataset created, False if not
def create_tags_with_concatenated_css_classes(tags):
    # NOTE: the css-class argument for closing tags was lost in extraction;
    # an empty string is assumed.
    current_classes = set()
    result = []
    for pos, group in group_tags_at_same_position(tags):
        opening, closing = get_opening_closing_tags(group)
        closing_added = False
        if len(closing) > 0:
            closing_tag = Tag(pos, False, '')
            for tag in closing:
                current_classes.remove(tag.css_class)
            result.append(closing_tag)
            closing_added = True
        opening_added = False
        if len(opening) > 0:
            if not closing_added and len(current_classes) > 0:
                result.append(Tag(pos, False, ''))
            for tag in opening:
                current_classes.add(tag.css_class)
            opening_tag = Tag(pos, True, ' '.join(sorted(current_classes)))
            result.append(opening_tag)
            opening_added = True
        if closing_added and not opening_added and len(current_classes) > 0:
            opening_tag = Tag(pos, True, ' '.join(sorted(current_classes)))
            result.append(opening_tag)
    return result
Function that creates <mark> tags such that they are not overlapping. In order to do this, it concatenates the css classes and stores the concatenated result in new tags.
async def fetch_block(self, request):
    error_traps = [error_handlers.BlockNotFoundTrap]
    block_id = request.match_info.get('block_id', '')
    self._validate_id(block_id)
    response = await self._query_validator(
        Message.CLIENT_BLOCK_GET_BY_ID_REQUEST,
        client_block_pb2.ClientBlockGetResponse,
        client_block_pb2.ClientBlockGetByIdRequest(block_id=block_id),
        error_traps)
    return self._wrap_response(
        request,
        data=self._expand_block(response['block']),
        metadata=self._get_metadata(request, response))
Fetches a specific block from the validator, specified by id. Request: path: - block_id: The 128-character id of the block to be fetched Response: data: A JSON object with the data from the fully expanded Block link: The link to this exact query
def send(self, line):
    line = line.strip()
    if line == ".":
        self.stop()
        return
    mav = self.master.mav
    if line != '+++':  # the '+++' escape sequence is sent without a line ending
        line += "\r\n"
    buf = [ord(x) for x in line]
    buf.extend([0] * (70 - len(buf)))
    flags = mavutil.mavlink.SERIAL_CONTROL_FLAG_RESPOND
    flags |= mavutil.mavlink.SERIAL_CONTROL_FLAG_MULTI
    flags |= mavutil.mavlink.SERIAL_CONTROL_FLAG_EXCLUSIVE
    mav.serial_control_send(self.serial_settings.port,
                            flags,
                            0,
                            self.serial_settings.baudrate,
                            len(line), buf)
send some bytes
def filename(value):
    # NOTE: the original regex was lost in extraction; this assumes the intent
    # was to replace path-unsafe characters with underscores.
    return re.sub(r'[^\w.\-]', '_', os.path.basename(InputSanitizer.trim(value)))
Remove everything that would affect paths in the filename :param value: :return:
def delete_table_rate_shipping_by_id(cls, table_rate_shipping_id, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._delete_table_rate_shipping_by_id_with_http_info(table_rate_shipping_id, **kwargs)
    else:
        (data) = cls._delete_table_rate_shipping_by_id_with_http_info(table_rate_shipping_id, **kwargs)
        return data
Delete TableRateShipping Delete an instance of TableRateShipping by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_table_rate_shipping_by_id(table_rate_shipping_id, async=True) >>> result = thread.get() :param async bool :param str table_rate_shipping_id: ID of tableRateShipping to delete. (required) :return: None If the method is called asynchronously, returns the request thread.
def to_header(self):
    ranges = []
    for begin, end in self.ranges:
        if end is None:
            ranges.append("%s-" % begin if begin >= 0 else str(begin))
        else:
            ranges.append("%s-%s" % (begin, end - 1))
    return "%s=%s" % (self.units, ",".join(ranges))
Converts the object back into an HTTP header.
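For example, for a hypothetical range object covering the first 500 bytes plus an open-ended tail:
>>> r.units = 'bytes'; r.ranges = [(0, 500), (500, None)]
>>> r.to_header()
'bytes=0-499,500-'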
def yum_install_from_url(pkg_name, url):
    # NOTE: the distribution label and hidden output groups are reconstructed.
    if is_package_installed(distribution='el', pkg=pkg_name) is False:
        log_green("installing %s from %s" % (pkg_name, url))
        with settings(hide('warnings', 'running', 'stdout', 'stderr'),
                      warn_only=True, capture=True):
            result = sudo("rpm -i %s" % url)
            if result.return_code == 0:
                return True
            elif result.return_code == 1:
                return False
            else:
                print(result)
                raise SystemExit()
Installs a pkg from a url. :param pkg_name: the name of the package to install :param url: the full URL for the rpm package
def islitlet_progress(islitlet, islitlet_max):
    if islitlet % 10 == 0:
        cout = str(islitlet // 10)
    else:
        cout = '.'  # progress character reconstructed
    sys.stdout.write(cout)
    if islitlet == islitlet_max:
        sys.stdout.write('\n')
    sys.stdout.flush()
Auxiliary function to print out progress in loop of slitlets. Parameters ---------- islitlet : int Current slitlet number. islitlet_max : int Maximum slitlet number.
def age(self, id):
    path = self.hash(id)
    if os.path.exists(path):
        modified = datetime.datetime.fromtimestamp(os.stat(path)[8])
        age = datetime.datetime.today() - modified
        return age.days
    else:
        return 0
Returns the age of the cache entry, in days.
def gen_table(self, inner_widths, inner_heights, outer_widths):
    for i, row in enumerate(self.table_data):
        # row style name reconstructed as 'row'
        for line in self.gen_row_lines(row, 'row', inner_widths, inner_heights[i]):
            yield line
        if i == 0:
            yield self.horizontal_border(None, outer_widths)
Combine everything and yield every line of the entire table with borders. :param iter inner_widths: List of widths (no padding) for each column. :param iter inner_heights: List of heights (no padding) for each row. :param iter outer_widths: List of widths (with padding) for each column. :return:
def _subset(subset, superset):
    result = True
    for k in subset:
        result = k in superset and subset[k] == superset[k]
        if not result:
            break
    return result
True if subset is a subset of superset. :param dict subset: subset to compare. :param dict superset: superset to compare. :return: True iif all pairs (key, value) of subset are in superset. :rtype: bool
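For example:
>>> _subset({'a': 1}, {'a': 1, 'b': 2})
True
>>> _subset({'a': 2}, {'a': 1, 'b': 2})
False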
def forget_fact(term):
    logger.info('forgetting fact %s', term)
    db.facts.remove({'term': term_regex(term)})
    return random.choice(ACKS)
Forgets a fact by removing it from the database
def _render_content(self, content, **settings):
    # NOTE: border position labels ('top', 'middle', 'bottom') reconstructed.
    result = []
    columns = settings[self.SETTING_COLUMNS]
    (columns, content) = self.table_format(columns, content)
    if settings[self.SETTING_FLAG_ENUMERATE]:
        (columns, content) = self.table_enumerate(columns, content)
    dimensions = self.table_measure(columns, content)
    sb = {k: settings[k] for k in (self.SETTING_BORDER_STYLE,
                                   self.SETTING_BORDER_FORMATING)}
    result.append(self.fmt_border(dimensions, 'top', **sb))
    if settings[self.SETTING_FLAG_HEADER]:
        s = {k: settings[k] for k in (self.SETTING_FLAG_PLAIN,
                                      self.SETTING_BORDER_STYLE,
                                      self.SETTING_BORDER_FORMATING)}
        s[self.SETTING_TEXT_FORMATING] = settings[self.SETTING_HEADER_FORMATING]
        result.append(self.fmt_row_header(columns, dimensions, **s))
        result.append(self.fmt_border(dimensions, 'middle', **sb))
    for row in content:
        s = {k: settings[k] for k in (self.SETTING_FLAG_PLAIN,
                                      self.SETTING_BORDER_STYLE,
                                      self.SETTING_BORDER_FORMATING)}
        s[self.SETTING_TEXT_FORMATING] = settings[self.SETTING_TEXT_FORMATING]
        result.append(self.fmt_row(columns, dimensions, row, **s))
    result.append(self.fmt_border(dimensions, 'bottom', **sb))
    return result
Perform widget rendering, but do not print anything.
def get_arg_type_descriptors(self):
    if not self.is_method:
        return tuple()
    tp = _typeseq(self.get_descriptor())
    tp = _typeseq(tp[0][1:-1])
    return tp
The parameter type descriptor list for a method, or an empty tuple for a field. Type descriptors are shorthand identifiers for the builtin java types.
def add_update_resources(self, resources, ignore_datasetid=False):
    if not isinstance(resources, list):
        raise HDXError('Resources should be a list!')
    for resource in resources:
        self.add_update_resource(resource, ignore_datasetid)
Add new or update existing resources with new metadata to the dataset Args: resources (List[Union[hdx.data.resource.Resource,Dict,str]]): A list of either resource ids or resources metadata from either Resource objects or dictionaries ignore_datasetid (bool): Whether to ignore dataset id in the resource. Defaults to False. Returns: None
def bump_version(project, source, force_init):
    file_opener = FileOpener()
    jiggler = JiggleVersion(project, source, file_opener, force_init)
    logger.debug("Current, next : {0} -> {1} : {2}".format(
        jiggler.current_version, jiggler.version, jiggler.schema))
    if not jiggler.version_finder.validate_current_versions():
        logger.debug(unicode(jiggler.version_finder.all_current_versions()))
        logger.error("Versions not in sync, won't continue")
    changed = jiggler.jiggle_all()
    logger.debug("Changed {0} files".format(changed))
    return changed
Entry point :return:
def loadTargetsFromFile(self, filename, cols=None, everyNrows=1,
                        delim=' ', checkEven=1):
    # default delimiter reconstructed as a single space
    self.targets = self.loadVectors(filename, cols, everyNrows, delim, checkEven)
Loads targets from file.
def get_time_delta(time_string: str) -> timedelta:
    rel_time: Pattern = re.compile(
        pattern=r"((?P<hours>\d+?)\s+hour)?((?P<minutes>\d+?)\s+minute)?"
                r"((?P<seconds>\d+?)\s+second)?((?P<days>\d+?)\s+day)?",
        flags=re.IGNORECASE,
    )
    parts: Optional[Match[AnyStr]] = rel_time.match(string=time_string)
    if not parts:
        raise Exception(f"Invalid relative time: {time_string}")
    parts: Dict[str, str] = parts.groupdict()
    time_params = {}
    if all(value is None for value in parts.values()):
        raise Exception(f"Invalid relative time: {time_string}")
    for time_unit, magnitude in parts.items():
        if magnitude:
            time_params[time_unit]: int = int(magnitude)
    return timedelta(**time_params)
Takes a time string (1 hours, 10 days, etc.) and returns a python timedelta object :param time_string: the time value to convert to a timedelta :type time_string: str :returns: datetime.timedelta for relative time :rtype: datetime.timedelta
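A usage sketch (timedelta reprs shown as produced by Python >= 3.7):
>>> get_time_delta("2 hours")
datetime.timedelta(seconds=7200)
>>> get_time_delta("1 day")
datetime.timedelta(days=1)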
def seriesshape(self):
    seriesshape = [len(hydpy.pub.timegrids.init)]
    seriesshape.extend(self.shape)
    return tuple(seriesshape)
Shape of the whole time series (time being the first dimension).
def _convert_to_dict(data):
    if isinstance(data, dict):
        return data
    if isinstance(data, (list, tuple)):
        if _all_correct_list(data):
            return dict(data)
        else:
            data = zip(data[::2], data[1::2])
            return dict(data)
    else:
        raise MetaParsingException(
            "Can't decode provided metadata - unknown structure."
        )
Convert `data` to dictionary. Tries to get sense in multidimensional arrays. Args: data: List/dict/tuple of variable dimension. Returns: dict: If the data can be converted to dictionary. Raises: MetaParsingException: When the data are unconvertible to dict.
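For example, assuming _all_correct_list() accepts a sequence of key/value pairs:
>>> _convert_to_dict([('a', 1), ('b', 2)])
{'a': 1, 'b': 2}
>>> _convert_to_dict(['a', 1, 'b', 2])  # flat key/value sequence
{'a': 1, 'b': 2}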
def match(self, fsys_view):
    evalue_dict = fsys_view[1]
    for key, value in six.viewitems(self.criteria):
        if key in evalue_dict:
            if evalue_dict[key] != value:
                return False
        else:
            return False
    return True
Compare potentially partial criteria against built filesystems entry dictionary
def doprinc(data):
    # NOTE: dictionary keys reconstructed from the docstring below.
    ppars = {}
    rad = old_div(np.pi, 180.)
    X = dir2cart(data)
    T = np.array(Tmatrix(X))
    t, V = tauV(T)
    Pdir = cart2dir(V[0])
    ppars['Edir'] = cart2dir(V[1])  # elongation direction
    dec, inc = doflip(Pdir[0], Pdir[1])
    ppars['dec'] = dec
    ppars['inc'] = inc
    ppars['N'] = len(data)
    ppars['tau1'] = t[0]
    ppars['tau2'] = t[1]
    ppars['tau3'] = t[2]
    Pdir = cart2dir(V[1])
    dec, inc = doflip(Pdir[0], Pdir[1])
    ppars['V2dec'] = dec
    ppars['V2inc'] = inc
    Pdir = cart2dir(V[2])
    dec, inc = doflip(Pdir[0], Pdir[1])
    ppars['V3dec'] = dec
    ppars['V3inc'] = inc
    return ppars
Gets principal components from data in form of a list of [dec,inc] data. Parameters ---------- data : nested list of dec, inc directions Returns ------- ppars : dictionary with the principal components dec : principal direction declination inc : principal direction inclination V2dec : intermediate eigenvector declination V2inc : intermediate eigenvector inclination V3dec : minor eigenvector declination V3inc : minor eigenvector inclination tau1 : major eigenvalue tau2 : intermediate eigenvalue tau3 : minor eigenvalue N : number of points Edir : elongation direction [dec, inc, length]
def start(self) -> None:
    self._setup()
    if self._run_control_loop:
        asyncio.set_event_loop(asyncio.new_event_loop())
        self._heartbeat_reciever.start()
        self._logger.info('Starting control loop')  # message reconstructed
        return self.loop.start()
    else:
        self._logger.debug('Control loop disabled')  # message reconstructed
Start the internal control loop. Potentially blocking, depending on the value of `_run_control_loop` set by the initializer.
def change_option_default(self, opt_name, default_val):
    if not self.has_option(opt_name):
        raise ValueError("Unknown option name (%s)" % opt_name)
    self._options[opt_name].default = default_val
Change the default value of an option :param opt_name: option name :type opt_name: str :param default_val: new default option value
def parse_sv_frequencies(variant):
    # NOTE: the original key names and the substring test were lost in
    # extraction; the keys below are placeholder examples of SV-frequency
    # INFO fields, not the real list.
    frequency_keys = [
        'clingen_cgh_benign',
        'clingen_cgh_pathogenic',
        'clingen_ngi',
        'swegen',
        'decipher',
    ]
    sv_frequencies = {}
    for key in frequency_keys:
        value = variant.INFO.get(key, 0)
        if 'AF' in key:  # frequencies are floats, counts are ints
            value = float(value)
        else:
            value = int(value)
        if value > 0:
            sv_frequencies[key] = value
    return sv_frequencies
Parsing of some custom sv frequencies These are very specific at the moment, this will hopefully get better over time when the field of structural variants is more developed. Args: variant(cyvcf2.Variant) Returns: sv_frequencies(dict)
def read(self, size=None):
    if size is not None:
        return self.__sf.read(size)
    block_size = self.__class__.__block_size
    b = bytearray()
    received_bytes = 0
    while 1:
        partial = self.__sf.read(block_size)
        b.extend(partial)
        received_bytes += len(partial)
        if len(partial) < block_size:
            self.__log.debug("End of file.")
            break
    self.__log.debug("Read (%d) bytes for total-file." % (received_bytes))
    return b
Read a length of bytes. Return empty on EOF. If 'size' is omitted, return whole file.
def profile(self):
    leftmost_idx = np.argmax(self.matrix().astype(bool), axis=0)
    return (np.arange(self.num_vertices()) - leftmost_idx).sum()
Measure of bandedness, also known as 'envelope size'.
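The underlying arithmetic can be checked on a small boolean matrix (a sketch independent of the graph class):
>>> import numpy as np
>>> m = np.array([[1, 1, 0],
...               [0, 1, 1],
...               [0, 0, 1]], dtype=bool)
>>> leftmost = np.argmax(m, axis=0)        # first nonzero row in each column
>>> int((np.arange(3) - leftmost).sum())   # profile of this matrix
2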
def transmit(self, bytes, protocol=None):
    Observable.setChanged(self)
    Observable.notifyObservers(self, CardConnectionEvent(
        'command', [bytes, protocol]))
    data, sw1, sw2 = self.doTransmit(bytes, protocol)
    Observable.setChanged(self)
    Observable.notifyObservers(self, CardConnectionEvent(
        'response', [data, sw1, sw2]))
    if self.errorcheckingchain is not None:
        self.errorcheckingchain[0](data, sw1, sw2)
    return data, sw1, sw2
Transmit an apdu. Internally calls doTransmit() class method and notify observers upon command/response APDU events. Subclasses must override the doTransmit() class method. @param bytes: list of bytes to transmit @param protocol: the transmission protocol, from CardConnection.T0_protocol, CardConnection.T1_protocol, or CardConnection.RAW_protocol
def _dict_increment(self, dictionary, key):
    if key in dictionary:
        dictionary[key] += 1
    else:
        dictionary[key] = 1
Increments the value of the dictionary at the specified key.
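Behaviorally this matches collections.Counter-style counting; a quick sketch, assuming `obj` is an instance of the containing class:
>>> counts = {}
>>> obj._dict_increment(counts, 'a'); obj._dict_increment(counts, 'a'); obj._dict_increment(counts, 'b')
>>> counts
{'a': 2, 'b': 1}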
def setup_image_plane_pixelization_grid_from_galaxies_and_grid_stack(galaxies, grid_stack):
    if not isinstance(grid_stack.regular, grids.PaddedRegularGrid):
        for galaxy in galaxies:
            if hasattr(galaxy, 'pixelization'):
                if isinstance(galaxy.pixelization, ImagePlanePixelization):
                    image_plane_pix_grid = galaxy.pixelization.image_plane_pix_grid_from_regular_grid(
                        regular_grid=grid_stack.regular)
                    return grid_stack.new_grid_stack_with_pix_grid_added(
                        pix_grid=image_plane_pix_grid.sparse_grid,
                        regular_to_nearest_pix=image_plane_pix_grid.regular_to_sparse)
    return grid_stack
An image-plane pixelization is one where its pixel centres are computed by tracing a sparse grid of pixels from \ the image's regular grid to other planes (e.g. the source-plane). Provided a galaxy has an image-plane pixelization, this function returns a new *GridStack* instance where the \ image-plane pixelization's sparse grid is added to it as an attribute. Thus, when the *GridStack* is passed to the *ray_tracing* module this sparse grid is also traced and the \ traced coordinates represent the centre of each pixelization pixel. Parameters ----------- galaxies : [model.galaxy.galaxy.Galaxy] A list of galaxies, which may contain pixelizations and an *ImagePlanePixelization*. grid_stacks : image.array.grid_stacks.GridStack The collection of grid_stacks (regular, sub, etc.) which the image-plane pixelization grid (referred to as pix) \ may be added to.
def sbar(Ss):
    if type(Ss) == list:
        Ss = np.array(Ss)
    npts = Ss.shape[0]
    Ss = Ss.transpose()
    avd, avs = [], []
    D = np.array([Ss[0], Ss[1], Ss[2],
                  Ss[3] + 0.5 * (Ss[0] + Ss[1]),
                  Ss[4] + 0.5 * (Ss[1] + Ss[2]),
                  Ss[5] + 0.5 * (Ss[0] + Ss[2])])
    for j in range(6):
        avd.append(np.average(D[j]))
        avs.append(np.average(Ss[j]))
    D = D.transpose()
    nf = (npts - 1) * 6
    s0 = 0
    Dels = (D - avd)**2
    s0 = np.sum(Dels)
    sigma = np.sqrt(s0 / float(nf))
    return nf, sigma, avs
calculate average s,sigma from list of "s"s.
def next_string(min_size, max_size):
    result = ''
    max_size = max_size if max_size is not None else min_size
    length = RandomInteger.next_integer(min_size, max_size)
    for i in range(length):
        result += random.choice(_chars)
    return result
Generates a random string, consisting of upper and lower case letters (of the English alphabet), digits (0-9), and symbols ("_,.:-/.[].{},#-!,$=%.+^.&*-() "). :param min_size: (optional) minimum string length. :param max_size: maximum string length. :return: a random string.
def headloss_fric(FlowRate, Diam, Length, Nu, PipeRough):
    ut.check_range([Length, ">0", "Length"])
    return (fric(FlowRate, Diam, Nu, PipeRough)
            * 8 / (gravity.magnitude * np.pi**2)
            * (Length * FlowRate**2) / Diam**5)
Return the major head loss (due to wall shear) in a pipe. This equation applies to both laminar and turbulent flows.
def ticket_satisfaction_rating_create(self, ticket_id, data, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/core/satisfaction_ratings#create-a-satisfaction-rating"
    api_path = "/api/v2/tickets/{ticket_id}/satisfaction_rating.json"
    api_path = api_path.format(ticket_id=ticket_id)
    return self.call(api_path, method="POST", data=data, **kwargs)
https://developer.zendesk.com/rest_api/docs/core/satisfaction_ratings#create-a-satisfaction-rating
def get_manifest_digests(image, registry, insecure=False, dockercfg_path=None,
                         versions=('v1', 'v2', 'v2_list', 'oci', 'oci_index'),
                         require_digest=True):
    # NOTE: the versions tuple, header name, and log/error strings are
    # reconstructions of literals lost in extraction.
    registry_session = RegistrySession(registry, insecure=insecure,
                                       dockercfg_path=dockercfg_path)
    digests = {}
    all_not_found = True
    saved_not_found = None
    for version in versions:
        media_type = get_manifest_media_type(version)
        response, saved_not_found = get_manifest(image, registry_session, version)
        if saved_not_found is None:
            all_not_found = False
        if not response:
            continue
        digests[version] = True
        if not response.headers.get('Docker-Content-Digest'):
            logger.warning('Unable to fetch digest for %s, no Docker-Content-Digest header',
                           media_type)
            continue
        digests[version] = response.headers['Docker-Content-Digest']
        context = '/'.join([x for x in [image.namespace, image.repo] if x])
        tag = image.tag
        logger.debug('Image %s:%s has %s manifest digest: %s',
                     context, tag, version, digests[version])
    if not digests:
        if all_not_found and len(versions) > 0:
            raise saved_not_found
        if require_digest:
            raise RuntimeError('No digests found for {}'.format(image))
    return ManifestDigest(**digests)
Return manifest digest for image. :param image: ImageName, the remote image to inspect :param registry: str, URI for registry, if URI schema is not provided, https:// will be used :param insecure: bool, when True registry's cert is not verified :param dockercfg_path: str, dirname of .dockercfg location :param versions: tuple, which manifest schema versions to fetch digest :param require_digest: bool, when True exception is thrown if no digest is set in the headers. :return: dict, versions mapped to their digest
def post(method, hmc, uri, uri_parms, body, logon_required,
         wait_for_completion):
    assert wait_for_completion is True
    try:
        resource = hmc.lookup_by_uri(uri)
    except KeyError:
        raise InvalidResourceError(method, uri)
    resource.update(body)
Operation: Update <resource> Properties.
def conv2d_fixed_padding(inputs, filters, kernel_size, strides,
                         data_format="channels_first", use_td=False,
                         targeting_rate=None, keep_prob=None, is_training=None):
    if strides > 1:
        inputs = fixed_padding(inputs, kernel_size, data_format=data_format)
    if use_td:
        inputs_shape = common_layers.shape_list(inputs)
        if use_td == "weight":
            if data_format == "channels_last":
                size = kernel_size * kernel_size * inputs_shape[-1]
            else:
                size = kernel_size * kernel_size * inputs_shape[1]
            targeting_count = targeting_rate * tf.to_float(size)
            targeting_fn = common_layers.weight_targeting
        elif use_td == "unit":
            targeting_count = targeting_rate * filters
            targeting_fn = common_layers.unit_targeting
        else:
            raise Exception("Unrecognized targeted dropout type: %s" % use_td)
        y = common_layers.td_conv(
            inputs, filters, kernel_size, targeting_count, targeting_fn,
            keep_prob, is_training, do_prune=True, strides=strides,
            padding=("SAME" if strides == 1 else "VALID"),
            data_format=data_format, use_bias=False,
            kernel_initializer=tf.variance_scaling_initializer())
    else:
        y = layers().Conv2D(
            filters=filters, kernel_size=kernel_size, strides=strides,
            padding=("SAME" if strides == 1 else "VALID"), use_bias=False,
            kernel_initializer=tf.variance_scaling_initializer(),
            data_format=data_format)(inputs)
    return y
Strided 2-D convolution with explicit padding. The padding is consistent and is based only on `kernel_size`, not on the dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone). Args: inputs: `Tensor` of size `[batch, channels, height_in, width_in]`. filters: `int` number of filters in the convolution. kernel_size: `int` size of the kernel to be used in the convolution. strides: `int` strides of the convolution. data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. is_training: `bool` for whether the model is in training. Returns: A `Tensor` of shape `[batch, filters, height_out, width_out]`. Raises: Exception: if use_td is not valid.
def _parse_part(client, command, actor, args):
    actor = User(actor)
    channel, _, message = args.partition(' ')
    channel = client.server.get_channel(channel)
    channel.remove_user(actor)
    if actor.nick == client.user.nick:
        client.server.remove_channel(channel)
    client.dispatch_event("PART", actor, channel, message)
    if actor.nick != client.user.nick:
        client.dispatch_event("MEMBERS", channel)
Parse a PART and update channel states, then dispatch events. Note that two events are dispatched here: - PART, because a user parted the channel - MEMBERS, because the channel's members changed
def next(self):
    if self.curr_idx == len(self.idx):
        raise StopIteration
    i, j = self.idx[self.curr_idx]
    self.curr_idx += 1
    audio_paths = []
    texts = []
    for duration, audio_path, text in self.data[i][j:j + self.batch_size]:
        audio_paths.append(audio_path)
        texts.append(text)
    # features are overwritten only on the first epoch
    data_set = self.datagen.prepare_minibatch(
        audio_paths, texts, overwrite=self.is_first_epoch,
        is_bi_graphemes=self.is_bi_graphemes,
        seq_length=self.buckets[i],
        save_feature_as_csvfile=self.save_feature_as_csvfile)
    # NOTE: the data/label dict keys ('x', 'y') and the provide_data name
    # ('data') are reconstructions of literals lost in extraction.
    data_all = [mx.nd.array(data_set['x'])] + self.init_state_arrays
    label_all = [mx.nd.array(data_set['y'])]
    self.label = label_all
    provide_data = [('data', (self.batch_size, self.buckets[i],
                              self.width * self.height))] + self.init_states
    return mx.io.DataBatch(data_all, label_all, pad=0,
                           bucket_key=self.buckets[i],
                           provide_data=provide_data,
                           provide_label=self.provide_label)
Returns the next batch of data.
def process_needlist(app, doctree, fromdocname):
    env = app.builder.env
    for node in doctree.traverse(Needlist):
        if not app.config.needs_include_needs:
            # remove all traceability information from the node
            for att in ('ids', 'names', 'classes', 'dupnames'):
                node[att] = []
            node.replace_self([])
            continue
        id = node.attributes["ids"][0]
        current_needfilter = env.need_all_needlists[id]
        all_needs = env.needs_all_needs
        content = []
        all_needs = list(all_needs.values())
        if current_needfilter["sort_by"] is not None:
            if current_needfilter["sort_by"] == "id":
                all_needs = sorted(all_needs, key=lambda node: node["id"])
            elif current_needfilter["sort_by"] == "status":
                all_needs = sorted(all_needs, key=status_sorter)
        found_needs = procces_filters(all_needs, current_needfilter)
        line_block = nodes.line_block()
        for need_info in found_needs:
            para = nodes.line()
            description = "%s: %s" % (need_info["id"], need_info["title"])
            if current_needfilter["show_status"] and need_info["status"] is not None:
                description += " (%s)" % need_info["status"]
            if current_needfilter["show_tags"] and need_info["tags"] is not None:
                description += " [%s]" % "; ".join(need_info["tags"])
            title = nodes.Text(description, description)
            if not need_info["hide"]:
                ref = nodes.reference('', '')
                ref['refdocname'] = need_info['docname']
                ref['refuri'] = app.builder.get_relative_uri(
                    fromdocname, need_info['docname'])
                ref['refuri'] += '#' + need_info['target_node']['refid']
                ref.append(title)
                para += ref
            else:
                para += title
            line_block.append(para)
        content.append(line_block)
        if len(content) == 0:
            content.append(no_needs_found_paragraph())
        if current_needfilter["show_filters"]:
            content.append(used_filter_paragraph(current_needfilter))
        node.replace_self(content)
Replace all needlist nodes with a list of the collected needs. Augment each need with a backlink to the original location.
def get_movielens_iter(filename, batch_size):
    # NOTE: the field separator and dict keys are reconstructed for the
    # MovieLens ratings format (user::item::score::timestamp).
    logging.info("Preparing data iterators for " + filename + " ... ")
    user = []
    item = []
    score = []
    with open(filename, 'r') as f:
        num_samples = 0
        for line in f:
            tks = line.strip().split('::')
            if len(tks) != 4:
                continue
            num_samples += 1
            user.append((tks[0]))
            item.append((tks[1]))
            score.append((tks[2]))
    user = mx.nd.array(user, dtype='int32')
    item = mx.nd.array(item)
    score = mx.nd.array(score)
    data_train = {'user': user, 'item': item}
    label_train = {'score': score}
    iter_train = mx.io.NDArrayIter(data=data_train, label=label_train,
                                   batch_size=batch_size, shuffle=True)
    return mx.io.PrefetchingIter(iter_train)
Not particularly fast code to parse the text file and load it into NDArrays. Returns a prefetching training data iterator.
def _speak_normal_inherit(self, element):
    self._visit(element, self._speak_normal)
    element.normalize()
Speak the content of element and descendants. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
def init(self, formula, incr=False):
    self.oracle = Solver(name=self.solver, bootstrap_with=formula.hard,
                         incr=incr, use_timer=True)
    # add soft clauses, augmented with fresh selector literals where needed
    for i, cl in enumerate(formula.soft):
        selv = cl[0]
        if len(cl) > 1:
            self.topv += 1
            selv = self.topv
            cl.append(-self.topv)
            self.oracle.add_clause(cl)
        if selv not in self.wght:
            self.sels.append(selv)
            self.wght[selv] = formula.wght[i]
            self.smap[selv] = i
        else:
            self.wght[selv] += formula.wght[i]
    self.sels_set = set(self.sels)
    for v in range(1, formula.nv + 1):
        self.vmap.e2i[v] = v
        self.vmap.i2e[v] = v
    if self.verbose > 1:
        print('c formula: {0} vars, {1} hard, {2} soft'.format(
            formula.nv, len(formula.hard), len(formula.soft)))
Initialize the internal SAT oracle. The oracle is used incrementally and so it is initialized only once when constructing an object of class :class:`RC2`. Given an input :class:`.WCNF` formula, the method bootstraps the oracle with its hard clauses. It also augments the soft clauses with "fresh" selectors and adds them to the oracle afterwards. Optional input parameter ``incr`` (``False`` by default) regulates whether or not Glucose's incremental mode [6]_ is turned on. :param formula: input formula :param incr: apply incremental mode of Glucose :type formula: :class:`.WCNF` :type incr: bool
def get_list(file, fmt):
    out = []
    for i in fmt:
        if i == 'i':
            out.append(get_int(file))
        elif i == 'f' or i == 'd':
            out.append(get_float(file))
        elif i == 's':
            out.append(get_str(file))
        else:
            raise ValueError("Unexpected flag {}".format(i))
    return out
Makes a list from the LspOutput file according to fmt, where each format character selects a reader: 'i' for int, 'f' for float, 'd' for double, 's' for string.
def import_class(classpath):
    modname, classname = classpath.rsplit(".", 1)
    module = importlib.import_module(modname)
    klass = getattr(module, classname)
    return klass
Import the class referred to by the fully qualified class path. Args: classpath: A full "foo.bar.MyClass" path to a class definition. Returns: The class referred to by the classpath. Raises: ImportError: If an error occurs while importing the module. AttributeError: IF the class does not exist in the imported module.
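For example:
>>> Path = import_class("pathlib.Path")
>>> Path is __import__("pathlib").Path
True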
def snr_ratio(in1, in2):
    out1 = 20 * (np.log10(np.linalg.norm(in1) / np.linalg.norm(in1 - in2)))
    return out1
The following function simply calculates the signal to noise ratio between two signals. INPUTS: in1 (no default): Array containing values for signal 1. in2 (no default): Array containing values for signal 2. OUTPUTS: out1 The ratio of the signal to noise ratios of two signals.
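A quick numeric check: with a signal of norm 1 and a residual of norm 0.1, the ratio is 10 and the result is 20*log10(10) = 20 dB (rounded to absorb floating-point noise):
>>> import numpy as np
>>> round(snr_ratio(np.array([1.0, 0.0]), np.array([1.0, 0.1])), 6)
20.0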
def _connect(host=DEFAULT_HOST, port=DEFAULT_PORT):
    if six.text_type(port).isdigit():
        return memcache.Client(['{0}:{1}'.format(host, port)], debug=0)
    raise SaltInvocationError('port must be an integer')
Returns a memcache.Client connected to the given host and port, with config, pillar, or default values assigned to missing values.
def schema_columns(self):
    # NOTE: the property keys ('pos', 'name', 'header') are reconstructions.
    t = self.schema_term
    columns = []
    if t:
        for i, c in enumerate(t.children):
            if c.term_is("Table.Column"):
                p = c.all_props
                p['pos'] = i
                p['name'] = c.value
                p['header'] = self._name_for_col_term(c, i)
                columns.append(p)
    return columns
Return column information only from this schema
def _prepare(constituents, t0, t=None, radians=True):
    if isinstance(t0, Iterable):
        t0 = t0[0]
    if t is None:
        t = [t0]
    if not isinstance(t, Iterable):
        t = [t]
    a0 = astro(t0)
    a = [astro(t_i) for t_i in t]
    V0 = np.array([c.V(a0) for c in constituents])[:, np.newaxis]
    speed = np.array([c.speed(a0) for c in constituents])[:, np.newaxis]
    u = [np.mod(np.array([c.u(a_i) for c in constituents])[:, np.newaxis], 360.0)
         for a_i in a]
    f = [np.mod(np.array([c.f(a_i) for c in constituents])[:, np.newaxis], 360.0)
         for a_i in a]
    if radians:
        speed = d2r * speed
        V0 = d2r * V0
        u = [d2r * each for each in u]
    return speed, u, f, V0
Return constituent speed and equilibrium argument at a given time, and constituent node factors at given times. Arguments: constituents -- list of constituents to prepare t0 -- time at which to evaluate speed and equilibrium argument for each constituent t -- list of times at which to evaluate node factors for each constituent (default: t0) radians -- whether to return the angular arguments in radians or degrees (default: True)
def prepare(self, hash, start, end, name, sources, sample=None):
    if len(sources) == 0:
        raise HistoricSourcesRequired()
    if not isinstance(sources, list):
        sources = [sources]
    params = {'hash': hash, 'start': start, 'end': end, 'name': name,
              'sources': ','.join(sources)}
    if sample:
        params['sample'] = sample
    return self.request.post('prepare', params)
Prepare a historics query which can later be started. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsprepare :param hash: The hash of a CSDL create the query for :type hash: str :param start: when to start querying data from - unix timestamp :type start: int :param end: when the query should end - unix timestamp :type end: int :param name: the name of the query :type name: str :param sources: list of sources e.g. ['facebook','bitly','tumblr'] :type sources: list :param sample: percentage to sample, either 10 or 100 :type sample: int :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.HistoricSourcesRequired`, :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
def clear(self):
    "Remove all rows and reset internal structures"
    for i in range(len(self) - 1, -1, -1):
        del self[i]
    self._key = 0
    if hasattr(self._grid_view, "wx_obj"):
        self._grid_view.wx_obj.ClearGrid()
Remove all rows and reset internal structures
def mock_cmd(self, release, *cmd, **kwargs):
    # NOTE: the mock command template was lost in extraction; this shape is a
    # plausible reconstruction, not the tool's verified invocation.
    fmt = 'mock -r {dist}-{version}-{arch}'
    if kwargs.get('quiet') is True:
        fmt += ' --quiet'
    fmt += ' --chroot'
    return self.call(fmt.format(**release).split() + list(cmd))
Run a mock command in the chroot for a given release
def from_stmt(stmt, engine, **kwargs):
    result_proxy = engine.execute(stmt, **kwargs)
    return from_db_cursor(result_proxy.cursor)
Execute a query in form of a text clause, return the result in form of :class:`PrettyTable`. :type stmt: TextClause :param stmt: :type engine: Engine :param engine: :rtype: PrettyTable
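A hedged usage sketch (SQLAlchemy 1.x, where Engine.execute() exists; output elided):
>>> from sqlalchemy import create_engine, text
>>> engine = create_engine("sqlite:///:memory:")
>>> _ = engine.execute("CREATE TABLE t (a INT)")
>>> _ = engine.execute("INSERT INTO t VALUES (1)")
>>> print(from_stmt(text("SELECT a FROM t"), engine))  # doctest: +SKIP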
def get_user_lists(self, course, aggregationid=''):
    tutor_list = course.get_staff()
    student_list = list(self.database.aggregations.aggregate([
        {"$match": {"courseid": course.get_id()}},
        {"$unwind": "$students"},
        {"$project": {
            "classroom": "$_id",
            "students": 1,
            "grouped": {
                "$anyElementTrue": {
                    "$map": {
                        "input": "$groups.students",
                        "as": "group",
                        "in": {
                            "$anyElementTrue": {
                                "$map": {
                                    "input": "$$group",
                                    "as": "groupmember",
                                    "in": {"$eq": ["$$groupmember", "$students"]}
                                }
                            }
                        }
                    }
                }
            }
        }}
    ]))
    student_list = dict([(student["students"], student) for student in student_list])
    users_info = self.user_manager.get_users_info(list(student_list.keys()) + tutor_list)
    if aggregationid:
        # students outside this aggregation, sorted by name (unknown users last)
        other_students = [student_list[entry]['students'] for entry in student_list.keys()
                          if not student_list[entry]['classroom'] == ObjectId(aggregationid)]
        other_students = sorted(other_students,
                                key=lambda val: (("0" + users_info[val][0]) if users_info[val] else ("1" + val)))
        return student_list, tutor_list, other_students, users_info
    else:
        return student_list, tutor_list, users_info
Get the available student and tutor lists for aggregation edition
def getPageSizeByName(self, pageSizeName):
    pageSize = None
    lowerCaseNames = {pageSize.lower(): pageSize
                      for pageSize in self.availablePageSizes()}
    if pageSizeName.lower() in lowerCaseNames:
        pageSize = getattr(QPagedPaintDevice, lowerCaseNames[pageSizeName.lower()])
    return pageSize
Returns a validated PageSize instance corresponding to the given name. Returns None if the name is not a valid PageSize.
def create_ip_arp_request(srchw, srcip, targetip):
    ether = Ethernet()
    ether.src = srchw
    ether.dst = SpecialEthAddr.ETHER_BROADCAST.value
    ether.ethertype = EtherType.ARP
    arp = Arp()
    arp.operation = ArpOperation.Request
    arp.senderhwaddr = srchw
    arp.senderprotoaddr = srcip
    arp.targethwaddr = SpecialEthAddr.ETHER_BROADCAST.value
    arp.targetprotoaddr = targetip
    return ether + arp
Create and return a packet containing an Ethernet header and ARP header.
def download_data(identifier, outdir):
    # NOTE: URL fragments, column names, and messages are reconstructions of
    # literals lost in extraction.
    if use_local_data_repository is not None:
        url_base = 'file:' + request.pathname2url(
            use_local_data_repository + os.sep)
    else:
        url_base = repository_url
    print('Using repository: {}'.format(url_base))
    url = url_base + inventory_filename
    filename, headers = request.urlretrieve(url)
    df = pd.read_csv(
        filename,
        delim_whitespace=True,
        comment='#',
        header=None,
        names=['identifier', 'rel_path'],
    )
    rel_path_query = df.query('identifier == "{}"'.format(identifier))
    if rel_path_query.shape[0] == 0:
        raise Exception('identifier not found in inventory')
    rel_path = rel_path_query['rel_path'].values[0]
    url = url_base + rel_path
    print('Downloading {}'.format(url))
    filename, headers = request.urlretrieve(url)
    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    zip_obj = zipfile.ZipFile(filename)
    zip_obj.extractall(outdir)
Download data from a separate data repository for testing. Parameters ---------- identifier: string The identifier used to find the data set outdir: string unzip the data in this directory
def paint(self):
    snippet = {
        'fill-extrusion-opacity': VectorStyle.get_style_value(self.opacity),
        'fill-extrusion-color': VectorStyle.get_style_value(self.color),
        'fill-extrusion-base': VectorStyle.get_style_value(self.base),
        'fill-extrusion-height': VectorStyle.get_style_value(self.height)
    }
    if self.translate:
        snippet['fill-extrusion-translate'] = self.translate
    return snippet
Renders a javascript snippet suitable for use as a mapbox-gl fill-extrusion paint entry Returns: A dict that can be converted to a mapbox-gl javascript paint snippet
def set_attributes(obj, additional_data):
    for key, value in additional_data.items():
        if hasattr(obj, key):
            raise ValueError("Key %s in additional_data already exists in this object" % key)
        setattr(obj, _strip_column_name(key), value)
Given an object and a dictionary, give the object new attributes from that dictionary. Uses _strip_column_name to get rid of whitespace/uppercase/special characters.
def xlim(min, max):
    if not isinstance(min, (int, float, complex)):
        min = tplot_utilities.str_to_int(min)
    if not isinstance(max, (int, float, complex)):
        max = tplot_utilities.str_to_int(max)
    if 'x_range' in tplot_opt_glob:
        lim_info['xlast'] = tplot_opt_glob['x_range']
    else:
        lim_info['xfull'] = Range1d(min, max)
        lim_info['xlast'] = Range1d(min, max)
    tplot_opt_glob['x_range'] = [min, max]
    return
This function will set the x axis range for all time series plots Parameters: min : flt The time to start all time series plots. Can be given in seconds since epoch, or as a string in the format "YYYY-MM-DD HH:MM:SS" max : flt The time to end all time series plots. Can be given in seconds since epoch, or as a string in the format "YYYY-MM-DD HH:MM:SS" Returns: None Examples: >>> # Set the timespan to be 2017-07-17 00:00:00 plus 1 day >>> import pytplot >>> pytplot.xlim(1500249600, 1500249600 + 86400) >>> # The same as above, but using different inputs >>> pytplot.xlim("2017-07-17 00:00:00", "2017-07-18 00:00:00")
def _get_log_lines(self, n=300):
    with open(self.log_file) as fh:
        last_lines = fh.readlines()[-n:]
    return last_lines
Returns a list with the last ``n`` lines of the nextflow log file Parameters ---------- n : int Number of last lines from the log file Returns ------- list List of strings with the nextflow log
def _check_choices_attribute(self):
    # NOTE: the warning_params keys ('msg', 'hint', 'obj') are reconstructed
    # from the checks.Warning signature.
    if self.choices:
        warning_params = {
            'msg': ("'choices' contains an invalid time zone value '{value}' "
                    "which was not found as a supported time zone by pytz "
                    "{version}."),
            'hint': "Values must be found in pytz.all_timezones.",
            'obj': self,
        }
        for option_key, option_value in self.choices:
            if isinstance(option_value, (list, tuple)):
                # this is an optgroup; check the keys inside the group
                for optgroup_key in map(lambda x: x[0], option_value):
                    if optgroup_key not in pytz.all_timezones:
                        if option_key not in self.empty_values:
                            warning_params.update({
                                'msg': warning_params['msg'].format(
                                    value=option_key, version=pytz.VERSION)
                            })
                            return [checks.Warning(**warning_params)]
    return []
Checks to make sure that choices contains valid timezone choices.
def items_sharing_ngrams(self, query):
    shared = {}
    remaining = {}
    for ngram in self.split(query):
        try:
            for match, count in self._grams[ngram].items():
                remaining.setdefault(ngram, {}).setdefault(match, count)
                # only count up to as many occurrences as the matched item has
                if remaining[ngram][match] > 0:
                    remaining[ngram][match] -= 1
                    shared.setdefault(match, 0)
                    shared[match] += 1
        except KeyError:
            pass
    return shared
Retrieve the subset of items that share n-grams with the query string. :param query: look up items that share N-grams with this string. :return: mapping from matched string to the number of shared N-grams. >>> from ngram import NGram >>> n = NGram(["ham","spam","eggs"]) >>> sorted(n.items_sharing_ngrams("mam").items()) [('ham', 2), ('spam', 2)]
def create_dialog(self):
    # NOTE: button/menu labels and format options were lost in extraction;
    # the strings below are plausible placeholders.
    bbox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
    self.idx_ok = bbox.button(QDialogButtonBox.Ok)
    self.idx_cancel = bbox.button(QDialogButtonBox.Cancel)
    filebutton = QPushButton()
    filebutton.setText('Choose')
    self.idx_filename = filebutton
    self.xp_format = FormMenu(['CSV', 'Brain Vision'])
    self.all_types = FormBool('All event types')
    self.idx_evt_type = QListWidget()
    self.idx_evt_type.setSelectionMode(QAbstractItemView.ExtendedSelection)
    filebutton.clicked.connect(self.save_as)
    self.all_types.connect(self.toggle_buttons)
    bbox.clicked.connect(self.button_clicked)
    form = QFormLayout()
    form.addRow('Filename', self.idx_filename)
    form.addRow('File format', self.xp_format)
    form.addRow(self.all_types)
    form.addRow('Event type(s)', self.idx_evt_type)
    btnlayout = QHBoxLayout()
    btnlayout.addStretch(1)
    btnlayout.addWidget(bbox)
    vlayout = QVBoxLayout()
    vlayout.addLayout(form)
    vlayout.addStretch(1)
    vlayout.addLayout(btnlayout)
    self.setLayout(vlayout)
Create the dialog.
def centroid_2dg(data, error=None, mask=None):
    gfit = fit_2dgaussian(data, error=error, mask=mask)
    return np.array([gfit.x_mean.value, gfit.y_mean.value])
Calculate the centroid of a 2D array by fitting a 2D Gaussian (plus a constant) to the array. Invalid values (e.g. NaNs or infs) in the ``data`` or ``error`` arrays are automatically masked. The mask for invalid values represents the combination of the invalid-value masks for the ``data`` and ``error`` arrays. Parameters ---------- data : array_like The 2D data array. error : array_like, optional The 2D array of the 1-sigma errors of the input ``data``. mask : array_like (bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Returns ------- centroid : `~numpy.ndarray` The ``x, y`` coordinates of the centroid.
def project_closing(self, project):
    yield from super().project_closing(project)
    tasks = []
    for device in self._devices.values():
        if device.project.id == project.id:
            # asyncio.async() was the pre-3.7 spelling of ensure_future()
            tasks.append(asyncio.async(device.delete()))
    if tasks:
        done, _ = yield from asyncio.wait(tasks)
        for future in done:
            try:
                future.result()
            except (Exception, GeneratorExit) as e:
                log.error("Could not delete device {}".format(e), exc_info=1)
Called when a project is about to be closed. :param project: Project instance
def parse_dict_header(value):
    result = {}
    for item in _parse_list_header(value):
        if '=' not in item:
            result[item] = None
            continue
        name, value = item.split('=', 1)
        if value[:1] == value[-1:] == '"':
            value = unquote_header_value(value[1:-1])
        result[name] = value
    return result
Parse lists of key, value pairs as described by RFC 2068 Section 2 and convert them into a python dict: >>> d = parse_dict_header('foo="is a fish", bar="as well"') >>> type(d) is dict True >>> sorted(d.items()) [('bar', 'as well'), ('foo', 'is a fish')] If there is no value for a key it will be `None`: >>> parse_dict_header('key_without_value') {'key_without_value': None} To create a header from the :class:`dict` again, use the :func:`dump_header` function. :param value: a string with a dict header. :return: :class:`dict` :rtype: dict
def get_vprof_version(filename):
    with open(filename) as src_file:
        version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                                  src_file.read(), re.M)
        if version_match:
            return version_match.group(1)
    raise RuntimeError('Unable to find version info.')
Returns actual version specified in filename.
def handle_starting_instance(self):
    # NOTE: state names, config key, and return messages are reconstructions
    # of literals lost in extraction.
    # Check if we are in startup; when paused, defer to the main loop.
    if not self.state_handler.check_for_startup() or self.is_paused():
        self.set_start_timeout(None)
        if self.is_paused():
            self.state_handler.set_state(
                self.state_handler.is_running() and 'running' or 'stopped')
        return None
    if self.has_lock():
        if not self.update_lock():
            logger.info("Lost lock while starting up. Demoting self.")
            self.demote()
            return 'stopped PostgreSQL while starting up because leader key was lost'
        timeout = self._start_timeout or self.patroni.config['master_start_timeout']
        time_left = timeout - self.state_handler.time_in_state()
        if time_left <= 0:
            if self.is_failover_possible(self.cluster.members):
                logger.info("Demoting self because master startup is taking too long")
                self.demote()
                return 'stopped PostgreSQL because of startup timeout'
            else:
                return 'master start has timed out, but continuing to wait because failover is not possible'
        else:
            msg = self.process_manual_failover_from_leader()
            if msg is not None:
                return msg
            return 'PostgreSQL is still starting up, {0:.0f} seconds until timeout'.format(time_left)
    else:
        logger.info("Still starting up as a standby.")
        return None
Starting up PostgreSQL may take a long time. If we are the leader, we may eventually want to fail over to another member rather than keep waiting.
def get_next_iteration(self, iteration, iteration_kwargs={}):
    # number of SuccessiveHalving rungs for this bracket
    s = self.max_SH_iter - 1 - (iteration % self.max_SH_iter)
    # number of configurations in the first rung
    n0 = int(np.floor((self.max_SH_iter) / (s + 1)) * self.eta**s)
    ns = [max(int(n0 * (self.eta**(-i))), 1) for i in range(s + 1)]
    return SuccessiveHalving(HPB_iter=iteration, num_configs=ns,
                             budgets=self.budgets[(-s-1):],
                             config_sampler=self.config_generator.get_config,
                             **iteration_kwargs)
BO-HB uses (just like Hyperband) SuccessiveHalving for each iteration. See Li et al. (2016) for reference. Parameters ---------- iteration: int the index of the iteration to be instantiated Returns ------- SuccessiveHalving: the SuccessiveHalving iteration with the corresponding number of configurations
def __related_categories(self, category_id):
    related = []
    for cat in self.categories_tree:
        if category_id in self.categories_tree[cat]:
            related.append(self.categories[cat])
    return related
Get all related categories to a given one