code
stringlengths
26
79.6k
docstring
stringlengths
1
46.9k
def upload(sess_id_or_alias, files):
    """Upload *files* to the session's home folder.

    A no-op when *files* is empty; prints progress markers and exits the
    process with status 1 if the upload fails.

    :param sess_id_or_alias: Session ID or its alias given at creation.
    :param files: Paths to upload.
    """
    if not files:
        return
    with Session() as session:
        try:
            print_wait()
            kernel = session.Kernel(sess_id_or_alias)
            kernel.upload(files, show_progress=True)
            print_done()
        except Exception as e:
            print_error(e)
            sys.exit(1)
Upload files to user's home folder. \b SESSID: Session ID or its alias given when creating the session. FILES: Path to upload.
def lon360to180(lon):
    """Convert longitudes from the (0, 360) convention to (-180, 180).

    Values outside (0, 360) trigger a warning and are first wrapped with
    ``wraplon``.

    Parameters
    ----------
    lon : numpy.ndarray
        Longitudes in degrees, nominally in (0, 360).

    Returns
    -------
    numpy.ndarray
        Longitudes in degrees in (-180, 180] (180 maps to -180).
    """
    if np.any(lon > 360.0) or np.any(lon < 0.0):
        print("Warning: lon outside expected range")
        lon = wraplon(lon)
    # Bug fix: the original used true division (``/``) -- a Python 2 -> 3
    # regression. Floor division is required so that lons >= 180 are
    # shifted down by exactly 360 degrees and lons < 180 are unchanged.
    lon = lon - (lon.astype(int) // 180) * 360.0
    return lon
Convert longitude from (0, 360) to (-180, 180)
def show_error(cls, error=True):
    """Show an error border around the conspect elements.

    When ``error`` is truthy a red border is drawn around the input,
    conspect and subconspect elements; otherwise the border is removed.
    """
    border = "2px solid red" if error else "0"
    for element in (cls.input_el, cls.conspect_el, cls.subconspect_el):
        element.style.border = border
Show `error` around the conspect elements. If the `error` is ``False``, hide it.
def read_stdout(self):
    """Return the standard output captured from the QEMU process.

    Only meaningful once the process has stopped or crashed.  Returns an
    empty string when no stdout file is configured or it cannot be read.
    """
    if not self._stdout_file:
        return ""
    try:
        with open(self._stdout_file, "rb") as file:
            return file.read().decode("utf-8", errors="replace")
    except OSError as e:
        log.warning("Could not read {}: {}".format(self._stdout_file, e))
        return ""
Reads the standard output of the QEMU process. Only use when the process has been stopped or has crashed.
def issuer(self):
    """The certificate issuer field as :py:class:`~django_ca.subject.Subject`."""
    pairs = [(attr.oid, attr.value) for attr in self.x509.issuer]
    return Subject(pairs)
The certificate issuer field as :py:class:`~django_ca.subject.Subject`.
# Dispatch ``subcommand`` to the matching ``NAppsAPI`` handler after
# pre-processing the NApp arguments with ``parse_napps``.
# NOTE(review): the subscripts ``args[]`` lost their string keys during
# extraction -- the original key names are unrecoverable from this view.
def call(subcommand, args): args[] = parse_napps(args[]) func = getattr(NAppsAPI, subcommand) func(args)
Call a subcommand passing the args.
# Interpret raw pixel-register data words for one double column ``dc``,
# with consistency checks, and fill ``pixel_array`` (a masked array whose
# mask is cleared for pixels that received valid data).  ``invert``
# inverts the read pixel bits.
# NOTE(review): the message strings of ``logging.warning(, dc)`` and
# ``RuntimeError(, dc)`` were stripped during extraction; also note the
# ``RuntimeError`` is constructed but never raised -- likely a latent bug
# in the original (should be ``raise RuntimeError(...)``); confirm
# against the upstream source.
def interpret_pixel_data(data, dc, pixel_array, invert=True): index_value = np.where(is_address_record(data))[0] + 1 index_value = index_value[is_value_record(data[index_value])] index_address = index_value - 1 address = get_address_record_address(data[index_address]) value = get_value_record(data[index_address + 1]) address_split = np.array_split(address, np.where(np.diff(address.astype(np.int32)) < 0)[0] + 1) value_split = np.array_split(value, np.where(np.diff(address.astype(np.int32)) < 0)[0] + 1) if len(address_split) > 5: pixel_array.mask[dc * 2, :] = True pixel_array.mask[dc * 2 + 1, :] = True logging.warning(, dc) return mask = np.empty_like(pixel_array.data) mask[:] = len(address_split) for bit, (bit_address, bit_value) in enumerate(zip(address_split, value_split)): if len(bit_address) == 0: logging.warning(, dc) continue if len(bit_address) != 42: logging.warning(, dc) if (np.any(bit_address > 672)): RuntimeError(, dc) pixel = [] for i in bit_address: pixel.extend(range(i - 15, i + 1)) pixel = np.array(pixel) value_new = bit_value.view(np.uint8) if invert: value_new = np.invert(value_new) value_new = np.insert(value_new[::4], np.arange(len(value_new[1::4])), value_new[1::4]) value_bit = np.unpackbits(value_new, axis=0) if len(address_split) == 5: bit_set = len(address_split) - bit - 1 else: bit_set = bit pixel_array.data[dc * 2, pixel[pixel >= 336] - 336] = np.bitwise_or(pixel_array.data[dc * 2, pixel[pixel >= 336] - 336], np.left_shift(value_bit[pixel >= 336], bit_set)) pixel_array.data[dc * 2 + 1, pixel[pixel < 336]] = np.bitwise_or(pixel_array.data[dc * 2 + 1, pixel[pixel < 336]], np.left_shift(value_bit[pixel < 336], bit_set)[::-1]) mask[dc * 2, pixel[pixel >= 336] - 336] = mask[dc * 2, pixel[pixel >= 336] - 336] - 1 mask[dc * 2 + 1, pixel[pixel < 336]] = mask[dc * 2 + 1, pixel[pixel < 336]] - 1 pixel_array.mask[np.equal(mask, 0)] = False
Takes the pixel raw data and interprets them. This includes consistency checks and pixel/data matching. The data has to come from one double column only but can have more than one pixel bit (e.g. TDAC = 5 bit). Parameters ---------- data : numpy.ndarray The raw data words. dc : int The double column where the data is from. pixel_array : numpy.ma.ndarray The masked numpy.ndarrays to be filled. The masked is set to zero for pixels with valid data. invert : boolean Invert the read pixel data.
# Load a GeoJSON file and store the outer ring of its first feature as a
# ``Polygon`` in ``self.area`` (nothing is returned, despite the
# docstring).  NOTE(review): the dict keys and the ``open`` mode argument
# were stripped during extraction (``geojson[][0][][][0]``); also the
# file handle from ``open`` is never closed -- confirm against upstream.
def get_area(self, geojson): geojson = json.load(open(geojson, )) self.area = Polygon(geojson[][0][][][0])
Read the first feature from the geojson file and store its outer ring as a Polygon object in ``self.area``.
# Log ``msg % args`` with severity DEBUG, extending the message via
# ``getExtendedMsg`` first.  NOTE(review): ``self`` is passed as the
# first positional argument to ``_baseLogger.debug`` -- this only works
# if ``_baseLogger`` holds an unbound method / class attribute rather
# than a logger instance; verify against the definition of
# ``_baseLogger`` before changing.
def debug(self, msg, *args, **kwargs): self._baseLogger.debug(self, self.getExtendedMsg(msg), *args, **kwargs)
Log 'msg % args' with severity 'DEBUG'. To pass exception information, use the keyword argument exc_info with a true value, e.g. logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
# Shift bits by ``n`` to the left in place and return self.
# Appends ``n`` zero bits at the end (``Bits(n)`` -- presumably an
# all-zero bitstring of length n; confirm against the Bits constructor),
# then drops ``n`` bits from the start.  Statement order matters: the
# append must happen before the truncation.
def _ilshift(self, n): assert 0 < n <= self.len self._append(Bits(n)) self._truncatestart(n) return self
Shift bits by n to the left in place. Return self.
def synctree(src, dst, onexist=None):
    """Recursively sync files at directory *src* to *dst*.

    More or less equivalent to ``cp -n -R ${src}/ ${dst}/``: a file that
    exists at the same path in both trees is NOT overwritten in *dst*.
    Pass ``onexist`` to raise (or otherwise react) on such conditions.

    Args:
        src (path-like): source directory; must exist.
        dst (path-like): destination directory; need not exist, but must
            be a directory if it does.
        onexist (callable): called with the full destination path when a
            file already exists there; defaults to a no-op.

    Raises:
        ValueError: if *src* is not a directory, or *dst* exists and is
            not a directory.
    """
    src = pathlib.Path(src).resolve()
    dst = pathlib.Path(dst).resolve()
    if not src.is_dir():
        raise ValueError("src must be an existing directory: {}".format(src))
    if dst.exists() and not dst.is_dir():
        raise ValueError("dst exists but is not a directory: {}".format(dst))
    if onexist is None:
        # Bug fix: the default callback previously took no arguments,
        # but the documented contract passes the destination path as the
        # only argument, so the default raised TypeError when invoked.
        def onexist(path):
            pass
    _synctree(src, dst, onexist)
Recursively sync files at directory src to dst This is more or less equivalent to:: cp -n -R ${src}/ ${dst}/ If a file at the same path exists in src and dst, it is NOT overwritten in dst. Pass ``onexist`` in order to raise an error on such conditions. Args: src (path-like): source directory dst (path-like): destination directory, does not need to exist onexist (callable): function to call if file exists at destination, takes the full path to destination file as only argument
def current_session():
    """Return the :class:`Session` for the current driver and app,
    instantiating one if needed.

    Returns:
        Session: The :class:`Session` for the current driver and app.
    """
    driver = current_driver or default_driver
    session_key = "{driver}:{session}:{app}".format(
        driver=driver, session=session_name, app=str(id(app)))
    session = _session_pool.get(session_key)
    if session is None:
        # Imported lazily to avoid a circular import at module load.
        from capybara.session import Session
        session = Session(driver, app)
        _session_pool[session_key] = session
    return session
Returns the :class:`Session` for the current driver and app, instantiating one if needed. Returns: Session: The :class:`Session` for the current driver and app.
# Recursively merge GLSL ``#include`` directives into ``code`` (up to 10
# substitution passes).  NOTE(review): the include-matching regex
# pattern, the log/format strings, and surrounding literals were
# stripped during extraction; as written there is also a stray bare
# ``return`` before the for-loop that would make the loop unreachable --
# almost certainly extraction damage, confirm against upstream.
def merge_includes(code): pattern = regex = re.compile(pattern) includes = [] def replace(match): filename = match.group("filename") if filename not in includes: includes.append(filename) path = glsl.find(filename) if not path: logger.critical( % filename) raise RuntimeError("File not found", filename) text = % filename with open(path) as fh: text += fh.read() text += % filename return text return for i in range(10): if re.search(regex, code): code = re.sub(regex, replace, code) else: break return code
Merge all includes recursively.
# Add ``count`` units to this Juju application, honouring an optional
# placement directive ``to``, then wait for the new units to appear in
# the model.  NOTE(review): the ``log.debug`` call and the
# ``_wait_for_new`` entity-type literal were mangled during extraction
# (``if count == 1 else`` inline without operands is not valid Python) --
# restore from upstream before use.
async def add_unit(self, count=1, to=None): app_facade = client.ApplicationFacade.from_connection(self.connection) log.debug( , count, if count == 1 else , self.name) result = await app_facade.AddUnits( application=self.name, placement=parse_placement(to) if to else None, num_units=count, ) return await asyncio.gather(*[ asyncio.ensure_future(self.model._wait_for_new(, unit_id)) for unit_id in result.units ])
Add one or more units to this application. :param int count: Number of units to add :param str to: Placement directive, e.g.:: '23' - machine 23 'lxc:7' - new lxc container on machine 7 '24/lxc/3' - lxc container 3 or machine 24 If None, a new machine is provisioned.
def add_validation_message(self, message):
    """Add *message* to the per-file ``messages`` dict.

    :param message: validation message; grouped under ``message.file``.
    """
    self.messages.setdefault(message.file, []).append(message)
Adds a message to the messages dict :param message:
# Update a domain mail forward's destination list: add addresses from
# ``dest_add``, remove those in ``dest_del``, and push the new list via
# the API only when it actually changed.  NOTE(review): the echo format
# string, the options dict key and the ``cls.call`` endpoint literal
# were stripped during extraction (``{: fwds}``, ``cls.call(, ...)``).
def update(cls, domain, source, dest_add, dest_del): result = None if dest_add or dest_del: current_destinations = cls.get_destinations(domain, source) fwds = current_destinations[:] if dest_add: for dest in dest_add: if dest not in fwds: fwds.append(dest) if dest_del: for dest in dest_del: if dest in fwds: fwds.remove(dest) if ((len(current_destinations) != len(fwds)) or (current_destinations != fwds)): cls.echo( % (source, domain)) options = {: fwds} result = cls.call(, domain, source, options) return result
Update a domain mail forward destinations.
def Print(self, x, data, message, **kwargs):
    """Call tf.Print on the first slice of ``x``.

    Args:
      x: a LaidOutTensor
      data: a list of LaidOutTensor
      message: a string
      **kwargs: keyword arguments to tf.Print

    Returns:
      a LaidOutTensor
    """
    tf.logging.info("PlacementMeshImpl::Print")
    new_slices = list(x.tensor_list)
    # Flatten all slices of every LaidOutTensor in `data`.
    flat_data = [t for d in data for t in d.tensor_list]
    with tf.device(self._devices[0]):
        new_slices[0] = tf.Print(new_slices[0], flat_data, message, **kwargs)
    return self.LaidOutTensor(new_slices)
call tf.Print. Args: x: a LaidOutTensor data: a list of LaidOutTensor message: a string **kwargs: keyword arguments to tf.print Returns: a LaidOutTensor
# Construct (and create if missing) the per-user ``hitman`` data
# directory, choosing a platform-appropriate location.
# NOTE(review): every string literal (platform names, path components,
# the HOME/APPDATA expansions) was stripped during extraction --
# ``os.path.expanduser()`` and ``platform.system() == :`` are not valid
# as written; restore from upstream.
def directory(): home = os.path.expanduser() if platform.system() == : hitman_dir = os.path.join(home, ) elif platform.system() == : hitman_dir = os.path.join(home, , , ) elif platform.system() == : hitman_dir = os.path.join(os.environ[], ) else: hitman_dir = os.path.join(home, ) if not os.path.isdir(hitman_dir): os.mkdir(hitman_dir) return hitman_dir
Construct hitman_dir from os name
# Internal usage only: validate an RGB24 image given as str, numpy array
# or nested sequence, infer width/height when not supplied, and dispatch
# to the raw or JPEG encoder depending on ``format``.
# NOTE(review): ``width /= 3`` and ``rgb24.nbytes / 3`` use true
# division, which yields floats under Python 3 -- if this code targets
# Python 3 these should presumably be ``//``; confirm against upstream
# before changing.
def __EncodedAttribute_generic_encode_rgb24(self, rgb24, width=0, height=0, quality=0, format=_ImageFormat.RawImage): if not is_seq(rgb24): raise TypeError("Expected sequence (str, numpy.ndarray, list, tuple " "or bytearray) as first argument") is_str = is_pure_str(rgb24) if is_str: if not width or not height: raise ValueError("When giving a string as data, you must also " "supply width and height") if np and isinstance(rgb24, np.ndarray): if rgb24.ndim != 3: if not width or not height: raise ValueError("When giving a non 2D numpy array, width and " "height must be supplied") if rgb24.nbytes / 3 != width * height: raise ValueError("numpy array size mismatch") else: if rgb24.itemsize != 1: raise TypeError("Expected numpy array with itemsize == 1") if not rgb24.flags.c_contiguous: raise TypeError("Currently, only contiguous, aligned numpy arrays " "are supported") if not rgb24.flags.aligned: raise TypeError("Currently, only contiguous, aligned numpy arrays " "are supported") if not is_str and (not width or not height): height = len(rgb24) if height < 1: raise IndexError("Expected sequence with at least one row") row0 = rgb24[0] if not is_seq(row0): raise IndexError("Expected sequence (str, numpy.ndarray, list, tuple or " "bytearray) inside a sequence") width = len(row0) if is_pure_str(row0) or type(row0) == bytearray: width /= 3 if format == _ImageFormat.RawImage: self._encode_rgb24(rgb24, width, height) elif format == _ImageFormat.JpegImage: self._encode_jpeg_rgb24(rgb24, width, height, quality)
Internal usage only
# Convert geostationary projection angles (x, y) to geodetic lons/lats
# in degrees, using the satellite geometry carried in
# ``geos_area.proj_dict``.  NOTE(review): all ``proj_dict`` key literals
# (presumably 'h', 'a', 'b', 'lon_0' or similar) were stripped during
# extraction -- restore from upstream before use.
def _lonlat_from_geos_angle(x, y, geos_area): h = (geos_area.proj_dict[] + geos_area.proj_dict[]) / 1000 b__ = (geos_area.proj_dict[] / geos_area.proj_dict[]) ** 2 sd = np.sqrt((h * np.cos(x) * np.cos(y)) ** 2 - (np.cos(y)**2 + b__ * np.sin(y)**2) * (h**2 - (geos_area.proj_dict[] / 1000)**2)) sn = (h * np.cos(x) * np.cos(y) - sd) / (np.cos(y)**2 + b__ * np.sin(y)**2) s1 = h - sn * np.cos(x) * np.cos(y) s2 = sn * np.sin(x) * np.cos(y) s3 = -sn * np.sin(y) sxy = np.sqrt(s1**2 + s2**2) lons = np.rad2deg(np.arctan2(s2, s1)) + geos_area.proj_dict.get(, 0) lats = np.rad2deg(-np.arctan2(b__ * s3, sxy)) return lons, lats
Get lons and lats from x, y in projection coordinates.
def _dump_query_timestamps(self, current_time: float): windows = [10, 11, 15, 20, 30, 60] print("GraphQL requests:", file=sys.stderr) for query_hash, times in self._graphql_query_timestamps.items(): print(" {}".format(query_hash), file=sys.stderr) for window in windows: reqs_in_sliding_window = sum(t > current_time - window * 60 for t in times) print(" last {} minutes: {} requests".format(window, reqs_in_sliding_window), file=sys.stderr)
Output the number of GraphQL requests, grouped by query_hash, within several recent sliding time windows.
def secp256k1():
    """Create the secp256k1 curve."""
    # y^2 = x^3 + 7 over GF(p) with p = 2^256 - 2^32 - 977.
    p = 2 ** 256 - 2 ** 32 - 977
    field = FiniteField(p)
    curve = EllipticCurve(field, 0, 7)
    generator = curve.point(
        0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798,
        0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8)
    order = 2 ** 256 - 432420386565659656852420866394968145599
    return ECDSA(curve, generator, order)
create the secp256k1 curve
def estimate_tx_operational_gas(self, safe_address: str, data_bytes_length: int):
    """Estimate gas for signature verification and other Safe-related
    bookkeeping before/after executing a transaction.

    The estimate is the sum of:
      - a 15000 gas base cost,
      - 100 gas per 32-byte word of the data,
      - 5000 gas per required signature (the Safe threshold).

    :param safe_address: Address of the safe
    :param data_bytes_length: Length of the data in bytes
    :return: estimated gas cost
    """
    base_gas = 15000
    data_gas = (data_bytes_length // 32) * 100
    signature_gas = 5000 * self.retrieve_threshold(safe_address)
    return base_gas + data_gas + signature_gas
Estimates the gas for the verification of the signatures and other safe related tasks before and after executing a transaction. Calculation will be the sum of: - Base cost of 15000 gas - 100 of gas per word of `data_bytes` - Validate the signatures 5000 * threshold (ecrecover for ecdsa ~= 4K gas) :param safe_address: Address of the safe :param data_bytes_length: Length of the data (in bytes, so `len(HexBytes('0x12'))` would be `1` :return: gas costs per signature * threshold of Safe
# Subscribe the MQTT receive callback to one or more topics, parsing an
# optional QoS level from the second-to-last topic level (defaulting to
# 0 when it is not an integer).  NOTE(review): the ``topic.split()``
# separator (presumably '/'), and both logger format strings, were
# stripped during extraction -- restore from upstream.
def _handle_subscription(self, topics): if not isinstance(topics, list): topics = [topics] for topic in topics: topic_levels = topic.split() try: qos = int(topic_levels[-2]) except ValueError: qos = 0 try: _LOGGER.debug(, topic, qos) self._sub_callback(topic, self.recv, qos) except Exception as exception: _LOGGER.exception( , topic, exception)
Handle subscription of topics.
# Pre-build the BRIAN connections, but only when none of the relevant
# connection parameters is explored AND the required neuron groups
# (presumably 'neurons_i' / 'neurons_e') are present in
# ``network_dict``.  NOTE(review): the membership-test key literals were
# stripped during extraction (``and in network_dict``) -- restore from
# upstream.
def pre_build(self, traj, brian_list, network_dict): self._pre_build = not _explored_parameters_in_group(traj, traj.parameters.connections) self._pre_build = (self._pre_build and in network_dict and in network_dict) if self._pre_build: self._build_connections(traj, brian_list, network_dict)
Pre-builds the connections. Pre-build is only performed if none of the relevant parameters is explored and the relevant neuron groups exist. :param traj: Trajectory container :param brian_list: List of objects passed to BRIAN network constructor. Adds: Connections, amount depends on clustering :param network_dict: Dictionary of elements shared among the components Expects: 'neurons_i': Inhibitory neuron group 'neurons_e': Excitatory neuron group Adds: Connections, amount depends on clustering
# Initialize the osid form map: delegate to the base class, then seed
# the agent id (from kwargs), the group default and the avatar default.
# NOTE(review): the ``self._my_map[]`` keys and the ``kwargs[]`` key
# were stripped during extraction -- restore from upstream.
def _init_map(self, record_types=None, **kwargs): osid_objects.OsidObjectForm._init_map(self, record_types=record_types) self._my_map[] = [str(kwargs[])] self._my_map[] = self._group_default self._my_map[] = self._avatar_default
Initialize form map
def add_tot_length(self, qname, sname, value, sym=True):
    """Record a total alignment length for the (qname, sname) pair in
    ``self.alignment_lengths``.

    When ``sym`` is true the value is mirrored to (sname, qname) too.
    """
    pairs = [(qname, sname)]
    if sym:
        pairs.append((sname, qname))
    for row, col in pairs:
        self.alignment_lengths.loc[row, col] = value
Add a total length value to self.alignment_lengths.
# Make an HTTP query to the local docker daemon (coroutine, old-style
# ``yield from`` asyncio).  Builds the versioned URL, (re)creates the
# aiohttp session as needed, maps connection failures and timeouts to
# DockerError, and raises DockerHttp304Error / DockerHttp404Error /
# DockerError for status >= 300.  Returns the aiohttp response.
# NOTE(review): the literal compared in ``if path == :`` was stripped
# during extraction; also note the mutable default arguments
# (``data={}, params={}``) -- confirm against upstream before changing.
def http_query(self, method, path, data={}, params={}, timeout=300): data = json.dumps(data) if timeout is None: timeout = 60 * 60 * 24 * 31 if path == : url = "http://docker/v1.12/" + path else: url = "http://docker/v" + DOCKER_MINIMUM_API_VERSION + "/" + path try: if path != "version": yield from self._check_connection() if self._session is None or self._session.closed: connector = self.connector() self._session = aiohttp.ClientSession(connector=connector) response = yield from self._session.request( method, url, params=params, data=data, headers={"content-type": "application/json", }, timeout=timeout ) except (aiohttp.ClientResponseError, aiohttp.ClientOSError) as e: raise DockerError("Docker has returned an error: {}".format(str(e))) except (asyncio.TimeoutError): raise DockerError("Docker timeout " + method + " " + path) if response.status >= 300: body = yield from response.read() try: body = json.loads(body.decode("utf-8"))["message"] except ValueError: pass log.debug("Query Docker %s %s params=%s data=%s Response: %s", method, path, params, data, body) if response.status == 304: raise DockerHttp304Error("Docker has returned an error: {} {}".format(response.status, body)) elif response.status == 404: raise DockerHttp404Error("Docker has returned an error: {} {}".format(response.status, body)) else: raise DockerError("Docker has returned an error: {} {}".format(response.status, body)) return response
Make a query to the docker daemon :param method: HTTP method :param path: Endpoint in API :param data: Dictionnary with the body. Will be transformed to a JSON :param params: Parameters added as a query arg :param timeout: Timeout :returns: HTTP response
def get_editorTab(self, editor):
    """Return the **Script_Editor_tabWidget** tab index associated with
    the given editor, or None when no tab matches.

    :param editor: Editor to search the tab for.
    :type editor: Editor
    :return: Tab index.
    """
    for index in range(self.Script_Editor_tabWidget.count()):
        widget = self.get_widget(index)
        if widget == editor:
            # NOTE(review): format string lost its placeholders during
            # extraction upstream; reproduced verbatim.
            LOGGER.debug("> Editor : Tab index .".format(editor, index))
            return index
Returns the **Script_Editor_tabWidget** Widget tab index associated with the given editor. :param editor: Editor to search tab for. :type editor: Editor :return: Tab index. :rtype: int
def gaussian(x, mu, sigma):
    """Normalized Gaussian function
    :math:`\\frac{1}{\\sqrt{2 \\pi}\\sigma} e^{-\\frac{(x-\\mu)^2}{2\\sigma^2}}`.

    Parameters
    ----------
    x : float
        Function variable :math:`x`.
    mu : float
        Mean of the Gaussian function.
    sigma : float
        Standard deviation of the Gaussian function.
    """
    exponent = -(x - mu) ** 2 / (2 * sigma ** 2)
    normalization = _np.sqrt(2 * _np.pi) * sigma
    return _np.exp(exponent) / normalization
Gaussian function of the form :math:`\\frac{1}{\\sqrt{2 \\pi}\\sigma} e^{-\\frac{(x-\\mu)^2}{2\\sigma^2}}`. .. versionadded:: 1.5 Parameters ---------- x : float Function variable :math:`x`. mu : float Mean of the Gaussian function. sigma : float Standard deviation of the Gaussian function.
def _replicate(n, tensor):
    """Replicate ``tensor`` n times along a new major (leading) dimension."""
    expanded = tf.expand_dims(tensor, axis=0)
    # Tile n times along the new axis and once along each original axis.
    multiples = tf.concat([[n], tf.ones_like(tensor.shape)], axis=0)
    return tf.tile(expanded, multiples)
Replicate the input tensor n times along a new (major) dimension.
# Determine the document type of an (office) file: sniff the encoding,
# parse the content with BeautifulSoup, match declared names against
# MIMEMAP, and configure filepattern / namespaces / capture selectors
# from DOC_PARAMS for the detected type.  NOTE(review): the soup
# ``find_all``/``attrs.get`` arguments, the ``startswith`` format string
# and the DOC_PARAMS keys were all stripped during extraction; also
# ``encoding = b.seek(0)`` looks like a mangled statement -- restore
# from upstream.
def determine_file_type(self, z): content = z.read() with io.BytesIO(content) as b: encoding = self._analyze_file(b) if encoding is None: encoding = b.seek(0) text = b.read().decode(encoding) soup = bs4.BeautifulSoup(text, ) for o in soup.find_all(): name = o.attrs.get() for k, v in MIMEMAP.items(): if name.startswith(.format(k)): self.type = v break if self.type: break self.filepattern = DOC_PARAMS[self.type][] self.namespaces = DOC_PARAMS[self.type][] self.captures = sv.compile(DOC_PARAMS[self.type][], DOC_PARAMS[self.type][])
Determine file type.
def write_table(self, table):
    """Send DDL to create the specified `table`.

    :Parameters:
        - `table`: an instance of a
          :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table`
          object that represents the table to read/write.

    Returns None
    """
    table_sql, serial_key_sql = super(PostgresDbWriter, self).write_table(table)
    # Serial-key statements must run before the table DDL itself.
    for statement in serial_key_sql + table_sql:
        self.execute(statement)
Send DDL to create the specified `table` :Parameters: - `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write. Returns None
# Configure the pull_base_image plugin: forward the parent image digests
# from the user params into the plugin arguments, when present.
# NOTE(review): the phase name, plugin name and argument-name literals
# were stripped during extraction (``phase = plugin =`` and
# ``set_plugin_arg(phase, plugin, , ...)``) -- restore from upstream.
def render_pull_base_image(self): phase = plugin = if self.user_params.parent_images_digests.value: self.pt.set_plugin_arg(phase, plugin, , self.user_params.parent_images_digests.value)
Configure pull_base_image
# Generate a 2-D "pinwheel" synthetic dataset: Gaussian features per
# class, rotated by a class-dependent, radius-dependent angle.
# Based on code by Ryan P. Adams.  NOTE(review): the ``np.einsum``
# subscript string was stripped during extraction (likely something like
# 'ti,tij->tj') -- restore from upstream before use.
def make_pinwheel(radial_std, tangential_std, num_classes, num_per_class, rate, rs=npr.RandomState(0)): rads = np.linspace(0, 2*np.pi, num_classes, endpoint=False) features = rs.randn(num_classes*num_per_class, 2) \ * np.array([radial_std, tangential_std]) features[:, 0] += 1 labels = np.repeat(np.arange(num_classes), num_per_class) angles = rads[labels] + rate * np.exp(features[:,0]) rotations = np.stack([np.cos(angles), -np.sin(angles), np.sin(angles), np.cos(angles)]) rotations = np.reshape(rotations.T, (-1, 2, 2)) return np.einsum(, features, rotations)
Based on code by Ryan P. Adams.
def fuse_batchnorm_weights(gamma, beta, mean, var, epsilon):
    """Fold batch-norm statistics into a per-channel scale and bias.

    Computes ``scale = gamma / sqrt(var + epsilon)`` and
    ``bias = beta - gamma * mean / sqrt(var + epsilon)`` so that
    ``y = scale * x + bias`` reproduces batch normalization.
    """
    std = np.sqrt(var + epsilon)
    scale = gamma / std
    bias = beta - gamma * mean / std
    return [scale, bias]
float sqrt_var = sqrt(var_data[i]); a_data[i] = bias_data[i] - slope_data[i] * mean_data[i] / sqrt_var; b_data[i] = slope_data[i] / sqrt_var; ... ptr[i] = b * ptr[i] + a;
def iterate_forever(func, *args, **kwargs):
    """Iterate over a finite iterator forever.

    When the iterator produced by ``func(*args, **kwargs)`` is exhausted,
    ``func`` is called again to obtain a fresh one, so iteration never
    stops.  ``prepare_playback()`` is called on every item before it is
    yielded.
    """
    iterator = func(*args, **kwargs)
    while True:
        try:
            playlist_item = next(iterator)
        except StopIteration:
            iterator = func(*args, **kwargs)
            continue
        playlist_item.prepare_playback()
        yield playlist_item
Iterate over a finite iterator forever When the iterator is exhausted will call the function again to generate a new iterator and keep iterating.
def emit(self, event, data=None, room=None, include_self=True,
         namespace=None, callback=None):
    """Emit a custom event to one or more connected clients.

    Falls back to this handler's own namespace when none is given, and
    delegates to the underlying ``socketio`` instance.
    """
    target_namespace = namespace or self.namespace
    return self.socketio.emit(event, data,
                              room=room,
                              include_self=include_self,
                              namespace=target_namespace,
                              callback=callback)
Emit a custom event to one or more connected clients.
def get_storage(self):
    """Get the storage instance, reconnecting lazily on first use.

    :return Redis: Redis instance
    """
    if not self.storage:
        self.storage = self.reconnect_redis()
    return self.storage
Get the storage instance. :return Redis: Redis instance
def __envelope(x, hop):
    """Compute the max-envelope of ``x`` with stride and frame length ``hop``."""
    frames = util.frame(x, hop_length=hop, frame_length=hop)
    return frames.max(axis=0)
Compute the max-envelope of x at a stride/frame length of h
# DevTools protocol binding for CSS.setMediaText: validates that ``text``
# is a str, then issues the synchronous command and returns the
# sub-domain functions.  NOTE(review): the command-name literal passed
# to ``synchronous_command`` was stripped during extraction, and the
# assertion message has lost its ``%s`` placeholder (``"... type: " %
# type(text)`` would itself raise TypeError) -- restore from upstream.
def CSS_setMediaText(self, styleSheetId, range, text): assert isinstance(text, (str,) ), "Argument must be of type str. Received type: " % type( text) subdom_funcs = self.synchronous_command(, styleSheetId= styleSheetId, range=range, text=text) return subdom_funcs
Function path: CSS.setMediaText Domain: CSS Method name: setMediaText Parameters: Required arguments: 'styleSheetId' (type: StyleSheetId) -> No description 'range' (type: SourceRange) -> No description 'text' (type: string) -> No description Returns: 'media' (type: CSSMedia) -> The resulting CSS media rule after modification. Description: Modifies the rule selector.
def batch_iter(data, batch_size, num_epochs):
    """Generate a shuffled batch iterator for a dataset.

    For each epoch the data is reshuffled and yielded in batches of at
    most ``batch_size`` elements.

    Args:
        data: sequence of samples; converted to a numpy array.
        batch_size: maximum number of samples per batch.
        num_epochs: number of passes over the data.

    Yields:
        numpy.ndarray: one non-empty batch of samples.
    """
    data = np.array(data)
    data_size = len(data)
    # Bug fix: the original used ``int(len(data)/batch_size) + 1`` which
    # yields a trailing EMPTY batch whenever data_size is an exact
    # multiple of batch_size.  Use ceiling division instead.
    num_batches_per_epoch = (data_size + batch_size - 1) // batch_size
    for epoch in range(num_epochs):
        # Reshuffle at the start of every epoch.
        shuffle_indices = np.random.permutation(np.arange(data_size))
        shuffled_data = data[shuffle_indices]
        for batch_num in range(num_batches_per_epoch):
            start_index = batch_num * batch_size
            end_index = min((batch_num + 1) * batch_size, data_size)
            yield shuffled_data[start_index:end_index]
Generates a batch iterator for a dataset.
def shift(schedule: ScheduleComponent, time: int, name: str = None) -> Schedule:
    """Return ``schedule`` shifted by ``time``.

    Args:
        schedule: The schedule to shift
        time: The time to shift by
        name: Name of shifted schedule. Defaults to the name of ``schedule``.
    """
    shifted_name = schedule.name if name is None else name
    return union((time, schedule), name=shifted_name)
Return schedule shifted by `time`. Args: schedule: The schedule to shift time: The time to shift by name: Name of shifted schedule. Defaults to name of `schedule`
# Build a dictionary describing a distributed virtual portgroup from its
# managed-object reference: basic properties, optional VLAN id, and
# out-shaping / security-policy / teaming sub-dicts.
# NOTE(review): every property-name literal and dict key was stripped
# during extraction (``props[]``, ``{: ...}``) -- restore from upstream.
def _get_dvportgroup_dict(pg_ref): props = salt.utils.vmware.get_properties_of_managed_object( pg_ref, [, , , , ]) pg_dict = {: props[], : props.get(), : props[], : props[]} if props[]: dpg = props[] if dpg.vlan and \ isinstance(dpg.vlan, vim.VmwareDistributedVirtualSwitchVlanIdSpec): pg_dict.update({: dpg.vlan.vlanId}) pg_dict.update({: _get_dvportgroup_out_shaping( props[], props[])}) pg_dict.update({: _get_dvportgroup_security_policy( props[], props[])}) pg_dict.update({: _get_dvportgroup_teaming( props[], props[])}) return pg_dict
Returns a dictionary with a distributed virutal portgroup data pg_ref Portgroup reference
def open(self):
    """Initialize visit variables for a fresh linting run."""
    self.stats = self.linter.add_stats()
    # Per-run bookkeeping collected while visiting the module.
    self._returns = []
    self._stmts = []
    self._branches = defaultdict(int)
initialize visit variables
# Copy the zip file from its own filename (``self.fn``) to the given
# filename ``fn`` (defaulting to ``self.fn``), creating the destination
# directory if needed.  NOTE(review): the ``open`` mode literals
# (presumably 'rb' and 'wb') were stripped during extraction; the
# explicit open/close pairs would also be safer as ``with`` blocks --
# restore from upstream before changing.
def write(self, fn=None): fn = fn or self.fn if not os.path.exists(os.path.dirname(fn)): os.makedirs(os.path.dirname(fn)) f = open(self.fn, ) b = f.read() f.close() f = open(fn, ) f.write(b) f.close()
copy the zip file from its filename to the given filename.
# Ordered QZ (generalized Schur) decomposition of the pencil (A, B):
# generalized eigenvalues larger than ``crit`` (by |alpha|^2 >
# crit*|beta|^2) are sorted into the top-left block.  Returns
# [S, T, U, V, eigval] with eigval = |diag(S)/diag(T)|.
# NOTE(review): the ``output=`` literal for scipy's ``ordqz`` (likely
# 'real' or 'complex') was stripped during extraction; ``TOL`` is also
# unused as written -- restore from upstream.
def qzordered(A,B,crit=1.0): "Eigenvalues bigger than crit are sorted in the top-left." TOL = 1e-10 def select(alpha, beta): return alpha**2>crit*beta**2 [S,T,alpha,beta,U,V] = ordqz(A,B,output=,sort=select) eigval = abs(numpy.diag(S)/numpy.diag(T)) return [S,T,U,V,eigval]
Eigenvalues bigger than crit are sorted in the top-left.
# Return the names of all locally visible GPU devices, by filtering
# TensorFlow's local device list on device_type.  NOTE(review): the
# comparison literal (presumably 'GPU') was stripped during extraction
# (``x.device_type ==``) -- restore from upstream.
def get_available_gpus(): local_device_protos = device_lib.list_local_devices() return [x.name for x in local_device_protos if x.device_type == ]
Returns a list of string names of all available GPUs
# Return the per-user configuration directory for ``name``: honour the
# ``NAME.upper() + CONFIGDIR`` environment variable first, otherwise
# derive a dotted directory under $HOME (platform dependent), creating
# it when missing.  (Adapted from matplotlib, per the docstring.)
# NOTE(review): the environment-variable format string, the platform
# prefixes and the path-component literals were stripped during
# extraction (``os.environ.get( % name.upper())``,
# ``sys.platform.startswith()``) -- restore from upstream.
def get_configdir(name): configdir = os.environ.get( % name.upper()) if configdir is not None: return os.path.abspath(configdir) p = None h = _get_home() if ((sys.platform.startswith() or sys.platform.startswith()) and h is not None): p = os.path.join(h, + name) elif h is not None: p = os.path.join(h, + name) if not os.path.exists(p): os.makedirs(p) return p
Return the string representing the configuration directory. The directory is chosen as follows: 1. If the ``name.upper() + CONFIGDIR`` environment variable is supplied, choose that. 2a. On Linux, choose `$HOME/.config`. 2b. On other platforms, choose `$HOME/.matplotlib`. 3. If the chosen directory exists, use that as the configuration directory. 4. A directory: return None. Notes ----- This function is taken from the matplotlib [1] module References ---------- [1]: http://matplotlib.org/api/
def exclude_range(self, field, start="*", stop="*", inclusive=True, new_group=False):
    """Exclude a ``field:[some range]`` term from the query.

    Matches will not have any value in the range in the ``field``.
    ``None`` bounds are treated as open (``"*"``); when both bounds are
    open the exclusion degenerates to "field exists".

    Arguments:
        field (str): Namespaced field to check (e.g. ``"mdf.source_name"``).
        start (str or int): Lower bound, or ``None`` for no lower bound.
        stop (str or int): Upper bound, or ``None`` for no upper bound.
        inclusive (bool): Whether the bounds themselves are excluded.
        new_group (bool): Whether to put the term in a new parenthetical
            group.

    Returns:
        SearchHelper: Self
    """
    start = "*" if start is None else start
    stop = "*" if stop is None else stop
    if start == "*" and stop == "*":
        return self.match_not_exists(field, new_group=new_group)
    open_char, close_char = ("[", "]") if inclusive else ("{", "}")
    value = open_char + str(start) + " TO " + str(stop) + close_char
    return self.exclude_field(field, value, new_group=new_group)
Exclude a ``field:[some range]`` term from the query. Matches will not have any ``value`` in the range in the ``field``. Arguments: field (str): The field to check for the value. The field must be namespaced according to Elasticsearch rules using the dot syntax. For example, ``"mdf.source_name"`` is the ``source_name`` field of the ``mdf`` dictionary. start (str or int): The starting value, or ``None`` for no lower bound. **Default:** ``None``. stop (str or int): The ending value, or ``None`` for no upper bound. **Default:** ``None``. inclusive (bool): If ``True``, the ``start`` and ``stop`` values will be excluded from the search. If ``False``, the ``start`` and ``stop`` values will not be excluded from the search. **Default:** ``True``. new_group (bool): If ``True``, will separate the term into a new parenthetical group. If ``False``, will not. **Default:** ``False``. Returns: SearchHelper: Self
# Create the resulting openquake.hmtk fault source set: one source per
# MFD model, as an mtkComplexFaultSource or mtkSimpleFaultSource
# depending on the geometry type, each with its evenly discretized MFD.
# Returns (source_model, model_weight) where model_weight holds the
# per-model weights from self.mfd[1].  Left byte-identical: the long
# positional constructor argument lists for the project source classes
# make a restyle too risky to verify from this view.
def generate_fault_source_model(self): source_model = [] model_weight = [] for iloc in range(0, self.get_number_mfd_models()): model_mfd = EvenlyDiscretizedMFD( self.mfd[0][iloc].min_mag, self.mfd[0][iloc].bin_width, self.mfd[0][iloc].occur_rates.tolist()) if isinstance(self.geometry, ComplexFaultGeometry): source = mtkComplexFaultSource( self.id, self.name, self.trt, self.geometry.surface, self.mfd[2][iloc], self.rupt_aspect_ratio, model_mfd, self.rake) source.fault_edges = self.geometry.trace else: source = mtkSimpleFaultSource( self.id, self.name, self.trt, self.geometry.surface, self.geometry.dip, self.geometry.upper_depth, self.geometry.lower_depth, self.mfd[2][iloc], self.rupt_aspect_ratio, model_mfd, self.rake) source.fault_trace = self.geometry.trace source_model.append(source) model_weight.append(self.mfd[1][iloc]) return source_model, model_weight
Creates a resulting `openquake.hmtk` fault source set. :returns: source_model - list of instances of either the :class: `openquake.hmtk.sources.simple_fault_source.mtkSimpleFaultSource` or :class: `openquake.hmtk.sources.complex_fault_source.mtkComplexFaultSource` model_weight - Corresponding weights for each source model
# Check a variable's flag_meanings attribute for CF compliance:
# flag_meanings must exist, be a string, and contain valid meaning
# tokens; returns a compliance_checker Result.
# NOTE(review): this line is severely garbled -- the function's
# docstring and several statements were fused together during
# extraction and most string literals are missing; it is not valid
# Python as written and must be restored from the upstream
# compliance-checker source.
def _check_flag_meanings(self, ds, name): s flag_meanings attribute for compliance under CF - flag_meanings exists - flag_meanings is a string - flag_meanings elements are valid strings :param netCDF4.Dataset ds: An open netCDF dataset :param str name: Variable name :rtype: compliance_checker.base.Result flag_meanings3.5s flag_meanings attribute is required for flag variables".format(name)) valid_meanings.assert_true(isinstance(flag_meanings, basestring), "{}t perform any additional checks if its flag_meanings cans flag_meanings attribute defined an illegal flag meaning ".format(name)+\ "{}".format(meaning)) return valid_meanings.to_result()
Check a variable's flag_meanings attribute for compliance under CF - flag_meanings exists - flag_meanings is a string - flag_meanings elements are valid strings :param netCDF4.Dataset ds: An open netCDF dataset :param str name: Variable name :rtype: compliance_checker.base.Result
# Stephan-Abdelsalam nucleate-boiling heat transfer coefficient, with
# five correlation variants (general / water / hydrocarbon / cryogenic /
# refrigerant); either excess temperature Te or heat flux q is required.
# NOTE(review): the ``correlation`` default value, the exception
# message, the ``_angles_Stephan_Abdelsalam`` lookup key and every
# ``if correlation == :`` comparison literal were stripped during
# extraction (the run-together token
# ``rgeneralwaterhydrocarboncryogenicrefrigerant`` is residue of those
# strings) -- restore from the upstream ht library before use.
def Stephan_Abdelsalam(rhol, rhog, mul, kl, Cpl, Hvap, sigma, Tsat, Te=None, q=None, kw=401, rhow=8.96, Cpw=384, angle=None, correlation=): rgeneralwaterhydrocarboncryogenicrefrigerant if Te is None and q is None: raise Exception() angle = _angles_Stephan_Abdelsalam[correlation] db = 0.0146*angle*(2*sigma/g/(rhol-rhog))**0.5 diffusivity_L = kl/rhol/Cpl if Te: X1 = db/kl/Tsat*Te else: X1 = db/kl/Tsat*q X2 = diffusivity_L**2*rhol/sigma/db X3 = Hvap*db**2/diffusivity_L**2 X4 = Hvap*db**2/diffusivity_L**2 X5 = rhog/rhol X6 = Cpl*mul/kl X7 = rhow*Cpw*kw/(rhol*Cpl*kl) X8 = (rhol-rhog)/rhol if correlation == : if Te: h = (0.23*X1**0.674*X2**0.35*X3**0.371*X5**0.297*X8**-1.73*kl/db)**(1/0.326) else: h = (0.23*X1**0.674*X2**0.35*X3**0.371*X5**0.297*X8**-1.73*kl/db) elif correlation == : if Te: h = (0.246E7*X1**0.673*X4**-1.58*X3**1.26*X8**5.22*kl/db)**(1/0.327) else: h = (0.246E7*X1**0.673*X4**-1.58*X3**1.26*X8**5.22*kl/db) elif correlation == : if Te: h = (0.0546*X5**0.335*X1**0.67*X8**-4.33*X4**0.248*kl/db)**(1/0.33) else: h = (0.0546*X5**0.335*X1**0.67*X8**-4.33*X4**0.248*kl/db) elif correlation == : if Te: h = (4.82*X1**0.624*X7**0.117*X3**0.374*X4**-0.329*X5**0.257*kl/db)**(1/0.376) else: h = (4.82*X1**0.624*X7**0.117*X3**0.374*X4**-0.329*X5**0.257*kl/db) else: if Te: h = (207*X1**0.745*X5**0.581*X6**0.533*kl/db)**(1/0.255) else: h = (207*X1**0.745*X5**0.581*X6**0.533*kl/db) return h
r'''Calculates heat transfer coefficient for a evaporator operating in the nucleate boiling regime according to [2]_ as presented in [1]_. Five variants are possible. Either heat flux or excess temperature is required. The forms for `Te` are not shown here, but are similar to those of the other functions. .. math:: h = 0.23X_1^{0.674} X_2^{0.35} X_3^{0.371} X_5^{0.297} X_8^{-1.73} k_L/d_B X1 = \frac{q D_d}{K_L T_{sat}} X2 = \frac{\alpha^2 \rho_L}{\sigma D_d} X3 = \frac{C_{p,L} T_{sat} D_d^2}{\alpha^2} X4 = \frac{H_{vap} D_d^2}{\alpha^2} X5 = \frac{\rho_V}{\rho_L} X6 = \frac{C_{p,l} \mu_L}{k_L} X7 = \frac{\rho_W C_{p,W} k_W}{\rho_L C_{p,L} k_L} X8 = \frac{\rho_L-\rho_V}{\rho_L} D_b = 0.0146\theta\sqrt{\frac{2\sigma}{g(\rho_L-\rho_g)}} Respectively, the following four correlations are for water, hydrocarbons, cryogenic fluids, and refrigerants. .. math:: h = 0.246\times 10^7 X1^{0.673} X4^{-1.58} X3^{1.26}X8^{5.22}k_L/d_B h = 0.0546 X5^{0.335} X1^{0.67} X8^{-4.33} X4^{0.248}k_L/d_B h = 4.82 X1^{0.624} X7^{0.117} X3^{0.374} X4^{-0.329}X5^{0.257} k_L/d_B h = 207 X1^{0.745} X5^{0.581} X6^{0.533} k_L/d_B Parameters ---------- rhol : float Density of the liquid [kg/m^3] rhog : float Density of the produced gas [kg/m^3] mul : float Viscosity of liquid [Pa*s] kl : float Thermal conductivity of liquid [W/m/K] Cpl : float Heat capacity of liquid [J/kg/K] Hvap : float Heat of vaporization of the fluid at P, [J/kg] sigma : float Surface tension of liquid [N/m] Tsat : float Saturation temperature at operating pressure [Pa] Te : float, optional Excess wall temperature, [K] q : float, optional Heat flux, [W/m^2] kw : float, optional Thermal conductivity of wall (only for cryogenics) [W/m/K] rhow : float, optional Density of the wall (only for cryogenics) [kg/m^3] Cpw : float, optional Heat capacity of wall (only for cryogenics) [J/kg/K] angle : float, optional Contact angle of bubble with wall [degrees] correlation : str, optional Any of 'general', 'water', 'hydrocarbon', 
'cryogenic', or 'refrigerant' Returns ------- h : float Heat transfer coefficient [W/m^2/K] Notes ----- If cryogenic correlation is selected, metal properties are used. Default values are the properties of copper at STP. The angle is selected automatically if a correlation is selected; if angle is provided anyway, the automatic selection is ignored. A IndexError exception is raised if the correlation is not in the dictionary _angles_Stephan_Abdelsalam. Examples -------- Example is from [3]_ and matches. >>> Stephan_Abdelsalam(Te=16.2, Tsat=437.5, Cpl=2730., kl=0.086, mul=156E-6, ... sigma=0.0082, Hvap=272E3, rhol=567, rhog=18.09, angle=35) 26722.441071108373 References ---------- .. [1] Cao, Eduardo. Heat Transfer in Process Engineering. McGraw Hill Professional, 2009. .. [2] Stephan, K., and M. Abdelsalam. "Heat-Transfer Correlations for Natural Convection Boiling." International Journal of Heat and Mass Transfer 23, no. 1 (January 1980): 73-87. doi:10.1016/0017-9310(80)90140-4. .. [3] Serth, R. W., Process Heat Transfer: Principles, Applications and Rules of Thumb. 2E. Amsterdam: Academic Press, 2014.
def get_template_names(self):
    """Return the template name to use for this request.

    AJAX requests get ``ajax_template_name``; all other requests get
    ``template_name``.
    """
    return (self.ajax_template_name
            if self.request.is_ajax()
            else self.template_name)
Returns the template name to use for this request.
def rhsm_register(self, rhsm):
    """Register the host on the RHSM (Red Hat Subscription Management).

    :param rhsm: a dict of parameters (login, password, pool_id)

    NOTE(review): string literals appear to have been stripped from this
    block (the keys passed to ``rhsm.get()`` and the shell commands passed
    to ``self.run()``); restore them from upstream before use.
    """
    login = rhsm.get()
    # Password presumably falls back to an environment variable -- the
    # stripped arguments hide which one; confirm against upstream.
    password = rhsm.get(, os.environ.get())
    pool_id = rhsm.get()
    # Best-effort unregister first; failure is fine if not yet registered.
    self.run(, ignore_error=True)
    custom_log =  % login
    # success_status accepts 64 in addition to 0 -- presumably the
    # "already registered" exit code; retry rides out transient failures.
    self.run( % (
        login, password),
        success_status=(0, 64),
        custom_log=custom_log,
        retry=3)
    if pool_id:
        self.run( % pool_id)
    else:
        self.run()
    # Remember registration so teardown can unregister the host.
    self.rhsm_active = True
Register the host on the RHSM. :param rhsm: a dict of parameters (login, password, pool_id)
def diff(s1, s2):
    """--word-diff=porcelain clone: word-level diff of two strings.

    NOTE(review): several string literals were stripped from this block
    (the empty-string initializers, the characters compared against
    ``line[0]``, and the join separator); restore them before use.
    """
    # Compare word lists, not characters.
    delta = difflib.Differ().compare(s1.split(), s2.split())
    difflist = []
    fullline =
    for line in delta:
        # Skip the "?" hint lines emitted by Differ -- presumably; the
        # compared literal was stripped.
        if line[0] == :
            continue
        elif line[0] == :
            # Accumulate unchanged words onto one line.
            fullline += line.strip() +
        else:
            if fullline:
                # Flush the accumulated run (dropping the trailing separator).
                difflist.append(fullline[:-1])
                fullline =
            difflist.append(line)
    if fullline:
        difflist.append(fullline[:-1])
    # Strip the two-character prefix from each non-empty output line.
    return [l[:] for l in .join(difflist).splitlines() if l]
--word-diff=porcelain clone
def sort_values(
    self,
    by,
    axis=0,
    ascending=True,
    inplace=False,
    kind="quicksort",
    na_position="last",
):
    """Sort by a column/row or list of columns/rows.

    Args:
        by: A label or list of labels for the axis to sort over.
        axis: The axis to sort (0 sorts rows by column values, otherwise
            columns are sorted by row values).
        ascending: Sort in ascending or descending order.
        inplace: If true, do the operation inplace -- here this only
            controls the ``copy`` flag passed to ``reindex``.
        kind: How to sort.
        na_position: Where to put np.nan values.

    Returns:
        A sorted DataFrame.
    """
    axis = self._get_axis_number(axis)
    if not is_list_like(by):
        by = [by]
    # Strategy: materialize only the requested columns/rows as a plain
    # pandas frame, let pandas do the sort, then reindex self by the
    # resulting order.
    if axis == 0:
        broadcast_value_dict = {col: self[col] for col in by}
        broadcast_values = pandas.DataFrame(broadcast_value_dict, index=self.index)
        new_index = broadcast_values.sort_values(
            by=by,
            axis=axis,
            ascending=ascending,
            kind=kind,
            na_position=na_position,
        ).index
        return self.reindex(index=new_index, copy=not inplace)
    else:
        # Pull each requested row out of the frame as pandas objects.
        # NOTE(review): the stride-based row selection assumes a
        # particular partition layout -- confirm against the caller.
        broadcast_value_list = [
            self[row :: len(self.index)]._to_pandas() for row in by
        ]
        index_builder = list(zip(broadcast_value_list, by))
        broadcast_values = pandas.concat(
            [row for row, idx in index_builder], copy=False
        )
        broadcast_values.columns = self.columns
        new_columns = broadcast_values.sort_values(
            by=by,
            axis=axis,
            ascending=ascending,
            kind=kind,
            na_position=na_position,
        ).columns
        return self.reindex(columns=new_columns, copy=not inplace)
Sorts by a column/row or list of columns/rows. Args: by: A list of labels for the axis to sort over. axis: The axis to sort. ascending: Sort in ascending or descending order. inplace: If true, do the operation inplace. kind: How to sort. na_position: Where to put np.nan values. Returns: A sorted DataFrame.
def get_json(self, prettyprint=False, translate=True):
    """Get the data in JSON form.

    NOTE(review): the ``separators=(,)`` argument lost its string
    literals; restore (probably ``(',', ': ')``) before use.
    """
    j = []
    if translate:
        d = self.get_translated_data()
    else:
        d = self.data
    # Serialize the dict's values as a JSON array (keys are dropped).
    for k in d:
        j.append(d[k])
    if prettyprint:
        j = json.dumps(j, indent=2, separators=(,))
    else:
        j = json.dumps(j)
    return j
Get the data in JSON form
def MakeRequest(http, http_request, retries=7, max_retry_wait=60,
                redirections=5,
                retry_func=HandleExceptionsAndRebuildHttpConnections,
                check_response_func=CheckResponse):
    """Send http_request via the given http, performing error/retry handling.

    Args:
      http: An httplib2.Http instance, or an http multiplexer that delegates
          to an underlying http.
      http_request: A Request to send.
      retries: (int, default 7) Number of attempts before giving up.
      max_retry_wait: (int, default 60) Maximum seconds to wait between
          retries.
      redirections: (int, default 5) Number of redirects to follow.
      retry_func: Called with an ExceptionRetryArgs tuple on each retryable
          failure.
      check_response_func: Function used to validate the HTTP response.

    Returns:
      A Response object.
    """
    attempt = 0
    start_time = time.time()
    while True:
        try:
            return _MakeRequestNoRetry(
                http, http_request, redirections=redirections,
                check_response_func=check_response_func)
        except Exception as exc:
            attempt += 1
            # Out of attempts: surface the last failure to the caller.
            if attempt >= retries:
                raise
            elapsed = time.time() - start_time
            retry_func(ExceptionRetryArgs(http, http_request, exc, attempt,
                                          max_retry_wait, elapsed))
Send http_request via the given http, performing error/retry handling. Args: http: An httplib2.Http instance, or a http multiplexer that delegates to an underlying http, for example, HTTPMultiplexer. http_request: A Request to send. retries: (int, default 7) Number of retries to attempt on retryable replies (such as 429 or 5XX). max_retry_wait: (int, default 60) Maximum number of seconds to wait when retrying. redirections: (int, default 5) Number of redirects to follow. retry_func: Function to handle retries on exceptions. Argument is an ExceptionRetryArgs tuple. check_response_func: Function to validate the HTTP response. Arguments are (Response, response content, url). Raises: InvalidDataFromServerError: if there is no response after retries. Returns: A Response object.
def makeAB(self):
    """Munge the A and B reads into a single serial block.

    Copies every field from ``m_blk_a`` then ``m_blk_b`` into ``m_req``,
    skipping reserved and CRC fields; fields present in both blocks keep
    the B value.
    """
    def _wanted(field_name):
        # Case-insensitive filter for bookkeeping-only fields.
        upper = field_name.upper()
        return "RESERVED" not in upper and "CRC" not in upper

    for block in (self.m_blk_a, self.m_blk_b):
        for field_name in block:
            if _wanted(field_name):
                self.m_req[field_name] = block[field_name]
Munge A and B reads into single serial block with only unique fields.
def _get_album_or_image(json, imgur):
    """Return a gallery image/album depending on what the json represents.

    NOTE(review): the key passed to ``json[]`` was stripped (presumably
    ``'is_album'``); restore before use.
    """
    if json[]:
        return Gallery_album(json, imgur, has_fetched=False)
    return Gallery_image(json, imgur)
Return a gallery image/album depending on what the json represent.
def send_at_position(self, what, useSelection, where="range"):
    """Ask the server to perform an operation on a range (sometimes named
    point).

    ``what`` is used as the prefix for the typehint.  If ``useSelection``
    is False the range is the word under the cursor; otherwise the current
    selection is used.  ``where`` names the property holding the range in
    the request ('range' by default, sometimes 'point').

    NOTE(review): the log message literals were stripped (e.g. the string
    before ``.format(...)``); restore before use.
    """
    self.log.debug()
    # Pick the source of the (begin, end) editor positions.
    b, e = self.editor.selection_pos() if useSelection else self.editor.word_under_cursor_pos()
    self.log.debug(.format(useSelection, b, e))
    # Convert (row, col) pairs into server offsets.
    beg = self.get_position(b[0], b[1])
    end = self.get_position(e[0], e[1])
    self.send_request(
        {"typehint": what + "AtPointReq",
         "file": self.editor.path(),
         where: {"from": beg, "to": end}})
Ask the server to perform an operation on a range (sometimes named point) `what` is used as the prefix for the typehint. If `useSelection` is `False` the range is calculated based on the word under de cursor. Current selection start and end is used as the range otherwise. `where` defines the name of the property holding the range info within the request. Default value is 'range' but 'point' is sometimes used
def xy2rd(input,x=None,y=None,coords=None, coordfile=None,colnames=None,separator=None,
          hms=True, precision=6,output=None,verbose=True):
    """Primary interface to perform coordinate transformations from pixel
    to sky coordinates using STWCS and full distortion models read from
    the input image header.

    NOTE(review): several string literals were stripped from this block
    (warnings filter actions, default column names, print/format strings,
    file open mode); restore them from upstream drizzlepac before use.
    """
    single_coord = False
    # Backwards compatibility: `coords` was renamed to `coordfile`.
    if coords is not None and coordfile is None:
        coordfile = coords
        warnings.simplefilter(,DeprecationWarning)
        warnings.warn("Please update calling code to pass in `coordfile` instead of `coords`.", category=DeprecationWarning)
        warnings.simplefilter(,DeprecationWarning)
    if coordfile is not None:
        if colnames in blank_list:
            colnames = [,]
        # Read the x/y input positions from the named columns of the file.
        cols = util.parse_colnames(colnames,coordfile)
        xyvals = np.loadtxt(coordfile,usecols=cols,delimiter=separator)
        if xyvals.ndim == 1:
            # Single coordinate pair in the file.
            xlist = [xyvals[0].copy()]
            ylist = [xyvals[1].copy()]
        else:
            xlist = xyvals[:,0].copy()
            ylist = xyvals[:,1].copy()
        del xyvals
    else:
        if isinstance(x, np.ndarray):
            xlist = x.tolist()
            ylist = y.tolist()
        elif not isinstance(x,list):
            # Scalar input: return scalars at the end.
            xlist = [x]
            ylist = [y]
            single_coord = True
        else:
            xlist = x
            ylist = y
    # Build the full distortion-aware WCS from the image header.
    inwcs = wcsutil.HSTWCS(input)
    if inwcs.wcs.is_unity():
        print("
    dra,ddec = inwcs.all_pix2world(xlist,ylist,1)
    if hms:
        ra,dec = wcs_functions.ddtohms(dra,ddec,precision=precision)
        rastr = ra
        decstr = dec
    else:
        rastr = []
        decstr = []
        fmt = "%."+repr(precision)+"f"
        for r,d in zip(dra,ddec):
            rastr.append(fmt%r)
            decstr.append(fmt%d)
        ra = dra
        dec = ddec
    if verbose or (not verbose and util.is_blank(output)):
        print(,input)
        print()
        for x,y,r,d in zip(xlist,ylist,rastr,decstr):
            print("%.4f %.4f %s %s"%(x,y,r,d))
    # Optionally write results out to a file.
    if output:
        f = open(output,mode=)
        f.write("
        for r,d in zip(rastr,decstr):
            f.write(%(r,d))
        f.close()
        print(,output)
    if single_coord:
        ra = ra[0]
        dec = dec[0]
    return ra,dec
Primary interface to perform coordinate transformations from pixel to sky coordinates using STWCS and full distortion models read from the input image header.
def libvlc_media_add_option(p_md, psz_options):
    # Add an option to the media; used to determine how the media_player
    # will read the media.
    # NOTE(review): this block is corrupted -- most of the original
    # docstring has been fused into the code and the statement that builds
    # ``f`` lost its leading ``f = _Cfunction('...`` fragment.  Restore
    # from the upstream python-vlc bindings before use.
    s advanced reading/streaming options on a per-media basis. @note: The options are listed in from the command line, e.g. "-sout-all". Keep in mind that available options and their semantics vary across LibVLC versions and builds. @warning: Not all options affects L{Media} objects: Specifically, due to architectural issues most audio and video options, such as text renderer options, have no effects on an individual media. These options must be set through L{libvlc_new}() instead. @param p_md: the media descriptor. @param psz_options: the options (as a string). libvlc_media_add_optionlibvlc_media_add_option', ((1,), (1,),), None, None, Media, ctypes.c_char_p)
    return f(p_md, psz_options)
Add an option to the media. This option will be used to determine how the media_player will read the media. This allows to use VLC's advanced reading/streaming options on a per-media basis. @note: The options are listed in 'vlc --long-help' from the command line, e.g. "-sout-all". Keep in mind that available options and their semantics vary across LibVLC versions and builds. @warning: Not all options affects L{Media} objects: Specifically, due to architectural issues most audio and video options, such as text renderer options, have no effects on an individual media. These options must be set through L{libvlc_new}() instead. @param p_md: the media descriptor. @param psz_options: the options (as a string).
def rename(self):
    """Rename the media file to its formatted name (optionally relocating
    it, when ``move_files_enabled`` is set).

    NOTE(review): the log message literals were stripped from the
    ``LOG.debug`` calls; restore before use.
    """
    renamer.execute(self.original, self.out_location)
    if cfg.CONF.move_files_enabled:
        LOG.debug(, self)
    else:
        LOG.debug(, self)
Renames media file to formatted name. After parsing data from initial media filename and searching for additional data to using a data service, a formatted filename will be generated and the media file will be renamed to the generated name and optionally relocated.
def getHourTable(date, pos):
    """Return an HourTable object for the given date and position."""
    raw_table = hourTable(date, pos)
    return HourTable(raw_table, date)
Returns an HourTable object.
def fetch_list_members(list_url):
    """Get all members of the Twitter list at the given URL,
    e.g. https://twitter.com/lore77/lists/libri-cultura-education.

    NOTE(review): the regex literal (``re.match(r, ...)``) and the print
    format strings were stripped; restore before use.
    """
    # Extract (screen_name, slug) from the list URL.
    match = re.match(r, list_url)
    if not match:
        print( % list_url)
        return []
    screen_name, slug = match.groups()
    print( % (screen_name, slug))
    return twutil.collect.list_members(slug, screen_name)
Get all members of the list specified by the given url. E.g., https://twitter.com/lore77/lists/libri-cultura-education
def get_output(self, buildroot_id):
    """Build the 'output' section of the metadata.

    :return: list, Output instances

    NOTE(review): this block appears mangled -- dictionary keys passed to
    ``metadata.update`` were stripped, ``operator_manifests_output`` is
    referenced but never defined here, and ``image_id``/``parent_id``/
    ``config`` are computed but unused (the code that consumed them is
    presumably missing).  Restore from upstream before use.
    """
    def add_buildroot_id(output):
        # Attach the buildroot id to an (logfile, metadata) pair.
        logfile, metadata = output
        metadata.update({: buildroot_id})
        return Output(file=logfile, metadata=metadata)

    def add_log_type(output, arch):
        logfile, metadata = output
        metadata.update({: , : arch})
        return Output(file=logfile, metadata=metadata)

    arch = os.uname()[4]
    output_files = [add_log_type(add_buildroot_id(metadata), arch)
                    for metadata in self.get_logs()]
    image_id = self.workflow.builder.image_id
    parent_id = None
    if not self.workflow.builder.base_from_scratch:
        parent_id = self.workflow.builder.base_image_inspect[]
    registries = self.workflow.push_conf.docker_registries
    if registries:
        config = copy.deepcopy(registries[0].config)
    else:
        config = {}
    operator_manifests = add_buildroot_id(operator_manifests_output)
    output_files.append(operator_manifests)
    return output_files
Build the 'output' section of the metadata. :return: list, Output instances
def _convert_to_indexer(self, obj, axis=None, is_setter=False,
                        raise_missing=False):
    """Convert an indexing key into something usable for fancy indexing
    on an ndarray.

    Examples:
        ix[:5]                 -> slice(0, 5)
        ix[[1,2,3]]            -> [1,2,3]
        ix[['foo','bar','baz']]-> [i, j, k] (locations of those labels)

    NOTE(review): several dict keys/compared literals were stripped from
    this block (the ``{: obj}`` returns, ``self.name == :``, and the
    keyword name in ``kwargs``); restore from upstream pandas before use.
    """
    if axis is None:
        axis = self.axis or 0
    labels = self.obj._get_axis(axis)
    if isinstance(obj, slice):
        return self._convert_slice_indexer(obj, axis)
    try:
        obj = self._convert_scalar_indexer(obj, axis)
    except TypeError:
        # Conversion failure is tolerated for setters.
        if is_setter:
            pass
    # Decide whether the key should be treated positionally.
    is_int_index = labels.is_integer()
    is_int_positional = is_integer(obj) and not is_int_index
    try:
        return labels.get_loc(obj)
    except LookupError:
        if isinstance(obj, tuple) and isinstance(labels, MultiIndex):
            # Setting a full-depth tuple key enlarges the MultiIndex.
            if is_setter and len(obj) == labels.nlevels:
                return {: obj}
            raise
    except TypeError:
        pass
    except (ValueError):
        if not is_int_positional:
            raise
    if is_int_positional:
        if is_setter:
            if self.name == :
                return {: obj}
            # Positional enlargement is not allowed.
            if (obj >= self.obj.shape[axis] and
                    not isinstance(labels, MultiIndex)):
                raise ValueError("cannot set by positional indexing with "
                                 "enlargement")
        return obj
    if is_nested_tuple(obj, labels):
        return labels.get_locs(obj)
    elif is_list_like_indexer(obj):
        if com.is_bool_indexer(obj):
            obj = check_bool_indexer(labels, obj)
            inds, = obj.nonzero()
            return inds
        else:
            kwargs = {: True if is_setter else raise_missing}
            return self._get_listlike_indexer(obj, axis, **kwargs)[1]
    else:
        try:
            return labels.get_loc(obj)
        except LookupError:
            # Allow setting of missing keys (enlargement by label).
            if not is_list_like_indexer(obj) and is_setter:
                return {: obj}
            raise
Convert indexing key into something we can use to do actual fancy indexing on an ndarray Examples ix[:5] -> slice(0, 5) ix[[1,2,3]] -> [1,2,3] ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz) Going by Zen of Python? 'In the face of ambiguity, refuse the temptation to guess.' raise AmbiguousIndexError with integer labels? - No, prefer label-based indexing
def sign(self, tx=None, wifs=None):
    """Sign a provided transaction with the provided key(s).

    :param dict tx: The transaction to be signed and returned.  If not
        given, the instance's current ``txbuffer`` is signed instead.
    :param list wifs: One or many wif keys to use for signing a
        transaction.  If not present, the keys will be loaded from the
        wallet as defined in the "missing_signatures" key of the
        transaction.
    :return: the signed transaction in ``json()`` form.
    """
    # A mutable default argument ([]) would be shared across calls;
    # normalize the sentinel here instead.
    if wifs is None:
        wifs = []
    if tx:
        txbuffer = self.transactionbuilder_class(tx, blockchain_instance=self)
    else:
        txbuffer = self.txbuffer
    txbuffer.appendWif(wifs)
    txbuffer.appendMissingSignatures()
    txbuffer.sign()
    return txbuffer.json()
Sign a provided transaction with the provided key(s)

        :param dict tx: The transaction to be signed and returned
        :param string wifs: One or many wif keys to use for signing
            a transaction. If not present, the keys will be loaded
            from the wallet as defined in "missing_signatures" key
            of the transactions.
def normalize(self, stats:Collection[Tensor]=None, do_x:bool=True, do_y:bool=False)->None:
    "Add normalize transform using `stats` (defaults to `DataBunch.batch_stats`)"
    # NOTE(review): the attribute name in ``getattr(self,,False)`` and the
    # Exception message were stripped (presumably guarding against double
    # normalization); restore before use.
    if getattr(self,,False): raise Exception()
    # Fall back to batch statistics when no explicit stats are given.
    if stats is None: self.stats = self.batch_stats()
    else:             self.stats = stats
    self.norm,self.denorm = normalize_funcs(*self.stats, do_x=do_x, do_y=do_y)
    self.add_tfm(self.norm)
    # Returns self for chaining (despite the -> None annotation).
    return self
Add normalize transform using `stats` (defaults to `DataBunch.batch_stats`)
def parse_arguments(argv):
    """Parse command line arguments.

    Args:
        argv: list of command line arguments, including program name.

    Returns:
        An argparse Namespace object.

    NOTE(review): the argument flags, help strings and parser description
    were stripped from this block; restore before use.
    """
    parser = argparse.ArgumentParser(
        description=)
    parser.add_argument(, type=str, required=True, help=)
    parser.add_argument(, type=str, required=True, help=)
    parser.add_argument(, type=str, required=True, help=())
    # Skip argv[0] (the program name).
    args = parser.parse_args(args=argv[1:])
    # Make sure the output location exists before returning.
    file_io.recursive_create_dir(args.output_dir)
    return args
Parse command line arguments. Args: argv: list of command line arguments, includeing programe name. Returns: An argparse Namespace object.
def init_app(self, app=None, blueprint=None, additional_blueprints=None):
    """Update the flask application with our api.

    :param Application app: a flask application

    NOTE(review): the dict keys used to unpack each ``resource`` and the
    config key passed to ``setdefault`` were stripped; restore before use.
    """
    if app is not None:
        self.app = app
    if blueprint is not None:
        self.blueprint = blueprint
    # Register every collected resource route on the app/blueprint.
    for resource in self.resources:
        self.route(resource[], resource[], *resource[], url_rule_options=resource[])
    if self.blueprint is not None:
        self.app.register_blueprint(self.blueprint)
    if additional_blueprints is not None:
        for blueprint in additional_blueprints:
            self.app.register_blueprint(blueprint)
    self.app.config.setdefault(, 30)
Update flask application with our api :param Application app: a flask application
def _medianindex(self, v):
    """Find the new position of vertex ``v`` according to adjacency in
    layer ``l+dir``.

    The position is given by the median value of the adjacent positions.
    The median heuristic is proven to achieve at most 3 times the minimum
    number of crossings (while the barycenter heuristic achieves on the
    order of ``|V|``).

    :returns: a list with one position (odd neighbor count) or the two
        middle positions (even count); empty when ``v`` has no neighbors.
    """
    # Only meaningful when a previous layer exists (idiom fix: `is not
    # None` instead of `!= None`).
    assert self.prevlayer() is not None
    neighbors = self._neighbors(v)
    grx = self.layout.grx
    positions = [grx[u].pos for u in neighbors]
    if not positions:
        return []
    positions.sort()
    # Walk the sorted positions in the layout's horizontal direction.
    positions = positions[::self.layout.dirh]
    mid, rem = divmod(len(positions) - 1, 2)
    # Odd count: single median; even count: the two middle values.
    return [positions[mid]] if rem == 0 else [positions[mid], positions[mid + rem]]
find new position of vertex v according to adjacency in layer l+dir. position is given by the median value of adjacent positions. median heuristic is proven to achieve at most 3 times the minimum of crossings (while barycenter achieve in theory the order of |V|)
def transport_param(image):
    """Parse DockerImage info into a skopeo image parameter.

    :param image: DockerImage
    :return: str, skopeo parameter specifying the image
    :raises ValueError: when a path-based transport is missing its path
    :raises ConuException: when the transport is not supported
    """
    transports = {SkopeoTransport.CONTAINERS_STORAGE: "containers-storage:",
                  SkopeoTransport.DIRECTORY: "dir:",
                  SkopeoTransport.DOCKER: "docker://",
                  # skopeo syntax is docker-archive:path[:docker-reference];
                  # the separator colon was previously missing here.
                  SkopeoTransport.DOCKER_ARCHIVE: "docker-archive:",
                  SkopeoTransport.DOCKER_DAEMON: "docker-daemon:",
                  SkopeoTransport.OCI: "oci:",
                  SkopeoTransport.OSTREE: "ostree:"}

    transport = image.transport
    tag = image.tag
    repository = image.name
    path = image.path

    if not transport:
        # Default to pulling from a registry.
        transport = SkopeoTransport.DOCKER
    command = transports[transport]

    path_required = [SkopeoTransport.DIRECTORY, SkopeoTransport.DOCKER_ARCHIVE,
                     SkopeoTransport.OCI]
    if transport in path_required and path is None:
        raise ValueError(transports[transport] + " path is required to be specified")

    if transport == SkopeoTransport.DIRECTORY:
        return command + path
    if transport == SkopeoTransport.DOCKER_ARCHIVE:
        command += path
        if repository is None:
            return command
        # A reference inside the archive follows after another colon.
        command += ":"
    if transport in [SkopeoTransport.CONTAINERS_STORAGE, SkopeoTransport.DOCKER,
                     SkopeoTransport.DOCKER_ARCHIVE,
                     SkopeoTransport.DOCKER_DAEMON]:
        return command + repository + ":" + tag
    if transport == SkopeoTransport.OCI:
        return command + path + ":" + tag
    if transport == SkopeoTransport.OSTREE:
        return command + repository + ("@" + path if path else "")

    raise ConuException("This transport is not supported")
Parse DockerImage info into skopeo parameter :param image: DockerImage :return: string. skopeo parameter specifying image
def addNewMainWindow(self, settings=None, inspectorFullName=None):
    """Create and show a new MainWindow.

    If ``inspectorFullName`` is set, the window's inspector is selected
    from that name.  If the inspector identifier is not found in the
    registry, a KeyError is raised.

    NOTE(review): the argument of ``sys.platform.startswith()`` was
    stripped (presumably ``'darwin'``); restore before use.
    """
    mainWindow = MainWindow(self)
    self.mainWindows.append(mainWindow)
    # Keep the Window menu of every open window in sync.
    self.windowActionGroup.addAction(mainWindow.activateWindowAction)
    self.repopulateAllWindowMenus()
    if settings:
        mainWindow.readViewSettings(settings)
    if inspectorFullName:
        inspectorId = nameToIdentifier(inspectorFullName)
        mainWindow.setInspectorById(inspectorId)
    if mainWindow.inspectorRegItem:
        inspectorId = mainWindow.inspectorRegItem.identifier
        # Reflect the active inspector in the menu check state.
        mainWindow.getInspectorActionById(inspectorId).setChecked(True)
        logger.info("Created new window with inspector: {}"
                    .format(mainWindow.inspectorRegItem.fullName))
    else:
        logger.info("Created new window without inspector")
    mainWindow.drawInspectorContents(reason=UpdateReason.NEW_MAIN_WINDOW)
    mainWindow.show()
    if sys.platform.startswith():
        # Bring the window to the front (needed on some platforms).
        mainWindow.raise_()
        pass
    return mainWindow
Creates and shows a new MainWindow. If inspectorFullName is set, it will set the identifier from that name. If the inspector identifier is not found in the registry, a KeyError is raised.
def mirror(self, axes=):
    """Generate a symmetry of the Polyhedron with respect to the global
    axes.

    :param axes: 'x', 'y', 'z', 'xy', 'xz', 'yz'...
    :type axes: str
    :returns: ``pyny.Polyhedron``

    NOTE(review): the default string for ``axes`` was stripped; restore
    before use.
    """
    # Wrap self in a minimal Space so the Space-level mirror can be reused.
    polygon = np.array([[0,0], [0,1], [1,1]])
    space = Space(Place(polygon, polyhedra=self))
    return space.mirror(axes, inplace=False)[0].polyhedra[0]
Generates a symmetry of the Polyhedron respect global axes. :param axes: 'x', 'y', 'z', 'xy', 'xz', 'yz'... :type axes: str :returns: ``pyny.Polyhedron``
def __get_merged_api_info(self, services):
    """Build a description of an API implemented by several services.

    Args:
        services: List of protorpc.remote.Service instances implementing
            an api/version.

    Returns:
        The _ApiInfo object to use for the API that the given services
        implement.

    Raises:
        ApiConfigurationError: when the services carry mismatching API
            descriptors.
    """
    merged_api_info = services[0].api_info
    # Every remaining service must describe the same API.
    for service in services[1:]:
        other = service.api_info
        if merged_api_info.is_same_api(other):
            continue
        raise api_exceptions.ApiConfigurationError(
            _MULTICLASS_MISMATCH_ERROR_TEMPLATE % (other.name,
                                                   other.api_version))
    return merged_api_info
Builds a description of an API. Args: services: List of protorpc.remote.Service instances implementing an api/version. Returns: The _ApiInfo object to use for the API that the given services implement. Raises: ApiConfigurationError: If there's something wrong with the API configuration, such as a multiclass API decorated with different API descriptors (see the docstring for api()).
def delete_session(self, ticket):
    """Delete a session record associated with a service ticket.

    NOTE(review): the debug message literal before ``.format(ticket)`` was
    stripped; restore before use.
    """
    # Guard: the configured adapter must implement the CAS session API.
    assert isinstance(self.session_storage_adapter, CASSessionAdapter)
    logging.debug(.format(ticket))
    self.session_storage_adapter.delete(ticket)
Delete a session record associated with a service ticket.
def pyquil_to_circuit(program: pyquil.Program) -> Circuit:
    """Convert a protoquil pyQuil program to a QuantumFlow Circuit."""
    circ = Circuit()
    for instruction in program.instructions:
        # Declarations, halts and pragmas carry no gate action.
        if isinstance(instruction, (pyquil.Declare, pyquil.Halt,
                                    pyquil.Pragma)):
            continue
        if isinstance(instruction, pyquil.Measurement):
            circ += Measure(instruction.qubit.index)
        elif isinstance(instruction, pyquil.Gate):
            defgate = STDGATES[instruction.name]
            gate = defgate(*instruction.params)
            indices = [q.index for q in instruction.qubits]
            circ += gate.relabel(indices)
        else:
            raise ValueError()
    return circ
Convert a protoquil pyQuil program to a QuantumFlow Circuit
def getVertices(self, data):
    """Return the vertices of this region, already transformed and
    ready-to-use.

    Internally delegates to ``Bone.transformVertices()`` with this
    region's vertex list and dimensions.
    """
    bone = self.bone
    return bone.transformVertices(data, self.vertices, self.dims)
Returns the vertices of this region already transformed and ready-to-use. Internally uses :py:meth:`Bone.transformVertices()`\ .
def namer(cls, imageUrl, pageUrl):
    """Build the output filename from the page URL sequence (which is
    apparently increasing) and the image extension.

    NOTE(review): the separators passed to ``split``/``rsplit`` were
    stripped (presumably ``'/'`` and ``'.'``); restore before use.
    """
    num = pageUrl.split()[-1]
    ext = imageUrl.rsplit(, 1)[1]
    return "thethinhline-%s.%s" % (num, ext)
Use page URL sequence which is apparently increasing.
def write_bvec_file(bvecs, bvec_file):
    """Write an array of bvecs to a bvec file.

    :param bvecs: array with the vectors (one x/y/z component per column)
    :param bvec_file: filepath to write to; None is a silent no-op

    NOTE(review): the log format string, file open mode, write format
    strings and join separator were stripped; restore before use.
    """
    if bvec_file is None:
        return
    logger.info( % bvec_file)
    with open(bvec_file, ) as text_file:
        # One line per component: x, then y, then z.
        text_file.write( % .join(map(str, bvecs[:, 0])))
        text_file.write( % .join(map(str, bvecs[:, 1])))
        text_file.write( % .join(map(str, bvecs[:, 2])))
Write an array of bvecs to a bvec file :param bvecs: array with the vectors :param bvec_file: filepath to write to
def get_users(self, usernames):
    """Fetch user info for the given usernames (max. 50).

    NOTE(review): the endpoint string passed to ``self._req`` and the
    response key were stripped; restore before use.  Also note the
    ``is not "authorization_code"`` comparison uses identity instead of
    equality -- it should be ``!=``.
    """
    if self.standard_grant_type is not "authorization_code":
        raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
    response = self._req(, post_data={
        "usernames":usernames
    })
    # Deserialize each returned record into a User object.
    users = []
    for item in response[]:
        u = User()
        u.from_dict(item)
        users.append(u)
    return users
Fetch user info for the given usernames

        :param usernames: The usernames you want metadata for (max. 50)
def needkwargs(*argnames):
    """Function decorator which checks that the decorated function is
    called with a set of required kwargs.

    Args:
        *argnames: String keyword argument names.

    Raises:
        ValueError: If a required kwarg is missing in the decorated
            function call.
    """
    import functools

    required = set(argnames)

    def decorator(func):
        # functools.wraps preserves the wrapped function's name/docstring
        # (the original wrapper hid them behind "inner").
        @functools.wraps(func)
        def inner(*args, **kwargs):
            missing = required - set(kwargs)
            if missing:
                # sorted() keeps the error message deterministic.
                err = "%s kwargs are missing." % sorted(missing)
                raise ValueError(err)
            return func(*args, **kwargs)
        return inner
    return decorator
Function decorator which checks that the decorated function is called with a set of required kwargs. Args: *argnames: String keyword argument names. Raises: ValueError: If a required kwarg is missing in the decorated function call.
def _endpoint(self, endpoint, action, *url_args):
    """Return the URL for the action.

    :param str endpoint: The controller
    :param str action: The action provided by the controller
    :param url_args: Additional path segments appended to the URL
    :return: Full URL for the requested action

    NOTE(review): the action compared in ``if action == :`` was stripped;
    restore before use.  Also note the multi-arg branch joins segments
    without a leading "/" -- looks like a bug; verify against callers.
    """
    args = (self.api_base, endpoint, action)
    # Some special action omits itself from the path.
    if action == :
        args = (self.api_base, endpoint)
    api_url = "/".join(args)
    if url_args:
        if len(url_args) == 1:
            api_url += "/" + url_args[0]
        else:
            api_url += "/".join(url_args)
    return api_url
Return the URL for the action. :param str endpoint: The controller :param str action: The action provided by the controller :param url_args: Additional endpoints(for endpoints that take part of the url as option) :return: Full URL for the requested action
def until_any_child_in_state(self, state, timeout=None):
    """Return a tornado Future; resolves when any client is in the
    specified state."""
    # dict.values is called unbound on purpose (bypasses any override on
    # the children container), matching the original behavior.
    child_futures = [child.until_state(state)
                     for child in dict.values(self.children)]
    return until_any(*child_futures, timeout=timeout)
Return a tornado Future; resolves when any client is in specified state
def clustering_gmm(data, n_clusters, tol=1e-7, min_covar=None, scale=):
    """Find clusters in an array using a Gaussian Mixture Model.

    NOTE(review): the body of this function is truncated -- the code that
    rescales `data` and computes the initial `gmm`, `weights`, `means`
    and `covars` is missing, and the default for ``scale`` was stripped
    (presumably 'logicle').  Also ``gmm.weight_`` looks like a typo for
    ``weights_``; confirm against the scikit-learn GMM API.  Restore from
    upstream before use.
    """
    gmm.weight_ = weights
    gmm.means_ = means
    gmm.covars_ = covars
    # Refine the seeded parameters with EM, then assign each sample to a
    # cluster by sampling from its responsibility distribution.
    gmm.fit(data)
    resp = gmm.predict_proba(data)
    labels = [np.random.choice(range(n_clusters), p=ri) for ri in resp]
    return labels
Find clusters in an array using a Gaussian Mixture Model. Before clustering, `data` can be automatically rescaled as specified by the `scale` argument. Parameters ---------- data : FCSData or array_like Data to cluster. n_clusters : int Number of clusters to find. tol : float, optional Tolerance for convergence. Directly passed to either ``GaussianMixture`` or ``GMM``, depending on ``scikit-learn``'s version. min_covar : float, optional The minimum trace that the initial covariance matrix will have. If ``scikit-learn``'s version is older than 0.18, `min_covar` is also passed directly to ``GMM``. scale : str, optional Rescaling applied to `data` before performing clustering. Can be either ``linear`` (no rescaling), ``log``, or ``logicle``. Returns ------- labels : array Nx1 array with labels for each element in `data`, assigning ``data[i]`` to cluster ``labels[i]``. Notes ----- A Gaussian Mixture Model finds clusters by fitting a linear combination of `n_clusters` Gaussian probability density functions (pdf) to `data` using Expectation Maximization (EM). This method can be fairly sensitive to the initial parameter choice. To generate a reasonable set of initial conditions, `clustering_gmm` first divides all points in `data` into `n_clusters` groups of the same size based on their Euclidean distance to the minimum value. Then, for each group, the 50% samples farther away from the mean are discarded. The mean and covariance are calculated from the remaining samples of each group, and used as initial conditions for the GMM EM algorithm. `clustering_gmm` internally uses a `GaussianMixture` object from the ``scikit-learn`` library (``GMM`` if ``scikit-learn``'s version is lower than 0.18), with full covariance matrices for each cluster. For more information, consult ``scikit-learn``'s documentation.
def get_receive(self, script_list):
    """Return a mapping of received event names to scripts.

    Keys are the lower-cased event names; values are sets of scripts from
    ``script_list`` whose hat block is "when I receive <event>".
    """
    received = defaultdict(set)
    for script in script_list:
        if self.script_start_type(script) != self.HAT_WHEN_I_RECEIVE:
            continue
        event_name = script.blocks[0].args[0].lower()
        received[event_name].add(script)
    return received
Return a dict mapping each received event name (lower-cased) to the set of scripts in script_list triggered by it.
def conv(arg, default=None, func=None):
    """Generalization of ``arg if arg else default``.

    When ``func`` is given, return ``func(arg)`` for truthy ``arg`` and
    ``default`` otherwise; without ``func``, return ``arg`` itself or
    ``default``.
    """
    if not arg:
        return default
    return func(arg) if func else arg
essentially, the generalization of arg if arg else default or func(arg) if arg else default
def householder(self):
    """Return matrices (u, b, v) with self = u*b*v and b in bidiagonal
    form, using Householder transformations.

    NOTE(review): this block is truncated -- the loop over ``k`` and the
    accumulation of ``u`` are missing (``k`` and ``u`` are referenced but
    never defined here), and it uses Python 2's ``xrange``.  Restore from
    upstream before use.
    """
    bidiagMatrix = Matrix.from_two_dim_array(self.get_width(), self.get_height(), self.matrix)
    # Identity matrices used to build the row/column reflectors.
    identityMatrixRow = Matrix(self.get_height(), self.get_height())
    for i in xrange(self.get_height()):
        identityMatrixRow.set_value(i, i, 1.0)
    identityMatrixCol = Matrix(self.get_width(), self.get_width())
    for i in xrange(self.get_width()):
        identityMatrixCol.set_value(i, i, 1.0)
    x = Vector(self.get_height())
    y = Vector(self.get_height())
    if k > 0:
        x.set_value(0, k - 1, bidiagMatrix.get_value(k, k - 1))
        y.set_value(0, k - 1, bidiagMatrix.get_value(k, k - 1))
    # Norm of the k-th row tail; defines the target vector y.
    s = 0.0
    for i in xrange(k, self.get_height()):
        val = bidiagMatrix.get_value(k, i)
        x.set_value(0, i, val)
        s += (val ** 2)
    s = sqrt(s)
    y.set_value(0, k, s)
    # Householder reflector w = (x - y) / |x - y|.
    tmp = x - y
    norm = sqrt(sum(i[0] ** 2 for i in tmp.get_array()))
    w = tmp / norm
    s = sqrt(s)
    vk = identityMatrixCol - (2 * (w * w.transform()))
    bidiagMatrix = bidiagMatrix * vk
    # Accumulate the right-side transformations into v.
    if k == 0:
        v = vk
    else:
        v = vk * v
    return (u, bidiagMatrix, v)
Return Matrices u,b,v with self = ubv and b is in bidiagonal form The algorithm uses householder transformations. :return tuple (u,b,v): A tuple with the Matrix u, b and v. and self = ubv (except some rounding errors) u is a unitary matrix b is a bidiagonal matrix. v is a unitary matrix. :note: Currently the algorithm only works for squared matrices :todo: Make sure, that the bidiagonal matrix is 0.0 except for the bidiagonal. Due to rounding errors, this is currently not ensured
def models(cls, api_version=DEFAULT_API_VERSION):
    """Return the models module matching the requested API version.

    Module depends on the API version:
       * 2016-02-01: v2016_02_01.models
       * 2016-09-01: v2016_09_01.models
       * 2017-05-10: v2017_05_10.models
       * 2018-02-01: v2018_02_01.models
       * 2018-05-01: v2018_05_01.models

    NOTE(review): the version strings compared against ``api_version``
    were stripped; restore before use.
    """
    if api_version == :
        from .v2016_02_01 import models
        return models
    elif api_version == :
        from .v2016_09_01 import models
        return models
    elif api_version == :
        from .v2017_05_10 import models
        return models
    elif api_version == :
        from .v2018_02_01 import models
        return models
    elif api_version == :
        from .v2018_05_01 import models
        return models
    raise NotImplementedError("APIVersion {} is not available".format(api_version))
Module depends on the API version: * 2016-02-01: :mod:`v2016_02_01.models<azure.mgmt.resource.resources.v2016_02_01.models>` * 2016-09-01: :mod:`v2016_09_01.models<azure.mgmt.resource.resources.v2016_09_01.models>` * 2017-05-10: :mod:`v2017_05_10.models<azure.mgmt.resource.resources.v2017_05_10.models>` * 2018-02-01: :mod:`v2018_02_01.models<azure.mgmt.resource.resources.v2018_02_01.models>` * 2018-05-01: :mod:`v2018_05_01.models<azure.mgmt.resource.resources.v2018_05_01.models>`
def register(self, item):
    """Register a new orb object (column, index, or collector --
    including a virtual object defined through the orb.virtual decorator)
    to this schema.

    :param item: <variant>

    NOTE(review): the attribute name in ``hasattr(item, )`` was stripped
    (presumably ``'__orb__'``); restore before use.
    """
    # Unwrap decorated callables down to their orb definition.
    if callable(item) and hasattr(item, ):
        item = item.__orb__
    key = item.name()
    model = self.__model
    if isinstance(item, orb.Index):
        self.__indexes[key] = item
        item.setSchema(self)
        # Expose the index as a classmethod on the model when free.
        if model and not hasattr(model, key):
            setattr(model, key, classmethod(item))
    elif isinstance(item, orb.Collector):
        self.__collectors[key] = item
        item.setSchema(self)
    elif isinstance(item, orb.Column):
        self.__columns[key] = item
        item.setSchema(self)
Registers a new orb object to this schema. This could be a column, index, or collector -- including a virtual object defined through the orb.virtual decorator. :param item: <variant> :return:
def get_microscope_files(self, plate_name, acquisition_name):
    """Get status and name of files that have been registered for upload.

    Parameters
    ----------
    plate_name: str
        name of the parent plate
    acquisition_name: str
        name of the parent acquisition

    Returns
    -------
    List[Dict[str, str]]
        names and status of uploaded files

    NOTE(review): the log format string was stripped from the
    ``logger.info`` call; restore before use.
    """
    logger.info(
        ,
        self.experiment_name, plate_name, acquisition_name
    )
    acquisition_id = self._get_acquisition_id(plate_name, acquisition_name)
    # Combine image and metadata file listings into one result.
    image_files = self._get_image_files(acquisition_id)
    metadata_files = self._get_metadata_files(acquisition_id)
    return image_files + metadata_files
Gets status and name of files that have been registered for upload. Parameters ---------- plate_name: str name of the parent plate acquisition_name: str name of the parent acquisition Returns ------- List[Dict[str, str]] names and status of uploaded files See also -------- :func:`tmserver.api.acquisition.get_microscope_image_files_information` :func:`tmserver.api.acquisition.get_microscope_metadata_file_information` :class:`tmlib.models.acquisition.Acquisition` :class:`tmlib.models.file.MicroscopeImageFile` :class:`tmlib.models.file.MicroscopeMetadataFile`
def randomPositions(input, nside_pix, n=1):
    """Generate n random positions within a full HEALPix mask of
    booleans, or within the pixels occupied by a set of (lon, lat)
    coordinates.

    ``nside_pix`` is meant to be coarser than the input so that gaps from
    star holes, bleed trails, cosmic rays, etc. are filled in.

    Returns (lon, lat, area): positions in degrees and total area in
    deg^2.

    NOTE(review): the warning/error message literals were stripped, and
    the ``else`` branch leaves ``lon``/``lat`` undefined (presumably the
    original raised there); restore before use.
    """
    input = np.array(input)
    if len(input.shape) == 1:
        # Case 1: full boolean HEALPix mask.
        if hp.npix2nside(len(input)) < nside_pix:
            logger.warning()
        subpix = np.nonzero(input)[0]
        lon, lat = pix2ang(hp.npix2nside(len(input)), subpix)
    elif len(input.shape) == 2:
        # Case 2: (lon, lat) coordinates of catalog objects.
        lon, lat = input[0], input[1]
    else:
        logger.warning()
    # Occupied coarse pixels define the allowed area.
    pix = surveyPixel(lon, lat, nside_pix)
    area = len(pix) * hp.nside2pixarea(nside_pix, degrees=True)
    mask = np.tile(False, hp.nside2npix(nside_pix))
    mask[pix] = True
    coverage_fraction = float(np.sum(mask)) / len(mask)
    # Oversample throws so that, on average, n of them land in the mask.
    n_throw = int(n / coverage_fraction)
    lon, lat = [], []
    count = 0
    while len(lon) < n:
        # Uniform on the sphere: uniform lon, arcsin-distributed lat.
        lon_throw = np.random.uniform(0., 360., n_throw)
        lat_throw = np.degrees(np.arcsin(np.random.uniform(-1., 1., n_throw)))
        pix_throw = ugali.utils.healpix.angToPix(nside_pix, lon_throw, lat_throw)
        cut = mask[pix_throw].astype(bool)
        lon = np.append(lon, lon_throw[cut])
        lat = np.append(lat, lat_throw[cut])
        count += 1
        # Safety valve against pathological coverage fractions.
        if count > 10:
            raise RuntimeError()
    return lon[0:n], lat[0:n], area
Generate n random positions within a full HEALPix mask of booleans, or a set of (lon, lat) coordinates. Parameters: ----------- input : (1) full HEALPix mask of booleans, or (2) a set of (lon, lat) coordinates for catalog objects that define the occupied pixels. nside_pix : nside_pix is meant to be at coarser resolution than the input mask or catalog object positions so that gaps from star holes, bleed trails, cosmic rays, etc. are filled in. Returns: -------- lon,lat,area : Return the longitude and latitude of the random positions (deg) and the total area (deg^2).
def encode(self, pdu):
    """Encode the contents of the APCI into the PDU.

    Packs the APDU header octets (type nibble plus flag bits, invoke ID,
    segmentation sequence/window, service choice) into ``pdu`` according
    to the layout of each BACnet APDU type.
    """
    if _debug: APCI._debug("encode %r", pdu)
    PCI.update(pdu, self)
    if (self.apduType == ConfirmedRequestPDU.pduType):
        # Type in the high nibble; SEG/MOR/SA flags in the low bits.
        buff = self.apduType << 4
        if self.apduSeg:
            buff += 0x08
        if self.apduMor:
            buff += 0x04
        if self.apduSA:
            buff += 0x02
        pdu.put(buff)
        pdu.put((self.apduMaxSegs << 4) + self.apduMaxResp)
        pdu.put(self.apduInvokeID)
        # Sequence number and window size only appear when segmented.
        if self.apduSeg:
            pdu.put(self.apduSeq)
            pdu.put(self.apduWin)
        pdu.put(self.apduService)
    elif (self.apduType == UnconfirmedRequestPDU.pduType):
        pdu.put(self.apduType << 4)
        pdu.put(self.apduService)
    elif (self.apduType == SimpleAckPDU.pduType):
        pdu.put(self.apduType << 4)
        pdu.put(self.apduInvokeID)
        pdu.put(self.apduService)
    elif (self.apduType == ComplexAckPDU.pduType):
        # SEG/MOR flags as in confirmed requests (no SA bit).
        buff = self.apduType << 4
        if self.apduSeg:
            buff += 0x08
        if self.apduMor:
            buff += 0x04
        pdu.put(buff)
        pdu.put(self.apduInvokeID)
        if self.apduSeg:
            pdu.put(self.apduSeq)
            pdu.put(self.apduWin)
        pdu.put(self.apduService)
    elif (self.apduType == SegmentAckPDU.pduType):
        # NAK and server bits in the low nibble.
        buff = self.apduType << 4
        if self.apduNak:
            buff += 0x02
        if self.apduSrv:
            buff += 0x01
        pdu.put(buff)
        pdu.put(self.apduInvokeID)
        pdu.put(self.apduSeq)
        pdu.put(self.apduWin)
    elif (self.apduType == ErrorPDU.pduType):
        pdu.put(self.apduType << 4)
        pdu.put(self.apduInvokeID)
        pdu.put(self.apduService)
    elif (self.apduType == RejectPDU.pduType):
        pdu.put(self.apduType << 4)
        pdu.put(self.apduInvokeID)
        pdu.put(self.apduAbortRejectReason)
    elif (self.apduType == AbortPDU.pduType):
        buff = self.apduType << 4
        if self.apduSrv:
            buff += 0x01
        pdu.put(buff)
        pdu.put(self.apduInvokeID)
        pdu.put(self.apduAbortRejectReason)
    else:
        raise ValueError("invalid APCI.apduType")
encode the contents of the APCI into the PDU.
def pauli_constraints(X, Y, Z):
    """Return substitution rules that define Pauli spin operators.

    :param X: list of Pauli X operators on sites.
    :param Y: list of Pauli Y operators on sites.
    :param Z: list of Pauli Z operators on sites.
    :returns: dict mapping operator monomials to their canonical form.
    """
    rules = {}
    num_sites = len(X)
    for i in range(num_sites):
        xi, yi, zi = X[i], Y[i], Z[i]
        # Pauli operators square to the identity.
        rules[xi * xi] = 1
        rules[yi * yi] = 1
        rules[zi * zi] = 1
        # Same-site operators anticommute.
        rules[yi * xi] = - xi * yi
        rules[zi * xi] = - xi * zi
        rules[zi * yi] = - yi * zi
        for j in range(i + 1, num_sites):
            xj, yj, zj = X[j], Y[j], Z[j]
            # Operators on different sites commute.
            rules[xj * xi] = xi * xj
            rules[yj * yi] = yi * yj
            rules[yj * xi] = xi * yj
            rules[yi * xj] = xj * yi
            rules[zj * zi] = zi * zj
            rules[zj * xi] = xi * zj
            rules[zi * xj] = xj * zi
            rules[zj * yi] = yi * zj
            rules[zi * yj] = yj * zi
    return rules
Return a set of constraints that define Pauli spin operators.

    :param X: List of Pauli X operator on sites.
    :type X: list of :class:`sympy.physics.quantum.operator.HermitianOperator`.
    :param Y: List of Pauli Y operator on sites.
    :type Y: list of :class:`sympy.physics.quantum.operator.HermitianOperator`.
    :param Z: List of Pauli Z operator on sites.
    :type Z: list of :class:`sympy.physics.quantum.operator.HermitianOperator`.

    :returns: dict of monomial substitution rules encoding the Pauli algebra.
def sort_name(self):
    """Return the sorting name of this category, falling back to the
    plain ``name`` when no explicit sort name exists."""
    record = self._record
    if record and record.sort_name:
        return record.sort_name
    return self.name
Get the sorting name of this category
def register():
    """Plugin registration: connect the git handlers to Pelican signals."""
    from pelican import signals

    hookups = (
        (signals.initialized, setup_git),
        (signals.article_generator_finalized, replace_git_url),
    )
    for signal, handler in hookups:
        signal.connect(handler)
Plugin registration.
def merge_dicts(*dicts, **kwargs):
    """merge_dicts(*dicts, cls=None)

    Take multiple *dicts* and return a single merged dict.  Merging
    happens in order, so values of later objects win on key collisions.
    Non-dict arguments are silently skipped.  The class of the returned
    dict is configurable via *cls*; when it is None, the class is
    inferred from the first dict object in *dicts*.
    """
    cls = kwargs.get("cls", None)
    if cls is None:
        # Infer the result class from the first actual dict argument.
        cls = next((type(d) for d in dicts if isinstance(d, dict)), None)
        if cls is None:
            raise TypeError("cannot infer cls as none of the passed objects is of type dict")
    merged = cls()
    for d in dicts:
        if isinstance(d, dict):
            merged.update(d)
    return merged
merge_dicts(*dicts, cls=None) Takes multiple *dicts* and returns a single merged dict. The merging takes place in order of the passed dicts and therefore, values of rear objects have precedence in case of field collisions. The class of the returned merged dict is configurable via *cls*. If it is *None*, the class is inferred from the first dict object in *dicts*.