def update(self, observable, handlers):
    addedreaders, removedreaders = handlers
    for reader in addedreaders:
        item = self.Append(str(reader))
        self.SetClientData(item, reader)
    for reader in removedreaders:
        item = self.FindString(str(reader))
        if wx.NOT_FOUND != item:
            self.Delete(item)
    selection = self.GetSelection()
Toolbar ReaderObserver callback that is notified when readers are added or removed.
def _build_rhs(p, q, deriv):
    b = [0 for _ in range(p + q + 1)]
    b[deriv] = math.factorial(deriv)
    return np.array(b)
The right-hand-side vector of the equation system.
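A quick check of the helper above (it assumes `math` and `numpy` are imported, as the function body requires): requesting the k-th derivative places k! at index k and zeros elsewhere.

import math
import numpy as np

# using _build_rhs as defined above
print(_build_rhs(2, 2, 1))  # -> [0 1 0 0 0]  (1! at index 1)
print(_build_rhs(2, 2, 2))  # -> [0 0 2 0 0]  (2! at index 2)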
def import_surf_mesh(file_name):
    raw_content = read_file(file_name)
    raw_content = raw_content.split("\n")
    content = []
    for rc in raw_content:
        temp = rc.strip().split()
        content.append(temp)
    if int(content[0][0]) != 3:
        raise TypeError("Input mesh must be 3-dimensional")
    surf = shortcuts.generate_surface(rational=True)
    surf.degree_u = int(content[1][0])
    surf.degree_v = int(content[1][1])
    dim_u = int(content[2][0])
    dim_v = int(content[2][1])
    ctrlpts_end = 5 + (dim_u * dim_v)
    ctrlpts_mesh = content[5:ctrlpts_end]
    ctrlpts = compatibility.flip_ctrlpts_u(ctrlpts_mesh, dim_u, dim_v)
    ctrlptsw = compatibility.generate_ctrlptsw(ctrlpts)
    surf.set_ctrlpts(ctrlptsw, dim_u, dim_v)
    surf.knotvector_u = [float(u) for u in content[3]]
    surf.knotvector_v = [float(v) for v in content[4]]
    return surf
Generates a NURBS surface object from a mesh file. :param file_name: input mesh file :type file_name: str :return: a NURBS surface :rtype: NURBS.Surface
def attitude_quaternion_encode(self, time_boot_ms, q1, q2, q3, q4,
                               rollspeed, pitchspeed, yawspeed):
    return MAVLink_attitude_quaternion_message(time_boot_ms, q1, q2, q3, q4,
                                               rollspeed, pitchspeed, yawspeed)
The attitude in the aeronautical frame (right-handed, Z-down, X-front, Y-right), expressed as quaternion. Quaternion order is w, x, y, z and a zero rotation would be expressed as (1 0 0 0). time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) q1 : Quaternion component 1, w (1 in null-rotation) (float) q2 : Quaternion component 2, x (0 in null-rotation) (float) q3 : Quaternion component 3, y (0 in null-rotation) (float) q4 : Quaternion component 4, z (0 in null-rotation) (float) rollspeed : Roll angular speed (rad/s) (float) pitchspeed : Pitch angular speed (rad/s) (float) yawspeed : Yaw angular speed (rad/s) (float)
def save(self, inplace=True):
    modified_data = self._modified_data()
    if bool(modified_data):
        # NOTE: the string keys below were lost in extraction and are
        # reconstructed from the surrounding logging/request pattern.
        extra = {
            'resource': self.__class__.__name__,
            'query': {'id': self.id, 'data': modified_data}
        }
        logger.info('Saving marker', extra=extra)
        data = self._api.patch(
            url=self._URL['get'].format(id=self.id),
            data=modified_data).json()
        marker = Marker(api=self._api, **data)
        return marker
    else:
        raise ResourceNotModified()
Saves all modifications to the marker on the server. :param inplace: Apply edits to the current instance or get a new one. :return: Marker instance.
def load_data(self, sess, inputs, state_inputs):
    if log_once("load_data"):
        logger.info(
            "Training on concatenated sample batches:\n\n{}\n".format(
                summarize({
                    "placeholders": self.loss_inputs,
                    "inputs": inputs,
                    "state_inputs": state_inputs
                })))
    feed_dict = {}
    assert len(self.loss_inputs) == len(inputs + state_inputs), \
        (self.loss_inputs, inputs, state_inputs)
    if len(state_inputs) > 0:
        smallest_array = state_inputs[0]
        seq_len = len(inputs[0]) // len(state_inputs[0])
        self._loaded_max_seq_len = seq_len
    else:
        smallest_array = inputs[0]
        self._loaded_max_seq_len = 1
    sequences_per_minibatch = (
        self.max_per_device_batch_size // self._loaded_max_seq_len * len(
            self.devices))
    if sequences_per_minibatch < 1:
        logger.warn(
            ("Target minibatch size is {}, however the rollout sequence "
             "length is {}, hence the minibatch size will be raised to "
             "{}.").format(self.max_per_device_batch_size,
                           self._loaded_max_seq_len,
                           self._loaded_max_seq_len * len(self.devices)))
        sequences_per_minibatch = 1
    if len(smallest_array) < sequences_per_minibatch:
        sequences_per_minibatch = make_divisible_by(
            len(smallest_array), len(self.devices))
    if log_once("data_slicing"):
        logger.info(
            ("Divided {} rollout sequences, each of length {}, among "
             "{} devices.").format(
                 len(smallest_array), self._loaded_max_seq_len,
                 len(self.devices)))
    if sequences_per_minibatch < len(self.devices):
        raise ValueError(
            "Must load at least 1 tuple sequence per device. Try "
            "increasing `sgd_minibatch_size` or reducing `max_seq_len` "
            "to ensure that at least one sequence fits per device.")
    self._loaded_per_device_batch_size = (sequences_per_minibatch // len(
        self.devices) * self._loaded_max_seq_len)
    if len(state_inputs) > 0:
        state_inputs = [
            make_divisible_by(arr, sequences_per_minibatch)
            for arr in state_inputs
        ]
        inputs = [arr[:len(state_inputs[0]) * seq_len] for arr in inputs]
        assert len(state_inputs[0]) * seq_len == len(inputs[0]), \
            (len(state_inputs[0]), sequences_per_minibatch, seq_len,
             len(inputs[0]))
        for ph, arr in zip(self.loss_inputs, inputs + state_inputs):
            feed_dict[ph] = arr
        truncated_len = len(inputs[0])
    else:
        for ph, arr in zip(self.loss_inputs, inputs + state_inputs):
            truncated_arr = make_divisible_by(arr, sequences_per_minibatch)
            feed_dict[ph] = truncated_arr
            truncated_len = len(truncated_arr)
    sess.run([t.init_op for t in self._towers], feed_dict=feed_dict)
    self.num_tuples_loaded = truncated_len
    tuples_per_device = truncated_len // len(self.devices)
    assert tuples_per_device > 0, "No data loaded?"
    assert tuples_per_device % self._loaded_per_device_batch_size == 0
    return tuples_per_device
Bulk loads the specified inputs into device memory.

The shape of the inputs must conform to the shapes of the input
placeholders this optimizer was constructed with.

The data is split equally across all the devices. If the data is not
evenly divisible by the batch size, excess data will be discarded.

Args:
    sess: TensorFlow session.
    inputs: List of arrays matching the input placeholders, of shape
        [BATCH_SIZE, ...].
    state_inputs: List of RNN input arrays. These arrays have size
        [BATCH_SIZE / MAX_SEQ_LEN, ...].

Returns:
    The number of tuples loaded per device.
def update_email_marketing_campaign(self, email_marketing_campaign,
                                    name, email_content, from_email,
                                    from_name, reply_to_email, subject,
                                    text_content, address,
                                    is_view_as_webpage_enabled=False,
                                    view_as_web_page_link_text='',
                                    view_as_web_page_text='',
                                    is_permission_reminder_enabled=False,
                                    permission_reminder_text=''):
    url = self.api.join(
        '/'.join([self.EMAIL_MARKETING_CAMPAIGN_URL,
                  str(email_marketing_campaign.constant_contact_id)]))
    inlined_email_content = self.inline_css(email_content)
    minified_email_content = html_minify(inlined_email_content)
    worked_around_email_content = work_around(minified_email_content)
    # NOTE: the payload keys were lost in extraction; the names below follow
    # the Constant Contact v2 email-campaign schema and are a best guess.
    data = {
        'name': name,
        'subject': subject,
        'from_name': from_name,
        'from_email': from_email,
        'reply_to_email': reply_to_email,
        'email_content': worked_around_email_content,
        'email_content_format': 'HTML',
        'text_content': text_content,
        'message_footer': {
            'organization_name': address['organization_name'],
            'address_line_1': address['address_line_1'],
            'address_line_2': address['address_line_2'],
            'address_line_3': address['address_line_3'],
            'city': address['city'],
            'state': address['state'],
            'international_state': address['international_state'],
            'postal_code': address['postal_code'],
            'country': address['country']
        },
        'is_view_as_webpage_enabled': is_view_as_webpage_enabled,
        'view_as_web_page_link_text': view_as_web_page_link_text,
        'view_as_web_page_text': view_as_web_page_text,
        'is_permission_reminder_enabled': is_permission_reminder_enabled,
        'permission_reminder_text': permission_reminder_text
    }
    response = url.put(data=json.dumps(data),
                       headers={'content-type': 'application/json'})
    self.handle_response_status(response)
    email_marketing_campaign.data = response.json()
    email_marketing_campaign.save()
    return email_marketing_campaign
Update a Constant Contact email marketing campaign. Returns the updated EmailMarketingCampaign object.
def get_process_params(xmldoc, program, param, require_unique_program=True):
    process_ids = lsctables.ProcessTable.get_table(xmldoc).get_ids_by_program(program)
    if len(process_ids) < 1:
        raise ValueError("process table must contain at least one program named '%s'" % program)
    elif require_unique_program and len(process_ids) != 1:
        raise ValueError("process table must contain exactly one program named '%s'" % program)
    return [row.pyvalue for row in lsctables.ProcessParamsTable.get_table(xmldoc)
            if (row.process_id in process_ids) and (row.param == param)]
Return a list of the values stored in the process_params table for params named param for the program(s) named program. The values are returned as Python native types, not as the strings appearing in the XML document. If require_unique_program is True (default), then the document must contain exactly one program with the requested name, otherwise ValueError is raised. If require_unique_program is not True, then there must be at least one program with the requested name otherwise ValueError is raised.
def load_all_methods(self):
    methods = []
    if self.CASRN in CRC_inorg_s_const_data.index:
        methods.append(CRC_INORG_S)
        # NOTE: the DataFrame column name was lost in extraction; 'Vm' is
        # assumed from the attribute being set.
        self.CRC_INORG_S_Vm = float(CRC_inorg_s_const_data.at[self.CASRN, 'Vm'])
    self.all_methods = set(methods)
Method which picks out coefficients for the specified chemical from the various dictionaries and DataFrames storing it. All data is stored as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`, and :obj:`all_methods`, the set of methods for which data exists. Called on initialization only. See the source code for the variables at which the coefficients are stored. The coefficients can safely be altered once the class is initialized. This method can be called again to reset the parameters.
def many_psds(k=2, fs=1.0, b0=1.0, N=1024):
    psd = []
    for j in range(k):
        print(j)  # Python 3 print function (original used the Python 2 statement)
        x = noise.white(N=2*4096, b0=b0, fs=fs)
        f, tmp = noise.numpy_psd(x, fs)
        if j == 0:
            psd = tmp
        else:
            psd = psd + tmp
    return f, psd/k
compute average of many PSDs
def ping(self, timeout=12):
    self.conn("POST",
              "{0}/users/ME/endpoints/{1}/active".format(self.conn.msgsHost, self.id),
              auth=SkypeConnection.Auth.RegToken,
              json={"timeout": timeout})
Send a keep-alive request for the endpoint. Args: timeout (int): maximum amount of time for the endpoint to stay active
def jarFlags(target, source, env, for_signature):
    # NOTE: the elided literals are reconstructed; '$JARFLAGS' and the 'm'
    # flag follow SCons' Jar tool conventions.
    jarflags = env.subst('$JARFLAGS', target=target, source=source)
    for src in source:
        contents = src.get_text_contents()
        if contents[:16] == "Manifest-Version":
            if 'm' not in jarflags:
                return jarflags + 'm'
            break
    return jarflags
If we have a manifest, make sure that the 'm' flag is specified.
def blend_html_colour_to_white(html_colour, alpha):
    html_colour = html_colour.upper()
    has_hash = False
    if html_colour[0] == '#':
        has_hash = True
        html_colour = html_colour[1:]
    r_str = html_colour[0:2]
    g_str = html_colour[2:4]
    b_str = html_colour[4:6]
    r = int(r_str, 16)
    g = int(g_str, 16)
    b = int(b_str, 16)
    r = int(alpha * r + (1 - alpha) * 255)
    g = int(alpha * g + (1 - alpha) * 255)
    b = int(alpha * b + (1 - alpha) * 255)
    out = '{:02X}{:02X}{:02X}'.format(r, g, b)
    if has_hash:
        out = '#' + out
    return out
:param html_colour: Colour string like FF552B or #334455 :param alpha: Alpha value :return: Html colour alpha blended onto white
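For example, blending pure red halfway toward white gives a pastel red, and the '#' prefix is preserved when present; a short usage sketch of the function above:

print(blend_html_colour_to_white('#FF0000', 0.5))  # -> '#FF7F7F'
print(blend_html_colour_to_white('334455', 1.0))   # -> '334455' (alpha=1 keeps the colour)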
def make_github_markdown_writer(opts):
    # NOTE: the attribute name checked here, and any extra constructor
    # arguments, were lost in extraction; 'wrapper_regex' is assumed from
    # the docstring.
    assert hasattr(opts, 'wrapper_regex')
    atx = MarkdownATXWriterStrategy(opts)
    setext = MarkdownSetextWriterStrategy(opts)
    inline = MarkdownInlineLinkWriterStrategy(opts)
    ref = MarkdownReferenceLinkWriterStrategy(opts)
    code_block_switch = ghswitches.code_block_switch
    strategies = [atx, setext, inline, ref]
    switches = [code_block_switch]
    return Writer(strategies, switches=switches)
Creates a Writer object used for parsing and writing Markdown files with
a GitHub style anchor transformation.

opts is a namespace object containing runtime options. It should
generally include the following attributes:

* 'open': a string corresponding to the opening portion of the wrapper
  identifier. Built-in AnchorHub usage defaults this to '{'
* 'close': a string corresponding to the closing portion of the wrapper
  identifier. Built-in AnchorHub usage defaults this to '}'
* 'wrapper_regex': an escaped regular expression that matches tags
  located inside of wrappers

:param opts: namespace object, usually created from command-line
    arguments, that is used to pass runtime options to concrete
    WriterStrategy objects.
:return: A Writer object designed for parsing, modifying, and writing
    AnchorHub tags to converted anchors in Markdown files using GitHub
    style anchors
def gen_chunks(self, gen):
    for data in gen:
        size = len(data)
        if size < self.chunk_size:
            yield data
        else:
            # memoryview gives zero-copy slices of the byte string
            # (the original used the Python 2 `buffer` builtin)
            mv = memoryview(data)
            offset = 0
            while offset < size:
                nb = min(self.chunk_size, size - offset)
                yield mv[offset:offset + nb]
                offset += nb
Generates byte chunks of a given size.

Takes a bytes generator and yields chunks of a maximum of ``chunk_size``
bytes.

Parameters
----------
gen : generator
    The bytes generator that produces the bytes
def _load_market_scheme(self):
    try:
        with open(self.scheme_path, 'r') as scheme_file:
            self.scheme = yaml.load(scheme_file)
    except Exception as error:  # Python 3 syntax (original used "except Exception, error")
        raise LoadMarketSchemeFailed(reason=error)
Load market yaml description
def __capture(self, checkout_id, **kwargs):
    # NOTE: the parameter key was lost in extraction; 'checkout_id' matches
    # the WePay /checkout/capture call documented below.
    params = {
        'checkout_id': checkout_id
    }
    return self.make_call(self.__capture, params, kwargs)
Call documentation: `/checkout/capture <https://www.wepay.com/developer/reference/checkout#capture>`_, plus extra keyword parameters: :keyword str access_token: will be used instead of instance's ``access_token``, with ``batch_mode=True`` will set `authorization` param to its value. :keyword bool batch_mode: turn on/off the batch_mode, see :class:`wepay.api.WePay` :keyword str batch_reference_id: `reference_id` param for batch call, see :class:`wepay.api.WePay` :keyword str api_version: WePay API version, see :class:`wepay.api.WePay`
def make_list(var, num_terms=1):
    if not isinstance(var, list):
        if isinstance(var, tuple):
            var = list(var)
        else:
            var = [var]
        for _ in range(1, num_terms):
            var.append(var[0])
    return var
Make a variable a list if it is not already. If the variable is not a list, it will be made into a list of the correct length with all terms identical.
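A few illustrative calls of the function above (a sketch; the repeat step only applies when the input was not a list):

print(make_list(3, num_terms=4))       # -> [3, 3, 3, 3]
print(make_list((1, 2)))               # -> [1, 2]  (tuple converted to list)
print(make_list([5, 6], num_terms=3))  # -> [5, 6]  (already a list, returned unchanged)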
def section_menu(
    context, show_section_root=True, show_multiple_levels=True,
    apply_active_classes=True, allow_repeating_parents=True,
    max_levels=settings.DEFAULT_SECTION_MENU_MAX_LEVELS,
    template='', sub_menu_template='', sub_menu_templates=None,
    use_specific=settings.DEFAULT_SECTION_MENU_USE_SPECIFIC,
    use_absolute_page_urls=False, add_sub_menus_inline=None, **kwargs
):
    # NOTE: the tag-name literal was lost in extraction; 'section_menu' is
    # assumed from the function name.
    validate_supplied_values('section_menu', max_levels=max_levels,
                             use_specific=use_specific)
    if not show_multiple_levels:
        max_levels = 1
    menu_class = settings.objects.SECTION_MENU_CLASS
    return menu_class.render_from_tag(
        context=context,
        max_levels=max_levels,
        use_specific=use_specific,
        apply_active_classes=apply_active_classes,
        allow_repeating_parents=allow_repeating_parents,
        use_absolute_page_urls=use_absolute_page_urls,
        add_sub_menus_inline=add_sub_menus_inline,
        template_name=template,
        sub_menu_template_name=sub_menu_template,
        sub_menu_template_names=split_if_string(sub_menu_templates),
        show_section_root=show_section_root,
        **kwargs
    )
Render a section menu for the current section.
def export_yaml(obj, file_name):
    def callback(data):
        stream = StringIO()
        yaml = YAML()
        yaml.dump(data, stream)
        return stream.getvalue()

    try:
        from ruamel.yaml import YAML
    except ImportError:
        raise exch.GeomdlException("Please install package to use YAML format: pip install ruamel.yaml")

    exported_data = exch.export_dict_str(obj=obj, callback=callback)
    return exch.write_file(file_name, exported_data)
Exports curves and surfaces in YAML format. .. note:: Requires `ruamel.yaml <https://pypi.org/project/ruamel.yaml/>`_ package. YAML format is also used by the `geomdl command-line application <https://github.com/orbingol/geomdl-cli>`_ as a way to input shape data from the command line. :param obj: input geometry :type obj: abstract.SplineGeometry, multi.AbstractContainer :param file_name: name of the output file :type file_name: str :raises GeomdlException: an error occurred writing the file
def p_single_statement_systemcall(self, p):
    p[0] = SingleStatement(p[1], lineno=p.lineno(1))
    p.set_lineno(0, p.lineno(1))
single_statement : systemcall SEMICOLON
def _set_vlan_add(self, v, load=False):
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # NOTE: several string literals (namespace, extensions, error
        # strings) were lost in extraction; the values below follow the
        # usual pyangbind-generated pattern and are assumptions.
        t = YANGDynClass(v, base=vlan_add.vlan_add, is_container='container',
                         presence=False, yang_name="vlan-add", rest_name="",
                         parent=self, path_helper=self._path_helper,
                         extmethods=self._extmethods, register_paths=True,
                         extensions=None,
                         namespace='urn:brocade.com:mgmt:brocade-bgp',
                         defining_module='brocade-bgp', yang_type='container',
                         is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': 'vlan_add must be of a type compatible with container',
            'defined-type': "container",
            'generated-type': 'YANGDynClass(base=vlan_add.vlan_add, ...)',
        })
    self.__vlan_add = t
    if hasattr(self, '_set'):
        self._set()
Setter method for vlan_add, mapped from YANG variable /routing_system/evpn_config/evpn/evpn_instance/vlan/vlan_add (container) If this variable is read-only (config: false) in the source YANG file, then _set_vlan_add is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_vlan_add() directly.
def collect_results(project, force=False):
    if not project.crawlable:
        return project
    now = datetime.datetime.now()
    if (now - project.updated_at).total_seconds() < 4 and (not force):
        return project
    result_paths = []
    if os.path.isdir(project.path_name):
        result_paths.extend(_list_result_paths(project.path_name))
    registered_results = db.session.query(Result.path_name).filter_by(
        project_id=project.id
    ).all()
    registered_paths = {r.path_name for r in registered_results}
    for result_path in result_paths:
        if result_path not in registered_paths:
            _register_result(project.id, result_path)
    project.updated_at = datetime.datetime.now()
    db.session.commit()
    return project
Collect new results for a crawlable project and register them in the database.
def ref(function, callback=None):
    try:
        function.__func__
    except AttributeError:
        return _WeakMethodFree(function, callback)
    return _WeakMethodBound(function, callback)
Returns a weak reference to the given method or function. If the callback argument is not None, it is called as soon as the referenced function is garbage collected. :type function: callable :param function: The function to reference. :type callback: callable :param callback: Called when the function dies.
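The bound/unbound split exists because a plain weak reference to a bound method dies immediately (the method object is created on each attribute access). The standard library's weakref.WeakMethod solves the same problem; a self-contained sketch of the idea (not the _WeakMethod* classes above):

import weakref

class Greeter:
    def hello(self):
        return 'hello'

g = Greeter()
wm = weakref.WeakMethod(g.hello)  # survives even though g.hello is a temporary object
print(wm()())  # -> 'hello'
del g          # on CPython the instance is collected immediately
print(wm())    # -> None once the instance is gone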
def predict(self, data, output_margin=False, ntree_limit=0, pred_leaf=False):
    option_mask = 0x00
    if output_margin:
        option_mask |= 0x01
    if pred_leaf:
        option_mask |= 0x02
    self._validate_features(data)
    length = ctypes.c_ulong()
    preds = ctypes.POINTER(ctypes.c_float)()
    _check_call(_LIB.XGBoosterPredict(self.handle, data.handle,
                                      option_mask, ntree_limit,
                                      ctypes.byref(length),
                                      ctypes.byref(preds)))
    preds = ctypes2numpy(preds, length.value, np.float32)
    if pred_leaf:
        preds = preds.astype(np.int32)
    nrow = data.num_row()
    if preds.size != nrow and preds.size % nrow == 0:
        # integer division keeps the reshape target an int on Python 3
        preds = preds.reshape(nrow, preds.size // nrow)
    return preds
Predict with data.

NOTE: This function is not thread safe. For each booster object, predict
can only be called from one thread. If you want to run prediction using
multiple threads, call bst.copy() to make copies of the model object and
then call predict.

Parameters
----------
data : DMatrix
    The dmatrix storing the input.
output_margin : bool
    Whether to output the raw untransformed margin value.
ntree_limit : int
    Limit number of trees in the prediction; defaults to 0 (use all trees).
pred_leaf : bool
    When this option is on, the output will be a matrix of (nsample, ntrees)
    with each record indicating the predicted leaf index of each sample in
    each tree. Note that the leaf index of a tree is unique per tree, so you
    may find leaf 1 in both tree 1 and tree 0.

Returns
-------
prediction : numpy array
def index(self):
    for i, hashtable in enumerate(self.hashtables):
        self.sorted_hashtables[i] = [H for H in hashtable.keys()]
        self.sorted_hashtables[i].sort()
Index all the keys added so far and make them searchable.
def _get_task_from_task_dir(self, job_id, user_id, task_id, task_attempt):
    task_dir = self._task_directory(job_id, task_id, task_attempt)
    job_descriptor = self._read_task_metadata(task_dir)
    if not job_descriptor:
        return None
    # NOTE: the metadata-key and file-name literals below were lost in
    # extraction; 'user-id', 'task.pid', and 'script-name' are assumptions
    # based on the surrounding code.
    if not job_descriptor.job_metadata.get('user-id'):
        job_descriptor.job_metadata['user-id'] = user_id
    pid = -1
    try:
        with open(os.path.join(task_dir, 'task.pid'), 'r') as f:
            pid = int(f.readline().strip())
    except (IOError, OSError):
        pass
    script = None
    script_name = job_descriptor.job_metadata.get('script-name')
    if script_name:
        script = self._read_script(task_dir, script_name)
    end_time = self._get_end_time_from_task_dir(task_dir)
    last_update = self._get_last_update_time_from_task_dir(task_dir)
    events = self._get_events_from_task_dir(task_dir)
    status = self._get_status_from_task_dir(task_dir)
    log_detail = self._get_log_detail_from_task_dir(task_dir)
    if not status:
        status = log_detail = []
    return LocalTask(
        task_status=status,
        events=events,
        log_detail=log_detail,
        job_descriptor=job_descriptor,
        end_time=end_time,
        last_update=last_update,
        pid=pid,
        script=script)
Return a Task object with this task's info.
def buses_of_vlvl(network, voltage_level):
    mask = network.buses.v_nom.isin(voltage_level)
    df = network.buses[mask]
    return df.index
Get bus-ids of given voltage level(s).

Parameters
----------
network : :class:`pypsa.Network`
    Overall container of PyPSA
voltage_level : list
    Voltage level(s) used to select buses.

Returns
-------
pandas.Index
    Index containing the bus-ids.
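The selection itself is ordinary pandas; a self-contained sketch with a stand-in buses table (no PyPSA network needed, column name v_nom as above):

import pandas as pd

buses = pd.DataFrame({'v_nom': [380.0, 220.0, 110.0, 380.0]},
                     index=['bus0', 'bus1', 'bus2', 'bus3'])
mask = buses.v_nom.isin([220.0, 380.0])
print(buses[mask].index.tolist())  # -> ['bus0', 'bus1', 'bus3']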
def adjust_brightness_contrast(image, brightness=0., contrast=0.):
    beta = 0
    return cv2.addWeighted(image, 1 + float(contrast) / 100., image, beta,
                           float(brightness))
Adjust the brightness and/or contrast of an image :param image: OpenCV BGR image :param contrast: Float, contrast adjustment with 0 meaning no change :param brightness: Float, brightness adjustment with 0 meaning no change
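With beta fixed to 0, cv2.addWeighted(image, 1 + contrast/100, image, 0, brightness) is the pixelwise map out = image * (1 + contrast/100) + brightness; a NumPy-only sketch of the arithmetic (OpenCV would additionally saturate to the image dtype):

import numpy as np

image = np.array([[100.0, 200.0]])
contrast, brightness = 50.0, 10.0
out = image * (1 + contrast / 100.0) + brightness
print(out)  # -> [[160. 310.]]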
def get_package_status(owner, repo, identifier):
    client = get_packages_api()
    with catch_raise_api_exception():
        data, _, headers = client.packages_status_with_http_info(
            owner=owner, repo=repo, identifier=identifier
        )
    ratelimits.maybe_rate_limit(client, headers)
    return (
        data.is_sync_completed,
        data.is_sync_failed,
        data.sync_progress,
        data.status_str,
        data.stage_str,
        data.status_reason,
    )
Get the status for a package in a repository.
def __read_and_render_yaml_file(source, template, saltenv):
    # NOTE: the elided literals (the cp.cache_file call, file mode, result
    # keys and error messages) are reconstructed from the usual Salt pattern.
    sfn = __salt__['cp.cache_file'](source, saltenv)
    if not sfn:
        raise CommandExecutionError(
            'Source file \'{0}\' not found'.format(source))
    with salt.utils.files.fopen(sfn, 'r') as src:
        contents = src.read()
        if template:
            if template in salt.utils.templates.TEMPLATE_REGISTRY:
                data = salt.utils.templates.TEMPLATE_REGISTRY[template](
                    contents,
                    from_str=True,
                    to_str=True,
                    saltenv=saltenv,
                    grains=__grains__,
                    pillar=__pillar__,
                    salt=__salt__,
                    opts=__opts__)
                if not data['result']:
                    raise CommandExecutionError(
                        'Failed to render file: {0}'.format(data['data'])
                    )
                contents = data['data'].encode('utf-8')
            else:
                raise CommandExecutionError(
                    'Unknown template specified: {0}'.format(template))
        return salt.utils.yaml.safe_load(contents)
Read a YAML file and, if needed, render it using the specified templating engine. Returns the Python objects defined inside the file.
def _get_valid_endpoint(resp, name, entry_type):
    # NOTE: the dictionary keys were lost in extraction; the names below
    # follow the Keystone v3 service-catalog schema.
    catalog = resp.get('token', {}).get('catalog', [])
    for entry in catalog:
        if (
            entry.get('name')
            and entry.get('type')
            and entry.get('name') == name
            and entry.get('type') == entry_type
        ):
            valid_endpoints = {}
            for ep in entry.get('endpoints'):
                interface = ep.get('interface', '')
                if interface in ['public', 'internal']:
                    valid_endpoints[interface] = ep.get('url')
            if valid_endpoints:
                return valid_endpoints.get('public', valid_endpoints.get('internal'))
    return None
Parse the service catalog returned by the Identity API for an endpoint matching the Nova service with the requested version Sends a CRITICAL service check when no viable candidates are found in the Catalog
def signal_alias_exists(alias: str) -> bool:
    if SignalDispatcher.signals.get(alias):
        return True
    return False
Checks if a signal alias exists. :param alias: Signal alias. :return: True if the alias exists, False otherwise.
def hdfpath_to_nifti1image(file_path, h5path):
    with h5py.File(file_path, 'r') as f:
        return hdfgroup_to_nifti1image(f[h5path])
Returns a nibabel Nifti1Image from a HDF5 group of datasets.

Parameters
----------
file_path : string
    HDF5 file path
h5path : string
    HDF5 group path within file_path

Returns
-------
nibabel Nifti1Image
def main(inputstructs, inputpdbids):
    pdbid, pdbpath = None, None
    title = "* Protein-Ligand Interaction Profiler v%s *" % __version__
    # NOTE: the banner strings and several literals below were lost in
    # extraction and are reconstructed assumptions.
    write_message('\n' + '*' * len(title) + '\n')
    write_message(title)
    write_message('\n' + '*' * len(title) + '\n\n')
    outputprefix = config.OUTPUTFILENAME
    if inputstructs is not None:
        num_structures = len(inputstructs)
        inputstructs = remove_duplicates(inputstructs)
        read_from_stdin = False
        for inputstruct in inputstructs:
            if inputstruct == '-':
                inputstruct = sys.stdin.read()
                read_from_stdin = True
                if config.RAWSTRING:
                    if sys.version_info < (3,):
                        inputstruct = bytes(inputstruct).decode('unicode_escape')
                    else:
                        inputstruct = bytes(inputstruct, 'utf8').decode('unicode_escape')
            else:
                if os.path.getsize(inputstruct) == 0:
                    sysexit(2, 'Empty PDB file')
                if num_structures > 1:
                    basename = inputstruct.split('.')[-2].split('/')[-1]
                    config.OUTPATH = '/'.join([config.BASEPATH, basename])
            outputprefix = process_pdb(inputstruct, config.OUTPATH,
                                       as_string=read_from_stdin,
                                       outputprefix=outputprefix)
    else:
        num_pdbids = len(inputpdbids)
        inputpdbids = remove_duplicates(inputpdbids)
        for inputpdbid in inputpdbids:
            pdbpath, pdbid = download_structure(inputpdbid)
            if num_pdbids > 1:
                config.OUTPATH = '/'.join([config.BASEPATH, pdbid[1:3].upper(), pdbid.upper()])
            outputprefix = process_pdb(pdbpath, config.OUTPATH, outputprefix=outputprefix)
    if (pdbid is not None or inputstructs is not None) and config.BASEPATH is not None:
        if config.BASEPATH in ['.', './']:
            write_message('\nFinished analysis. Result files are in the working directory.\n\n')
        else:
            write_message('\nFinished analysis. Result files are in %s\n\n' % config.BASEPATH)
Main function. Calls functions for processing, report generation and visualization.
def get_jenkins_job_urls(
        rosdistro_name, jenkins_url, release_build_name, targets):
    urls = {}
    for target in targets:
        view_name = get_release_view_name(
            rosdistro_name, release_build_name,
            target.os_name, target.os_code_name, target.arch)
        # NOTE: the URL format strings were lost in extraction; the patterns
        # below are reconstructed assumptions.
        base_url = jenkins_url + '/view/%s/job/%s__{pkg}__' % \
            (view_name, view_name)
        if target.arch == 'source':
            urls[target] = base_url + '%s_%s__source' % \
                (target.os_name, target.os_code_name)
        else:
            urls[target] = base_url + '%s_%s_%s__binary' % \
                (target.os_name, target.os_code_name, target.arch)
    return urls
Get the Jenkins job urls for each target. The placeholder {pkg} needs to be replaced with the ROS package name. :return: a dict indexed by targets containing a string
def count_id(w0):
    def f(w1):
        count = [set(w0.root).intersection(w1.root),
                 set(w0.flexing).intersection(w1.flexing),
                 set(w0.root).intersection(w1.flexing) | set(w1.root).intersection(w0.flexing)]
        if any(count):
            return max((1, 2, 3), key=lambda i: len(count[i - 1]))
        else:
            return 0
    return f
0 -> no terms idd 1 -> most term idd are shared in root morphem 2 -> most term idd are shared in flexing morphem 3 -> most term idd are shared root <-> flexing (crossed) :param w0: :param w1: :return:
def voicing_measures(ref_voicing, est_voicing):
    validate_voicing(ref_voicing, est_voicing)
    ref_voicing = ref_voicing.astype(bool)
    est_voicing = est_voicing.astype(bool)
    if ref_voicing.size == 0 or est_voicing.size == 0:
        return 0.
    TP = (ref_voicing * est_voicing).sum()
    FP = ((ref_voicing == 0) * est_voicing).sum()
    FN = (ref_voicing * (est_voicing == 0)).sum()
    TN = ((ref_voicing == 0) * (est_voicing == 0)).sum()
    if TP + FN == 0:
        vx_recall = 0.
    else:
        vx_recall = TP / float(TP + FN)
    if FP + TN == 0:
        vx_false_alm = 0.
    else:
        vx_false_alm = FP / float(FP + TN)
    return vx_recall, vx_false_alm
Compute the voicing recall and false alarm rates given two voicing
indicator sequences, one as reference (truth) and the other as the
estimate (prediction). The sequences must be of the same length.

Examples
--------
>>> ref_time, ref_freq = mir_eval.io.load_time_series('ref.txt')
>>> est_time, est_freq = mir_eval.io.load_time_series('est.txt')
>>> (ref_v, ref_c,
...  est_v, est_c) = mir_eval.melody.to_cent_voicing(ref_time,
...                                                  ref_freq,
...                                                  est_time,
...                                                  est_freq)
>>> recall, false_alarm = mir_eval.melody.voicing_measures(ref_v,
...                                                        est_v)

Parameters
----------
ref_voicing : np.ndarray
    Reference boolean voicing array
est_voicing : np.ndarray
    Estimated boolean voicing array

Returns
-------
vx_recall : float
    Voicing recall rate, the fraction of voiced frames in ref
    indicated as voiced in est
vx_false_alarm : float
    Voicing false alarm rate, the fraction of unvoiced frames in ref
    indicated as voiced in est
def find_or_create_by_name(self, item_name, items_list, item_type):
    item = self.find_by_name(item_name, items_list)
    if not item:
        item = self.data_lists[item_type][2](item_name, None)
    return item
See if item with item_name exists in item_list. If not, create that item. Either way, return an item of type item_type.
def _readintbe(self, length, start):
    if length % 8:
        raise InterpretError("Big-endian integers must be whole-byte. "
                             "Length = {0} bits.", length)
    return self._readint(length, start)
Read bits and interpret as a big-endian signed int.
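Outside the bitstring internals, the same big-endian signed interpretation is available from the standard library; a sketch:

raw = bytes([0xFF, 0xFE])
print(int.from_bytes(raw, byteorder='big', signed=True))   # -> -2
print(int.from_bytes(raw, byteorder='big', signed=False))  # -> 65534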
def create_nio(self, node, nio_settings):
    nio = None
    if nio_settings["type"] == "nio_udp":
        lport = nio_settings["lport"]
        rhost = nio_settings["rhost"]
        rport = nio_settings["rport"]
        try:
            info = socket.getaddrinfo(rhost, rport, socket.AF_UNSPEC,
                                      socket.SOCK_DGRAM, 0, socket.AI_PASSIVE)
            if not info:
                raise DynamipsError("getaddrinfo returns an empty list on {}:{}".format(rhost, rport))
            for res in info:
                af, socktype, proto, _, sa = res
                with socket.socket(af, socktype, proto) as sock:
                    sock.connect(sa)
        except OSError as e:
            raise DynamipsError("Could not create an UDP connection to {}:{}: {}".format(rhost, rport, e))
        nio = NIOUDP(node, lport, rhost, rport, nio_settings.get("filters", {}))
    elif nio_settings["type"] == "nio_generic_ethernet":
        ethernet_device = nio_settings["ethernet_device"]
        if sys.platform.startswith("win"):
            windows_interfaces = interfaces()
            npf_interface = None
            for interface in windows_interfaces:
                if interface["name"] == ethernet_device:
                    npf_interface = interface["id"]
            if not npf_interface:
                raise DynamipsError("Could not find interface {} on this host".format(ethernet_device))
            else:
                ethernet_device = npf_interface
        if not is_interface_up(ethernet_device):
            raise aiohttp.web.HTTPConflict(text="Ethernet interface {} is down".format(ethernet_device))
        nio = NIOGenericEthernet(node.hypervisor, ethernet_device)
    elif nio_settings["type"] == "nio_linux_ethernet":
        if sys.platform.startswith("win"):
            raise DynamipsError("This NIO type is not supported on Windows")
        ethernet_device = nio_settings["ethernet_device"]
        nio = NIOLinuxEthernet(node.hypervisor, ethernet_device)
    elif nio_settings["type"] == "nio_tap":
        tap_device = nio_settings["tap_device"]
        nio = NIOTAP(node.hypervisor, tap_device)
        if not is_interface_up(tap_device):
            raise aiohttp.web.HTTPConflict(text="TAP interface {} is down".format(tap_device))
    elif nio_settings["type"] == "nio_unix":
        local_file = nio_settings["local_file"]
        remote_file = nio_settings["remote_file"]
        nio = NIOUNIX(node.hypervisor, local_file, remote_file)
    elif nio_settings["type"] == "nio_vde":
        control_file = nio_settings["control_file"]
        local_file = nio_settings["local_file"]
        nio = NIOVDE(node.hypervisor, control_file, local_file)
    elif nio_settings["type"] == "nio_null":
        nio = NIONull(node.hypervisor)
    else:
        raise aiohttp.web.HTTPConflict(text="NIO of type {} is not supported".format(nio_settings["type"]))
    yield from nio.create()
    return nio
Creates a new NIO. :param node: Dynamips node instance :param nio_settings: information to create the NIO :returns: a NIO object
def from_content(cls, content):
    parsed_content = parse_tibiacom_content(content)
    # NOTE: the tag-name arguments were lost in extraction; 'td' and 'img'
    # are assumed from the table layout being parsed.
    image_column, desc_column, *_ = parsed_content.find_all('td')
    if "Error" in image_column.text:
        return None
    image = image_column.find('img')
    for br in desc_column.find_all("br"):
        br.replace_with("\n")
    description = desc_column.text.replace("\u00a0", " ").replace("\n\n", "\n")
    lines = description.splitlines()
    try:
        name, beds, info, state, *_ = lines
    except ValueError:
        raise InvalidContent("content is not from the house section of Tibia.com")
    house = cls(name.strip())
    house.image_url = image["src"]
    house.id = int(id_regex.search(house.image_url).group(1))
    m = bed_regex.search(beds)
    if m:
        house.type = HouseType.GUILDHALL if m.group("type") in ["guildhall", "clanhall"] else HouseType.HOUSE
        beds_word = m.group("beds")
        if beds_word == "no":
            house.beds = 0
        else:
            house.beds = parse_number_words(beds_word)
    m = info_regex.search(info)
    if m:
        house.world = m.group("world")
        house.rent = int(m.group("rent"))
        house.size = int(m.group("size"))
    house._parse_status(state)
    return house
Parses a Tibia.com response into a House object.

Parameters
----------
content: :class:`str`
    HTML content of the page.

Returns
-------
:class:`House`
    The house contained in the page, or None if the house doesn't exist.

Raises
------
InvalidContent
    If the content is not the house section on Tibia.com
def path(self, target, args, kw):
    if type(target) in string_types:
        if ':' in target:
            prefix, rest = target.split(':', 1)
            route = self.named_routes[prefix]
            prefix_params = route._pop_params(args, kw)
            prefix_path = route.path([], prefix_params)
            next_mapper = route.resource
            return prefix_path + next_mapper.path(rest, args, kw)
        else:
            return self.named_routes[target].path(args, kw)
    elif isinstance(target, Route):
        for route in self.routes:
            if route is target:
                return route.path(args, kw)
        raise InvalidArgumentError(
            "Route %r not found in this %s object." % (target, self.__class__.__name__))
    else:
        target_id = id(target)
        if target_id in self._lookup:
            return self._lookup[target_id].path(args, kw)
        raise InvalidArgumentError(
            "No Route found for target %r in this %s object." % (target, self.__class__.__name__))
Build a URL path fragment for a resource or route.

Possible values for `target`:

A string that does not start with a '.' and does not contain ':'
    Looks up the route of the same name on this mapper and returns its path.
A string of the form 'a:b', 'a:b:c', etc.
    Follows the route to nested mappers by splitting off consecutive
    segments. Returns the path of the route found by looking up the final
    segment on the last mapper.
A `Route` object
    Returns the path for the route.
A resource that was added previously
    Looks up the first route that points to this resource and returns its path.
def __we_c(cls, calib, tc, temp, we_v):
    offset_v = calib.pid_elc_mv / 1000.0
    response_v = we_v - offset_v
    response_c = tc.correct(temp, response_v)
    if response_c is None:
        return None
    we_c = response_c + offset_v
    return we_c
Compute weC from sensor temperature compensation of weV
def middle(self):
    return (self.min_x + self.max_x) / 2, (self.min_y + self.max_y) / 2
Returns the middle point of the bounding box :return: middle point :rtype: (float, float)
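A minimal sketch with a hypothetical BBox class providing the four attributes the method expects:

class BBox:
    def __init__(self, min_x, min_y, max_x, max_y):
        self.min_x, self.min_y, self.max_x, self.max_y = min_x, min_y, max_x, max_y

    def middle(self):
        return (self.min_x + self.max_x) / 2, (self.min_y + self.max_y) / 2

print(BBox(0.0, 0.0, 4.0, 2.0).middle())  # -> (2.0, 1.0)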
def add_ip(self, oid, value, label=None):
    # NOTE: the type-name literal was lost in extraction; 'IPADDRESS' is
    # assumed from the method's purpose.
    self.add_oid_entry(oid, 'IPADDRESS', value, label=label)
Short helper to add an IP address value to the MIB subtree.
def __validate_and_fix_spark_args(spark_args):
    # NOTE: the regex and the error-message literal were lost in extraction;
    # the versions below are reconstructed assumptions.
    pattern = re.compile(r'[\w\-\.]+=.+')
    fixed_args = []
    for arg in spark_args:
        if arg not in SPARK_SUBMIT_FLAGS:
            if not pattern.match(arg):
                raise SystemExit(
                    'Spark argument `%s` is not valid. Expected `key=value` '
                    'pairs or one of the flags: %s.'
                    % (arg, str(SPARK_SUBMIT_FLAGS)))
            eq_pos = arg.find('=')
            fixed_args.append('--' + arg[:eq_pos])
            fixed_args.append(arg[eq_pos + 1:])
        else:
            fixed_args.append('--' + arg)
    return fixed_args
Prepares spark arguments. In the command-line script, they are passed as
for example `-s master=local[4] deploy-mode=client verbose`, which would
be passed to spark-submit as `--master local[4] --deploy-mode client
--verbose`

Parameters
----------
spark_args (List): List of spark arguments

Returns
-------
fixed_args (List): List of fixed and validated spark arguments
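A sketch of the transformation described above, with the validation stripped out and a hypothetical one-flag SPARK_SUBMIT_FLAGS list:

SPARK_SUBMIT_FLAGS = ['verbose']  # assumption: the real module defines the full list

def fix_spark_args(spark_args):  # same rewriting as __validate_and_fix_spark_args
    fixed = []
    for arg in spark_args:
        if arg in SPARK_SUBMIT_FLAGS:
            fixed.append('--' + arg)            # bare flag -> --flag
        else:
            key, _, value = arg.partition('=')  # key=value -> --key value
            fixed.extend(['--' + key, value])
    return fixed

print(fix_spark_args(['master=local[4]', 'deploy-mode=client', 'verbose']))
# -> ['--master', 'local[4]', '--deploy-mode', 'client', '--verbose']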
def match_agent_id(self, agent_id, match):
    # NOTE: the match-key literal was lost in extraction; 'agentId' is
    # assumed from the OSID naming pattern.
    self._add_match('agentId', str(agent_id), bool(match))
Matches the agent identified by the given ``Id``. arg: agent_id (osid.id.Id): the Id of the ``Agent`` arg: match (boolean): ``true`` if a positive match, ``false`` for a negative match raise: NullArgument - ``agent_id`` is ``null`` *compliance: mandatory -- This method must be implemented.*
def checktype_seq(self, seq, kind, *, unique=False, **kargs):
    exp = self.str_kind(kind)
    # NOTE: the body below is a minimal reconstruction of logic that was
    # truncated in the source; self.checktype() is assumed to exist and
    # raise TypeError for non-matching items.
    if isinstance(seq, str):
        raise TypeError('expected sequence of {}; got string'.format(exp))
    try:
        iterator = iter(seq)
    except TypeError:
        raise TypeError('expected sequence of {}; got non-sequence'.format(exp))
    seen = []
    for item in iterator:
        self.checktype(item, kind, **kargs)
        if unique and item in seen:
            raise TypeError('expected unique elements of {}; got duplicate {!r}'.format(exp, item))
        seen.append(item)
Raise TypeError if seq is not a sequence of elements satisfying kind. Optionally require elements to be unique. As a special case, a string is considered to be an atomic value rather than a sequence of single-character strings. (Thus, checktype_seq('foo', str) will fail.)
def transform_data(from_client, from_project, from_logstore, from_time,
                   to_time=None, to_client=None, to_project=None,
                   to_logstore=None, shard_list=None, config=None,
                   batch_size=None, compress=None, cg_name=None, c_name=None,
                   cg_heartbeat_interval=None, cg_data_fetch_interval=None,
                   cg_in_order=None, cg_worker_pool_size=None):
    if not config:
        logger.info("transform_data: config is not configured, use copy data by default.")
        return copy_data(from_client, from_project, from_logstore, from_time,
                         to_time=to_time, to_client=to_client,
                         to_project=to_project, to_logstore=to_logstore,
                         shard_list=shard_list, batch_size=batch_size,
                         compress=compress)
    to_client = to_client or from_client
    from_client.timeout = max(from_client.timeout, 120)
    to_client.timeout = max(to_client.timeout, 120)
    to_project = to_project or from_project
    to_logstore = to_logstore or from_logstore
    if not cg_name:
        to_time = to_time or "end"
        cpu_count = multiprocessing.cpu_count() * 2
        shards = from_client.list_shards(from_project, from_logstore).get_shards_info()
        # NOTE: the shard-id key was lost in extraction; 'shardID' is the
        # usual Aliyun Log SDK field name.
        current_shards = [str(shard['shardID']) for shard in shards]
        target_shards = _parse_shard_list(shard_list, current_shards)
        worker_size = min(cpu_count, len(target_shards))
        result = dict()
        total_count = 0
        total_removed = 0
        with ProcessPoolExecutor(max_workers=worker_size) as pool:
            futures = [pool.submit(transform_worker, from_client, from_project,
                                   from_logstore, shard, from_time, to_time,
                                   config, to_client, to_project, to_logstore,
                                   batch_size=batch_size, compress=compress)
                       for shard in target_shards]
            for future in as_completed(futures):
                if future.exception():
                    logger.error("get error when transforming data: {0}".format(future.exception()))
                else:
                    partition, count, removed, processed, failed = future.result()
                    total_count += count
                    total_removed += removed
                    if count:
                        result[partition] = {"total_count": count,
                                             "transformed": processed,
                                             "removed": removed,
                                             "failed": failed}
        return LogResponse({}, {"total_count": total_count, "shards": result})
    else:
        c_name = c_name or "transform_data_{0}".format(multiprocessing.current_process().pid)
        cg_heartbeat_interval = cg_heartbeat_interval or 20
        cg_data_fetch_interval = cg_data_fetch_interval or 2
        cg_in_order = False if cg_in_order is None else cg_in_order
        cg_worker_pool_size = cg_worker_pool_size or 3
        option = LogHubConfig(from_client._endpoint, from_client._accessKeyId,
                              from_client._accessKey, from_project,
                              from_logstore, cg_name, c_name,
                              cursor_position=CursorPosition.SPECIAL_TIMER_CURSOR,
                              cursor_start_time=from_time,
                              cursor_end_time=to_time,
                              heartbeat_interval=cg_heartbeat_interval,
                              data_fetch_interval=cg_data_fetch_interval,
                              in_order=cg_in_order,
                              worker_pool_size=cg_worker_pool_size)
        TransformDataConsumer.set_transform_options(config, to_client, to_project, to_logstore)
        result = {"total_count": 0, "shards": {}}
        l = RLock()

        def status_updator(shard_id, count=0, removed=0, processed=0, failed=0):
            logger.info("status update is called, shard: {0}, count: {1}, removed: {2}, "
                        "processed: {3}, failed: {4}".format(shard_id, count, removed,
                                                             processed, failed))
            with l:
                result["total_count"] += count
                if shard_id in result["shards"]:
                    data = result["shards"][shard_id]
                    result["shards"][shard_id] = {"total_count": data["total_count"] + count,
                                                  "transformed": data["transformed"] + processed,
                                                  "removed": data["removed"] + removed,
                                                  "failed": data["failed"] + failed}
                else:
                    result["shards"][shard_id] = {"total_count": count,
                                                  "transformed": processed,
                                                  "removed": removed,
                                                  "failed": failed}

        worker = ConsumerWorker(TransformDataConsumer, consumer_option=option,
                                args=(status_updator, ))
        worker.start()
        try:
            while worker.is_alive():
                worker.join(timeout=60)
            logger.info("transform_data: worker exit unexpected, try to shutdown it")
            worker.shutdown()
        except KeyboardInterrupt:
            logger.info("transform_data: *** try to exit **** ")
            print("try to stop transforming data.")
            worker.shutdown()
            worker.join(timeout=120)
        return LogResponse({}, result)
Transform data from one logstore to another (which can be the same logstore, or one in a different region); the time range refers to the logs' server-side receive time.
def find_video_by_id(self, video_id):
    # NOTE: the endpoint URL and parameter keys were lost in extraction;
    # the values below follow the Youku open API and are assumptions.
    url = 'https://openapi.youku.com/v2/videos/show.json'
    params = {
        'client_id': self.client_id,
        'video_id': video_id
    }
    r = requests.get(url, params=params)
    check_error(r)
    return r.json()
doc: http://open.youku.com/docs/doc?id=44
def backward_committor(T, A, B):
    X = set(range(T.shape[0]))
    A = set(A)
    B = set(B)
    AB = A.intersection(B)
    notAB = X.difference(A).difference(B)
    if len(AB) > 0:
        raise ValueError("Sets A and B have to be disjoint")
    pi = stationary_distribution(T)
    L = T - eye(T.shape[0], T.shape[0])
    D = diags([pi], [0])
    K = (D.dot(L)).T
    W = 1.0 * K
    W = W.todok()
    W[list(A), :] = 0.0
    W.tocsr()
    W = W + coo_matrix((np.ones(len(A)), (list(A), list(A))), shape=W.shape).tocsr()
    W = W.todok()
    W[list(B), :] = 0.0
    W.tocsr()
    W = W + coo_matrix((np.ones(len(B)), (list(B), list(B))), shape=W.shape).tocsr()
    r = np.zeros(T.shape[0])
    r[list(A)] = 1.0
    u = spsolve(W, r)
    return u
r"""Backward committor between given sets. The backward committor u(x) between sets A and B is the probability for the chain starting in x to have come from A last rather than from B. Parameters ---------- T : (M, M) ndarray Transition matrix A : array_like List of integer state labels for set A B : array_like List of integer state labels for set B Returns ------- u : (M, ) ndarray Vector of forward committor probabilities Notes ----- The forward committor is a solution to the following boundary-value problem .. math:: \sum_j K_{ij} \pi_{j} u_{j}=0 for i in X\(A u B) (I) u_{i}=1 for i \in A (II) u_{i}=0 for i \in B (III) with adjoint of the generator matrix K=(D_pi(P-I))'.
def get_raw_access_token(self, request_token, request_token_secret,
                         method='GET', **kwargs):
    # ensure we've set the access_token_url
    if self.access_token_url is None:
        raise TypeError('access_token_url must not be None')

    session = self.get_session((request_token, request_token_secret))

    self.access_token_response = session.request(method,
                                                 self.access_token_url,
                                                 **kwargs)
    return self.access_token_response
Returns a Requests' response over the
:attr:`rauth.OAuth1Service.access_token_url`. Use this if you need the
full `Response` object.

:param request_token: The request token as returned by
    :meth:`get_request_token`.
:type request_token: str
:param request_token_secret: The request token secret as returned by
    :meth:`get_request_token`.
:type request_token_secret: str
:param method: A string representation of the HTTP method to be used,
    defaults to `GET`.
:type method: str
:param \*\*kwargs: Optional arguments. Same as Requests.
:type \*\*kwargs: dict
def places_nearby(client, location=None, radius=None, keyword=None,
                  language=None, min_price=None, max_price=None, name=None,
                  open_now=False, rank_by=None, type=None, page_token=None):
    if not location and not page_token:
        raise ValueError("either a location or page_token arg is required")
    if rank_by == "distance":
        if not (keyword or name or type):
            raise ValueError("either a keyword, name, or type arg is required "
                             "when rank_by is set to distance")
        elif radius is not None:
            raise ValueError("radius cannot be specified when rank_by is set to "
                             "distance")
    return _places(client, "nearby", location=location, radius=radius,
                   keyword=keyword, language=language, min_price=min_price,
                   max_price=max_price, name=name, open_now=open_now,
                   rank_by=rank_by, type=type, page_token=page_token)
Performs nearby search for places.

:param location: The latitude/longitude value for which you wish to
    obtain the closest, human-readable address.
:type location: string, dict, list, or tuple

:param radius: Distance in meters within which to bias results.
:type radius: int

:param region: The region code, optional parameter.
    See more @ https://developers.google.com/places/web-service/search
:type region: string

:param keyword: A term to be matched against all content that Google has
    indexed for this place.
:type keyword: string

:param language: The language in which to return results.
:type language: string

:param min_price: Restricts results to only those places with no less
    than this price level. Valid values are in the range from 0 (most
    affordable) to 4 (most expensive).
:type min_price: int

:param max_price: Restricts results to only those places with no greater
    than this price level. Valid values are in the range from 0 (most
    affordable) to 4 (most expensive).
:type max_price: int

:param name: One or more terms to be matched against the names of places.
:type name: string or list of strings

:param open_now: Return only those places that are open for business at
    the time the query is sent.
:type open_now: bool

:param rank_by: Specifies the order in which results are listed.
    Possible values are: prominence (default), distance
:type rank_by: string

:param type: Restricts the results to places matching the specified type.
    The full list of supported types is available here:
    https://developers.google.com/places/supported_types
:type type: string

:param page_token: Token from a previous search that when provided will
    returns the next page of results for the same search.
:type page_token: string

:rtype: result dict with the following keys:
    status: status code
    results: list of places
    html_attributions: set of attributions which must be displayed
    next_page_token: token for retrieving the next page of results
def upload(self, stop_at=None):
    self.stop_at = stop_at or self.file_size
    while self.offset < self.stop_at:
        self.upload_chunk()
    else:
        if self.log_func:
            self.log_func("maximum upload specified({} bytes) has been reached".format(self.stop_at))
Perform file upload. Performs continuous upload of chunks of the file. The size uploaded at each cycle is the value of the attribute 'chunk_size'. :Args: - stop_at (Optional[int]): Determines at what offset value the upload should stop. If not specified this defaults to the file size.
def writeDB(filename, catalog, meta=None):
    def sqlTypes(obj, names):
        types = []
        for n in names:
            val = getattr(obj, n)
            if isinstance(val, bool):
                types.append("BOOL")
            elif isinstance(val, (int, np.int64, np.int32)):
                types.append("INT")
            elif isinstance(val, (float, np.float64, np.float32)):
                types.append("FLOAT")
            elif isinstance(val, six.string_types):
                types.append("VARCHAR")
            else:
                log.warning("Column {0} is of unknown type {1}".format(n, type(n)))
                log.warning("Using VARCHAR")
                types.append("VARCHAR")
        return types

    if os.path.exists(filename):
        log.warning("overwriting {0}".format(filename))
        os.remove(filename)
    conn = sqlite3.connect(filename)
    db = conn.cursor()
    for t, tn in zip(classify_catalog(catalog), ["components", "islands", "simples"]):
        if len(t) < 1:
            continue
        # (the per-table creation/insert code appears to have been
        # truncated in the source)
    conn.close()
    log.info("Wrote file {0}".format(filename))
    return
Output an sqlite3 database containing one table for each source type.

Parameters
----------
filename : str
    Output filename
catalog : list
    List of sources of type :class:`AegeanTools.models.OutputSource`,
    :class:`AegeanTools.models.SimpleSource`, or
    :class:`AegeanTools.models.IslandSource`.
meta : dict
    Meta data to be written to table `meta`

Returns
-------
None
def refine_get_urls(original):
    def get_urls():
        from django.conf.urls import url
        from django.conf import settings
        from django.contrib.staticfiles.urls import staticfiles_urlpatterns
        from django.views.static import serve
        if settings.DEBUG:
            # NOTE: the URL pattern and dict key were lost in extraction;
            # the media-serving pattern below is the usual Django idiom.
            return staticfiles_urlpatterns() + [
                url(r'^media/(?P<path>.*)$', serve, {
                    'document_root': settings.MEDIA_ROOT,
                }),
            ] + original()
        else:
            return original()
    return get_urls
Serve static files (and media files also) while DEBUG is on. In production the webserver should serve requested static files itself and never let requests to /static/* and /media/* get to the django application.
def handle_new_tuple_set_2(self, hts2):
    if self.my_pplan_helper is None or self.my_instance is None:
        Log.error("Got tuple set when no instance assigned yet")
    else:
        hts = tuple_pb2.HeronTupleSet()
        # NOTE: the field name and log message were lost in extraction;
        # 'control' and the deserialization message are assumptions.
        if hts2.HasField('control'):
            hts.control.CopyFrom(hts2.control)
        else:
            hdts = tuple_pb2.HeronDataTupleSet()
            hdts.stream.CopyFrom(hts2.data.stream)
            try:
                for trunk in hts2.data.tuples:
                    added_tuple = hdts.tuples.add()
                    added_tuple.ParseFromString(trunk)
            except Exception:
                Log.exception("Fail to deserialize HeronDataTuple")
            hts.data.CopyFrom(hdts)
        self.in_stream.offer(hts)
        if self.my_pplan_helper.is_topology_running():
            self.my_instance.py_class.process_incoming_tuples()
Called when a new HeronTupleSet2 arrives. Converts (assembles) the HeronTupleSet2 (raw byte array) into a HeronTupleSet. See more at GitHub PR #1421. :param hts2: HeronTupleSet2 type
def set_inputhook(self, callback):
    ignore_CTRL_C()
    self._callback = callback
    self._callback_pyfunctype = self.PYFUNC(callback)
    pyos_inputhook_ptr = self.get_pyos_inputhook()
    original = self.get_pyos_inputhook_as_func()
    pyos_inputhook_ptr.value = \
        ctypes.cast(self._callback_pyfunctype, ctypes.c_void_p).value
    self._installed = True
    return original
Set PyOS_InputHook to callback and return the previous one.
def _load_builtins(self):
    pymux = self.pymux
    kb = KeyBindings()
    has_prefix = HasPrefix(pymux)
    waits_for_confirmation = WaitsForConfirmation(pymux)
    prompt_or_command_focus = has_focus(COMMAND) | has_focus(PROMPT)
    display_pane_numbers = Condition(lambda: pymux.display_pane_numbers)
    in_scroll_buffer_not_searching = InScrollBufferNotSearching(pymux)

    # NOTE: the key names passed to kb.add() below were lost in extraction;
    # 'escape', 'c-c', 'y'/'n', 'enter', 'q', ' ' and 'v' are reconstructed
    # assumptions based on the handler docstrings.

    @kb.add(Keys.Any, filter=has_prefix)
    def _(event):
        " Ignore unknown Ctrl-B prefixed key sequences. "
        pymux.get_client_state().has_prefix = False

    @kb.add('escape', filter=prompt_or_command_focus & ~has_prefix)
    @kb.add('c-c', filter=prompt_or_command_focus & ~has_prefix)
    def _(event):
        " Leave command mode. "
        pymux.leave_command_mode(append_to_history=False)

    @kb.add('y', filter=waits_for_confirmation)
    @kb.add('Y', filter=waits_for_confirmation)
    def _(event):
        client_state = pymux.get_client_state()
        command = client_state.confirm_command
        client_state.confirm_command = None
        client_state.confirm_text = None
        pymux.handle_command(command)

    @kb.add('n', filter=waits_for_confirmation)
    @kb.add('N', filter=waits_for_confirmation)
    @kb.add('c-c', filter=waits_for_confirmation)
    def _(event):
        client_state = pymux.get_client_state()
        client_state.confirm_command = None
        client_state.confirm_text = None

    @kb.add('c-c', filter=in_scroll_buffer_not_searching)
    @kb.add('enter', filter=in_scroll_buffer_not_searching)
    @kb.add('q', filter=in_scroll_buffer_not_searching)
    def _(event):
        " Exit scroll buffer. "
        pane = pymux.arrangement.get_active_pane()
        pane.exit_scroll_buffer()

    @kb.add(' ', filter=in_scroll_buffer_not_searching)
    def _(event):
        " Enter selection mode when pressing space in copy mode. "
        event.current_buffer.start_selection(selection_type=SelectionType.CHARACTERS)

    @kb.add('enter', filter=in_scroll_buffer_not_searching & has_selection)
    def _(event):
        " Copy selection when pressing Enter. "
        clipboard_data = event.current_buffer.copy_selection()
        event.app.clipboard.set_data(clipboard_data)

    @kb.add('v', filter=in_scroll_buffer_not_searching & has_selection)
    def _(event):
        " Toggle between selection types. "
        types = [SelectionType.LINES, SelectionType.BLOCK, SelectionType.CHARACTERS]
        selection_state = event.current_buffer.selection_state
        try:
            index = types.index(selection_state.type)
        except ValueError:
            index = 0
        selection_state.type = types[(index + 1) % len(types)]

    @Condition
    def popup_displayed():
        return self.pymux.get_client_state().display_popup

    @kb.add('q', filter=popup_displayed, eager=True)
    def _(event):
        " Quit pop-up dialog. "
        self.pymux.get_client_state().display_popup = False

    @kb.add(Keys.Any, eager=True, filter=display_pane_numbers)
    def _(event):
        " When the pane numbers are shown. Any key press should hide them. "
        pymux.display_pane_numbers = False

    @Condition
    def clock_displayed():
        pane = pymux.arrangement.get_active_pane()
        return pane.clock_mode

    @kb.add(Keys.Any, eager=True, filter=clock_displayed)
    def _(event):
        " When the clock is displayed. Any key press should hide it. "
        pane = pymux.arrangement.get_active_pane()
        pane.clock_mode = False

    return kb
Fill the Registry with the hard coded key bindings.
def normalize_list_of_dicts(value, default_key, default_value=UNDEFINED):
    if value is None:
        if default_value is UNDEFINED:
            return []
        value = default_value
    if isinstance(value, dict):
        return [value]
    if isinstance(value, text_type):
        return [{default_key: value}]
    if isinstance(value, list):
        if not all(isinstance(x, dict) for x in value):
            def _fix(x):
                return {default_key: x} if isinstance(x, text_type) else x
            return list(map(_fix, value))
    return value
Converts given value to a list of dictionaries as follows: * ``[{...}]`` → ``[{...}]`` * ``{...}`` → ``[{...}]`` * ``'xyz'`` → ``[{default_key: 'xyz'}]`` * ``None`` → ``[{default_key: default_value}]`` (if specified) * ``None`` → ``[]`` :param default_value: only Unicode, i.e. `str` in Python 3.x and **only** `unicode` in Python 2.x
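The conversions, spelled out as calls (assuming Python 3, where text_type is str, and the function above in scope):

print(normalize_list_of_dicts([{'a': 1}], 'name'))    # -> [{'a': 1}]
print(normalize_list_of_dicts({'a': 1}, 'name'))      # -> [{'a': 1}]
print(normalize_list_of_dicts('xyz', 'name'))         # -> [{'name': 'xyz'}]
print(normalize_list_of_dicts(None, 'name'))          # -> []
print(normalize_list_of_dicts(None, 'name', 'anon'))  # -> [{'name': 'anon'}]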
def mtz2tw(n, c, e, l):
    model = Model("tsptw - mtz-strong")
    x, u = {}, {}
    for i in range(1, n+1):
        u[i] = model.addVar(lb=e[i], ub=l[i], vtype="C", name="u(%s)" % i)
        for j in range(1, n+1):
            if i != j:
                x[i, j] = model.addVar(vtype="B", name="x(%s,%s)" % (i, j))
    for i in range(1, n+1):
        model.addCons(quicksum(x[i, j] for j in range(1, n+1) if j != i) == 1, "Out(%s)" % i)
        model.addCons(quicksum(x[j, i] for j in range(1, n+1) if j != i) == 1, "In(%s)" % i)
        for j in range(2, n+1):
            if i != j:
                M1 = max(l[i] + c[i, j] - e[j], 0)
                M2 = max(l[i] + min(-c[j, i], e[j] - e[i]) - e[j], 0)
                model.addCons(u[i] + c[i, j] - M1*(1 - x[i, j]) + M2*x[j, i] <= u[j],
                              "LiftedMTZ(%s,%s)" % (i, j))
    for i in range(2, n+1):
        model.addCons(e[i] + quicksum(max(e[j] + c[j, i] - e[i], 0) * x[j, i]
                                      for j in range(1, n+1) if i != j)
                      <= u[i], "LiftedLB(%s)" % i)
        model.addCons(u[i] <= l[i] -
                      quicksum(max(l[i] - l[j] + c[i, j], 0) * x[i, j]
                               for j in range(2, n+1) if i != j),
                      "LiftedUB(%s)" % i)
    model.setObjective(quicksum(c[i, j]*x[i, j] for (i, j) in x), "minimize")
    model.data = x, u
    return model
mtz: model for the traveling salesman problem with time windows (based on Miller-Tucker-Zemlin's one-index potential formulation, stronger constraints) Parameters: - n: number of nodes - c[i,j]: cost for traversing arc (i,j) - e[i]: earliest date for visiting node i - l[i]: latest date for visiting node i Returns a model, ready to be solved.
def create_error(msg, cause=None):
    status_code = config.exc_to_code(cause)
    status_name = config.NAME_STATUS_CODES.get(status_code)
    # NOTE: the status-name literal was lost in extraction;
    # 'INVALID_ARGUMENT' is assumed from the exception type returned.
    if status_name == 'INVALID_ARGUMENT':
        return InvalidArgumentError(msg, cause=cause)
    else:
        return GaxError(msg, cause=cause)
Creates a ``GaxError`` or subclass. Attributes: msg (string): describes the error that occurred. cause (Exception, optional): the exception raised by a lower layer of the RPC stack (for example, gRPC) that caused this exception, or None if this exception originated in GAX. Returns: .GaxError: The exception that wraps ``cause``.
def bin2hex(fin, fout, offset=0):
    h = IntelHex()
    try:
        h.loadbin(fin, offset)
    except IOError:
        e = sys.exc_info()[1]
        # NOTE: the original error-message literal was lost in extraction.
        txt = "ERROR: unable to load bin file: %s" % str(e)
        print(txt)
        return 1
    try:
        h.tofile(fout, format='hex')
    except IOError:
        e = sys.exc_info()[1]
        txt = "ERROR: Could not write to file: %s: %s" % (fout, str(e))
        print(txt)
        return 1
    return 0
Simple bin-to-hex convertor. @return 0 if all OK @param fin input bin file (filename or file-like object) @param fout output hex file (filename or file-like object) @param offset starting address offset for loading bin
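A usage sketch (file names are hypothetical; requires the intelhex package that provides the IntelHex class used above):

rc = bin2hex('firmware.bin', 'firmware.hex', offset=0x8000)
print('converted' if rc == 0 else 'conversion failed')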
def track_progress(**context):
    model = context["model"]
    train_X = context["train_X"]
    dev_X = context["dev_X"]
    dev_y = context["dev_y"]
    n_train = len(train_X)
    trainer = context["trainer"]

    def each_epoch():
        global epoch_train_acc, epoch
        with model.use_params(trainer.optimizer.averages):
            avg_acc = model.evaluate_logloss(dev_X, dev_y)
        stats = (avg_acc, float(epoch_train_acc) / n_train, trainer.dropout)
        print("%.3f dev acc, %.3f train acc, %.4f drop" % stats)
        epoch_train_acc = 0.0
        epoch += 1

    return each_epoch
Print training progress. Called after each epoch.
def annotate(title, format_type, message=None, data=None, metric=1.0):
    if format_type not in TYPES:
        raise ValueError(
            "Invalid type. Expected one of: {}.".format(", ".join(TYPES)))

    def decorator(func):
        func.annotation = dict(
            title=title,
            summary=extended_summary(func),
            message=message,
            data=data,
            format_type=format_type,
            metric=metric)
        return func

    return decorator
Annotate a test case with info that should be displayed in the reports.

Parameters
----------
title : str
    A human-readable descriptive title of the test case.
format_type : str
    A string that determines how the result data is formatted in the
    report. It is expected not to be None.

    * 'number' : 'data' is a single number which can be an integer or
      float and should be represented as such.
    * 'count' : 'data' is a list, set or tuple. Choosing 'count' will
      display the length of that list e.g. number of metabolites without
      formula.
    * 'percent' : Instead of 'data' the content of 'metric' ought to be
      displayed e.g. percentage of metabolites without charge. 'metric'
      is expected to be a floating point number.
    * 'raw' : 'data' ought to be displayed "as is" without formatting.
      This option is appropriate for single strings or a boolean output.
message : str
    A short written explanation that states and possibly explains the
    test result.
data
    Raw data which the test case generates and assesses. Can be of the
    following types: list, set, tuple, string, float, integer, and
    boolean.
metric : float
    A value x in the range of 0 <= x <= 1 which represents the fraction
    of 'data' to the total in the model. For example, if 'data' are all
    metabolites without formula, 'metric' should be the fraction of
    metabolites without formula from the total of metabolites in the
    model.

Returns
-------
function
    The decorated function, now extended by the attribute 'annotation'.

Notes
-----
Adds "annotation" attribute to the function object, which stores values
for predefined keys as a dictionary.
def haversine(lon1, lat1, lon2, lat2, earth_radius=6357000): lon1, lat1, lon2, lat2 = list(map(math.radians, [lon1, lat1, lon2, lat2])) dlon = lon2 - lon1 dlat = lat2 - lat1 a = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2 c = 2 * math.asin(math.sqrt(a)) distance = earth_radius * c return distance
Calculate the great circle distance between two points on the earth
(specified in decimal degrees). The result is in the same units as
``earth_radius``; with the default of 6357000 the distance is in meters.

.. seealso:: :func:`distance_points`

:param float lon1: longitude of first place (decimal degrees)
:param float lat1: latitude of first place (decimal degrees)
:param float lon2: longitude of second place (decimal degrees)
:param float lat2: latitude of second place (decimal degrees)
:param earth_radius: earth radius (use 6367 for kilometers, 6367000 for meters,
    3956 for miles - http://stackoverflow.com/questions/5283900/what-earth-radius-should-i-use-to-calculate-distances-near-the-poles)

:Example:

>>> London_long=-0.126 ; London_lat=51.50; Paris_long = 2.350; Paris_lat = 48.856
>>> haversine(London_long, London_lat, Paris_long, Paris_lat, earth_radius=6367)
342.55375272454864

:returns: float distance in the units of ``earth_radius``
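With the default radius the result comes back in meters; a quick sketch:

# London -> Paris, meters with the default radius vs. kilometers.
d_m = haversine(-0.126, 51.50, 2.350, 48.856)                      # ~342,000 m
d_km = haversine(-0.126, 51.50, 2.350, 48.856, earth_radius=6367)  # ~342.55 km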
def list_user_threads_view(request, targetUsername):
    targetUser = get_object_or_404(User, username=targetUsername)
    targetProfile = get_object_or_404(UserProfile, user=targetUser)
    threads = Thread.objects.filter(owner=targetProfile)
    # NOTE: the template name and context keys below are reconstructed from
    # surviving fragments of the original source and may not match it exactly.
    page_name = "{0}'s Threads".format(targetUsername)
    return render_to_response('threads/list_threads.html', {
        'page_name': page_name,
        'threads': threads,
        'targetUsername': targetUsername,
    }, context_instance=RequestContext(request))
View of threads a user has created.
def beam(problem, beam_size=100, iterations_limit=0, viewer=None):
    return _local_search(problem,
                         _all_expander,
                         iterations_limit=iterations_limit,
                         fringe_size=beam_size,
                         random_initial_states=True,
                         stop_when_no_better=iterations_limit==0,
                         viewer=viewer)
Beam search. beam_size is the size of the beam. If iterations_limit is specified, the algorithm will end after that number of iterations. Else, it will continue until it can't find a better node than the current one. Requires: SearchProblem.actions, SearchProblem.result, SearchProblem.value, and SearchProblem.generate_random_state.
def get_epochs_given_midtimes_and_period(
        t_mid,
        period,
        err_t_mid=None,
        t0_fixed=None,
        t0_percentile=None,
        verbose=False
):
    kwargarr = np.array([isinstance(err_t_mid,np.ndarray),
                         t0_fixed,
                         t0_percentile])
    if not _single_true(kwargarr) and not np.all(~kwargarr.astype(bool)):
        raise AssertionError(
            'can pass at most one of err_t_mid, t0_fixed, t0_percentile')

    t_mid = t_mid[np.isfinite(t_mid)]
    N_midtimes = len(t_mid)

    if t0_fixed:
        t0 = t0_fixed
    elif isinstance(err_t_mid,np.ndarray):
        # get the weighted average, then round to the nearest transit epoch
        t0_avg = np.average(t_mid, weights=1/err_t_mid**2)
        t0_options = np.arange(min(t_mid), max(t_mid)+period, period)
        t0 = t0_options[np.argmin(np.abs(t0_options - t0_avg))]
    else:
        if not t0_percentile:
            # for an odd number of times, take the median time as epoch=0;
            # for an even number, take the lower of the two middle times
            if N_midtimes % 2 == 1:
                t0 = np.median(t_mid)
            else:
                t0 = t_mid[int(N_midtimes/2)]
        else:
            t0 = np.sort(t_mid)[int(N_midtimes*t0_percentile/100)]

    epoch = (t_mid - t0)/period

    # round off to get the closest integer epoch
    int_epoch = np.round(epoch, 0)

    if verbose:
        LOGINFO('epochs before rounding')
        LOGINFO('{}'.format(repr(epoch)))
        LOGINFO('epochs after rounding')
        LOGINFO('{}'.format(repr(int_epoch)))

    return int_epoch, t0
This calculates the integer epochs for a set of transit mid-times, given a
period and a starting epoch.

The equation used is::

    t_mid = period*epoch + t0

Default behavior if no kwargs are used is to define `t0` as the median
finite time of the passed `t_mid` array.

At most one of `err_t_mid`, `t0_fixed`, or `t0_percentile` should be passed.

Parameters
----------

t_mid : np.array
    A np.array of transit mid-time measurements

period : float
    The period used to calculate epochs, per the equation above. For typical
    use cases, a period precise to ~1e-5 days is sufficient to get correct
    epochs.

err_t_mid : None or np.array
    If provided, contains the errors of the transit mid-time measurements.
    The zero-point epoch is then set equal to the average of the transit
    times, weighted as `1/err_t_mid^2` . This minimizes the covariance
    between the transit epoch and the period (e.g., Gibson et al. 2013). For
    standard O-C analysis this is the best method.

t0_fixed : None or float
    If provided, use this t0 as the starting epoch. (Overrides all others).

t0_percentile : None or float
    If provided, use this percentile of `t_mid` to define `t0`.

Returns
-------

tuple
    This is a tuple of the form `(integer_epoch_array, t0)`.
    `integer_epoch_array` is an array of integer epochs (float-type), of
    length equal to the number of *finite* mid-times passed.
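A short sketch on synthetic mid-transit times (all values invented):

import numpy as np

period = 3.5                                      # days
t_mid = 2455000.0 + period * np.array([0, 1, 2, 5, 9]) \
        + np.random.normal(0, 1e-4, 5)            # fake mid-transit times
epochs, t0 = get_epochs_given_midtimes_and_period(t_mid, period)
# epochs -> approximately [-2., -1., 0., 3., 7.] relative to the median time t0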
def build_html(path_jinja2, template_name, path_outfile, template_kwargs=None): latex_template_object = LatexBuild( path_jinja2, template_name, template_kwargs, ) return latex_template_object.build_html(path_outfile)
Helper function for building an HTML file from a LaTeX jinja2 template

:param path_jinja2: the root directory for latex jinja2 templates
:param template_name: the relative path, to path_jinja2, to the desired
    jinja2 Latex template
:param path_outfile: the full path to the desired final output file.
    Must contain the same file extension as files generated by
    cmd_wo_infile, otherwise the process will fail
:param template_kwargs: a dictionary of key/values for jinja2 variables
def run(command, raw_output=False): p = Popen(command.split(), stdout=PIPE, stderr=PIPE) (stdout, stderr) = p.communicate() if not raw_output: return ( p.returncode, [line.rstrip() for line in stdout.decode("utf-8").splitlines()], [line.rstrip() for line in stderr.decode("utf-8").splitlines()] ) else: return (p.returncode, stdout, stderr)
Run a command using subprocess. :param command: command line to be run :type command: str :param raw_output: does not attempt to convert the output as unicode :type raw_output: bool :return: error code, output (``stdout``) and error (``stderr``) :rtype: tuple
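A usage sketch:

# Run a command and walk its decoded stdout lines on success.
rc, out, err = run('ls -la /tmp')
if rc == 0:
    for line in out:
        print(line)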
def _cont_norm_running_quantile_regions(wl, fluxes, ivars, q, delta_lambda, ranges, verbose=True): print("contnorm.py: continuum norm using running quantile") print("Taking spectra in %s chunks" % len(ranges)) nstars = fluxes.shape[0] norm_fluxes = np.zeros(fluxes.shape) norm_ivars = np.zeros(ivars.shape) for chunk in ranges: start = chunk[0] stop = chunk[1] output = _cont_norm_running_quantile( wl[start:stop], fluxes[:,start:stop], ivars[:,start:stop], q, delta_lambda) norm_fluxes[:,start:stop] = output[0] norm_ivars[:,start:stop] = output[1] return norm_fluxes, norm_ivars
Perform continuum normalization using a running quantile, for a spectrum
that comes in chunks
def convert_nexus_to_format(dataset_as_nexus, dataset_format):
    fake_handle = StringIO(dataset_as_nexus)
    nexus_al = AlignIO.parse(fake_handle, 'nexus')
    tmp_file = make_random_filename()
    AlignIO.write(nexus_al, tmp_file, dataset_format)
    dataset_as_fasta = read_and_delete_tmp_file(tmp_file)
    return dataset_as_fasta
Converts a Nexus alignment to another format (e.g. Phylip or Fasta) using
Biopython tools.

:param dataset_as_nexus: alignment in Nexus format, as a string
:param dataset_format: target format name accepted by Biopython's AlignIO
    (e.g. 'fasta', 'phylip')
:return: the converted alignment as a string
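A usage sketch with a placeholder input file:

# Convert a Nexus alignment on disk into Fasta text.
with open('alignment.nex') as fh:
    fasta_text = convert_nexus_to_format(fh.read(), 'fasta')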
def p_statement_draw_attr(p):
    # 'DRAW' as the sentence name is inferred from the grammar rule in the
    # docstring; the original string literal was lost.
    p[0] = make_sentence('DRAW',
                         make_typecast(TYPE.integer, p[3], p.lineno(4)),
                         make_typecast(TYPE.integer, p[5], p.lineno(4)),
                         p[2])
statement : DRAW attr_list expr COMMA expr
def content_edge_check(self, url): prefixes = ["http://", "https://"] for prefix in prefixes: if url.startswith(prefix): url = url[len(prefix):] break content = self._fetch("/content/edge_check/%s" % url) return content
Retrieve headers and MD5 hash of the content for a particular url from each Fastly edge server.
def _compute_key(self, id, nbytes): "id is - for the various keys used by ssh" m = Message() m.add_mpint(self.K) m.add_bytes(self.H) m.add_byte(id) m.add_bytes(self.session_id) out = sofar = SHA.new(str(m)).digest() while len(out) < nbytes: m = Message() m.add_mpint(self.K) m.add_bytes(self.H) m.add_bytes(sofar) digest = SHA.new(str(m)).digest() out += digest sofar += digest return out[:nbytes]
id is 'A' - 'F' for the various keys used by ssh
def make_datastore_query(self, cursor=None):
    filters = {}
    filters['__key__ >= '] = _key_for_namespace(
        self.namespace_start, self.app)
    filters['__key__ <= '] = _key_for_namespace(
        self.namespace_end, self.app)

    return datastore.Query('__namespace__',
                           filters=filters,
                           keys_only=True,
                           cursor=cursor,
                           _app=self.app)
Returns a datastore.Query that generates all namespaces in the range. Args: cursor: start cursor for the query. Returns: A datastore.Query instance that generates db.Keys for each namespace in the NamespaceRange.
def get_backoff_time(self): if self._observed_errors <= 1: return 0 backoff_value = self.backoff_factor * (2 ** (self._observed_errors - 1)) return min(self.BACKOFF_MAX, backoff_value)
Formula for computing the current backoff :rtype: float
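A worked example of the schedule; the factor and cap values are illustrative:

# Reproduce the schedule for backoff_factor = 0.5, BACKOFF_MAX = 120.
for n in range(1, 6):
    backoff = 0 if n <= 1 else min(120, 0.5 * (2 ** (n - 1)))
    print(n, backoff)  # -> 1 0, 2 1.0, 3 2.0, 4 4.0, 5 8.0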
def plot(self, numPoints=100):
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    x = np.linspace(- self.radius, self.radius, numPoints)
    z = np.linspace(- self.height / 2., self.height / 2., numPoints)
    Xc, Zc = np.meshgrid(x, z)
    Yc = np.sqrt(self.radius ** 2 - Xc ** 2)
    ax.plot_surface(Xc, Yc, Zc, alpha=0.2, rstride=20, cstride=10)
    ax.plot_surface(Xc, -Yc, Zc, alpha=0.2, rstride=20, cstride=10)
    ax.set_xlabel("X")
    ax.set_ylabel("Y")
    ax.set_zlabel("Z")
    plt.title("{}".format(self))
    return fig, ax
Specific plotting method for cylinders.
def run(self):
    # NOTE: the settings keys below are reconstructed from context (the
    # original key strings were lost) and may not match the original names.
    eta = self.settings['noise_strength']
    gamma = 2 * np.pi * self.settings['gamma']  # damping rate in rad/s
    dt = 1. / self.settings['sample_rate']
    control = self.settings['control']
    self._state = self._output
    while self._stop is False:
        # Euler step of a damped, noise-driven process.
        A = -gamma * dt
        noise = np.sqrt(2*gamma*eta)*np.random.randn()
        self._state *= (1. + A)
        self._state += noise + control
        self._output = self._state
        self.msleep(int(1e3 / self.settings['update_rate']))
This is the actual execution of the instrument thread: continuously read
values from the probes.
def get_wigner(z, freq, sample_freq, histbins=200, show_plot=False): phase, phase_slices = extract_slices(z, freq, sample_freq, show_plot=False) counts_array, bin_edges = histogram_phase(phase_slices, phase, histbins, show_plot=show_plot) diff = bin_edges[1] - bin_edges[0] bin_centres = bin_edges[:-1] + diff iradon_output = _iradon_sart(counts_array, theta=phase) return iradon_output, bin_centres
Calculates an approximation to the Wigner quasi-probability distribution
by splitting the z position array into slices of the length of one period
of the motion. This slice is then associated with phase from -180 to 180
degrees. These slices are then histogrammed in order to get a distribution
of counts of where the particle is observed at each phase. The 2d array
containing the counts varying with position and phase is then passed
through the inverse radon transformation using the Simultaneous Algebraic
Reconstruction Technique approximation from the scikit-image package.

Parameters
----------
z : ndarray
    trace of z motion
freq : float
    frequency of motion
sample_freq : float
    sample frequency of the z array
histbins : int, optional (default=200)
    number of bins to use in histogramming data for each phase
show_plot : bool, optional (default=False)
    Whether or not to plot the phase distribution

Returns
-------
iradon_output : ndarray
    2d array of size (histbins x histbins)
bin_centres : ndarray
    positions of the bin centres
def set_event(self, ref, tk_event, callback): if tk_event not in self._event_callbacks: self._event_callbacks[tk_event] = EventCallback(self._widget, self._tks, tk_event) self._refs[ref] = self._event_callbacks[tk_event] self._refs[ref].set_callback(ref, callback)
Sets a callback for this widget against a ref (reference) for a tk_event, setting the callback to None will remove it.
def exists(self, path):
    (bucket, key) = self._path_to_bucket_and_key(path)

    # root always exists
    if self._is_root(key):
        return True

    # file exists
    if self._exists(bucket, key):
        return True

    # directory exists
    if self.isdir(path):
        return True

    logger.debug('Path %s does not exist', path)
    return False
Does provided path exist on S3?
def read_root_generation_progress(self):
    api_path = '/v1/sys/generate-root/attempt'
    response = self._adapter.get(
        url=api_path,
    )
    return response.json()
Read the configuration and process of the current root generation attempt. Supported methods: GET: /sys/generate-root/attempt. Produces: 200 application/json :return: The JSON response of the request. :rtype: dict
def get_limits(self, limit_sum=None): with self.limit_lock: if limit_sum and self.limit_sum == limit_sum: raise NoChangeException() return (self.limit_sum, self.limit_data)
Gets the current limit data if it is different from the data indicated by
limit_sum. Raises a NoChangeException if the limit_sum represents no
change; otherwise returns a tuple consisting of the current limit_sum and
a list of Limit objects.
def ping(self): return self.handleresult(self.r.get(self.url, params={"q": "this"})).text
Attempts to ping the server using current credentials, and responds with the path of the currently authenticated device
def get_config(self, name, default=_MISSING): val = self._config.get(name, default) if val is _MISSING: raise ArgumentError("DeviceAdapter config {} did not exist and no default".format(name)) return val
Get a configuration setting from this DeviceAdapter. See :meth:`AbstractDeviceAdapter.get_config`.
def simulate_w(self, index: int, half_turns: float, axis_half_turns: float):
    args = self._shard_num_args({
        'index': index,
        'half_turns': half_turns,
        'axis_half_turns': axis_half_turns
    })
    if index >= self._num_shard_qubits:
        # W gate spans shards.
        self._pool.map(_clear_scratch, args)
        self._pool.map(_w_between_shards, args)
        self._pool.map(_copy_scratch_to_state, args)
    else:
        # W gate is entirely within one shard.
        self._pool.map(_w_within_shard, args)

    # Renormalize after the rotation to correct for numerical drift.
    norm_squared = np.sum(self._pool.map(_norm_squared, args))
    args = self._shard_num_args({
        'norm_squared': norm_squared
    })
    self._pool.map(_renorm, args)
Simulate a single qubit rotation gate about a X + b Y. The gate simulated is U = exp(-i pi/2 W half_turns) where W = cos(pi axis_half_turns) X + sin(pi axis_half_turns) Y Args: index: The qubit to act on. half_turns: The amount of the overall rotation, see the formula above. axis_half_turns: The angle between the pauli X and Y operators, see the formula above.
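A small numpy/scipy sketch of the unitary this gate implements, useful for checking the formula above (this is a standalone reconstruction, not part of the simulator):

import numpy as np
from scipy.linalg import expm

def w_unitary(half_turns, axis_half_turns):
    # W = cos(pi * a) X + sin(pi * a) Y;  U = exp(-i pi/2 * half_turns * W)
    X = np.array([[0, 1], [1, 0]], dtype=complex)
    Y = np.array([[0, -1j], [1j, 0]])
    W = (np.cos(np.pi * axis_half_turns) * X
         + np.sin(np.pi * axis_half_turns) * Y)
    return expm(-1j * np.pi / 2 * half_turns * W)

# For axis_half_turns = 0, W = X, so a full half turn gives U = -i X.
U = w_unitary(1.0, 0.0)
assert np.allclose(U, -1j * np.array([[0, 1], [1, 0]]))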
def _scope_vars(scope, trainable_only=False): return tf.get_collection( tf.GraphKeys.TRAINABLE_VARIABLES if trainable_only else tf.GraphKeys.VARIABLES, scope=scope if isinstance(scope, str) else scope.name)
Get variables inside a scope The scope can be specified as a string Parameters ---------- scope: str or VariableScope scope in which the variables reside. trainable_only: bool whether or not to return only the variables that were marked as trainable. Returns ------- vars: [tf.Variable] list of variables in `scope`.
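A TF1-style (graph-mode) usage sketch; the scope and layer names are invented:

import tensorflow as tf

with tf.variable_scope('q_func'):
    x = tf.placeholder(tf.float32, [None, 4])
    out = tf.layers.dense(x, 2)

# Kernel and bias of the dense layer above.
q_vars = _scope_vars('q_func', trainable_only=True)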
def getFileHandle(self, dataFile, openMethod): if dataFile in self._memoTable: handle = self._memoTable[dataFile] self._update(dataFile, handle) return handle else: try: handle = openMethod(dataFile) except ValueError: raise exceptions.FileOpenFailedException(dataFile) self._memoTable[dataFile] = handle self._add(dataFile, handle) if len(self._memoTable) > self._maxCacheSize: dataFile = self._removeLru() del self._memoTable[dataFile] return handle
Returns handle associated to the filename. If the file is already opened, update its priority in the cache and return its handle. Otherwise, open the file using openMethod, store it in the cache and return the corresponding handle.
def uniq(args): p = OptionParser(uniq.__doc__) p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) fastqfile, = args fw = must_open(opts.outfile, "w") nduplicates = nreads = 0 seen = set() for rec in iter_fastq(fastqfile): nreads += 1 if rec is None: break name = rec.name if name in seen: nduplicates += 1 continue seen.add(name) print(rec, file=fw) logging.debug("Removed duplicate reads: {}".\ format(percentage(nduplicates, nreads)))
%prog uniq fastqfile Retain only first instance of duplicate reads. Duplicate is defined as having the same read name.
def expr2dimacscnf(ex): litmap, nvars, clauses = ex.encode_cnf() return litmap, DimacsCNF(nvars, clauses)
Convert an expression into an equivalent DIMACS CNF.
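A hedged usage sketch with pyeda, assuming the input expression is already in CNF (so `encode_cnf` can run directly):

from pyeda.inter import expr

f = expr("(a | b) & (~b | c)")   # already CNF
litmap, cnf = expr2dimacscnf(f)
print(cnf)   # DIMACS text, e.g. "p cnf 3 2 ..."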
def kl_setup(num_eig,sr,struct,prefixes,
             factors_file="kl_factors.dat",islog=True,
             basis_file=None,
             tpl_dir="."):
    try:
        import flopy
    except Exception as e:
        raise Exception("error import flopy: {0}".format(str(e)))
    assert isinstance(sr,flopy.utils.SpatialReference)

    if isinstance(struct,str):
        assert os.path.exists(struct)
        gs = pyemu.utils.read_struct_file(struct)
    else:
        gs = struct
    names = []
    for i in range(sr.nrow):
        names.extend(["i{0:04d}j{1:04d}".format(i,j) for j in range(sr.ncol)])

    cov = gs.covariance_matrix(sr.xcentergrid.flatten(),
                               sr.ycentergrid.flatten(),
                               names=names)

    eig_names = ["eig_{0:04d}".format(i) for i in range(cov.shape[0])]
    trunc_basis = cov.u
    trunc_basis.col_names = eig_names
    if basis_file is not None:
        trunc_basis.to_binary(basis_file)
    trunc_basis = trunc_basis[:,:num_eig]
    eig_names = eig_names[:num_eig]

    pp_df = pd.DataFrame({"name":eig_names},index=eig_names)
    pp_df.loc[:,"x"] = -1.0 * sr.ncol
    pp_df.loc[:,"y"] = -1.0 * sr.nrow
    pp_df.loc[:,"zone"] = -999
    pp_df.loc[:,"parval1"] = 1.0
    pyemu.pp_utils.write_pp_file(os.path.join("temp.dat"),pp_df)

    eigen_basis_to_factor_file(sr.nrow,sr.ncol,trunc_basis,
                               factors_file=factors_file,islog=islog)
    dfs = []
    for prefix in prefixes:
        tpl_file = os.path.join(tpl_dir,"{0}.dat_kl.tpl".format(prefix))
        df = pyemu.pp_utils.pilot_points_to_tpl("temp.dat",tpl_file,prefix)
        shutil.copy2("temp.dat",tpl_file.replace(".tpl",""))
        df.loc[:,"tpl_file"] = tpl_file
        df.loc[:,"in_file"] = tpl_file.replace(".tpl","")
        df.loc[:,"prefix"] = prefix
        df.loc[:,"pargp"] = "kl_{0}".format(prefix)
        dfs.append(df)
    df = pd.concat(dfs)
    df.loc[:,"parubnd"] = 10.0
    df.loc[:,"parlbnd"] = 0.1
    # return the combined dataframe with the bound columns set, rather than
    # re-concatenating dfs and discarding them
    return df
setup a Karhunen-Loeve based parameterization for a given
geostatistical structure.

Parameters
----------
num_eig : int
    number of basis vectors to retain in the reduced basis
sr : flopy.reference.SpatialReference
struct : str or pyemu.geostats.Geostruct
    geostatistical structure (or file containing one)
prefixes : list
    parameter name prefixes. One set of KL parameters (of size
    num_eig) is created per prefix, so the total number of
    parameters is len(prefixes) * num_eig
factors_file : str
    name of the factors file to write. Default is "kl_factors.dat"
islog : bool
    flag indicating whether the parameterization is in log space.
    Default is True
basis_file : str
    the name of the PEST-format binary file where the reduced
    basis will be saved
tpl_dir : str
    the directory in which to write the template files. Default is "."

Returns
-------
df : pandas.DataFrame
    a dataframe of parameter information, one row per KL parameter,
    including the template and input file names and parameter bounds

Note
----
requires flopy

Example
-------
``>>>import flopy``

``>>>import pyemu``

``>>>m = flopy.modflow.Modflow.load("mymodel.nam")``

``>>>df = pyemu.helpers.kl_setup(10,m.sr,"struct.dat",["hk"])``
def get_review_sh(self, revision, item): identity = self.get_sh_identity(revision) update = parser.parse(item[self.get_field_date()]) erevision = self.get_item_sh_fields(identity, update) return erevision
Add sorting hat enrichment fields for the author of the revision
def adam_minimax(grad_both, init_params_max, init_params_min, callback=None, num_iters=100, step_size_max=0.001, step_size_min=0.001, b1=0.9, b2=0.999, eps=10**-8): x_max, unflatten_max = flatten(init_params_max) x_min, unflatten_min = flatten(init_params_min) m_max = np.zeros(len(x_max)) v_max = np.zeros(len(x_max)) m_min = np.zeros(len(x_min)) v_min = np.zeros(len(x_min)) for i in range(num_iters): g_max_uf, g_min_uf = grad_both(unflatten_max(x_max), unflatten_min(x_min), i) g_max, _ = flatten(g_max_uf) g_min, _ = flatten(g_min_uf) if callback: callback(unflatten_max(x_max), unflatten_min(x_min), i, unflatten_max(g_max), unflatten_min(g_min)) m_max = (1 - b1) * g_max + b1 * m_max v_max = (1 - b2) * (g_max**2) + b2 * v_max mhat_max = m_max / (1 - b1**(i + 1)) vhat_max = v_max / (1 - b2**(i + 1)) x_max = x_max + step_size_max * mhat_max / (np.sqrt(vhat_max) + eps) m_min = (1 - b1) * g_min + b1 * m_min v_min = (1 - b2) * (g_min**2) + b2 * v_min mhat_min = m_min / (1 - b1**(i + 1)) vhat_min = v_min / (1 - b2**(i + 1)) x_min = x_min - step_size_min * mhat_min / (np.sqrt(vhat_min) + eps) return unflatten_max(x_max), unflatten_min(x_min)
Adam modified to do minimax optimization, for instance to help with
training generative adversarial networks.
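A toy sketch on the bilinear saddle objective f(u, v) = sum(u * v), whose equilibrium is at the origin (autograd is assumed, matching the `flatten` usage above):

import autograd.numpy as np
from autograd import grad

def f(u, v):
    # Maximize over u, minimize over v; saddle point at u = v = 0.
    return np.sum(u * v)

grad_both = lambda u, v, i: (grad(f, 0)(u, v), grad(f, 1)(u, v))
u_opt, v_opt = adam_minimax(grad_both,
                            np.array([1.0]), np.array([-1.0]),
                            num_iters=500)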
def _get_supported_py_config(tops, extended_cfg):
    pymap = []
    for py_ver, tops in _six.iteritems(copy.deepcopy(tops)):
        py_ver = int(py_ver)
        if py_ver == 2:
            pymap.append('py2:2:7')
        elif py_ver == 3:
            pymap.append('py3:3:0')

    for ns, cfg in _six.iteritems(copy.deepcopy(extended_cfg) or {}):
        pymap.append('{}:{}:{}'.format(ns, *cfg.get('py-version')))
    pymap.append('')

    return salt.utils.stringutils.to_bytes(os.linesep.join(pymap))
Based on the Salt SSH configuration, create a YAML configuration for the supported Python interpreter versions. This is then written into the thin.tgz archive and then verified by salt.client.ssh.ssh_py_shim.get_executable() Note: Minimum default of 2.x versions is 2.7 and 3.x is 3.0, unless specified in namespaces. :return:
def _run_bunny(args): main_file, json_file, project_name = _get_main_and_json(args.directory) work_dir = utils.safe_makedir(os.path.join(os.getcwd(), "bunny_work")) flags = ["-b", work_dir] log_file = os.path.join(work_dir, "%s-bunny.log" % project_name) if os.path.exists(work_dir): caches = [os.path.join(work_dir, d) for d in os.listdir(work_dir) if os.path.isdir(os.path.join(work_dir, d))] if caches: flags += ["--cache-dir", max(caches, key=os.path.getmtime)] if args.no_container: _remove_bcbiovm_path() flags += ["--no-container"] cmd = ["rabix"] + flags + [main_file, json_file] with utils.chdir(work_dir): _run_tool(cmd, not args.no_container, work_dir, log_file)
Run CWL with rabix bunny.
def cidr_netmask(cidr): ips = netaddr.IPNetwork(cidr) return six.text_type(ips.netmask)
Get the netmask address associated with a CIDR address. CLI example:: salt myminion netaddress.cidr_netmask 192.168.0.0/20
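A direct-call sketch:

cidr_netmask('192.168.0.0/20')   # -> u'255.255.240.0'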
def clustdealer(pairdealer, optim): ccnt = 0 chunk = [] while ccnt < optim: try: taker = itertools.takewhile(lambda x: x[0] != "//\n", pairdealer) oneclust = ["".join(taker.next())] except StopIteration: return 1, chunk while 1: try: oneclust.append("".join(taker.next())) except StopIteration: break chunk.append("".join(oneclust)) ccnt += 1 return 0, chunk
Return up to ``optim`` clusters drawn from the pair iterator, preceded by a
flag that is 1 if the iterator was exhausted and 0 otherwise.