code
stringlengths
26
79.6k
docstring
stringlengths
1
46.9k
# NOTE(review): this block was mangled by extraction -- string literals were
# stripped (empty ``description=``, ``hasattr(Economy,)``) and the middle of
# the function (file handling, statistics computation, csv writer setup) is
# missing entirely. Not runnable as-is; restore from upstream before use.
# Purpose (from the adjacent docstring): build descriptive statistics and a
# macroeconomic data file from a simulated Economy, concatenating per-type
# histories for heterogeneous-agent models and writing rows via csv.
def makeStickyEdataFile(Economy,ignore_periods,description=,filename=None,save_data=False,calc_micro_stats=True,meas_err_base=None): Data.txt if hasattr(Economy,): if len(Economy.agents) > 1: pLvlAll_hist = np.concatenate([this_type.pLvlTrue_hist for this_type in Economy.agents],axis=1) aLvlAll_hist = np.concatenate([this_type.aLvlNow_hist for this_type in Economy.agents],axis=1) cLvlAll_hist = np.concatenate([this_type.cLvlNow_hist for this_type in Economy.agents],axis=1) yLvlAll_hist = np.concatenate([this_type.yLvlNow_hist for this_type in Economy.agents],axis=1) else: my_writer.writerow(VarNames) for i in range(DataArray.shape[0]): my_writer.writerow(DataArray[i,:]) f.close()
Makes descriptive statistics and macroeconomic data file. Behaves slightly differently for heterogeneous agents vs representative agent models. Parameters ---------- Economy : Market or AgentType A representation of the model economy. For heterogeneous agents specifications, this will be an instance of a subclass of Market. For representative agent specifications, this will be an instance of an AgentType subclass. ignore_periods : int Number of periods at the start of the simulation to throw out. description : str Description of the economy that is prepended on the output string. filename : str Name of the output log file, if any; .txt will be appended automatically. save_data : bool When True, save simulation data to filename + 'Data.txt' for use in Stata. calc_micro_stats : bool When True, calculate microeconomic statistics like in Table 2 of the paper draft. meas_err_base : float or None Base value of measurement error standard deviation, which will be adjusted. When None (default), value is calculated as stdev(DeltaLogC). Returns ------- None
def widgetForName(self, name):
    """Return the first contained widget whose ``name()`` equals *name*.

    :param name: string to match against each widget's ``name()`` result;
        the widgets in this container are all expected to expose ``name()``.
    :returns: the matching widget, or None when no widget matches.
    """
    for index in range(len(self)):
        candidate = self.widget(index)
        if candidate.name() == name:
            return candidate
Gets a widget with *name* :param name: the widgets in this container should all have a name() method. This is the string to match to that result :type name: str
# NOTE(review): string literals were stripped by extraction
# (``hasattr(self.__action, )`` is missing its attribute-name argument),
# so this block is not runnable as-is; restore from upstream before use.
# Purpose: main entry point for setting this wx.TextCtrl subclass's value --
# validates the (possibly freshly read) text, formats it via ``self.format``,
# fires the bound action callback when valid, and rings the bell when not.
def SetValue(self, value=None, act=True): " main method to set value " if value is None: value = wx.TextCtrl.GetValue(self).strip() self.__CheckValid(value) self.__GetMark() if value is not None: wx.TextCtrl.SetValue(self, self.format % set_float(value)) if self.is_valid and hasattr(self.__action, ) and act: self.__action(value=self.__val) elif not self.is_valid and self.bell_on_invalid: wx.Bell() self.__SetMark()
main method to set value
# NOTE(review): the default string values for ``peer_relation`` and
# ``addr_key`` were stripped by extraction (presumably relation/key names);
# restore them from upstream before use.
# Purpose (from the adjacent docstring): return a dict mapping each peer unit
# in the given relation to its private address, read via relation_get.
def peer_ips(peer_relation=, addr_key=): peers = {} for r_id in relation_ids(peer_relation): for unit in relation_list(r_id): peers[unit] = relation_get(addr_key, rid=r_id, unit=unit) return peers
Return a dict of peers and their private-address
def vmdk_to_ami(args):
    """Import a VMDK disk image into AWS as an AMI.

    :param args: parsed CLI options carrying ``directory``, ``aws_profile``,
        ``s3_bucket``, ``aws_regions``, ``ami_name`` and ``vmdk_upload_file``,
        which are handed to ``AWSUtilities.AWSUtils``.
    :return: None
    """
    importer = AWSUtilities.AWSUtils(
        args.directory,
        args.aws_profile,
        args.s3_bucket,
        args.aws_regions,
        args.ami_name,
        args.vmdk_upload_file,
    )
    importer.import_vmdk()
Calls methods to perform vmdk import :param args: :return:
# NOTE(review): the meta-type string literals compared against were stripped
# by extraction (each ``meta_type ==`` is missing its right-hand side --
# presumably 'Location', 'Logical', 'Relation', 'Physical' based on the
# helper names, but confirm against upstream). Not runnable as-is.
# Purpose (from the adjacent docstring): dispatch to the meta-type-specific
# relationship constructor, or raise NoRelationshipPossible.
def create_relationship(manager, handle_id, other_handle_id, rel_type): meta_type = get_node_meta_type(manager, handle_id) if meta_type == : return create_location_relationship(manager, handle_id, other_handle_id, rel_type) elif meta_type == : return create_logical_relationship(manager, handle_id, other_handle_id, rel_type) elif meta_type == : return create_relation_relationship(manager, handle_id, other_handle_id, rel_type) elif meta_type == : return create_physical_relationship(manager, handle_id, other_handle_id, rel_type) other_meta_type = get_node_meta_type(manager, other_handle_id) raise exceptions.NoRelationshipPossible(handle_id, meta_type, other_handle_id, other_meta_type, rel_type)
Makes a relationship from node to other_node depending on which meta_type the nodes are. Returns the relationship or raises NoRelationshipPossible exception.
# NOTE(review): dictionary keys and message-id literals were stripped by
# extraction (``results = { : 0, ...}``, ``results[]``, ``msgs.msg[][1]``),
# so this block is not runnable as-is; restore from upstream before use.
# Purpose (from the adjacent docstring): run ``sudo /sbin/vmcp query user
# <userid>`` to decide whether a z/VM virtual machine is logged on, returning
# a dict with overallRC/rc/rs fields (rs distinguishes logged on vs off).
def isLoggedOn(rh, userid): rh.printSysLog("Enter vmUtils.isLoggedOn, userid: " + userid) results = { : 0, : 0, : 0, } cmd = ["sudo", "/sbin/vmcp", "query", "user", userid] strCmd = .join(cmd) rh.printSysLog("Invoking: " + strCmd) try: subprocess.check_output( cmd, close_fds=True, stderr=subprocess.STDOUT) except CalledProcessError as e: search_pattern = .encode() match = re.search(search_pattern, e.output) if match: results[] = 1 else: rh.printLn("ES", msgs.msg[][1] % (modId, strCmd, e.returncode, e.output)) results = msgs.msg[][0] results[] = e.returncode except Exception as e: results = msgs.msg[][0] rh.printLn("ES", msgs.msg[][1] % (modId, strCmd, type(e).__name__, str(e))) rh.printSysLog("Exit vmUtils.isLoggedOn, overallRC: " + str(results[]) + " rc: " + str(results[]) + " rs: " + str(results[])) return results
Determine whether a virtual machine is logged on. Input: Request Handle: userid being queried Output: Dictionary containing the following: overallRC - overall return code, 0: success, non-zero: failure rc - 0: if we got status. Otherwise, it is the error return code from the commands issued. rs - Based on rc value. For rc==0, rs is: 0: if we determined it is logged on. 1: if we determined it is logged off.
# NOTE(review): the yield expression is corrupted -- ``yield
# propertyfound_entities.get(mention.string)`` looks like two unrelated
# source fragments fused by extraction (probably just ``yield property``
# plus text from another function). Restore from upstream before use.
# Also note ``property`` shadows the builtin.
# Purpose (from the adjacent docstring): iterate over all properties of the
# features layer, when one is present.
def get_properties(self): if self.features_layer is not None: for property in self.features_layer.get_properties(): yield propertyfound_entities.get(mention.string)
Returns all the properties of the features layer (iterator) @rtype: L{Cproperty} @return: list of properties
# NOTE(review): string literals were stripped by extraction -- the debug
# message, the state compared against, and the state assigned are all
# missing. Restore from upstream before use.
# Purpose (from the adjacent docstring): connection-closed callback; appears
# to transition matching subscriptions to some disconnected/pending state.
def _on_close(self, socket): self.logger.debug() for subscription in self.subscriptions.values(): if subscription.state == : subscription.state =
Called when the connection was closed.
def xmlparser(xml, objectify=True):
    """Parse *xml* into an lxml root element.

    :param xml: XML resource -- an already-parsed lxml element/tree
        (returned unchanged), a string, or a file-like object
    :type xml: Union[text_type, lxml.etree._Element]
    :param objectify: when True, parse with lxml.objectify; otherwise with
        plain lxml.etree
    :rtype: lxml.etree._Element
    :returns: the root element of the parsed document
    :raises TypeError: if *xml* is not of an accepted type
    """
    opened_here = None
    if isinstance(xml, (etree._Element, ObjectifiedElement, etree._ElementTree)):
        # Already parsed: hand it straight back.
        return xml
    if isinstance(xml, text_type):
        # Wrap raw text so both branches below read from a file-like object;
        # remember to close the buffer we created ourselves.
        xml = StringIO(xml)
        opened_here = True
    elif not isinstance(xml, IOBase):
        raise TypeError("Unsupported type of resource {}".format(type(xml)))
    if objectify is False:
        root = etree.parse(xml).getroot()
    else:
        root = parse(xml).getroot()
    if opened_here:
        xml.close()
    return root
Parse xml :param xml: XML element :type xml: Union[text_type, lxml.etree._Element] :rtype: lxml.etree._Element :returns: An element object :raises: TypeError if element is not in accepted type
def software_breakpoint_set(self, addr, thumb=False, arm=False, flash=False, ram=False):
    """Set a software breakpoint at *addr*.

    ``thumb``/``arm`` select the CPU mode flag (THUMB wins when both are
    given); ``flash``/``ram`` select where the breakpoint lives -- when
    exactly one of them is True the corresponding flag is used, otherwise
    the generic software flag lets the DLL pick the best option.

    Args:
        self (JLink): the ``JLink`` instance
        addr (int): address at which to set the breakpoint
        thumb (bool): set the breakpoint in THUMB mode
        arm (bool): set the breakpoint in ARM mode
        flash (bool): set the breakpoint in flash
        ram (bool): set the breakpoint in RAM

    Returns:
        An integer breakpoint handle; retain it for future breakpoint
        operations.

    Raises:
        JLinkException: if the breakpoint could not be set.
    """
    # Memory placement flag.
    if flash and not ram:
        flags = enums.JLinkBreakpoint.SW_FLASH
    elif ram and not flash:
        flags = enums.JLinkBreakpoint.SW_RAM
    else:
        flags = enums.JLinkBreakpoint.SW
    # CPU mode flag (mutually exclusive).
    if thumb:
        flags |= enums.JLinkBreakpoint.THUMB
    elif arm:
        flags |= enums.JLinkBreakpoint.ARM
    handle = self._dll.JLINKARM_SetBPEx(int(addr), flags)
    if handle <= 0:
        # NOTE(review): the exception message argument appears to have been
        # stripped by extraction; confirm against upstream.
        raise errors.JLinkException()
    return handle
Sets a software breakpoint at the specified address. If ``thumb`` is ``True``, the breakpoint is set in THUMB-mode, while if ``arm`` is ``True``, the breakpoint is set in ARM-mode, otherwise a normal breakpoint is set. If ``flash`` is ``True``, the breakpoint is set in flash, otherwise if ``ram`` is ``True``, the breakpoint is set in RAM. If both are ``True`` or both are ``False``, then the best option is chosen for setting the breakpoint in software. Args: self (JLink): the ``JLink`` instance addr (int): the address where the breakpoint will be set thumb (bool): boolean indicating to set the breakpoint in THUMB mode arm (bool): boolean indicating to set the breakpoint in ARM mode flash (bool): boolean indicating to set the breakpoint in flash ram (bool): boolean indicating to set the breakpoint in RAM Returns: An integer specifying the breakpoint handle. This handle should sbe retained for future breakpoint operations. Raises: TypeError: if the given address is not an integer. JLinkException: if the breakpoint could not be set.
def find_differences(self, refindex: int):
    """Collect, for every message, the positions that differ from a reference message.

    :param refindex: index of the reference message
    :rtype: dict[int, set[int]]
    :returns: mapping of message index -> set of differing (aligned) positions;
        empty when ``refindex`` is out of range or the view is unknown
    """
    result = defaultdict(set)
    if refindex >= len(self.protocol.messages):
        return result
    # Pick the decoded representation matching the current protocol view.
    if self.proto_view == 0:
        decoded = self.protocol.decoded_proto_bits_str
    elif self.proto_view == 1:
        decoded = self.protocol.decoded_hex_str
    elif self.proto_view == 2:
        decoded = self.protocol.decoded_ascii_str
    else:
        return result
    reference = decoded[refindex]
    reference_offset = self.get_alignment_offset_at(refindex)
    reference_end = len(reference) + reference_offset
    for index, message in enumerate(decoded):
        if index == refindex:
            continue
        offset = self.get_alignment_offset_at(index)
        end = len(message) + offset
        near_end = min(reference_end, end)
        far_end = max(reference_end, end)
        positions = set()
        # Positions past the shorter message always differ; within the
        # overlap, compare characters at alignment-corrected indices.
        for pos in range(max(offset, reference_offset), far_end):
            if pos >= near_end or message[pos - offset] != reference[pos - reference_offset]:
                positions.add(pos)
        result[index] = positions
    return result
Search all differences between protocol messages regarding a reference message :param refindex: index of reference message :rtype: dict[int, set[int]]
def execute(self, sql, parameters=None, bulk_parameters=None):
    """Prepare and execute a database operation (query or command).

    Stores the raw server response on ``self._result``; when the response
    contains rows, exposes them as an iterator on ``self.rows``.

    :raises ProgrammingError: if the connection or cursor is closed
    """
    if self.connection._closed:
        raise ProgrammingError("Connection closed")
    if self._closed:
        raise ProgrammingError("Cursor closed")
    response = self.connection.client.sql(sql, parameters, bulk_parameters)
    self._result = response
    if "rows" in response:
        self.rows = iter(response["rows"])
Prepare and execute a database operation (query or command).
def sort_by(self, *ids):
    """Update files order.

    :param ids: list of ids specifying the final status of the list; an id
        not present in the container is used verbatim as a key.
    """
    # Map stringified file ids to their storage keys.
    key_by_file_id = {str(entry.file_id): entry.key for entry in self}
    ordered = OrderedDict()
    for id_ in ids:
        key = key_by_file_id.get(id_, id_)
        ordered[key] = self[key].dumps()
    self.filesmap = ordered
    self.flush()
Update files order. :param ids: List of ids specifying the final status of the list.
def worker_task(work_item, config):
    """Celery task performing a single mutation and running the test suite.

    Args:
        work_item: a dict-like WorkItem describing the mutation to apply.
        config: the configuration to use for the test execution.

    Returns:
        A ``(job_id, result)`` tuple for the executed work item.
    """
    # _ensure_workspace lazily initializes the module-level workspace.
    global _workspace
    _ensure_workspace(config)
    outcome = worker(
        work_item.module_path,
        config.python_version,
        work_item.operator_name,
        work_item.occurrence,
        config.test_command,
        config.timeout,
    )
    return work_item.job_id, outcome
The celery task which performs a single mutation and runs a test suite. This runs `cosmic-ray worker` in a subprocess and returns the results, passing `config` to it via stdin. Args: work_item: A dict describing a WorkItem. config: The configuration to use for the test execution. Returns: An updated WorkItem
# NOTE(review): most collection-name and field-code literals were stripped
# by extraction (``record_get_field_values(self.record, , code=)``,
# ``self.collections.add()``, ``kb = [, , ...]``); not runnable as-is.
# Purpose (from the adjacent docstring): inspect 980/088/etc. MARC fields to
# decide which collections the record belongs to (PUBLISHED/CITEABLE/arXiv/
# thesis-like, plus a hidden flag), then drop the 980 fields.
def determine_collections(self): for value in record_get_field_values(self.record, , code=): if in value.upper(): self.collections.add() if in value.upper(): self.collections.add() if in value.upper(): self.collections.add() if "HIDDEN" in value.upper(): self.hidden = True if self.is_published(): self.collections.add("PUBLISHED") self.collections.add("CITEABLE") if not in self.collections: from itertools import product kb = [, , , , , ] values = record_get_field_values(self.record, "088", code=) for val, rep in product(values, kb): if val.startswith(rep): self.collections.add() break if record_get_field_values(self.record, , filter_subfield_code="a", filter_subfield_value="arXiv"): self.collections.add("arXiv") self.collections.add() self.collections.add() if not in self.collections: for value in record_get_field_values(self.record, tag=, code=): if value[-2:].isdigit(): self.collections.add() break record_delete_fields(self.record, "980")
Try to determine which collections this record should belong to.
# NOTE(review): the format-name literals ('widget'/'filename' per the
# adjacent docstring), the file-open mode, and the kwargs keys were stripped
# by extraction; not runnable as-is. Restore from upstream before use.
# Purpose: draw an image mark in the current bqplot-style figure -- pass an
# ipywidgets Image through, read a file, or wrap raw bytes, then delegate
# to _draw_mark with default [0, 1] scales.
def imshow(image, format, **kwargs): if format == : ipyimage = image elif format == : with open(image, ) as f: data = f.read() ipyimage = ipyImage(value=data) else: ipyimage = ipyImage(value=image, format=format) kwargs[] = ipyimage kwargs.setdefault(, [0., 1.]) kwargs.setdefault(, [0., 1.]) return _draw_mark(Image, **kwargs)
Draw an image in the current context figure. Parameters ---------- image: image data Image data, depending on the passed format, can be one of: - an instance of an ipywidgets Image - a file name - a raw byte string format: {'widget', 'filename', ...} Type of the input argument. If not 'widget' or 'filename', must be a format supported by the ipywidgets Image. options: dict (default: {}) Options for the scales to be created. If a scale labeled 'x' is required for that mark, options['x'] contains optional keyword arguments for the constructor of the corresponding scale type. axes_options: dict (default: {}) Options for the axes to be created. If an axis labeled 'x' is required for that mark, axes_options['x'] contains optional keyword arguments for the constructor of the corresponding axis type.
def gauge(self, stats, value):
    """Log gauge value(s).

    >>> client = StatsdClient()
    >>> client.gauge('example.gauge', 47)
    >>> client.gauge(('example.gauge41', 'example.gauge43'), 47)
    """
    metric_type = self.SC_GAUGE
    self.update_stats(stats, value, metric_type)
Log gauges >>> client = StatsdClient() >>> client.gauge('example.gauge', 47) >>> client.gauge(('example.gauge41', 'example.gauge43'), 47)
# NOTE(review): the format string was stripped by extraction
# (``(.format(s) ...)``) -- presumably a zero-padded pattern like
# '{:03d}'; confirm against upstream. Not runnable as-is.
# Purpose (from the adjacent docstring): normalize integer subject ids to
# their string form, leaving non-integers untouched; passes None through.
def convert_subject_ids(self, subject_ids): if subject_ids is not None: subject_ids = set( (.format(s) if isinstance(s, int) else s) for s in subject_ids) return subject_ids
Convert subject ids to strings if they are integers
def reopen(self):
    """Reopen the tough connection.

    It will not complain if the connection cannot be reopened.

    On reopen failure a pending transaction is rolled back on a
    best-effort basis; on success the transaction/closed flags are
    cleared, the session is set up again and the usage counter reset.
    """
    try:
        self._con.reopen()
    except Exception:
        # Bug fix: the attribute was misspelled ``_transcation`` here, which
        # made this failure path raise AttributeError instead of rolling back.
        if self._transaction:
            self._transaction = False
            try:
                # NOTE(review): the statement literal was stripped in the
                # extracted source; 'rollback' restored per upstream DBUtils
                # SteadyDB -- confirm against the original module.
                self._con.query('rollback')
            except Exception:
                pass  # best effort -- never complain
    else:
        self._transaction = False
        self._closed = False
        self._setsession()
        self._usage = 0
Reopen the tough connection. It will not complain if the connection cannot be reopened.
def pool_args(function, sequence, kwargs):
    """Pair every element of *sequence* with *function* and *kwargs*.

    :param function: callable repeated for every element
    :param sequence: iterable of n elements
    :param kwargs: keyword-argument dict repeated for every element
    :returns: an iterator of n ``(function, element, kwargs)`` triples
    """
    function_stream = itertools.repeat(function)
    kwargs_stream = itertools.repeat(kwargs)
    return zip(function_stream, sequence, kwargs_stream)
Return a single iterator of n elements of lists of length 3, given a sequence of len n.
def run(tpu_job_name,
        tpu,
        gcp_project,
        tpu_zone,
        model_dir,
        model_type="bitransformer",
        vocabulary=gin.REQUIRED,
        train_dataset_fn=None,
        eval_dataset_fn=None,
        dataset_split="train",
        autostack=True,
        checkpoint_path="",
        mode="train",
        iterations_per_loop=100,
        save_checkpoints_steps=1000,
        eval_steps=10,
        train_steps=1000000,
        batch_size=auto_batch_size,
        sequence_length=gin.REQUIRED,
        mesh_shape=gin.REQUIRED,
        layout_rules=gin.REQUIRED,
        get_components_fn=None):
    """Run training/eval/inference on a TPU estimator.

    See the module-level documentation for the meaning of each argument;
    ``batch_size`` may be an int or a function with the signature of
    ``auto_batch_size()``. ``train_dataset_fn`` is required for mode
    "train", ``eval_dataset_fn`` and ``get_components_fn`` for mode
    "continuous_eval".

    NOTE(review): reconstructed from a line-collapsed source; statement
    nesting inferred -- verify against upstream before relying on it.
    """
    # batch_size may be given as a callable computing the global batch size.
    if not isinstance(batch_size, int):
        batch_size = batch_size(sequence_length, mesh_shape, layout_rules)
    tf.logging.info("mode=%s" % mode,)
    tf.logging.info("batch_size=%s" % batch_size,)
    tf.logging.info("sequence_length=%s" % sequence_length,)
    tf.logging.info("mesh_shape=%s" % mesh_shape,)
    tf.logging.info("layout_rules=%s" % layout_rules,)
    if mode == "train" and dataset_split != "train":
        raise ValueError("mode==\"train\" requires dataset_split==\"train\"")
    mesh_shape = mtf.convert_to_shape(mesh_shape)
    layout_rules = mtf.convert_to_layout_rules(layout_rules)
    cluster = tf.contrib.cluster_resolver.TPUClusterResolver(
        tpu if (tpu) else "", zone=tpu_zone, project=gcp_project)
    tf.logging.info(
        "Building TPUConfig with tpu_job_name={}".format(tpu_job_name)
    )
    my_tpu_config = tpu_config.TPUConfig(
        tpu_job_name=tpu_job_name,
        iterations_per_loop=iterations_per_loop,
        num_cores_per_replica=1,
        per_host_input_for_training=tpu_config.InputPipelineConfig.BROADCAST,
    )
    run_config = tpu_config.RunConfig(
        cluster=cluster,
        model_dir=model_dir,
        save_checkpoints_steps=save_checkpoints_steps,
        tpu_config=my_tpu_config)
    transformer_model = build_model(
        model_type=model_type,
        input_vocab_size=inputs_vocabulary(vocabulary).vocab_size,
        output_vocab_size=targets_vocabulary(vocabulary).vocab_size,
        layout_rules=layout_rules,
        mesh_shape=mesh_shape)
    model_fn = tpu_estimator_model_fn(
        model_type=model_type,
        transformer_model=transformer_model,
        model_dir=model_dir,
        use_tpu=tpu,
        mesh_shape=mesh_shape,
        layout_rules=layout_rules,
        batch_size=batch_size,
        sequence_length=sequence_length,
        autostack=autostack,
        metric_names=None)
    estimator = tpu_estimator.TPUEstimator(
        model_fn=model_fn,
        config=run_config,
        train_batch_size=batch_size,
        eval_batch_size=batch_size,
        predict_batch_size=batch_size,
        use_tpu=tpu,
        export_to_tpu=False,
        params={})
    if mode == "train":
        if train_dataset_fn is None:
            raise ValueError("Must provide train_dataset_fn through gin for train.")
        def input_fn(params):
            del params
            dataset = train_dataset_fn(batch_size=batch_size,
                                       sequence_length=sequence_length,
                                       vocabulary=vocabulary,
                                       dataset_split=dataset_split)
            return dataset
        estimator.train(input_fn=input_fn, max_steps=train_steps)
    elif mode == "continuous_eval":
        if get_components_fn is None:
            raise ValueError("Must provide get_components_fn through gin for eval.")
        if eval_dataset_fn is None:
            raise ValueError("Must provide eval_dataset_fn through gin for eval.")
        metrics_inputs = get_components_fn()
        # Re-evaluate every component on each new checkpoint, rebuilding the
        # estimator with per-component metric names.
        for _ in tf.contrib.training.checkpoints_iterator(estimator.model_dir):
            for metric_names, component in metrics_inputs:
                tf.logging.info("Evaluating {}".format(component.__dict__))
                tf.logging.info("on split {}".format(dataset_split))
                metric_names = [
                    "eval/{}/{}".format(dataset_split, n) for n in metric_names
                ]
                model_fn = tpu_estimator_model_fn(
                    model_type=model_type,
                    transformer_model=transformer_model,
                    model_dir=model_dir,
                    use_tpu=tpu,
                    mesh_shape=mesh_shape,
                    layout_rules=layout_rules,
                    batch_size=batch_size,
                    sequence_length=sequence_length,
                    autostack=autostack,
                    metric_names=metric_names)
                estimator = tpu_estimator.TPUEstimator(
                    model_fn=model_fn,
                    config=run_config,
                    train_batch_size=batch_size,
                    eval_batch_size=batch_size,
                    predict_batch_size=batch_size,
                    use_tpu=tpu,
                    export_to_tpu=False,
                    params={})
                def input_fn(params):
                    del params
                    dataset = eval_dataset_fn(component,
                                              batch_size=batch_size,
                                              sequence_length=sequence_length,
                                              vocabulary=vocabulary,
                                              dataset_split=dataset_split,
                                              pack=False)
                    return dataset
                eval_args = {"eval": (input_fn, eval_steps)}
                _ = evaluate(estimator, eval_args)
    elif mode == "infer":
        decode_from_file(
            estimator,
            vocabulary=vocabulary,
            model_type=model_type,
            batch_size=batch_size,
            sequence_length=sequence_length,
            checkpoint_path=checkpoint_path)
    else:
        raise ValueError(
            "unknown mode %s - must be train/evaluate/continuous_eval/infer" % mode)
Run training/eval/inference. Args: tpu_job_name: string, name of TPU worker binary tpu: string, the Cloud TPU to use for training gcp_project: string, project name for the Cloud TPU-enabled project tpu_zone: string, GCE zone where the Cloud TPU is located in model_dir: string, estimator model_dir model_type: a string - either "bitransformer", "lm" or "aligned" vocabulary: a vocabulary.Vocabulary or (inputs_vocabulary, targets_vocabulary) tuple. train_dataset_fn: A function returning a tf.data.Dataset. Must be provided for mode=train eval_dataset_fn: A function returning a tf.data.Dataset. Must be provided for model=eval dataset_split: a string autostack: boolean, internally combine variables checkpoint_path: a string - which checkpoint to load for inference mode: string, train/evaluate/infer iterations_per_loop: integer, steps per train loop save_checkpoints_steps: integer, steps per checkpoint eval_steps: integer, number of evaluation steps train_steps: Total number of training steps. batch_size: An integer or a function with the same signature as auto_batch_size(). Mini-batch size for the training. Note that this is the global batch size and not the per-shard batch size. sequence_length: an integer mesh_shape: an input to mtf.convert_to_shape() layout_rules: an input to mtf.convert_to_layout_rules() get_components_fn: an optional function that gets a list of tuples of (metric_names, component) for each component. Required if mode is "continuous_eval"
# NOTE(review): the params dict keys, the default for ``data_type`` and the
# HTTP method argument were stripped by extraction; not runnable as-is.
# Purpose (from the adjacent docstring): query the CrossRef deposit endpoint
# for a DOI batch submission's status, where data_type selects either the
# submitted XML ('contents') or the submission result ('result').
def request_doi_status_by_batch_id(self, doi_batch_id, data_type=): endpoint = self.get_endpoint() params = { : self.api_user, : self.api_key, : doi_batch_id, : data_type } result = self.do_http_request( , endpoint, data=params, timeout=10, custom_header=str(self.etiquette) ) return result
This method retrieves the DOI request's status. file_name: Used as unique ID to identify a deposit. data_type: [contents, result] contents - retrieve the XML submitted by the publisher result - retrieve a XML with the status of the submission
def range(self, location, distance):
    """Test whether locations are within a given range of ``location``.

    Args:
        location (Point): Location to test range against
        distance (float): Distance to test location is within

    Returns:
        Generator yielding, per contained segment, that segment's
        ``range(location, distance)`` result.
    """
    for segment in self:
        yield segment.range(location, distance)
Test whether locations are within a given range of ``location``. Args: location (Point): Location to test range against distance (float): Distance to test location is within Returns: list of list of Point: Groups of points in range per segment
def convert(self, inp):
    """Convert a textual quantity description into a quantities object.

    Args:
        inp (str): textual representation of some quantity of units,
            e.g., "fifty kilograms".

    Returns:
        A quantities object in the second detected unit, built from the
        longest number found in the (preprocessed) input.
    """
    normalized = self._preprocess(inp)
    amount = NumberService().longestNumber(normalized)
    detected_units = self.extractUnits(normalized)
    # Build the quantity in the first detected unit, then convert it to
    # the second.
    result = pq.Quantity(float(amount), detected_units[0])
    result.units = detected_units[1]
    return result
Converts a string representation of some quantity of units into a quantities object. Args: inp (str): A textual representation of some quantity of units, e.g., "fifty kilograms". Returns: A quantities object representing the described quantity and its units.
def find_by_project(self, project, params=None, **options):
    """Return the compact records for all sections in the specified project.

    Parameters
    ----------
    project : {Id} The project to get sections from.
    [params] : {Object} Parameters for the request
    """
    # Bug fix: the default was a shared mutable ``{}``; use None and build a
    # fresh dict per call so state cannot leak between invocations if the
    # client mutates it.
    if params is None:
        params = {}
    path = "/projects/%s/sections" % (project)
    return self.client.get(path, params, **options)
Returns the compact records for all sections in the specified project. Parameters ---------- project : {Id} The project to get sections from. [params] : {Object} Parameters for the request
def clean_previous_run(self):
    """Clean variables from previous configuration.

    :return: None
    """
    # Let the base class reset its own state first.
    super(Alignak, self).clean_previous_run()
    # Then drop the satellite link registries specific to this daemon.
    for registry in (self.pollers, self.reactionners, self.brokers):
        registry.clear()
Clean variables from previous configuration :return: None
def open_spreadsheet(self, path, as_template=False):
    """Open an existing spreadsheet document on the local file system.

    Connects a fresh desktop via ``self.cls(self.hostname, self.port)``
    and delegates the actual open to it.
    """
    remote_desktop = self.cls(self.hostname, self.port)
    document = remote_desktop.open_spreadsheet(path, as_template=as_template)
    return document
Opens an existing spreadsheet document on the local file system.
def close(self):
    """Stop overwriting display, or update parent.

    A nested bar folds its progress into the parent; a top-level bar
    finalizes its own output line.
    """
    parent = self.parent
    if parent:
        parent.update(parent.offset + self.offset)
    else:
        self.output.write("\n")
        self.output.flush()
Stop overwriting display, or update parent.
# NOTE(review): this block was heavily mangled by extraction -- every string
# literal (units, titles, colormap names, save-file names, genfromtxt paths,
# axis-label fragments) was stripped, the bare ``except:`` hides the real
# failure mode of the rho/pha model files being absent, and the function is
# split mid-statement across two physical lines (``cid = `` continues on the
# next line). Not runnable as-is; restore from upstream before use.
# Purpose (from the adjacent docstring): plot each dataset of the tomodir
# (magnitude, coverage, phase, real/imaginary parts, optional FPI phase and
# model rho/pha) as individual figures, one saved image per datum.
def create_singleplots(plotman, cov, mag, pha, pha_fpi, alpha, options): magunit = if not pha == []: [real, imag] = calc_complex(mag, pha) if not pha_fpi == []: [real_fpi, imag_fpi] = calc_complex(mag, pha_fpi) if options.cmaglin: mag = np.power(10, mag) magunit = data = np.column_stack((mag, cov, pha, real, imag, pha_fpi, real_fpi, imag_fpi)) titles = [, , , , , , , ] unites = [ magunit, , , , , , , ] vmins = [options.mag_vmin, options.cov_vmin, options.pha_vmin, options.real_vmin, options.imag_vmin, options.pha_vmin, options.real_vmin, options.imag_vmin] vmaxs = [options.mag_vmax, options.cov_vmax, options.pha_vmax, options.real_vmax, options.imag_vmax, options.pha_vmax, options.real_vmax, options.imag_vmax] cmaps = [, , , , , , , ] saves = [, , , , , , , ] else: if options.cmaglin: mag = np.power(10, mag) magunit = data = np.column_stack((mag, cov, pha, real, imag)) titles = [, , , , ] unites = [magunit, , , , ] vmins = [options.mag_vmin, options.cov_vmin, options.pha_vmin, options.real_vmin, options.imag_vmin] vmaxs = [options.mag_vmax, options.cov_vmax, options.pha_vmax, options.real_vmax, options.imag_vmax] cmaps = [, , , , ] saves = [, , , , ] else: data = np.column_stack((mag, cov)) titles = [, ] unites = [magunit, ] vmins = [options.mag_vmin, options.cov_vmin] vmaxs = [options.mag_vmax, options.cov_vmax] cmaps = [, ] saves = [, ] try: mod_rho = np.genfromtxt(, skip_header=1, usecols=([0])) mod_pha = np.genfromtxt(, skip_header=1, usecols=([1])) data = np.column_stack((data, mod_rho, mod_pha)) titles.append() titles.append() unites.append() unites.append() vmins.append(options.mag_vmin) vmins.append(options.pha_vmin) vmaxs.append(options.mag_vmax) vmaxs.append(options.pha_vmax) cmaps.append() cmaps.append() saves.append() saves.append() except: pass for datum, title, unit, vmin, vmax, cm, save in zip( np.transpose(data), titles, unites, vmins, vmaxs, cmaps, saves): sizex, sizez = getfigsize(plotman) f, ax = plt.subplots(1, figsize=(sizex, sizez)) cid = 
plotman.parman.add_data(datum) cblabel = units.get_label(unit) if options.title is not None: title = options.title zlabel = + options.unit + xlabel = + options.unit + xmin, xmax, zmin, zmax, vmin, vmax = check_minmax( plotman, cid, options.xmin, options.xmax, options.zmin, options.zmax, vmin, vmax ) cmap = mpl_cm.get_cmap(cm) fig, ax, cnorm, cmap, cb, scalarMap = plotman.plot_elements_to_ax( cid=cid, cid_alpha=alpha, ax=ax, xmin=xmin, xmax=xmax, zmin=zmin, zmax=zmax, cblabel=cblabel, title=title, zlabel=zlabel, xlabel=xlabel, plot_colorbar=True, cmap_name=cm, over=cmap(1.0), under=cmap(0.0), no_elecs=options.no_elecs, cbmin=vmin, cbmax=vmax, ) f.tight_layout() f.savefig(save + , dpi=300)
Plot the data of the tomodir in individual plots.
def _mean_prediction(self, lmda, Y, scores, h, t_params): lmda_exp = lmda.copy() scores_exp = scores.copy() Y_exp = Y.copy() m1 = (np.sqrt(t_params[-2])*sp.gamma((t_params[-2]-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(t_params[-2]/2.0)) temp_theta = t_params[-1] + (t_params[-3] - (1.0/t_params[-3]))*np.exp(lmda_exp[-1]/2.0)*m1 for t in range(0,h): new_value = t_params[0] if self.p != 0: for j in range(1,self.p+1): new_value += t_params[j]*lmda_exp[-j] if self.q != 0: for k in range(1,self.q+1): new_value += t_params[k+self.p]*scores_exp[-k] if self.leverage is True: m1 = (np.sqrt(t_params[-2])*sp.gamma((t_params[-2]-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(t_params[-2]/2.0)) new_value += t_params[1+self.p+self.q]*np.sign(-(Y_exp[-1]-temp_theta))*(scores_exp[-1]+1) temp_theta = t_params[-1] + (t_params[-3] - (1.0/t_params[-3]))*np.exp(new_value/2.0)*m1 lmda_exp = np.append(lmda_exp,[new_value]) scores_exp = np.append(scores_exp,[0]) Y_exp = np.append(Y_exp,[temp_theta]) return lmda_exp
Creates a h-step ahead mean prediction Parameters ---------- lmda : np.array The past predicted values Y : np.array The past data scores : np.array The past scores h : int How many steps ahead for the prediction t_params : np.array A vector of (transformed) latent variables Returns ---------- h-length vector of mean predictions
def better_print(self, printer=None):
    """Print the wrapped value using a *printer*.

    :param printer: Callable used to print the value, by default:
        :func:`pprint.pprint`
    """
    # Falsy printer falls back to pprint, mirroring the ``or`` idiom.
    chosen = printer or pprint.pprint
    chosen(self.value)
Print the value using a *printer*. :param printer: Callable used to print the value, by default: :func:`pprint.pprint`
def to_json(self, extras=None):
    """Serialize this model to JSON via the playhouse shortcut.

    :param extras: optional dict merged over the model's fields before
        encoding.
    """
    payload = model_to_dict(self)
    payload.update(extras or {})
    return json.dumps(payload, cls=sel.serializers.JsonEncoder)
Convert a model into a json using the playhouse shortcut.
def create(cls, parent, child, relation_type, index=None):
    """Create a PID relation for given parent and child.

    Inserts inside a nested transaction so a duplicate relation rolls
    back cleanly before being reported.
    """
    try:
        with db.session.begin_nested():
            relation = cls(parent_id=parent.id,
                           child_id=child.id,
                           relation_type=relation_type,
                           index=index)
            db.session.add(relation)
    except IntegrityError:
        raise Exception("PID Relation already exists.")
    return relation
Create a PID relation for given parent and child.
# NOTE(review): the kwargs key literals were stripped by extraction
# (``kwargs[] = True``, ``kwargs.get()``) -- per the adjacent docstring and
# the standard Kubernetes client codegen these are presumably
# '_return_http_data_only' / 'async_req'; confirm against upstream.
# Purpose: synchronous-by-default wrapper around the generated
# *_with_http_info call for reading a MutatingWebhookConfiguration.
def read_mutating_webhook_configuration(self, name, **kwargs): kwargs[] = True if kwargs.get(): return self.read_mutating_webhook_configuration_with_http_info(name, **kwargs) else: (data) = self.read_mutating_webhook_configuration_with_http_info(name, **kwargs) return data
read_mutating_webhook_configuration # noqa: E501 read the specified MutatingWebhookConfiguration # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_mutating_webhook_configuration(name, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the MutatingWebhookConfiguration (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1beta1MutatingWebhookConfiguration If the method is called asynchronously, returns the request thread.
# NOTE(review): this block was destroyed by extraction -- the body is a
# fusion of docstring fragments and stray tokens (``location.id } }``,
# ``/locationsgrouplocationslocations']``); only the final
# ``return self.locations`` looks original. Restore from upstream before use.
# Purpose (from the adjacent docstring): retrieve all locations tracked by
# the model, returned as a dict keyed by location id and cached on
# ``self.locations``.
def get_locations(self): location.id } } /locationsgrouplocationslocations'] return self.locations
a method to retrieve all the locations tracked by the model :return: dictionary with location id keys NOTE: results are added to self.locations property { 'location.id': { ' } }
# NOTE(review): string literals were stripped by extraction -- the default
# filename, the '.html' suffix appended to it, and the open() mode are all
# missing. Also note ``f.closed`` reads the attribute instead of calling
# ``f.close()`` (harmless inside ``with``, but dead code). Not runnable
# as-is; restore from upstream before use.
# Purpose (from the adjacent docstring): save self.htmlcontent to a .html file.
def save_file(self, filename = ): filename = filename + with open(filename, ) as f: f.write(self.htmlcontent) f.closed
save htmlcontent as .html file
# NOTE(review): string literals were stripped by extraction -- the sentinel
# filename joined onto build_path and all three stderr messages are missing.
# Not runnable as-is; restore from upstream before use.
# Purpose (from the adjacent docstring): create the build directory; if it
# already exists (and contains the sentinel file), print an informative
# error and exit(1) instead of clobbering it.
def setup_build_path(build_path): if os.path.isdir(build_path): fname = os.path.join(build_path, ) if os.path.exists(fname): sys.stderr.write() sys.stderr.write( . format(build_path)) sys.stderr.write( ) sys.exit(1) else: os.makedirs(build_path, 0o755)
Create build directory. If this already exists, print informative error message and quit.
# NOTE(review): the ``kwargs.pop(, )`` key and default literals were
# stripped by extraction (presumably popping 'id' with an empty-string
# default); not runnable as-is -- restore from upstream before use.
# Purpose (from the adjacent docstring): yield instances matching the given
# query arguments via a paginated HTTP GET.
def where(cls, **kwargs): _id = kwargs.pop(, ) return cls.paginated_results(*cls.http_get(_id, params=kwargs))
Returns a generator which yields instances matching the given query arguments. For example, this would yield all :py:class:`.Project`:: Project.where() And this would yield all launch approved :py:class:`.Project`:: Project.where(launch_approved=True)
def get_iso_packet_buffer_list(transfer_p):
    """Python-specific helper extracting a list of iso packet buffers.

    Walks the transfer's iso packet descriptors, slicing one buffer per
    packet at its running offset within the transfer buffer.
    """
    transfer = transfer_p.contents
    buffers = []
    offset = 0
    for iso_transfer in _get_iso_packet_list(transfer):
        packet_length = iso_transfer.length
        buffers.append(_get_iso_packet_buffer(transfer, offset, packet_length))
        offset += packet_length
    return buffers
Python-specific helper extracting a list of iso packet buffers.
def ackermann_naive(m: int, n: int) -> int:
    """Compute the Ackermann function A(m, n) by direct recursion.

    Grows explosively; only tiny inputs (m <= 3, small n) are feasible.

    :param m: first argument, non-negative
    :param n: second argument, non-negative
    :return: A(m, n)
    :raises RecursionError: for inputs beyond small m/n
    """
    # Bug fix: the recursive calls previously referenced ``ackermann``,
    # which is not defined by this block -- recurse on this function itself.
    if m == 0:
        return n + 1
    if n == 0:
        return ackermann_naive(m - 1, 1)
    return ackermann_naive(m - 1, ackermann_naive(m, n - 1))
Ackermann number.
def to_0d_array(value: Any) -> np.ndarray:
    """Given a value, wrap it in a 0-D numpy.ndarray.

    Scalars and already-0-D arrays go through ``np.array`` directly;
    everything else is boxed via ``to_0d_object_array``.
    """
    already_zero_dim = isinstance(value, np.ndarray) and value.ndim == 0
    if np.isscalar(value) or already_zero_dim:
        return np.array(value)
    return to_0d_object_array(value)
Given a value, wrap it in a 0-D numpy.ndarray.
def pcolor_axes(array, px_to_units=px_to_units):
    """Return corner-coordinate arrays ``x, y`` for *array* for use with
    :func:`matplotlib.pyplot.pcolor`.

    *px_to_units* converts a (row, col) pixel coordinate to units; by
    default the module-level ``px_to_units`` (pixel pass-through) is used.
    """
    n_rows = array.shape[0] + 1
    n_cols = array.shape[1] + 1
    x = _np.empty((n_rows, n_cols))
    y = _np.empty((n_rows, n_cols))
    # Cell corners sit half a pixel outside the centers, hence the -0.5.
    for row in range(n_rows):
        for col in range(n_cols):
            x[row, col], y[row, col] = px_to_units(row - 0.5, col - 0.5)
    return x, y
Return axes :code:`x, y` for *array* to be used with :func:`matplotlib.pyplot.color`. *px_to_units* is a function to convert pixels to units. By default, returns pixels.
# NOTE(review): string literals were stripped by extraction -- the default
# for ``modes``, the MODE command format strings, the expected-reply codes,
# and the replace() arguments are all missing. Not runnable as-is; restore
# from upstream before use.
# Purpose (from the adjacent docstring): IRC user-mode helper -- with no
# modes given it queries and returns the nick's current modes, otherwise it
# sets the given modes and returns the server-confirmed mode string.
def umode(self, nick, modes=): with self.lock: if not modes: self.send( % nick) if self.readable(): msg = self._recv(expected_replies=(,)) if msg[0] == : modes = msg[2].replace(, ).replace(, , 1) return modes self.send( % (nick, modes)) if self.readable(): msg = self._recv(expected_replies=(,)) if msg[0] == : if not self.hide_called_events: self.stepback() return msg[2].replace(, , 1)
Sets/gets user modes. Required arguments: * nick - Nick to set/get user modes for. Optional arguments: * modes='' - Sets these user modes on a nick.
def _colorize_single_line(line, regexp, color_def):
    """Print one line to the console, colorizing parts matched by *regexp*.

    When the regex defines no named groups the whole line is printed in the
    default color pair ``color_def[0]/color_def[1]``; otherwise each matched
    group text is printed in the color configured for its group name,
    and unconfigured parts are printed plain.
    """
    match = regexp.match(line)
    named = match.groupdict()
    parts = match.groups()
    if not named:
        default_color = color_def[0]
        default_dark = color_def[1]
        cprint("%s\n" % line, default_color, fg_dark=default_dark)
        return
    # Invert group-name -> text so each text fragment maps back to its group.
    group_for_text = {text: group for group, text in named.items()}
    for part in parts:
        group = group_for_text.get(part)
        if group is not None and group in color_def:
            cprint(
                part,
                color_def[group][0],
                fg_dark=color_def[group][1],
            )
        else:
            cprint(part)
    cprint("\n")
Print single line to console with ability to colorize parts of it.
def namedb_get_name_preorder( db, preorder_hash, current_block ):
    """Get a (singular) name preorder record outstanding at the given block,
    given the preorder hash.

    NOTE: returns expired preorders.

    Return the preorder record on success.
    Return None if not found.
    """
    # Fetch the preorder row, restricted to preorders created before the
    # expiry window relative to current_block.
    preorder_query = "SELECT * FROM preorders WHERE preorder_hash = ? AND op = ? AND block_number < ?;"
    preorder_query_args = (preorder_hash, NAME_PREORDER, current_block + NAME_PREORDER_EXPIRE)
    cursor = db.cursor()
    preorder_rows = namedb_query_execute( cursor, preorder_query, preorder_query_args )
    preorder_row = preorder_rows.fetchone()
    if preorder_row is None:
        # no such preorder
        return None
    preorder_record = {}
    preorder_record.update( preorder_row )
    # If an unexpired name already claims this preorder hash, the preorder
    # is no longer outstanding.
    unexpired_query, unexpired_args = namedb_select_where_unexpired_names( current_block )
    name_query = "SELECT name_records.preorder_hash " + \
                 "FROM name_records JOIN namespaces ON name_records.namespace_id = namespaces.namespace_id " + \
                 "WHERE name_records.preorder_hash = ? AND " + \
                 unexpired_query + ";"
    name_query_args = (preorder_hash,) + unexpired_args
    cursor = db.cursor()
    name_rows = namedb_query_execute( cursor, name_query, name_query_args )
    if name_rows.fetchone() is not None:
        return None
    return preorder_record
Get a (singular) name preorder record outstanding at the given block, given the preorder hash. NOTE: returns expired preorders. Return the preorder record on success. Return None if not found.
def _parse_tree_structmap(self, tree, parent_elem, normative_parent_elem=None):
    """Recursively parse all the children of parent_elem, including
    amdSecs and dmdSecs.

    :param lxml._ElementTree tree: encodes the entire METS file.
    :param lxml._Element parent_elem: the element whose children we are
        parsing.
    :param lxml._Element normative_parent_elem: the normative counterpart
        of ``parent_elem`` taken from the logical structMap labelled
        "Normative Directory Structure".
    :returns: list of FSEntry instances for this level of the structMap.
    """
    siblings = []
    # Pair each physical child element with its normative counterpart.
    el_to_normative = self._get_el_to_normative(parent_elem, normative_parent_elem)
    for elem, normative_elem in el_to_normative.items():
        if elem.tag != utils.lxmlns("mets") + "div":
            continue  # Only <mets:div> children are structural entries.
        entry_type = elem.get("TYPE")
        label = elem.get("LABEL")
        fptr_elems = elem.findall("mets:fptr", namespaces=utils.NAMESPACES)
        if entry_type.lower() == "directory":
            # Directories recurse; their dmdSecs are attached here.
            children = self._parse_tree_structmap(
                tree, elem, normative_parent_elem=normative_elem
            )
            fs_entry = fsentry.FSEntry.dir(label, children)
            self._add_dmdsecs_to_fs_entry(elem, fs_entry, tree)
            siblings.append(fs_entry)
            # Directory divs may also carry fptrs of their own; each
            # becomes an Item entry with its amdSecs attached.
            for fptr_elem in fptr_elems:
                fptr = self._analyze_fptr(fptr_elem, tree, entry_type)
                fs_entry = fsentry.FSEntry.from_fptr(
                    label=None, type_=u"Item", fptr=fptr
                )
                self._add_amdsecs_to_fs_entry(fptr.amdids, fs_entry, tree)
                siblings.append(fs_entry)
            continue
        if not len(fptr_elems):
            continue  # Non-directory without file pointers: nothing to add.
        # Item entries: analyze the first fptr, attach dmd/amd sections.
        fptr = self._analyze_fptr(fptr_elems[0], tree, entry_type)
        fs_entry = fsentry.FSEntry.from_fptr(label, entry_type, fptr)
        self._add_dmdsecs_to_fs_entry(elem, fs_entry, tree)
        self._add_amdsecs_to_fs_entry(fptr.amdids, fs_entry, tree)
        siblings.append(fs_entry)
    return siblings
Recursively parse all the children of parent_elem, including amdSecs and dmdSecs. :param lxml._ElementTree tree: encodes the entire METS file. :param lxml._Element parent_elem: the element whose children we are parsing. :param lxml._Element normative_parent_elem: the normative counterpart of ``parent_elem`` taken from the logical structMap labelled "Normative Directory Structure".
def get_fastq_dir(fc_dir):
    """Retrieve the fastq directory within Solexa flowcell output.

    Checks, in priority order: the machine BaseCalls directory, a
    Firecrest/Bustard layout, then an Intensities/Bustard layout.
    Falls back to ``fc_dir`` itself when none are present.
    """
    goat_dirs = glob.glob(os.path.join(fc_dir, "Data", "*Firecrest*", "Bustard*"))
    bustard_dirs = glob.glob(os.path.join(fc_dir, "Data", "Intensities", "*Bustard*"))
    basecall_dir = os.path.join(fc_dir, "Data", "Intensities", "BaseCalls")
    if os.path.exists(basecall_dir):
        return os.path.join(basecall_dir, "fastq")
    if goat_dirs:
        return os.path.join(goat_dirs[0], "fastq")
    if bustard_dirs:
        return os.path.join(bustard_dirs[0], "fastq")
    return fc_dir
Retrieve the fastq directory within Solexa flowcell output.
def Dir_anis_corr(InDir, AniSpec):
    """Apply a simple anisotropy correction to a direction.

    Takes the anisotropy spec ``AniSpec`` and a Dec/Inc pair ``InDir``;
    returns the corrected Dec, Inc (via ``cart2dir``).  If the anisotropy
    tensor is the identity (chi[0][0] == 1.), the input direction is
    returned unchanged (with unit length appended).
    """
    direction = np.zeros((3), )
    direction[0] = InDir[0]
    direction[1] = InDir[1]
    direction[2] = 1.
    chi, chi_inv = check_F(AniSpec)
    if chi[0][0] == 1.:
        # Isotropic case: nothing to correct.
        return direction
    cart = np.array(dir2cart(direction))
    corrected = np.dot(cart, chi_inv)
    return cart2dir(corrected)
takes the 6 element 's' vector and the Dec,Inc 'InDir' data, performs simple anisotropy correction. returns corrected Dec, Inc
def _read_master_branch_resource(self, fn, is_json=False):
    """Read resource ``fn`` from the master branch of the global repo.

    WARNING: this will force the current branch to master!

    Returns the parsed JSON (when ``is_json``), the raw file text, or
    None when the file does not exist.

    NOTE(review): the ``codecs.open`` mode/encoding literals are stripped
    in this source — restore from upstream.
    """
    with self._master_branch_repo_lock:
        ga = self._create_git_action_for_global_resource()
        with ga.lock():
            ga.checkout_master()  # side effect: switches branch
            if os.path.exists(fn):
                if is_json:
                    return read_as_json(fn)
                with codecs.open(fn, , encoding=) as f:
                    ret = f.read()
                return ret
            return None
This will force the current branch to master!
def change_and_save(self, update_only_changed_fields=False, **changed_fields):
    """Apply ``changed_fields`` to every object in the queryset and save.

    Returns the queryset of the changed objects.
    """
    bulk_change_and_save(
        self,
        update_only_changed_fields=update_only_changed_fields,
        **changed_fields
    )
    return self.filter()
Changes a given `changed_fields` on each object in the queryset, saves objects and returns the changed objects in the queryset.
def log_file(self):
    """The path to the log file for this job.

    Lazily sets a default log-file name (derived from the job name) when
    none is configured, then returns the path joined to the initial dir.

    NOTE(review): the config-key and format-string literals are stripped
    in this source (``self.get()`` / ``self.set(, ...)``).
    """
    log_file = self.get()
    if not log_file:
        # Fall back to a default name derived from the job name.
        log_file = % (self.name)
        self.set(, log_file)
    return os.path.join(self.initial_dir, self.get())
The path to the log file for this job.
def avg_bp_from_range(self, bp):
    """Return the average of a FastQC base-pair range as an int.

    FastQC often gives base pair ranges (eg. ``"10-15"``) which are not
    helpful when plotting; this returns the midpoint of such a range as
    an int.  Non-range values are returned unchanged (converted to int).

    :param bp: a string like ``"10-15"``, a plain numeric string, or a
        number (non-strings fall through via the TypeError handler).
    """
    try:
        # Fix: the membership test lost its '-' literal in this source;
        # the delimiter is unambiguous from the split("-", 1) calls below.
        if '-' in bp:
            maxlen = float(bp.split("-", 1)[1])
            minlen = float(bp.split("-", 1)[0])
            bp = ((maxlen - minlen) / 2) + minlen
    except TypeError:
        # bp is not a string (e.g. already a number): use it as-is.
        pass
    return int(bp)
Helper function - FastQC often gives base pair ranges (eg. 10-15) which are not helpful when plotting. This returns the average from such ranges as an int, which is helpful. If not a range, just returns the int
def human2bytes(s):
    """Convert a human-readable size string like '1M' into a byte count.

    >>> human2bytes('1M')
    1048576
    >>> human2bytes('1G')
    1073741824

    :param s: a string of digits followed by a single unit letter.
    :raises AssertionError: if the number part is not digits or the unit
        letter is unknown.
    """
    # Fix: the unit tuple lost its literals in this source; values are
    # pinned by the doctest ('M' -> 1 << 20, 'G' -> 1 << 30).
    symbols = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    letter = s[-1:].strip().upper()
    num = s[:-1]
    assert num.isdigit() and letter in symbols, s
    num = float(num)
    prefix = {symbols[0]: 1}
    # Each successive symbol is another factor of 1024 (1 << 10).
    for i, sym in enumerate(symbols[1:]):  # renamed: don't shadow `s`
        prefix[sym] = 1 << (i + 1) * 10
    return int(num * prefix[letter])
>>> human2bytes('1M') 1048576 >>> human2bytes('1G') 1073741824
def parseDateText(self, dateString):
    """Parse long-form date strings into a C{struct_time}::

        'May 31st, 2006'
        'Jan 1st'
        'July 2006'

    @type  dateString: string
    @param dateString: text to convert to a datetime
    @rtype:  struct_time
    @return: calculated C{struct_time} value of dateString

    NOTE(review): the ``m.group()`` calls below lost their group-name
    literals in this source — restore from upstream parsedatetime.
    """
    # Current local time provides defaults for unspecified fields.
    yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()
    currentMth = mth
    currentDy = dy
    s = dateString.lower()
    m = self.ptc.CRE_DATE3.search(s)
    mth = m.group()
    mth = self.ptc.MonthOffsets[mth]  # month name -> month number
    if m.group() != None:
        dy = int(m.group())
    else:
        dy = 1  # no day given: default to the 1st
    if m.group() != None:
        yr = int(m.group())
        # Two-digit years: map into 20xx/19xx around the birthday epoch.
        if yr < self.ptc.BirthdayEpoch:
            yr += 2000
        elif yr < 100:
            yr += 1900
    elif (mth < currentMth) or (mth == currentMth and dy < currentDy):
        # No year given and the date already passed: assume next year.
        yr += 1
    if dy > 0 and dy <= self.ptc.daysInMonth(mth, yr):
        sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
    else:
        # Invalid day for that month: clear flags, fall back to now.
        self.dateFlag = 0
        self.timeFlag = 0
        sourceTime = time.localtime()
    return sourceTime
Parse long-form date strings:: 'May 31st, 2006' 'Jan 1st' 'July 2006' @type dateString: string @param dateString: text to convert to a datetime @rtype: struct_time @return: calculated C{struct_time} value of dateString
def generic_ref_formatter(view, context, model, name, lazy=False):
    """Format a GenericReferenceField / LazyGenericReferenceField value.

    See Also
    --------
    diff_formatter

    NOTE(review): this block is garbled in this source — the success
    branch that built the Markup hyperlink lost its string literals and
    several lines; the remaining fragment is kept verbatim below.
    """
    try:
        if lazy:
            # Lazy references must be fetched before dereferencing.
            rel_model = getattr(model, name).fetch()
        else:
            rel_model = getattr(model, name)
    except (mongoengine.DoesNotExist, AttributeError) as e:
        % rel_model.__class__.__name__.lower(), id=rel_model.id, ), rel_model, ) )
    except werkzeug.routing.BuildError as e:
        # URL could not be built for the referenced model: show the error.
        return Markup( % e )
For GenericReferenceField and LazyGenericReferenceField See Also -------- diff_formatter
def compose(self, *args, **kwargs):
    """Compose layer and masks (mask, vector mask, and clipping layers).

    :return: :py:class:`PIL.Image`, or `None` if the layer has no pixel.
    """
    from psd_tools.api.composer import compose_layer
    has_pixels = self.bbox != (0, 0, 0, 0)
    if not has_pixels:
        return None
    return compose_layer(self, *args, **kwargs)
Compose layer and masks (mask, vector mask, and clipping layers). :return: :py:class:`PIL.Image`, or `None` if the layer has no pixel.
def doc(inherit=None, **kwargs):
    """Annotate the decorated view function or class with the specified
    Swagger attributes.

    Usage:

    .. code-block:: python

        @doc(tags=['pet'], description='a pet store')
        def get_pet(pet_id):
            return Pet.query.filter(Pet.id == pet_id).one()

    :param inherit: Inherit Swagger documentation from parent classes

    NOTE(review): the annotation-key literal passed to ``annotate`` is
    stripped in this source.
    """
    def wrapper(func):
        # Record the Swagger kwargs on the function, then activate it.
        annotate(func, , [kwargs], inherit=inherit)
        return activate(func)
    return wrapper
Annotate the decorated view function or class with the specified Swagger attributes. Usage: .. code-block:: python @doc(tags=['pet'], description='a pet store') def get_pet(pet_id): return Pet.query.filter(Pet.id == pet_id).one() :param inherit: Inherit Swagger documentation from parent classes
def upload(ctx, release, rebuild):
    """Upload distribution files to pypi or pypitest.

    :param ctx: click/invoke context used to trigger a rebuild.
    :param release: when truthy, upload to the real PyPI; otherwise to
        the test repository.
    :param rebuild: when truthy, force a fresh build before uploading.

    NOTE(review): the glob pattern, twine argument lists and repository
    name literals are stripped in this source.
    """
    dist_path = Path(DIST_PATH)
    if rebuild is False:
        if not dist_path.exists() or not list(dist_path.glob()):
            print("No distribution files found. Please run command first")
            return
    else:
        # Force a fresh build before uploading.
        ctx.invoke(build, force=True)
    if release:
        args = [, , ]
    else:
        repository =
        args = [, , , repository, ]
    env = os.environ.copy()
    # Run the upload tool as a subprocess and wait for completion.
    p = subprocess.Popen(args, env=env)
    p.wait()
Uploads distribution files to pypi or pypitest.
def get_fig_data_attrs(self, delimiter=None):
    """Join the data attributes with other plotters in the project.

    This method joins the attributes of the
    :class:`~psyplot.InteractiveBase` instances in the project that draw
    on the same figure as this instance does.

    Parameters
    ----------
    delimiter: str
        Specifies the delimiter with what the attributes are joined. If
        None, the :attr:`delimiter` attribute of this instance or (if the
        latter is also None), the rcParams item is used.

    Returns
    -------
    dict
        A dictionary with all the meta attributes joined by the specified
        `delimiter`.

    NOTE(review): the rcParams key literal and a debug-log message are
    stripped in this source.
    """
    if self.project is not None:
        # First non-None of: explicit argument, instance attr, rcParams.
        delimiter = next(filter(lambda d: d is not None, [
            delimiter, self.delimiter, self.rc[]]))
        figs = self.project.figs
        fig = self.ax.get_figure()
        if self.plotter._initialized and fig in figs:
            # Join attributes across all plotters on this figure.
            ret = figs[fig].joined_attrs(delimiter=delimiter,
                                         plot_data=True)
        else:
            ret = self.get_enhanced_attrs(self.plotter.plot_data)
            self.logger.debug(
                )
        return ret
    else:
        # No project: just the enhanced attrs of this plotter's data.
        return self.get_enhanced_attrs(self.plotter.plot_data)
Join the data attributes with other plotters in the project This method joins the attributes of the :class:`~psyplot.InteractiveBase` instances in the project that draw on the same figure as this instance does. Parameters ---------- delimiter: str Specifies the delimiter with what the attributes are joined. If None, the :attr:`delimiter` attribute of this instance or (if the latter is also None), the rcParams['texts.delimiter'] item is used. Returns ------- dict A dictionary with all the meta attributes joined by the specified `delimiter`
def docoptcfg(doc, argv=None, env_prefix=None, config_option=None, ignore=None, *args, **kwargs):
    """Pass most args/kwargs to docopt. Handle `env_prefix` and `config_option`.

    :raise DocoptcfgError: If `config_option` isn't found in docstring.
    :raise DocoptcfgFileError: On any error while trying to read and parse
        config file (if enabled).

    :param str doc: Docstring passed to docopt.
    :param iter argv: sys.argv[1:] passed to docopt.
    :param str env_prefix: Enable environment variable support, prefix of
        said variables.
    :param str config_option: Enable config file support, docopt option
        defining path to config file.
    :param iter ignore: Options to ignore (the default literals are
        stripped in this source — upstream uses --help and --version).

    :return: Dictionary constructed by docopt and updated by docoptcfg.
    :rtype: dict
    """
    docopt_dict = docopt.docopt(doc, argv, *args, **kwargs)
    if env_prefix is None and config_option is None:
        return docopt_dict  # neither feature enabled: plain docopt
    if argv is None:
        argv = sys.argv[1:]
    if ignore is None:
        ignore = (, )
    settable, booleans, repeatable, short_map = settable_options(doc, argv, ignore, kwargs.get(, False))
    if not settable:
        return docopt_dict  # nothing can be overridden
    # Environment values take precedence over config-file values but
    # not over the command line.
    if env_prefix is not None:
        defaults = values_from_env(env_prefix, settable, booleans, repeatable)
        settable -= set(defaults.keys())
        docopt_dict.update(defaults)
    # Config-file values fill in whatever is still settable.
    if config_option is not None:
        defaults = values_from_file(
            docopt_dict,
            short_map.get(config_option, config_option),
            settable,
            booleans,
            repeatable,
        )
        docopt_dict.update(defaults)
    return docopt_dict
Pass most args/kwargs to docopt. Handle `env_prefix` and `config_option`. :raise DocoptcfgError: If `config_option` isn't found in docstring. :raise DocoptcfgFileError: On any error while trying to read and parse config file (if enabled). :param str doc: Docstring passed to docopt. :param iter argv: sys.argv[1:] passed to docopt. :param str env_prefix: Enable environment variable support, prefix of said variables. :param str config_option: Enable config file support, docopt option defining path to config file. :param iter ignore: Options to ignore. Default is --help and --version. :param iter args: Additional positional arguments passed to docopt. :param dict kwargs: Additional keyword arguments passed to docopt. :return: Dictionary constructed by docopt and updated by docoptcfg. :rtype: dict
def without_global_scope(self, scope):
    """Remove a registered global scope.

    :param scope: The scope to remove
    :type scope: Scope or str

    :rtype: Builder
    """
    # A string removes the scope registered under that name directly.
    if isinstance(scope, basestring):
        del self._scopes[scope]
        return self

    # Otherwise remove every scope whose class matches the given scope
    # class or instance.
    matching = [
        name
        for name, registered in self._scopes.items()
        if scope == registered.__class__
        or isinstance(scope, registered.__class__)
    ]
    for name in matching:
        del self._scopes[name]

    return self
Remove a registered global scope. :param scope: The scope to remove :type scope: Scope or str :rtype: Builder
def setup(self, loop):
    """Start the watcher on ``loop``, registering new watches if any.

    Coroutine (pre-3.5 ``yield from`` style): initializes inotify,
    registers each requested watch, then wraps the inotify fd in an
    asyncio stream/transport pair.
    """
    self._loop = loop

    self._fd = LibC.inotify_init()  # raw inotify file descriptor
    for alias, (path, flags) in self.requests.items():
        self._setup_watch(alias, path, flags)

    # Expose the fd as an asyncio stream for event reading.
    self._stream, self._transport = yield from aioutils.stream_from_fd(self._fd, loop)
Start the watcher, registering new watches if any.
def _get_types(self):
    """Extract parameter types and value counts from the configspace.

    Returns a pair ``(types, num_values)`` aligned with the configspace's
    hyperparameters: categorical/ordinal parameters report their number
    of choices, integer parameters their range size, and float
    parameters ``np.inf``.

    NOTE(review): the per-type tag strings appended to ``types`` are
    stripped in this source — restore from upstream.

    TODO: figure out a way to properly handle ordinal parameters.
    """
    types = []
    num_values = []
    for hp in self.configspace.get_hyperparameters():
        if isinstance(hp, CS.CategoricalHyperparameter):
            types.append()
            num_values.append(len(hp.choices))
        elif isinstance(hp, CS.UniformIntegerHyperparameter):
            types.append()
            num_values.append((hp.upper - hp.lower + 1))
        elif isinstance(hp, CS.UniformFloatHyperparameter):
            types.append()
            num_values.append(np.inf)  # continuous: unbounded value count
        elif isinstance(hp, CS.OrdinalHyperparameter):
            types.append()
            num_values.append(len(hp.sequence))
        else:
            # Unknown hyperparameter kind: fail loudly.
            raise ValueError(%type(hp))
    return(types, num_values)
extracts the needed types from the configspace for faster retrival later type = 0 - numerical (continuous or integer) parameter type >=1 - categorical parameter TODO: figure out a way to properly handle ordinal parameters
def tensors_to(tensors, *args, **kwargs):
    """Apply ``torch.Tensor.to`` to tensors in a generic data structure.

    Recurses through dicts, lists, tuples and namedtuples, moving every
    tensor found with ``torch.Tensor.to``; non-tensor leaves are returned
    unchanged.

    Args:
        tensors (tensor, dict, list, namedtuple or tuple): Data structure
            with tensor values to move.
        *args: Arguments passed to ``torch.Tensor.to``.
        **kwargs: Keyword arguments passed to ``torch.Tensor.to``.

    Returns:
        The inputted ``tensors`` with ``torch.Tensor.to`` applied.

    Example:
        >>> import torch
        >>> batch = [
        ...     { 'column_a': torch.randn(5), 'column_b': torch.randn(5) },
        ...     { 'column_a': torch.randn(5), 'column_b': torch.randn(5) },
        ... ]
        >>> tensors_to(batch, torch.device('cpu'))  # doctest: +ELLIPSIS
        [{'column_a': tensor(...}]
    """
    if torch.is_tensor(tensors):
        return tensors.to(*args, **kwargs)
    elif isinstance(tensors, dict):
        return {k: tensors_to(v, *args, **kwargs) for k, v in tensors.items()}
    # Fix: the hasattr attribute literal was stripped in this source;
    # '_asdict' is unambiguous from the `_asdict()` call on the next line
    # (the standard namedtuple detection idiom).
    elif hasattr(tensors, '_asdict') and isinstance(tensors, tuple):
        # Namedtuple: rebuild the same type from its converted fields.
        return tensors.__class__(**tensors_to(tensors._asdict(), *args, **kwargs))
    elif isinstance(tensors, list):
        return [tensors_to(t, *args, **kwargs) for t in tensors]
    elif isinstance(tensors, tuple):
        return tuple([tensors_to(t, *args, **kwargs) for t in tensors])
    else:
        # Non-tensor leaf: return unchanged.
        return tensors
Apply ``torch.Tensor.to`` to tensors in a generic data structure. Inspired by: https://github.com/pytorch/pytorch/blob/master/torch/utils/data/_utils/collate.py#L31 Args: tensors (tensor, dict, list, namedtuple or tuple): Data structure with tensor values to move. *args: Arguments passed to ``torch.Tensor.to``. **kwargs: Keyword arguments passed to ``torch.Tensor.to``. Example use case: This is useful as a complementary function to ``collate_tensors``. Following collating, it's important to move your tensors to the appropriate device. Returns: The inputted ``tensors`` with ``torch.Tensor.to`` applied. Example: >>> import torch >>> batch = [ ... { 'column_a': torch.randn(5), 'column_b': torch.randn(5) }, ... { 'column_a': torch.randn(5), 'column_b': torch.randn(5) }, ... ] >>> tensors_to(batch, torch.device('cpu')) # doctest: +ELLIPSIS [{'column_a': tensor(...}]
def submitEntry(self):
    """Process user inputs and submit the logbook entry when the user
    clicks the Submit button.

    Sends the entry to the MCC and/or Physics logbooks depending on the
    selected logs, attaching the current image if one is set, then closes
    the dialog with the combined success status.
    """
    mcclogs, physlogs = self.selectedLogs()
    success = True
    if mcclogs != []:
        if not self.acceptedUser("MCC"):
            # MCC submissions require a valid user; abort without closing.
            QMessageBox().warning(self, "Invalid User", "Please enter a valid user name!")
            return
        fileName = self.xmlSetup("MCC", mcclogs)
        if fileName is None:
            return
        if not self.imagePixmap.isNull():
            self.prepareImages(fileName, "MCC")
        success = self.sendToLogbook(fileName, "MCC")
    if physlogs != []:
        for i in range(len(physlogs)):
            fileName = self.xmlSetup("Physics", physlogs[i])
            if fileName is None:
                return
            if not self.imagePixmap.isNull():
                self.prepareImages(fileName, "Physics")
            success_phys = self.sendToLogbook(fileName, "Physics", physlogs[i])
            # Every physics submission must succeed for overall success.
            success = success and success_phys
    self.done(success)
Process user inputs and submit the logbook entry when the user clicks the Submit button
def n_cap(self, n_cap=, cap_dihedral=None):
    """Add an N-terminal acetamide cap to the polymer.

    Default behaviour duplicates the dihedral angle of the succeeding
    residues so the orientation of the acetyl carbonyl resembles that of
    the first residue; ``cap_dihedral`` adjusts this.  Currently only the
    acetyl cap is supported.

    Parameters
    ----------
    cap_dihedral : float, optional
        Alternate psi angle to be used when adding the cap.

    NOTE(review): the mol-code / atom-name string literals are stripped
    throughout this source (``atoms[] = Atom([...], , res_label=)``) —
    restore from upstream AMPAL before use.
    """
    if n_cap == :
        # Build a reference N-methylacetamide ligand with ideal geometry.
        methylacetamide = Ligand(
            atoms=None, mol_code=, is_hetero=True)
        atoms = OrderedDict()
        atoms[] = Atom([0.9500, -0.2290, 0.5090], , res_label=)
        atoms[] = Atom([0.7450, -0.9430, 1.8040], , res_label=)
        atoms[] = Atom([0.1660, -2.0230, 1.8130], , res_label=)
        atoms[] = Atom([1.2540, -0.2750, 2.9010], , res_label=)
        atoms[] = Atom([1.1630, -0.7870, 4.2500], , res_label=)
        methylacetamide.atoms = atoms
        # Superimpose the cap onto the first residue's backbone.
        s1, e1, s2, e2 = [
            x._vector for x in [methylacetamide[], methylacetamide[],
                                self._monomers[0][], self._monomers[0][]]]
        translation, angle, axis, point = find_transformations(
            s1, e1, s2, e2, radians=False)
        methylacetamide.rotate(
            angle=angle, axis=axis, point=point, radians=False)
        methylacetamide.translate(vector=translation)
        # Match the cap's dihedral to the succeeding residue (or to the
        # supplied cap_dihedral when given).
        start_angle = dihedral(
            methylacetamide[], self._monomers[0][],
            self._monomers[0][], self._monomers[0][])
        ref_angle = dihedral(
            self._monomers[0][], self._monomers[1][],
            self._monomers[1][], self._monomers[1][])
        if cap_dihedral is not None:
            methylacetamide.rotate(ref_angle - start_angle + cap_dihedral,
                                   axis=methylacetamide[]._vector - self._monomers[0][]._vector,
                                   point=methylacetamide[]._vector)
        else:
            methylacetamide.rotate(ref_angle - start_angle,
                                   axis=methylacetamide[]._vector - self._monomers[0][]._vector,
                                   point=methylacetamide[]._vector)
        if self.ligands is None:
            self.ligands = LigandGroup(ampal_parent=self)
        # Keep only the acetyl portion of the reference molecule as the
        # actual cap ligand.
        acetamide = Ligand(mol_code=, ampal_parent=self.ligands)
        acetamide_atoms = OrderedDict()
        acetamide_atoms[] = atoms[]
        acetamide_atoms[] = atoms[]
        acetamide_atoms[] = atoms[]
        for atom in acetamide_atoms.values():
            atom.ampal_parent = acetamide
        acetamide.atoms = acetamide_atoms
        self.ligands.append(acetamide)
    else:
        pass  # only the acetyl cap is currently supported
    self.tags[] = False
    return
Adds an N-terminal acetamide cap. Notes ----- Default behaviour is to duplicate the dihedral angle of the succeeding residues such that the orientation of the carbonyl of the acetyl will resemble that of the first residue. This can be adjusted by supplying a cap_dihedral value. Currently only acetyl cap is supported, but this structure should work for other caps. Parameters ---------- cap : str, optional Type of cap to be added. Options: 'acetyl' cap_dihedral : bool Alternate psi angle to be used when added cap.
def get_bytes(self, bridge):
    """Build the full command as a ``bytearray``.

    :param bridge: The bridge to which the command should be sent; older
        bridges (version below ``BRIDGE_SHORT_VERSION_MIN``) require an
        extra trailing byte.
    """
    if self.cmd_2 is not None:
        payload = [self.cmd_1, self.cmd_2]
    else:
        payload = [self.cmd_1, self.SUFFIX_BYTE]
    if bridge.version < self.BRIDGE_SHORT_VERSION_MIN:
        payload.append(self.BRIDGE_LONG_BYTE)
    return bytearray(payload)
Gets the full command as bytes. :param bridge: The bridge, to which the command should be sent.
def new(project_name):
    """Create a new project named ``project_name``.

    Runs the interactive project UI, renders the project template and
    initializes a git repository; when the UI returns an error string it
    is printed instead.

    NOTE(review): the locale name and config-key literals are stripped
    in this source.
    """
    try:
        locale.setlocale(locale.LC_ALL, )
    except:
        # Best effort: continue with the default locale.
        print("Warning: Unable to set locale. Expect encoding problems.")
    config = utils.get_config()
    config[][] = project_name
    values = new_project_ui(config)
    if type(values) is not str:
        print()
        pprint.pprint(values)
        project_dir = render.render_project(**values)
        git.init_repo(project_dir, **values)
    else:
        # new_project_ui returned an error message instead of values.
        print(values)
Creates a new project
def word_under_mouse_cursor(self):
    """Select the word under the **mouse** cursor.

    :return: A QTextCursor with the word under mouse cursor selected.
    """
    editor = self._editor
    cursor = editor.cursorForPosition(editor._last_mouse_pos)
    return self.word_under_cursor(True, cursor)
Selects the word under the **mouse** cursor. :return: A QTextCursor with the word under mouse cursor selected.
def comments(self, ticket, include_inline_images=False):
    """Retrieve the comments for a ticket.

    :param ticket: Ticket object or id
    :param include_inline_images: Boolean. If `True`, inline image
        attachments will be returned in each comments' `attachments`
        field alongside non-inline attachments

    NOTE(review): the object-type argument to ``_query_zendesk`` is
    stripped in this source.
    """
    # repr(...).lower() turns the Python bool into 'true'/'false' for
    # the query string.
    return self._query_zendesk(self.endpoint.comments, , id=ticket,
                               include_inline_images=repr(include_inline_images).lower())
Retrieve the comments for a ticket. :param ticket: Ticket object or id :param include_inline_images: Boolean. If `True`, inline image attachments will be returned in each comments' `attachments` field alongside non-inline attachments
def add_granule(self, data, store, workspace=None):
    """Harvest/add a granule into an existing imagemosaic.

    :param data: path to a .zip of granules (uploaded as a file), or a
        server-side path/URL (harvested externally).
    :param store: coverage store name or store object.
    :param workspace: workspace name; required when ``store`` is a name.
    :raises ValueError: when no workspace can be determined.
    """
    ext = os.path.splitext(data)[-1]
    if ext == ".zip":
        # Upload the zip body directly.
        type = "file.imagemosaic"
        # NOTE(review): the open() mode literal is stripped here.
        upload_data = open(data, )
        headers = {
            "Content-type": "application/zip",
            "Accept": "application/xml"
        }
    else:
        # Harvest a file already visible to the server.
        type = "external.imagemosaic"
        upload_data = data if data.startswith("file:") else "file:{data}".format(data=data)
        headers = {
            "Content-type": "text/plain",
            "Accept": "application/xml"
        }
    params = dict()
    workspace_name = workspace
    if isinstance(store, basestring):
        store_name = store
    else:
        store_name = store.name
        workspace_name = store.workspace.name
    if workspace_name is None:
        raise ValueError("Must specify workspace")
    url = build_url(
        self.service_url,
        [
            "workspaces",
            workspace_name,
            "coveragestores",
            store_name,
            type
        ],
        params
    )
    try:
        # NOTE(review): the method literal is stripped here, and the
        # FailedRequestError below is constructed but never raised —
        # likely a missing `raise` upstream.
        resp = self.http_request(url, method=, data=upload_data, headers=headers)
        if resp.status_code != 202:
            FailedRequestError(.format(store, resp.status_code, resp.text))
        self._cache.clear()
    finally:
        # Always close the file handle opened for the zip-upload path.
        if hasattr(upload_data, "close"):
            upload_data.close()
    return None
Harvest/add a granule into an existing imagemosaic
def set_user_session(user):
    """Store the authenticated user on the session and ``request.user``.

    Supports cross-login session sharing via the cache when the
    share-session setting is enabled.

    :param user: user object; may be a model instance or a dict.

    NOTE(review): settings keys and log format strings are stripped in
    this source.
    """
    from uliweb import settings, request

    user_fieldname = settings.get_var(, )
    share_session = settings.get_var(, False)
    if isinstance(user, dict):
        user_id = user[user_fieldname]
    else:
        user_id = getattr(user, user_fieldname)
    if share_session:
        cache = functions.get_cache()
        key = get_user_session_key(user_id)
        session_id = cache.get(key, None)
        log.debug(.format(user_id, session_id, key))
        if not session_id:
            # First login for this user: persist and share this session.
            request.session.save()
            log.debug(
                .format(user_id, request.session.key, request.session.expiry_time))
            cache.set(key, request.session.key, expire=request.session.expiry_time)
        elif session_id != request.session.key:
            # A shared session already exists: switch to it.
            log.debug(.format(request.session.key, session_id))
            request.session.delete()
            request.session.load(session_id)
    # Dicts are stored whole; model instances are stored by id only.
    if isinstance(user, dict):
        request.session[_get_auth_key()] = user
    else:
        request.session[_get_auth_key()] = user_id
    request.user = user
Set user session :param user: user object chould be model instance or dict :return:
def read_examples(input_files, batch_size, shuffle, num_epochs=None):
    """Create readers and queues for reading example protos.

    :param input_files: iterable of file patterns; each entry may contain
        several patterns.  NOTE(review): the separator literal of
        ``e.split()`` may have been stripped — confirm the intended
        delimiter (whitespace vs. comma) against upstream.
    :param batch_size: number of examples per batch.
    :param shuffle: whether to shuffle examples across files/batches.
    :param num_epochs: number of passes over the data; None = unlimited.
    """
    files = []
    for e in input_files:
        for path in e.split():
            files.extend(file_io.get_matching_files(path))
    thread_count = multiprocessing.cpu_count()

    # The minimum number of instances in a queue from which examples are
    # drawn randomly; larger values shuffle better but use more memory.
    min_after_dequeue = 1000

    queue_size_multiplier = thread_count + 3

    num_epochs = num_epochs or None
    filename_queue = tf.train.string_input_producer(files, num_epochs, shuffle)
    example_id, encoded_example = tf.TextLineReader().read_up_to(
        filename_queue, batch_size)
    if shuffle:
        capacity = min_after_dequeue + queue_size_multiplier * batch_size
        return tf.train.shuffle_batch(
            [example_id, encoded_example], batch_size, capacity,
            min_after_dequeue, enqueue_many=True, num_threads=thread_count)
    else:
        capacity = queue_size_multiplier * batch_size
        return tf.train.batch(
            [example_id, encoded_example], batch_size, capacity=capacity,
            enqueue_many=True, num_threads=thread_count)
Creates readers and queues for reading example protos.
def _check_pillar_exact_minions(self, expr, delimiter, greedy):
    """Return the minions found by looking via pillar with exact matching.

    NOTE(review): the cache-name positional argument is stripped in this
    source.
    """
    return self._check_cache_minions(expr, delimiter, greedy, , exact_match=True)
Return the minions found by looking via pillar
def run_qaml(self):
    """Create and run the GenomeQAML system call.

    Skips execution when the report already exists; log writes are
    serialized across threads via ``self.threadlock``.

    NOTE(review): the logging message and command format string are
    stripped in this source.
    """
    logging.info()
    qaml_call = \
        .format(tf=self.qaml_path, rf=self.qaml_report)
    make_path(self.reportpath)
    # Only run if the report doesn't already exist.
    if not os.path.isfile(self.qaml_report):
        out, err = run_subprocess(qaml_call)
        # Serialize log-file writes across threads.
        self.threadlock.acquire()
        write_to_logfile(qaml_call, qaml_call, self.logfile)
        write_to_logfile(out, err, self.logfile)
        self.threadlock.release()
Create and run the GenomeQAML system call
def _rule_option(self):
    """Parse the production rule::

        option : NAME value ';'

    Returns list (name, value_list).

    NOTE(review): the expected-token literal (the terminator) passed to
    ``_expect_token`` is stripped in this source.
    """
    name = self._get_token(self.RE_NAME)
    value = self._rule_value()
    self._expect_token()
    return [name, value]
Parses the production rule:: option : NAME value ';' Returns list (name, value_list).
def compare_schemas(one, two):
    """Compare two structures that represent JSON schemas.

    Normal equality cannot be used because JSON-schema lists do NOT keep
    order (while Python lists do), so lists are compared as unordered.
    Note this won't check all configurations, only the first one that
    seems to match, which can lead to wrong results.

    :param one: First schema to compare.
    :param two: Second schema to compare.
    :rtype: `bool`
    """
    one = _normalize_string_type(one)
    two = _normalize_string_type(two)

    _assert_same_types(one, two)  # raises when the two differ in type

    if isinstance(one, list):
        return _compare_lists(one, two)
    elif isinstance(one, dict):
        return _compare_dicts(one, two)
    elif isinstance(one, SCALAR_TYPES):
        return one == two
    elif one is None:
        return one is two
    else:
        # NOTE(review): the error format-string literal is stripped here.
        raise RuntimeError(.format(
            type=type(one).__name__))
Compare two structures that represents JSON schemas. For comparison you can't use normal comparison, because in JSON schema lists DO NOT keep order (and Python lists do), so this must be taken into account during comparison. Note this wont check all configurations, only first one that seems to match, which can lead to wrong results. :param one: First schema to compare. :param two: Second schema to compare. :rtype: `bool`
def pathstrip(path, n):
    """Strip ``n`` leading components from the given (bytes) path.

    Splits ``path`` into all of its components, drops the first ``n``,
    and rejoins with '/'.

    :param path: a bytes path, e.g. ``b'a/b/c.txt'``.
    :param n: number of leading components to strip.
    """
    pathlist = [path]
    # Fix: the bytes literals were stripped in this source; they are
    # b'' (the dirname terminator) and b'/' (the join separator), as in
    # the upstream python-patch implementation.
    while os.path.dirname(pathlist[0]) != b'':
        # Peel one component off the front until no directory remains.
        pathlist[0:1] = os.path.split(pathlist[0])
    return b'/'.join(pathlist[n:])
Strip n leading components from the given path
def Main(url):
    """Entry Point.

    Args:
        url: target url.
    """
    # Scrape the target document.
    scraper = WebScraping()
    document = scraper.scrape(url)

    # Configure an n-gram (n=3) abstractor with a MeCab tokenizer.
    abstractor = NgramAutoAbstractor()
    abstractor.n_gram = Ngram()
    abstractor.n = 3
    abstractor.tokenizable_doc = MeCabTokenizer()

    abstractable_doc = TopNRankAbstractor()
    result_dict = abstractor.summarize(document, abstractable_doc)

    # Print at most the first three summary sentences.
    limit = 3
    for count, sentence in enumerate(result_dict["summarize_result"], start=1):
        print(sentence)
        if count >= limit:
            break
Entry Point. Args: url: target url.
def write(filename, data):
    """Create a new BibTeX file.

    :param filename: The name of the BibTeX file to write.
    :param data: A ``bibtexparser.BibDatabase`` object.
    """
    # Fix: the open() mode literal was stripped in this source; the
    # function writes text, so the mode is 'w'.
    with open(filename, 'w') as fh:
        fh.write(bibdatabase2bibtex(data))
Create a new BibTeX file. :param filename: The name of the BibTeX file to write. :param data: A ``bibtexparser.BibDatabase`` object.
def getmoduleinfo(path):
    """Get the module name, suffix, mode, and module type for a given file.

    Returns None when no known import suffix matches the filename.
    """
    filename = os.path.basename(path)
    # Sort so the longest suffixes come first (negated length as key),
    # e.g. '.pyc' is tried before '.c'.
    ordered = sorted(
        (-len(suffix), suffix, mode, mtype)
        for (suffix, mode, mtype) in imp.get_suffixes()
    )
    for neglen, suffix, mode, mtype in ordered:
        if filename[neglen:] == suffix:
            return filename[:neglen], suffix, mode, mtype
Get the module name, suffix, mode, and module type for a given file.
def spawn_managed_host(config_file, manager, connect_on_start=True):
    """Spawn a managed JS host for ``config_file``, if not already running.

    Queries the manager for the host's status, starting it when needed,
    and returns a connected (unless ``connect_on_start`` is False)
    ``JSHost`` wrapper.

    NOTE(review): the status/logfile dict-key literals and the startup
    message format string are stripped in this source (``data[]``).
    """
    data = manager.request_host_status(config_file)
    is_running = data[]
    if is_running:
        # Reuse the already-running host's status and log file.
        host_status = json.loads(data[][])
        logfile = data[][]
    else:
        data = manager.start_host(config_file)
        host_status = json.loads(data[])
        logfile = data[]
    host = JSHost(
        status=host_status,
        logfile=logfile,
        config_file=config_file,
        manager=manager
    )
    if not is_running and settings.VERBOSITY >= verbosity.PROCESS_START:
        print(.format(host.get_name()))
    if connect_on_start:
        host.connect()
    return host
Spawns a managed host, if it is not already running
def decodeTagAttributes(self, text):
    """Parse a tag-attribute string into a dict of name -> decoded value.

    Values are whitespace-normalized and passed through
    ``decodeCharReferences``.

    NOTE(review): the empty-string literals (empty-input check, the
    no-value fallback ``value =``, and the whitespace replacement) are
    stripped in this source.
    """
    attribs = {}
    if text.strip() == u:
        return attribs
    scanner = _attributePat.scanner(text)
    match = scanner.search()
    while match:
        # The pattern captures the value in one of four alternatives
        # (e.g. double-quoted, single-quoted, unquoted, bare).
        key, val1, val2, val3, val4 = match.groups()
        value = val1 or val2 or val3 or val4
        if value:
            value = _space.sub(u, value).strip()
        else:
            value =
        attribs[key] = self.decodeCharReferences(value)
        match = scanner.search()
    return attribs
Parse a string of tag attributes into a dict of attribute name to value, normalizing whitespace in values and decoding character references.
def find_button(browser, value):
    """Find a button with the given value.

    Searches for the following different kinds of buttons:

        <input type="submit">
        <input type="reset">
        <input type="button">
        <input type="image">
        <button>
        <{a,p,div,span,...} role="button">

    Returns: an :class:`ElementSelector`

    NOTE(review): the field-type string literals in the tuple below are
    stripped in this source.
    """
    field_types = (
        ,
        ,
        ,
        ,
        ,
        ,
        ,
    )
    # Concatenate the selector results for every button-like field type.
    return reduce(
        operator.add,
        (find_field_with_value(browser, field_type, value)
         for field_type in field_types)
    )
Find a button with the given value. Searches for the following different kinds of buttons: <input type="submit"> <input type="reset"> <input type="button"> <input type="image"> <button> <{a,p,div,span,...} role="button"> Returns: an :class:`ElementSelector`
def to_record_per_alt(self):
    """Return a list of vcf_records, one per variant in the ALT column.

    INFO/FORMAT etc. columns are not changed, which means they may no
    longer be consistent with the single-ALT records.
    """
    per_alt = []
    for alt in self.ALT:
        new_record = copy.copy(self)
        new_record.ALT = [alt]
        per_alt.append(new_record)
    return per_alt
Returns list of vcf_records. One per variant in the ALT column. Does not change INFO/FORMAT etc columns, which means that they are now broken
def from_config(config, **options):
    """Instantiate a `SyncedRotationEventStores` from config.

    Parameters:
        config    -- the configuration file options read from file(s).
        **options -- various options given to the specific event store;
                     recognized keys are the storage-backends list and
                     an optional events_per_batch (default 25000).

    returns -- a newly instantiated `SyncedRotationEventStores`.

    NOTE(review): the required/optional option-key literals are stripped
    in this source.
    """
    required_args = (,)
    optional_args = {: 25000}
    rconfig.check_config_options("SyncedRotationEventStores",
                                 required_args,
                                 tuple(optional_args.keys()),
                                 options)
    if "events_per_batch" in options:
        events_per_batch = int(options["events_per_batch"])
    else:
        events_per_batch = optional_args["events_per_batch"]
    estore = SyncedRotationEventStores(events_per_batch)
    for section in options[].split():
        try:
            substore = rconfig.construct_eventstore(config, section)
            estore.add_rotated_store(substore)
        except Exception as e:
            _logger.exception(, section)
            # Clean up already-opened substores before propagating.
            estore.close()
            raise
    return estore
Instantiate an `SyncedRotationEventStores` from config. Parameters: config -- the configuration file options read from file(s). **options -- various options given to the specific event store. Shall not be used with this event store. Warning will be logged for every extra non-recognized option. The only required key to this function is 'path'. returns -- a newly instantiated `SyncedRotationEventStores`.
def refund_order(self, request, pk):
    """Refund the order specified by the pk.

    Responds with HTTP 204 (no content) on success; lookup failures
    propagate from ``Order.objects.get``.
    """
    order_to_refund = Order.objects.get(id=pk)
    order_to_refund.refund()
    return Response(status=status.HTTP_204_NO_CONTENT)
Refund the order specified by the pk
def PopupGetFolder(message, title=None, default_path=, no_window=False, size=(None, None), button_color=None, background_color=None, text_color=None, icon=DEFAULT_WINDOW_ICON, font=None, no_titlebar=False, grab_anywhere=False, keep_on_top=False, location=(None, None), initial_folder=None):
    """Display a popup with a text entry field and a folder Browse button.

    :return: Contents of the text field (the chosen folder path), or
        None if closed using X or cancelled.

    NOTE(review): several string literals (default strings, button
    labels, the OK-button comparison) are stripped in this source.
    """
    if no_window:
        # Use a native wx directory dialog instead of a PySimpleGUI window.
        app = wx.App(False)
        frame = wx.Frame()
        if initial_folder:
            dialog = wx.DirDialog(frame, style=wx.FD_OPEN)
        else:
            dialog = wx.DirDialog(frame)
        folder_name =
        if dialog.ShowModal() == wx.ID_OK:
            folder_name = dialog.GetPath()
        return folder_name
    # Message + input-with-browse + OK/Cancel buttons.
    layout = [[Text(message, auto_size_text=True, text_color=text_color, background_color=background_color)],
              [InputText(default_text=default_path, size=size), FolderBrowse(initial_folder=initial_folder)],
              [Button(, size=(60, 20), bind_return_key=True), Button(, size=(60, 20))]]
    _title = title if title is not None else message
    window = Window(title=_title, icon=icon, auto_size_text=True, button_color=button_color,
                    background_color=background_color, font=font, no_titlebar=no_titlebar,
                    grab_anywhere=grab_anywhere, keep_on_top=keep_on_top, location=location)
    (button, input_values) = window.Layout(layout).Read()
    window.Close()
    if button != :
        return None
    else:
        path = input_values[0]
        return path
Display popup with text entry field and browse button. Browse for folder :param message: :param default_path: :param no_window: :param size: :param button_color: :param background_color: :param text_color: :param icon: :param font: :param no_titlebar: :param grab_anywhere: :param keep_on_top: :param location: :return: Contents of text field. None if closed using X or cancelled
def create_friendship(self, access_token, user_id=None, user_name=None):
    """Create a friendship (follow a user by id or name).

    doc: http://open.youku.com/docs/doc?id=28

    NOTE(review): the request URL and the payload key literals are
    stripped in this source.
    """
    url =
    data = {
        : self.client_id,
        : access_token,
        : user_id,
        : user_name
    }
    data = remove_none_value(data)  # drop keys whose value is None
    r = requests.post(url, data=data)
    check_error(r)  # raises on an API error response
    return r.json()
doc: http://open.youku.com/docs/doc?id=28
def _rename(self):
    """Handle a PUT request whose action specifies a rename operation.

    Returns the resource URI of the renamed file.

    NOTE(review): the action-dict key literal is stripped in this source.
    """
    newname = self.action[]
    try:
        newpath = self.fs.rename(self.fp,newname)
    except OSError:
        # Surface filesystem failures as a 400 Bad Request.
        raise tornado.web.HTTPError(400)
    return newpath
Called during a PUT request where the action specifies a rename operation. Returns resource URI of the renamed file.
def main(arguments=None):
    # NOTE(review): this chunk has had its string literals stripped (the
    # empty subscripts `programSettings[]`, empty `log.info()` format
    # strings, etc. below) and its indentation mangled.  Code tokens are
    # preserved verbatim; only layout and comments were added.

    # Bootstrap the command-line tool: parse docopt arguments, settings,
    # logger and optional DB connection.
    su = tools(
        arguments=arguments,
        docString=__doc__,
        logLevel="WARNING",
        options_first=False,
        projectName="qubits"
    )
    arguments, settings, log, dbConn = su.setup()

    # Unpack the docopt arguments into local variables via exec
    # (e.g. "--flag" -> flagFlag, "<arg>" -> arg).
    for arg, val in arguments.iteritems():
        if arg[0] == "-":
            varname = arg.replace("-", "") + "Flag"
        else:
            varname = arg.replace("<", "").replace(">", "")
        if varname == "import":
            # "import" is a keyword; rename to avoid shadowing it.
            varname = "iimport"
        if isinstance(val, str) or isinstance(val, unicode):
            exec(varname + " = " % (val,))
        else:
            exec(varname + " = %s" % (val,))
        if arg == "--dbConn":
            dbConn = val
        log.debug( % (varname, val,))

    startTime = times.get_now_sql_datetime()
    log.info( % (startTime,))

    # `init` mode: just set up a fresh workspace and exit.
    if init:
        from . import workspace
        ws = workspace(
            log=log,
            pathToWorkspace=pathToWorkspace
        )
        ws.setup()
        return

    # Read every survey/simulation parameter from the settings file.
    (allSettings, programSettings, limitingMags, sampleNumber, peakMagnitudeDistributions, explosionDaysFromSettings, extendLightCurveTail, relativeSNRates, lowerRedshiftLimit, upperRedshiftLimit, redshiftResolution, restFrameFilter, kCorrectionTemporalResolution, kCorPolyOrder, kCorMinimumDataPoints, extinctionType, extinctionConstant, hostExtinctionDistributions, galacticExtinctionDistribution, surveyCadenceSettings, snLightCurves, surveyArea, CCSNRateFraction, transientToCCSNRateFraction, extraSurveyConstraints, lightCurvePolyOrder, logLevel) = cu.read_in_survey_parameters(
        log,
        pathToSettingsFile=pathToSettingsFile
    )

    # Replace the bootstrap logger with a file-backed one at the
    # settings-configured level.
    logFilePath = pathToOutputDirectory + "/qubits.log"
    del log
    log = _set_up_command_line_tool(
        level=str(logLevel),
        logFilePath=logFilePath
    )

    startTime = dcu.get_now_sql_datetime()
    log.info( % (startTime,))

    resultsDict = {}

    # Create output folders for plots and results.
    pathToOutputPlotDirectory = pathToOutputDirectory + "/plots/"
    dcu.dryx_mkdir(
        log,
        directoryPath=pathToOutputPlotDirectory
    )

    pathToResultsFolder = pathToOutputDirectory + "/results/"
    dcu.dryx_mkdir(
        log,
        directoryPath=pathToResultsFolder
    )

    # Warn when every simulation stage is switched off in the settings.
    if not programSettings[] and not programSettings[] and not programSettings[] and not programSettings[]:
        print "All stages of the simulatation have been switched off. Please switch on at least one stage of the simulation under the in the settings file `%(pathToSettingsFile)s`" % locals()

    # Stage 1: generate model (rest-frame) lightcurves from the spectral
    # database.
    if programSettings[]:
        log.info()
        dg.generate_model_lightcurves(
            log=log,
            pathToSpectralDatabase=pathToSpectralDatabase,
            pathToOutputDirectory=pathToOutputDirectory,
            pathToOutputPlotDirectory=pathToOutputPlotDirectory,
            explosionDaysFromSettings=explosionDaysFromSettings,
            extendLightCurveTail=extendLightCurveTail,
            polyOrder=lightCurvePolyOrder
        )
        print "The lightcurve file can be found here: %(pathToOutputDirectory)stransient_light_curves.yaml" % locals()
        print "The lightcurve plots can be found in %(pathToOutputPlotDirectory)s" % locals()

    # Stage 2: build the k-correction listing and polynomial databases.
    if programSettings[]:
        log.info()
        dg.generate_kcorrection_listing_database(
            log,
            pathToOutputDirectory=pathToOutputDirectory,
            pathToSpectralDatabase=pathToSpectralDatabase,
            restFrameFilter=restFrameFilter,
            temporalResolution=kCorrectionTemporalResolution,
            redshiftResolution=redshiftResolution,
            redshiftLower=lowerRedshiftLimit,
            redshiftUpper=upperRedshiftLimit + redshiftResolution)
        log.info()
        dg.generate_kcorrection_polynomial_database(
            log,
            pathToOutputDirectory=pathToOutputDirectory,
            restFrameFilter=restFrameFilter,
            kCorPolyOrder=kCorPolyOrder,
            kCorMinimumDataPoints=kCorMinimumDataPoints,
            redshiftResolution=redshiftResolution,
            redshiftLower=lowerRedshiftLimit,
            redshiftUpper=upperRedshiftLimit + redshiftResolution,
            plot=programSettings[])
        print "The k-correction database has been generated here: %(pathToOutputDirectory)sk_corrections" % locals()
        if programSettings[]:
            print "The k-correction polynomial plots can also be found in %(pathToOutputDirectory)sk_corrections" % locals()

    # Stage 3: run the Monte-Carlo survey simulation.
    if programSettings[]:
        # Draw the random SN sample: redshifts, types, peak magnitudes,
        # host and galactic extinctions.
        log.info()
        redshiftArray = u.random_redshift_array(
            log,
            sampleNumber,
            lowerRedshiftLimit,
            upperRedshiftLimit,
            redshiftResolution=redshiftResolution,
            pathToOutputPlotDirectory=pathToOutputPlotDirectory,
            plot=programSettings[])
        resultsDict[] = redshiftArray.tolist()
        log.info()
        snTypesArray = u.random_sn_types_array(
            log,
            sampleNumber,
            relativeSNRates,
            pathToOutputPlotDirectory=pathToOutputPlotDirectory,
            plot=programSettings[])
        resultsDict[] = snTypesArray.tolist()
        log.info()
        peakMagnitudesArray = u.random_peak_magnitudes(
            log,
            peakMagnitudeDistributions,
            snTypesArray,
            plot=programSettings[])
        log.info()
        hostExtinctionArray = u.random_host_extinction(
            log,
            sampleNumber,
            extinctionType,
            extinctionConstant,
            hostExtinctionDistributions,
            plot=programSettings[])
        log.info()
        galacticExtinctionArray = u.random_galactic_extinction(
            log,
            sampleNumber,
            extinctionType,
            extinctionConstant,
            galacticExtinctionDistribution,
            plot=programSettings[])
        # Build the observed-frame lightcurves from the rest-frame models
        # plus the drawn redshifts/extinctions/k-corrections.
        log.info()
        rawLightCurveDict = u.generate_numpy_polynomial_lightcurves(
            log,
            snLightCurves=snLightCurves,
            pathToOutputDirectory=pathToOutputDirectory,
            pathToOutputPlotDirectory=pathToOutputPlotDirectory,
            plot=programSettings[])
        log.info()
        kCorrectionArray = u.build_kcorrection_array(
            log,
            redshiftArray,
            snTypesArray,
            snLightCurves,
            pathToOutputDirectory=pathToOutputDirectory,
            plot=programSettings[])
        log.info()
        observedFrameLightCurveInfo, peakAppMagList = u.convert_lightcurves_to_observered_frame(
            log,
            snLightCurves=snLightCurves,
            rawLightCurveDict=rawLightCurveDict,
            redshiftArray=redshiftArray,
            snTypesArray=snTypesArray,
            peakMagnitudesArray=peakMagnitudesArray,
            kCorrectionArray=kCorrectionArray,
            hostExtinctionArray=hostExtinctionArray,
            galacticExtinctionArray=galacticExtinctionArray,
            restFrameFilter=restFrameFilter,
            pathToOutputDirectory=pathToOutputDirectory,
            pathToOutputPlotDirectory=pathToOutputPlotDirectory,
            polyOrder=lightCurvePolyOrder,
            plot=programSettings[])
        # Simulate the survey cadence and determine which SNe are
        # discoverable / actually discovered under it.
        log.info()
        cadenceDictionary = ss.survey_cadence_arrays(
            log,
            surveyCadenceSettings,
            pathToOutputDirectory=pathToOutputDirectory,
            pathToOutputPlotDirectory=pathToOutputPlotDirectory,
            plot=programSettings[])
        log.info()
        discoverableList = ss.determine_if_sne_are_discoverable(
            log,
            redshiftArray=redshiftArray,
            limitingMags=limitingMags,
            observedFrameLightCurveInfo=observedFrameLightCurveInfo,
            pathToOutputDirectory=pathToOutputDirectory,
            pathToOutputPlotDirectory=pathToOutputPlotDirectory,
            plot=programSettings[])
        log.info()
        ripeDayList = ss.determine_when_sne_are_ripe_for_discovery(
            log,
            redshiftArray=redshiftArray,
            limitingMags=limitingMags,
            discoverableList=discoverableList,
            observedFrameLightCurveInfo=observedFrameLightCurveInfo,
            plot=programSettings[])
        log.info()
        lightCurveDiscoveryDayList, surveyDiscoveryDayList, snCampaignLengthList = ss.determine_if_sne_are_discovered(
            log,
            limitingMags=limitingMags,
            ripeDayList=ripeDayList,
            cadenceDictionary=cadenceDictionary,
            observedFrameLightCurveInfo=observedFrameLightCurveInfo,
            extraSurveyConstraints=extraSurveyConstraints,
            plot=programSettings[])
        resultsDict[] = lightCurveDiscoveryDayList
        resultsDict[] = surveyDiscoveryDayList
        resultsDict[] = snCampaignLengthList
        resultsDict[] = cadenceDictionary
        resultsDict[] = peakAppMagList
        # Dump simulation results (settings + results) to a timestamped
        # YAML file.
        now = datetime.now()
        now = now.strftime("%Y%m%dt%H%M%S")
        fileName = pathToOutputDirectory + \
            "/simulation_results_%s.yaml" % (now,)
        stream = file(fileName, )
        yamlContent = dict(allSettings.items() + resultsDict.items())
        yaml.dump(yamlContent, stream, default_flow_style=False)
        stream.close()
        print "The simulation output file can be found here: %(fileName)s. Remember to update your settings file parameter with this filename before compiling the results." % locals()
        if programSettings[]:
            print "The simulation helper-plots found in %(pathToOutputPlotDirectory)s" % locals()

    # Stage 4: compile the results of a previous simulation run into
    # rates, plots and a markdown/html report.
    if programSettings[]:
        pathToYamlFile = pathToOutputDirectory + "/" + \
            programSettings[]
        result_log = r.log_the_survey_settings(log, pathToYamlFile)
        snSurveyDiscoveryTimes, lightCurveDiscoveryTimes, snTypes, redshifts, cadenceDictionary, peakAppMagList, snCampaignLengthList = r.import_results(
            log, pathToYamlFile)
        snRatePlotLink, totalRate, tooFaintRate, shortCampaignRate = r.determine_sn_rate(
            log,
            lightCurveDiscoveryTimes,
            snSurveyDiscoveryTimes,
            redshifts,
            surveyCadenceSettings=surveyCadenceSettings,
            lowerRedshiftLimit=lowerRedshiftLimit,
            upperRedshiftLimit=upperRedshiftLimit,
            redshiftResolution=redshiftResolution,
            surveyArea=surveyArea,
            CCSNRateFraction=CCSNRateFraction,
            transientToCCSNRateFraction=transientToCCSNRateFraction,
            peakAppMagList=peakAppMagList,
            snCampaignLengthList=snCampaignLengthList,
            extraSurveyConstraints=extraSurveyConstraints,
            pathToOutputPlotFolder=pathToOutputPlotDirectory)
        result_log += % (totalRate, tooFaintRate, shortCampaignRate, extraSurveyConstraints["Observable for at least ? number of days"])
        cadenceWheelLink = r.plot_cadence_wheel(
            log,
            cadenceDictionary,
            pathToOutputPlotFolder=pathToOutputPlotDirectory)
        result_log += % (cadenceWheelLink,)
        discoveryMapLink = r.plot_sn_discovery_map(
            log,
            snSurveyDiscoveryTimes,
            peakAppMagList,
            snCampaignLengthList,
            redshifts,
            extraSurveyConstraints,
            pathToOutputPlotFolder=pathToOutputPlotDirectory)
        result_log += % (discoveryMapLink,)
        ratioMapLink = r.plot_sn_discovery_ratio_map(
            log,
            snSurveyDiscoveryTimes,
            redshifts,
            peakAppMagList,
            snCampaignLengthList,
            extraSurveyConstraints,
            pathToOutputPlotFolder=pathToOutputPlotDirectory)
        result_log += % (ratioMapLink,)
        result_log += % (snRatePlotLink,)
        # Write the markdown report and convert it to HTML.
        now = datetime.now()
        now = now.strftime("%Y%m%dt%H%M%S")
        mdLogPath = pathToResultsFolder + \
            "simulation_result_log_%s.md" % (now,)
        mdLog = open(mdLogPath, )
        mdLog.write(result_log)
        mdLog.close()
        dmd.convert_to_html(
            log=log,
            pathToMMDFile=mdLogPath,
            css="amblin"
        )
        print "Results can be found here: %(pathToResultsFolder)s" % locals()
        html = mdLogPath.replace(".md", ".html")
        print "Open this file in your browser: %(html)s" % locals()

    # Tidy up: commit/close any DB connection and log the running time.
    if "dbConn" in locals() and dbConn:
        dbConn.commit()
        dbConn.close()
    endTime = times.get_now_sql_datetime()
    runningTime = times.calculate_time_difference(startTime, endTime)
    log.info( % (endTime, runningTime, ))
    return
*The main function used when ``cl_utils.py`` is run as a single script from the command line, or when installed as a command-line command*
def wr_txt_section_hdrgos(self, fout_txt, sortby=None, prt_section=True):
    """Write the header GO IDs (grouped by section) that are actually
    used for the current set of GO IDs to a text file.

    Returns the sections-2d namedtuple structure that was written.
    """
    # NOTE(review): several string literals in this chunk were stripped
    # (the open() mode, the prt.write() payloads, the dict keys in the
    # summary format call); code tokens are preserved verbatim.
    sec2d_go = self.grprobj.get_sections_2d()
    sec2d_nt = self.get_sections_2dnt(sec2d_go)
    if sortby is None:
        # Default to the grouper's namedtuple sort function.
        sortby = self.fncsortnt
    with open(fout_txt, ) as prt:
        self.prt_ver(prt)
        prt.write("
        for section_name, nthdrgos_actual in sec2d_nt:
            if prt_section:
                prt.write("
            self.prt_ntgos(prt, nthdrgos_actual)
            if prt_section:
                prt.write("\n")
        # Summarize section/GO counts and report what was written.
        dat = SummarySec2dHdrGos().summarize_sec2hdrgos(sec2d_go)
        sys.stdout.write(self.grprobj.fmtsum.format(
            GO_DESC=, SECs=len(dat[]), GOs=len(dat[]),
            UNGRP=len(dat[]), undesc="unused",
            ACTION="WROTE:", FILE=fout_txt))
        return sec2d_nt
Write the high-level (header) GO IDs that are actually used to group the current set of GO IDs.
def replaceChild(self, child, content):
    """Replace *child* with the given *content* in this element.

    @param child: A child element currently attached to this element.
    @param content: A replacement element, or a list/tuple of elements;
        each is detached from its old parent and re-parented here,
        inserted at the position *child* occupied.
    @raise Exception: when *child* is not one of this element's children.
    """
    if child not in self.children:
        raise Exception()
    insert_at = self.children.index(child)
    self.remove(child)
    # Normalise a single replacement node into a one-element sequence.
    replacements = content if isinstance(content, (list, tuple)) else (content,)
    for replacement in replacements:
        self.children.insert(insert_at, replacement.detach())
        replacement.parent = self
        insert_at += 1
Replace I{child} with the specified I{content}. @param child: A child element. @type child: L{Element} @param content: An element or collection of elements. @type content: L{Element} or [L{Element},]
def get_space_information(self, space_key, expand=None, callback=None):
    """Return information about a Confluence space.

    :param space_key (string): Key of the space to look up.
    :param expand (string): OPTIONAL: Comma-separated list of properties
        to expand on the space. Default: Empty.
    :param callback: OPTIONAL: Callback applied to the resulting data
        before returning. Default: None (raw data returned).
    :return: JSON data from the space/{spaceKey} endpoint, or the
        callback's result. May raise requests.HTTPError on bad input.
    """
    query_params = {"expand": expand} if expand else {}
    endpoint = "rest/api/space/{key}".format(key=space_key)
    return self._service_get_request(endpoint, params=query_params, callback=callback)
Returns information about a space. :param space_key (string): A string containing the key of the space. :param expand (string): OPTIONAL: A comma separated list of properties to expand on the space. Default: Empty. :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns. Default: None (no callback, raw data returned). :return: The JSON data returned from the space/{spaceKey} endpoint, or the results of the callback. Will raise requests.HTTPError on bad input, potentially.
def _handle_default(value, script_name):
    """Resolve the helper-script path for *script_name*.

    Prefers an explicitly supplied *value*; otherwise looks for the
    installed ``pulsar-*`` binary created by ``setup.py`` (newer-style
    installs) and falls back to the in-tree Bash script meant to be run
    within PULSAR_ROOT (older-style installs).
    """
    if value:
        return value
    installed = which("pulsar-%s" % script_name.replace("_", "-"))
    return installed if installed else "scripts/%s.bash" % script_name
There are two potential variants of these scripts, the Bash scripts that are meant to be run within PULSAR_ROOT for older-style installs and the binaries created by setup.py as part of a proper pulsar installation. This method first looks for the newer style variant of these scripts and returns the full path to them if needed and falls back to the bash scripts if these cannot be found.
def returner(load):
    """Return job data to a postgres server."""
    # NOTE(review): the SQL statement and the load/job_ret dict keys were
    # stripped from this chunk (empty literals below); code tokens are
    # preserved verbatim.
    conn = _get_conn()
    if conn is None:
        # Connection could not be established; nothing to record.
        return None
    cur = conn.cursor()
    sql = 
    try:
        # Coerce the job return to text, tolerating undecodable bytes.
        ret = six.text_type(load[])
    except UnicodeDecodeError:
        ret = str(load[])
    job_ret = {: ret}
    if  in load:
        job_ret[] = load[]
    if  in load:
        job_ret[] = load[]
    cur.execute(
        sql, (
            load[],
            load[],
            salt.utils.json.dumps(job_ret),
            load[],
            load.get(),
        )
    )
    _close_conn(conn)
Return data to a postgres server
def load_stubs(self, log_mem=False):
    """Load all events in their `stub` (name, alias, etc only) form.

    Used in `update` mode.  When *log_mem* is True, track process memory
    via psutil while loading.
    """
    # NOTE(review): the body of this chunk is truncated/garbled (stray
    # closing paren in _add_stub_manually, missing loop over files);
    # code tokens are preserved verbatim.
    if log_mem:
        import psutil
        process = psutil.Process(os.getpid())
        rss = process.memory_info().rss
        # Log memory every LOG_MEMORY_INT iterations; abort past the limit.
        LOG_MEMORY_INT = 1000
        MEMORY_LIMIT = 1000.0

    def _add_stub_manually(_fname):
        # Presumably builds an error message when the memory limit is
        # exceeded — confirm against upstream source.
        format(rss, MEMORY_LIMIT, ii, _fname))
        self.log.error(err)
        raise RuntimeError(err)
    return self.entries
Load all events in their `stub` (name, alias, etc only) form. Used in `update` mode.
def display_reports(self, layout):
    """Emit the final PyLint score as a TeamCity build statistic value."""
    # NOTE(review): the stats key and the tc.message name/key literals
    # were stripped from this chunk; code tokens preserved verbatim.
    try:
        score = self.linter.stats[]
    except (AttributeError, KeyError):
        # No score available (e.g. no modules analysed) — emit nothing.
        pass
    else:
        self.tc.message(, key=, value=str(score))
Issues the final PyLint score as a TeamCity build statistic value
def _contextkey(jail=None, chroot=None, root=None, prefix='pkg.list_pkgs'):
    """Build a __context__ dict key unique to the target environment.

    As this module manipulates packages in jails and chroots, the passed
    jail/chroot/root is folded into the key so cached data for one
    environment is never used for another.

    :param jail: name of the target jail, if any
    :param chroot: path of the target chroot, if any
    :param root: alternate root path, if any
    :param prefix: base key; defaults to the list_pkgs cache key
    :return: the composed context key string
    """
    # NOTE(review): the stripped literals were reconstructed from the
    # upstream salt pkgng module ('.jail_{0}' / '.chroot_{0}' / '.root_{0}').
    if jail:
        return six.text_type(prefix) + '.jail_{0}'.format(jail)
    elif chroot:
        return six.text_type(prefix) + '.chroot_{0}'.format(chroot)
    elif root:
        return six.text_type(prefix) + '.root_{0}'.format(root)
    return prefix
As this module is designed to manipulate packages in jails and chroots, use the passed jail/chroot to ensure that a key in the __context__ dict that is unique to that jail/chroot is used.