code stringlengths 26 79.6k | docstring stringlengths 1 46.9k |
|---|---|
def post(request):
res = Result()
data = request.POST or json.loads(request.body)[]
name = data.get(, None)
if not name:
res.isError = True
res.message = "No name given"
return JsonResponse(res.asDict())
tag = Tag.objects.get_or_create(name=name.lower())[0]
r... | Creates a tag object
:param name: Name for tag
:type name: str
:returns: json |
def serialize(self, queryset, **options):
self.options = options
self.stream = options.get("stream", StringIO())
self.primary_key = options.get("primary_key", None)
self.properties = options.get("properties")
self.geometry_field = options.get("geometry_field", "geom")
... | Serialize a queryset. |
def make_plot(
self, count, plot=None, show=False, plottype=,
bar=dict(alpha=0.15, color=, linewidth=1.0, edgecolor=),
errorbar=dict(fmt=),
gaussian=dict(ls=, c=)
):
if numpy.ndim(count) != 1:
raise ValueError()
if plot is None:
im... | Convert histogram counts in array ``count`` into a plot.
Args:
count (array): Array of histogram counts (see
:meth:`PDFHistogram.count`).
plot (plotter): :mod:`matplotlib` plotting window. If ``None``
uses the default window. Default is ``None``.
... |
def create_all(graph):
    """Create all database tables.

    Tables are only created (and the migration head stamped) when no
    migration head exists yet for this graph.
    """
    current_head = get_current_head(graph)
    if current_head is not None:
        # Schema is already under migration control; nothing to do.
        return
    Model.metadata.create_all(graph.postgres)
    stamp_head(graph)
def get_method_map(self, viewset, method_map):
    """Filter ``method_map`` down to actions the viewset implements.

    :param viewset: object whose attributes are the candidate actions.
    :param method_map: mapping of HTTP method name -> action name.
    :return: new dict containing only the pairs whose action exists
        on ``viewset``.
    """
    return {
        http_method: action_name
        for http_method, action_name in method_map.items()
        if hasattr(viewset, action_name)
    }
def logger_initial_config(service_name=None,
log_level=None,
logger_format=None,
logger_date_format=None):
if not log_level:
log_level = os.getenv(, )
if not logger_format:
logger_format = (
"%(asctime... | Set initial logging configurations.
:param service_name: Name of the service
:type logger: String
:param log_level: A string or integer corresponding to a Python logging level
:type log_level: String
:param logger_format: A string defining the format of the logs
:type log_level: String
:... |
def render_subject(self, context):
    """Render and whitespace-trim the message subject for ``context``.

    The context data is unescaped first so that ``text/plain`` output
    does not contain rendered HTML entities.

    :param context: template context (e.g. a django Context).
    :return: the rendered subject with surrounding whitespace stripped.
    """
    unescaped_context = unescape(context)
    subject = self.subject_template.render(unescaped_context)
    return subject.strip()
def hazard_at_times(self, times, label=None):
    """Return a Pandas Series of the predicted hazard at ``times``.

    Parameters
    -----------
    times: iterable or float
        values to evaluate the hazard at.
    label: string, optional
        name for the returned Series; defaults to this model's label.

    Returns
    --------
    pd.Series
    """
    series_name = coalesce(label, self._label)
    hazard_values = self._hazard(self._fitted_parameters_, times)
    return pd.Series(hazard_values, index=_to_array(times), name=series_name)
def handle_delete_scan_command(self, scan_et):
scan_id = scan_et.attrib.get()
if scan_id is None:
return simple_response_str(, 404,
)
if not self.scan_exists(scan_id):
text = "Failed to find scan ".format(scan_id)
... | Handles <delete_scan> command.
@return: Response string for <delete_scan> command. |
def _process_sasl_failure(self, stream, element):
_unused = stream
if not self.authenticator:
logger.debug("Unexpected SASL response")
return False
logger.debug("SASL authentication failed: {0!r}".format(
element_t... | Process incoming <sasl:failure/> element.
[initiating entity only] |
def run():
args = client_helper.grab_server_args()
workbench = zerorpc.Client(timeout=300, heartbeat=60)
workbench.connect(+args[]++args[])
all_set = workbench.generate_sample_set()
results = workbench.set_work_request(, all_set)
for customer in results:
print custo... | This client generates customer reports on all the samples in workbench. |
def jsonify(symbol):
try:
return json.dumps(symbol.toJson(), indent=)
except AttributeError:
pass
return json.dumps(symbol, indent=) | returns json format for symbol |
def parse_names_and_default(self):
result = {}
for title, text in self.formal_content.items():
if not text:
result[title] = []
continue
logger.debug( + text)
collect = []
to_list = text.splitlines()
... | parse for `parse_content`
{title: [('-a, --all=STH', 'default'), ...]} |
def read(self, cmd_args):
    """Run a Vagrant command in machine-readable mode, yielding parsed rows.

    Each stdout line is decoded as UTF-8 and split on commas (from the
    right); iteration stops early at the first empty line.

    :param list cmd_args:
        Command argument list appended after ``vagrant --machine-readable``.
    """
    command = ["vagrant", "--machine-readable"] + list(cmd_args)
    proc = subprocess.Popen(command, stdout=subprocess.PIPE)
    for raw_line in proc.stdout.readlines():
        if not raw_line:
            break
        yield raw_line.decode("UTF-8").rsplit(",")
    proc.wait()
def remove_watcher(self, issue, watcher):
url = self._get_url( + str(issue) + )
params = {: watcher}
result = self._session.delete(url, params=params)
return result | Remove a user from an issue's watch list.
:param issue: ID or key of the issue affected
:param watcher: username of the user to remove from the watchers list
:rtype: Response |
def info(self, remote_path):
urn = Urn(remote_path)
if not self.check(urn.path()) and not self.check(Urn(remote_path, directory=True).path()):
raise RemoteResourceNotFound(remote_path)
response = self.execute_request(action=, path=urn.quote())
path = self.get_full_p... | Gets information about resource on WebDAV.
More information you can find by link http://webdav.org/specs/rfc4918.html#METHOD_PROPFIND
:param remote_path: the path to remote resource.
:return: a dictionary of information attributes and them values with following keys:
`created`:... |
def add_unique_element(self, location, element):
    """Create an entry at ``location`` holding ``element``, enforcing uniqueness.

    Args:
        location: String or LocationDescriptor-style path (e.g. dir1/dir2).
        element: Element to store.

    Returns:
        The created node with the element attached.
    """
    # Delegate to the generic entry factory with the uniqueness flag set.
    node = self._create_entry(location, element, unique=True)
    return node
def maybe_stream(s):
    """Coerce the argument into a stream.

    Existing Stream instances (and any other non-None value) are
    returned unchanged; ``None`` is replaced with a fresh, already
    closed InMemStream.
    """
    if isinstance(s, Stream):
        return s
    if s is not None:
        return s
    closed_stream = InMemStream()
    closed_stream.close()
    return closed_stream
def with_index(self, new_index):
    """Return a new TimeSeriesRDD rebased onto ``new_index``.

    Timestamps present in the new index but missing from the current
    one are filled in with NaNs.

    Parameters
    ----------
    new_index : DateTimeIndex
    """
    rebased = self._jtsrdd.withIndex(new_index._jdt_index)
    return TimeSeriesRDD(None, None, rebased, self.ctx)
def get_env(env_file=):
try:
with open(env_file) as f:
for line in f.readlines():
try:
key, val = line.split(, maxsplit=1)
os.environ.setdefault(key.strip(), val.strip())
except ValueError:
pass
... | Set default environment variables from .env file |
def validate(cert, ca_name, crl_file):
store = OpenSSL.crypto.X509Store()
cert_obj = _read_cert(cert)
if cert_obj is None:
raise CommandExecutionError(
.format(cert)
)
ca_dir = .format(cert_base_path(), ca_name)
ca_cert = _read_cert(.format(ca_dir, ca_name))
stor... | .. versionadded:: Neon
Validate a certificate against a given CA/CRL.
cert
path to the certifiate PEM file or string
ca_name
name of the CA
crl_file
full path to the CRL file |
def AgregarUbicacionTambo(self, latitud, longitud, domicilio,
cod_localidad, cod_provincia, codigo_postal,
nombre_partido_depto, **kwargs):
"Agrego los datos del productor a la liq."
ubic_tambo = {: latitud,
: longitud,
... | Agrego los datos del productor a la liq. |
def operator_oropt(self, graph, solution, op_diff_round_digits, anim=None):
no_ctr = 100
dm = graph._matrix
dn = graph._nodes
for route in solution.routes():
if len(route._nodes) == 1:
if solution._problem._is_... | Applies Or-Opt intra-route operator to solution
Takes chains of nodes (length=3..1 consecutive nodes) from a given
route and calculates savings when inserted into another position on the
same route (all possible positions). Performes best move (max. saving)
and starts over again... |
def get_attribute_from_indices(self, indices: list, attribute_name: str):
    """Return the attribute values of the vertices at ``indices``.

    :param indices: Indices of vertices whose attribute values are requested.
    :param attribute_name: The name of the vertex attribute.
    :return: A list of attribute values, in the order of ``indices``.
    """
    # Fancy-index the full attribute column with the requested positions.
    attribute_column = np.array(self.graph.vs[attribute_name])
    return list(attribute_column[indices])
def QA_fetch_risk(message={}, params={"_id": 0, : 0, : 0, : 0, : 0, : 0}, db=DATABASE):
collection = DATABASE.risk
return [res for res in collection.find(message, params)] | get the risk message
Arguments:
query_mes {[type]} -- [description]
Keyword Arguments:
collection {[type]} -- [description] (default: {DATABASE})
Returns:
[type] -- [description] |
def from_histogram(cls, histogram, bin_edges, axis_names=None):
    """Build an instance from a numpy histogram plus its bin edges.

    :param histogram: Initial histogram counts.
    :param bin_edges: x bin edges of histogram, y bin edges, ...
    :param axis_names: optional axis names.
    :return: new instance with ``histogram`` attached.
    """
    edges = np.array(bin_edges)
    instance = cls(bins=edges, axis_names=axis_names)
    instance.histogram = histogram
    return instance
def asset_create_combo(self, name, combo, tag=, description=):
operand1operand12operand22operationunionoperand23operationintersection
return self.raw_query(, , data={
: name,
: description,
: ,
: combo,
}) | asset_create_combo name, combination, tag, description
Creates a new combination asset list. Operands can be either asset list
IDs or be a nested combination asset list.
UN-DOCUMENTED CALL: This function is not considered stable.
AND = intersection
OR = union
operand =... |
def fix_variable(self, v, value):
variables = self.variables
try:
idx = variables.index(v)
except ValueError:
raise ValueError("given variable {} is not part of the constraint".format(v))
if value not in self.vartype.value:
raise ValueError("... | Fix the value of a variable and remove it from the constraint.
Args:
v (variable):
Variable in the constraint to be set to a constant value.
val (int):
Value assigned to the variable. Values must match the :class:`.Vartype` of the
constra... |
def _merge_prims(prims, *, debug=False, stagenames=None, stages=None):
if isinstance(prims, FrameSequence):
merged_prims = FrameSequence(prims._chain)
else:
merged_prims = []
working_prim = prims[0]
i = 1
logging_tmp = []
while i < len(prims):
tmp = prims[i]
... | Helper method to greedily combine Frames (of Primitives) or Primitives based on the rules defined in the Primitive's class.
Used by a CommandQueue during compilation and optimization of
Primitives.
Args:
prims: A list or FrameSequence of Primitives or Frames (respectively) to try to merge together... |
def _get_corr_stddevs(C, tau_ss, stddev_types, num_sites, phi_ss, NL=None,
tau_value=None):
stddevs = []
temp_stddev = phi_ss * phi_ss
if tau_value is not None and NL is not None:
temp_stddev = temp_stddev + tau_value * tau_value * ((1 + NL) ** 2)
else:
temp_s... | Return standard deviations adjusted for single station sigma
as the total standard deviation - as proposed to be used in
the Swiss Hazard Model [2014]. |
def _vagrant_ssh_config(vm_):
s info as we have it now
:return: dictionary of ssh stuff
machinerequesting vagrant ssh-config for VM %s(default)vagrant ssh-config {}cmd.shellrunascwd\nssh_config=%s', repr(ssh_config))
return ssh_config | get the information for ssh communication from the new VM
:param vm_: the VM's info as we have it now
:return: dictionary of ssh stuff |
def get_version():
    """Return the MAJOR.MINOR version derived from the full release string.

    A three-part release ("X.Y.Z") is shortened to "X.Y"; any other
    shape is returned unchanged.
    """
    release = get_release()
    parts = release.split(".")
    return ".".join(parts[:2]) if len(parts) == 3 else release
def warn(self, msg, whitespace_strp=True):
if self.errors_display:
if whitespace_strp:
msg = strip_whitespace(msg)
if not self.log_to_file:
msg = colors[] + "[+] " + msg + colors[]
else:
msg = "[" + time.strftime("%c")... | For things that have gone seriously wrong but don't merit a program
halt.
Outputs to stderr, so JsonOutput does not need to override.
@param msg: warning to output.
@param whitespace_strp: whether to strip whitespace. |
def ts_to_df(metadata):
logger_dataframes.info("enter ts_to_df")
dfs = {}
dfs["paleoData"] = pd.DataFrame(_plot_ts_cols(metadata))
dfs["chronData"] = _get_key_data(metadata, "chronData_df")
if "chronData_df" in metadata:
del metadata["chronData_df"]
s = collec... | Create a data frame from one TimeSeries object
:param dict metadata: Time Series dictionary
:return dict: One data frame per table, organized in a dictionary by name |
def set_defaults(self, config_file):
    """Initialise default collaborators from ``config_file``.

    Loads the Defaults bundle, mirrors its commonly used settings onto
    this instance, and creates fresh helper components.
    """
    self.defaults = Defaults(config_file)
    # Settings mirrored from the defaults bundle for convenient access.
    self.styles = self.defaults.styles
    self.browser = self.defaults.browser
    # Fresh helper components.
    self.python = Python()
    self.setuptools = Setuptools()
    self.docutils = Docutils()
    self.list = False
def null_advance(self, blocksize):
    """Advance the buffer by ``blocksize`` seconds, rolling in zeros.

    Parameters
    ----------
    blocksize: int
        The number of seconds to advance by.
    """
    # Convert seconds to raw samples before shifting the buffer left.
    sample_shift = int(blocksize * self.raw_sample_rate)
    self.raw_buffer.roll(-sample_shift)
    self.read_pos += blocksize
    self.raw_buffer.start_time += blocksize
def get(self):
return ExecutionContextContext(
self._version,
flow_sid=self._solution[],
execution_sid=self._solution[],
) | Constructs a ExecutionContextContext
:returns: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextContext
:rtype: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextContext |
def _convert_before_2_0_0_b3(self, dynamips_id):
dynamips_dir = self.project.module_working_directory(self.manager.module_name.lower())
for path in glob.glob(os.path.join(glob.escape(dynamips_dir), "configs", "i{}_*".format(dynamips_id))):
dst = os.path.join(self._working_directory,... | Before 2.0.0 beta3 the node didn't have a folder by node
when we start we move the file, we can't do it in the topology
conversion due to case of remote servers |
def censor_background(sample_frame, ntc_samples=[], margin=log2(10)):
ntcs = sample_frame.loc[ sample_frame[].apply(lambda x: x in ntc_samples), ]
if ntcs.empty:
return sample_frame
g = ntcs.groupby()
min_ntcs = g[].min()
censored = sample_frame.loc[ ~(sample_frame[]... | Selects rows from the sample data frame that fall `margin` or greater
cycles earlier than the NTC for that target. NTC wells are recognized by
string matching against the Sample column.
:param DataFrame sample_frame: A sample data frame.
:param iterable ntc_samples: A sequence of strings giving the sam... |
def clustering_coef_bu(G):
    """Compute the clustering coefficient of every node.

    The clustering coefficient is the fraction of triangles around a
    node (equivalently, the fraction of its neighbor pairs that are
    themselves connected).

    Parameters
    ----------
    G : NxN np.ndarray
        binary undirected connection matrix

    Returns
    -------
    C : Nx1 np.ndarray
        clustering coefficient vector
    """
    n_nodes = len(G)
    coefs = np.zeros((n_nodes,))
    for node in range(n_nodes):
        neighbors, = np.where(G[node, :])
        degree = len(neighbors)
        # The coefficient is only defined for nodes with >= 2 neighbors;
        # others keep the initial value of 0.
        if degree >= 2:
            neighborhood = G[np.ix_(neighbors, neighbors)]
            coefs[node] = np.sum(neighborhood) / (degree * degree - degree)
    return coefs
def get_ht_capability(cap):
answers = list()
if cap & 1:
answers.append()
if cap & 2:
answers.append()
if not cap & 2:
answers.append()
if (cap >> 2) & 0x3 == 0:
answers.append()
if (cap >> 2) & 0x3 == 1:
answers.append()
if (cap >> 2) & 0x3 == 3:... | http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/util.c?id=v3.17#n541.
Positional arguments:
cap -- c_uint16
Returns:
List. |
def is_valid(self, name=None, debug=False):
valid_tags = self.action_tree
invalid = False
for item in self.current_tree:
try:
if item in valid_tags or self.ALL_TAGS in valid_tags:
valid_tags = valid_tags[item if item in valid_tags else sel... | Check to see if the current xml path is to be processed. |
def software_fibonacci(n):
    """Return the nth Fibonacci number (fib(0) == 0, fib(1) == 1).

    Plain iterative implementation in software (no hardware tricks).
    """
    current, successor = 0, 1
    while n > 0:
        current, successor = successor, current + successor
        n -= 1
    return current
def set_context(self, filename):
local_loc = self._init_file(filename)
self.handler = logging.FileHandler(local_loc)
self.handler.setFormatter(self.formatter)
self.handler.setLevel(self.level)
if self._cur_date < datetime.today():
self._symlink_latest_log_di... | Provide filename context to airflow task handler.
:param filename: filename in which the dag is located |
def is_subdict(self, a, b):
    """Return True if ``a`` is a sub-dict of ``b``.

    Every (key, value) pair of ``a`` must appear in ``b``.

    :param a: candidate sub-dict.
    :param b: dict to test against.
    :return: bool
    """
    # dict.iteritems() was removed in Python 3; items() works on both 2 and 3.
    return all(k in b and b[k] == v for k, v in a.items())
def insert(self, bs, pos=None):
bs = Bits(bs)
if not bs.len:
return self
if bs is self:
bs = self.__copy__()
if pos is None:
try:
pos = self._pos
except AttributeError:
raise TypeError("insert requir... | Insert bs at bit position pos.
bs -- The bitstring to insert.
pos -- The bit position to insert at.
Raises ValueError if pos < 0 or pos > self.len. |
def get_collection(self, session, query, api_key):
model = self._fetch_model(api_key)
include = self._parse_include(query.get(, ).split())
fields = self._parse_fields(query)
included = {}
sorts = query.get(, ).split()
order_by = []
collection = session.q... | Fetch a collection of resources of a specified type.
:param session: SQLAlchemy session
:param query: Dict of query args
:param api_type: The type of the model |
def add_child(self, child):
    """Adopt ``child``: set its parent to self, append it, and return it."""
    # Link the child back to its new parent before registering it.
    child.parent = self
    siblings = self.children
    siblings.append(child)
    return child
def pluck(self, key):
    """Fetch property ``key`` from every item — a convenience for the
    common ``map`` use case.

    Missing keys yield ``None`` (via ``dict.get``).
    """
    plucked = [item.get(key) for item in self.obj]
    return self._wrap(plucked)
def drawBackground(self, painter, rect):
    """Draw the backgrounds for the different chart types.

    :param painter: <QPainter>
    :param rect: <QRect>
    """
    # Rebuild cached geometry before painting if anything changed.
    if self._dirty:
        self.rebuild()
    if self.showGrid():
        self.drawGrid(painter)
def _or_query(self, term_list, field, field_type):
    """OR-join the items of ``term_list``, each decorated by _term_query."""
    decorated_terms = [
        self._term_query(term, field, field_type) for term in term_list
    ]
    return xapian.Query(xapian.Query.OP_OR, decorated_terms)
def has_insert(self, shape):
    """Return True if any of the inserts has the given shape."""
    return any(insert.shape == shape for insert in self.inserts)
def _gen_ticket(prefix=None, lg=settings.CAS_TICKET_LEN):
random_part = u.join(
random.choice(
string.ascii_letters + string.digits
) for _ in range(lg - len(prefix or "") - 1)
)
if prefix is not None:
return u % (prefix, random_part)
else:
return random_... | Generate a ticket with prefix ``prefix`` and length ``lg``
:param unicode prefix: An optional prefix (probably ST, PT, PGT or PGTIOU)
:param int lg: The length of the generated ticket (with the prefix)
:return: A randomlly generated ticket of length ``lg``
:rtype: unicode |
def makerandCIJ_dir(n, k, seed=None):
s global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
CIJ : NxN np.ndarray
directed random connection matrix
Notes
-----
no connections are pla... | This function generates a directed random network
Parameters
----------
N : int
number of vertices
K : int
number of edges
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.... |
def remove_product_version(self, id, product_version_id, **kwargs):
kwargs[] = True
if kwargs.get():
return self.remove_product_version_with_http_info(id, product_version_id, **kwargs)
else:
(data) = self.remove_product_version_with_http_info(id, product_version_... | Removes a product version from the specified config set
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> ... |
def subtract_metabolites(self, metabolites, combine=True, reversibly=True):
    """Subtract metabolites from the reaction.

    Equivalent to adding each metabolite with its coefficient negated;
    a metabolite whose final coefficient reaches 0 is removed from the
    reaction. A final coefficient < 0 implies a reactant.
    """
    negated = {met: -coeff for met, coeff in iteritems(metabolites)}
    self.add_metabolites(negated, combine=combine, reversibly=reversibly)
def create_str(help_string=NO_HELP, default=NO_DEFAULT):
return ParamFunctions(
help_string=help_string,
default=default,
type_name="str",
function_s2t=convert_string_to_string,
function_t2s=convert_string_to_string,
... | Create a string parameter
:param help_string:
:param default:
:return: |
def call_only_once(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
self = args[0]
assert func.__name__ in dir(self), "call_only_once can only be used on method or property!"
if not hasattr(self, ):
cache = self._CALL_ONLY_ONCE_CACHE = set()... | Decorate a method or property of a class, so that this method can only
be called once for every instance.
Calling it more than once will result in exception. |
def kernels_initialize(self, folder):
if not os.path.isdir(folder):
raise ValueError( + folder)
resources = []
resource = {: }
resources.append(resource)
username = self.get_config_value(self.CONFIG_NAME_USER)
meta_data = {
: username + ... | create a new kernel in a specified folder from template, including
json metadata that grabs values from the configuration.
Parameters
==========
folder: the path of the folder |
def close(self):
    """Close the current session and release its seat.

    After the server processes the request, all session data become
    invalid and the session ID can no longer be used; the locally
    stored SID is cleared. Calling close on an already-closed session
    is a no-op.
    """
    if not self._SID:
        return
    self._auth.service.closeSession()
    self._SID = None
def is_purrlog(path):
if not os.path.isdir(path):
return False
if list(filter(os.path.isdir, glob.glob(
os.path.join(path, "entry-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]-[0-9][0-9][0-9][0-9][0-9][0-9]")))):
return True
return os.path.exists(os.pa... | Checks if path refers to a valid purrlog.
Path must exist, and must contain either at least one directory called entry-YYYYMMDD-HHMMSS, or the file "dirconfig" |
def retrain(self):
folder = TrainData.from_folder(self.args.folder)
train_data, test_data = folder.load(True, not self.args.no_validation)
train_data = TrainData.merge(train_data, self.sampled_data)
test_data = TrainData.merge(test_data, self.test)
train_inputs, train_o... | Train for a session, pulling in any new data from the filesystem |
def describe_field(k, v, timestamp_parser=default_timestamp_parser):
def bq_schema_field(name, bq_type, mode):
return {"name": name, "type": bq_type, "mode": mode}
if isinstance(v, list):
if len(v) == 0:
raise Exception(
"Canfields'] = schema_from_record(v, tim... | Given a key representing a column name and value representing the value
stored in the column, return a representation of the BigQuery schema
element describing that field. Raise errors if invalid value types are
provided.
Parameters
----------
k : Union[str, unicode]
Key representing th... |
def is_prime( n ):
global miller_rabin_test_count
miller_rabin_test_count = 0
if n <= smallprimes[-1]:
if n in smallprimes: return True
else: return False
if gcd( n, 2*3*5*7*11 ) != 1: return False
t = 40
n_bits = 1 + int( math.log( n, 2 ) )
for k, tt in ( ( 100, 27 ),
... | Return True if x is prime, False otherwise.
We use the Miller-Rabin test, as given in Menezes et al. p. 138.
This test is not exact: there are composite values n for which
it returns True.
In testing the odd numbers from 10000001 to 19999999,
about 66 composites got past the first test,
5 got past the sec... |
def view_all_work_queues():
count_list = list(
db.session.query(
work_queue.WorkQueue.queue_name,
work_queue.WorkQueue.status,
func.count(work_queue.WorkQueue.task_id))
.group_by(work_queue.WorkQueue.queue_name,
work_queue.WorkQueue.status))... | Page for viewing the index of all active work queues. |
def _set_rho_grids(self):
rho_grids = np.arange(self.rho_bins) * 2 / self.rho_bins - 1 \
+ 1 / self.rho_bins
rho_weights = np.ones(self.rho_bins) / self.rho_bins
return rho_grids, rho_weights | Set the grids and weights for rho used in numerical integration
of AR(1) parameters. |
def _maybe_download_corpora(tmp_dir):
mnli_filename = "MNLI.zip"
mnli_finalpath = os.path.join(tmp_dir, "MNLI")
if not tf.gfile.Exists(mnli_finalpath):
zip_filepath = generator_utils.maybe_download(
tmp_dir, mnli_filename, _MNLI_URL)
zip_ref = zipfile.ZipFile(zip_filepath, "r")
zip_ref.extr... | Download corpora for multinli.
Args:
tmp_dir: a string
Returns:
a string |
def get_grade_entry_form_for_update(self, grade_entry_id):
collection = JSONClientValidated(,
collection=,
runtime=self._runtime)
if not isinstance(grade_entry_id, ABCId):
raise errors.InvalidArgument(... | Gets the grade entry form for updating an existing entry.
A new grade entry form should be requested for each update
transaction.
arg: grade_entry_id (osid.id.Id): the ``Id`` of the
``GradeEntry``
return: (osid.grading.GradeEntryForm) - the grade entry form
r... |
def perc(arr, p=95, **kwargs):
    """Return symmetric percentiles of ``arr`` with ``p`` percent coverage.

    E.g. ``p=95`` yields the 2.5th and 97.5th percentiles.
    """
    tail = (100 - p) / 2
    return np.percentile(arr, (tail, 100 - tail), **kwargs)
def close(self):
if self.isClosed:
raise ValueError("operation illegal for closed doc")
if hasattr(self, ) and self._outline:
self._dropOutline(self._outline)
self._outline = None
self._reset_page_refs()
self.metadata = None
self.s... | close(self) |
def create_cache(directory, compress_level=6, value_type_is_binary=False, **kwargs):
    """Create an HTML cache; string values are compressed automatically.

    :param directory: path for the cache directory.
    :param compress_level: 0 ~ 9, 9 is slowest and smallest.
    :param value_type_is_binary: whether stored values are bytes.
    :param kwargs: other arguments forwarded to ``diskcache.Cache``.
    :return: a ``diskcache.Cache()`` instance.
    """
    return diskcache.Cache(
        directory,
        disk=CompressedDisk,
        disk_compress_level=compress_level,
        disk_value_type_is_binary=value_type_is_binary,
        **kwargs
    )
def plotnoise(noisepkl, mergepkl, plot_width=950, plot_height=400):
d = pickle.load(open(mergepkl))
ndist, imstd, flagfrac = plotnoisedist(noisepkl, plot_width=plot_width/2, plot_height=plot_height)
fluxscale = calcfluxscale(d, imstd, flagfrac)
logger.info(.format(fluxscale*imstd))
ncum, imnoi... | Make two panel plot to summary noise analysis with estimated flux scale |
def readTable(self, tableName):
    """Read the named table, equivalent to the AMPL statement:

    .. code-block:: ampl

        read table tableName;

    Args:
        tableName: Name of the table to be read.
    """
    # Serialize access to the underlying AMPL interpreter.
    lock_and_call(lambda: self._impl.readTable(tableName), self._lock)
def batching_scheme(batch_size,
max_length,
min_length_bucket,
length_bucket_step,
drop_long_sequences=False,
shard_multiplier=1,
length_multiplier=1,
min_length=0):
max_lengt... | A batching scheme based on model hyperparameters.
Every batch contains a number of sequences divisible by `shard_multiplier`.
Args:
batch_size: int, total number of tokens in a batch.
max_length: int, sequences longer than this will be skipped. Defaults to
batch_size.
min_length_bucket: int
... |
def fixpath(path):
    """Uniformly format a path: expand ``~``, resolve symlinks, normalize."""
    expanded = os.path.expanduser(path)
    resolved = os.path.realpath(expanded)
    return os.path.normpath(resolved)
def _create_cpe_parts(self, system, components):
if system not in CPEComponent.SYSTEM_VALUES:
errmsg = "Key is not exist".format(system)
raise ValueError(errmsg)
elements = []
elements.append(components)
pk = CPE._system_and_parts[system]
self... | Create the structure to store the input type of system associated
with components of CPE Name (hardware, operating system and software).
:param string system: type of system associated with CPE Name
:param dict components: CPE Name components to store
:returns: None
:exception: ... |
def equal(self, value_a, value_b):
equal = value_a == value_b
if hasattr(equal, ):
return all(equal)
return equal | Check if two valid Property values are equal
.. note::
This method assumes that :code:`None` and
:code:`properties.undefined` are never passed in as values |
def add_entry(self, row):
var_call = VCFEntry(self.individuals)
var_call.parse_entry( row )
self.entries[(var_call.chrom, var_call.pos)] = var_call
return var_call | This will parse the VCF entry and also store it within the VCFFile. It will also
return the VCFEntry as well. |
def queries(self, rcSuffix=, rcNeeded=False, padChar=,
queryInsertionChar=, unknownQualityChar=,
allowDuplicateIds=False, addAlignment=False):
referenceLength = self.referenceLength
idCount = Counter()
MATCH_OPERATIONS = {CMATCH, CEQUA... | Produce padded (with gaps) queries according to the CIGAR string and
reference sequence length for each matching query sequence.
@param rcSuffix: A C{str} to add to the end of query names that are
reverse complemented. This is added before the /1, /2, etc., that
are added for du... |
def build_time(start_time):
diff_time = round(time.time() - start_time, 2)
if diff_time <= 59.99:
sum_time = str(diff_time) + " Sec"
elif diff_time > 59.99 and diff_time <= 3599.99:
sum_time = round(diff_time / 60, 2)
sum_time_list = re.findall(r"\d+", str(sum_time))
sum... | Calculate build time per package |
def any(pred: Callable, xs: Iterable):
    """Check if at least one element of the iterable `xs`
    fulfills predicate `pred`.

    NOTE: this intentionally shadows the builtin ``any`` in this module.

    :param pred:
        predicate function.
    :param xs:
        iterable object.
    :returns: boolean
    """
    # find_first returns None when no element matches, so the verbose
    # `True if b is not None else False` collapses to a single comparison.
    return find_first(pred, xs) is not None
def Solver_CMFR_N(t_data, C_data, theta_guess, C_bar_guess):
C_unitless = C_data.magnitude
C_units = str(C_bar_guess.units)
t_seconds = (t_data.to(u.s)).magnitude
p0 = [theta_guess.to(u.s).magnitude, C_bar_guess.magnitude,1]
popt, pcov = curve_fit(Tracer_CMFR_N, t_seconds, C_unitless, p0)
... | Use non-linear least squares to fit the function
Tracer_CMFR_N(t_seconds, t_bar, C_bar, N) to reactor data.
:param t_data: Array of times with units
:type t_data: float list
:param C_data: Array of tracer concentration data with units
:type C_data: float list
:param theta_guess: Estimate of tim... |
def plot_high_levels_data(self):
high_level = self.level_box.GetValue()
self.UPPER_LEVEL_NAME = self.level_names.GetValue()
self.UPPER_LEVEL_MEAN = self.mean_type_box.GetValue()
draw_net(self.high_level_eqarea)
what_is_it = self.level_box.GetValue()+": "+self.level_nam... | Complicated function that draws the high level mean plot on canvas4,
draws all specimen, sample, or site interpretations according to the
UPPER_LEVEL_SHOW variable, draws the fisher mean or fisher mean by
polarity of all interpretations displayed, draws sample orientation
check if on, an... |
def _orientation_ok_to_bridge_contigs(self, start_hit, end_hit):
assert start_hit.qry_name == end_hit.qry_name
if start_hit.ref_name == end_hit.ref_name:
return False
if (
(self._is_at_ref_end(start_hit) and start_hit.on_same_strand())
or (self._is_a... | Returns True iff the orientation of the hits means that the query contig of both hits can bridge the reference contigs of the hits |
def find_previous_sibling(self, *args, **kwargs):
op = operator.methodcaller(, *args, **kwargs)
return self._wrap_node(op) | Like :meth:`find`, but searches through :attr:`previous_siblings` |
def table_mask(self):
margin = compress_pruned(
self._slice.margin(
axis=None,
weighted=False,
include_transforms_for_dims=self._hs_dims,
prune=self._prune,
)
)
mask = margin < self._size
if... | ndarray, True where table margin <= min_base_size, same shape as slice. |
def _opposite_axis_margin(self):
off_axis = 1 - self._axis
return self._slice.margin(axis=off_axis, include_mr_cat=self._include_mr_cat) | ndarray representing margin along the axis opposite of self._axis
In the process of calculating p-values for the column significance testing we
need both the margin along the primary axis and the percentage margin along
the opposite axis. |
def parse_nni_function(code):
name, call = parse_annotation_function(code, )
funcs = [ast.dump(func, False) for func in call.args]
convert_args_to_dict(call, with_lambda=True)
name_str = astor.to_source(name).strip()
call.keywords[0].value = ast.Str(s=name_str)
return call, funcs | Parse `nni.function_choice` expression.
Return the AST node of annotated expression and a list of dumped function call expressions.
code: annotation string |
def set_volume(self, pct, channel=None):
if channel is None:
channel = self._get_channel()
cmd_line = .format(channel, pct)
Popen(shlex.split(cmd_line)).wait() | Sets the sound volume to the given percentage [0-100] by calling
``amixer -q set <channel> <pct>%``.
If the channel is not specified, it tries to determine the default one
by running ``amixer scontrols``. If that fails as well, it uses the
``Playback`` channel, as that is the only channe... |
def connect(self):
raise Exception(msg) | Registers a new device + username with the bridge |
def sparsify_rows(x, quantile=0.01):
<type ><type >
if x.ndim == 1:
x = x.reshape((1, -1))
elif x.ndim > 2:
raise ParameterError(
.format(x.shape))
if not 0.0 <= quantile < 1:
raise ParameterError(.format(quantile))
x_sparse = scipy.sparse.lil... | Return a row-sparse matrix approximating the input `x`.
Parameters
----------
x : np.ndarray [ndim <= 2]
The input matrix to sparsify.
quantile : float in [0, 1.0)
Percentage of magnitude to discard in each row of `x`
Returns
-------
x_sparse : `scipy.sparse.csr_matrix` [s... |
def get_access_token_from_code(
self, code, redirect_uri, app_id, app_secret
):
args = {
"code": code,
"redirect_uri": redirect_uri,
"client_id": app_id,
"client_secret": app_secret,
}
return self.request(
"{0}/oau... | Get an access token from the "code" returned from an OAuth dialog.
Returns a dict containing the user-specific access token and its
expiration date (if applicable). |
def entry_point(__func: Callable) -> Callable:
if __func.__module__ == :
import sys
sys.exit(__func())
else:
return __func | Execute function when module is run directly.
Note:
This allows fall through for importing modules that use it.
Args:
__func: Function to run |
def build_duration(self):
return int(self.state.build_done) - int(self.state.build) | Return the difference between build and build_done states |
def set_default_format_options(self, format_options, read=False):
if self.default_notebook_metadata_filter:
format_options.setdefault(, self.default_notebook_metadata_filter)
if self.default_cell_metadata_filter:
format_options.setdefault(, self.default_cell_metadata_fil... | Set default format option |
def suggest(q=, results=15, buckets=None, limit=False, max_familiarity=None, min_familiarity=None,
max_hotttnesss=None, min_hotttnesss=None):
buckets = buckets or []
kwargs = {}
kwargs[] = q
if max_familiarity is not None:
kwargs[] = max_familiarity
if min_familiarity... | Suggest artists based upon partial names.
Args:
Kwargs:
q (str): The text to suggest artists from
results (int): An integer number of results to return
buckets (list): A list of strings specifying which buckets to retrieve
limit (bool): A boolean indicating whether or not to... |
def annotation_rows(prefix, annotations):
ncol = len(annotations[])
return {name.replace(prefix, , 1) : values + [] * (ncol - len(values))
for name, values in annotations.items() if name.startswith(prefix)} | Helper function to extract N: and C: rows from annotations and pad their values |
def reply_ok(self):
return (self.mtype == self.REPLY and self.arguments and
self.arguments[0] == self.OK) | Return True if this is a reply and its first argument is 'ok'. |
def mark(self):
def pos(text, index):
return ParseError.loc_info(text, index)
@Parser
def mark_parser(text, index):
res = self(text, index)
if res.status:
return Value.success(res.index, (pos(text, index), res.value, pos(text, res.ind... | Mark the line and column information of the result of this parser. |
def sample(self, withReplacement=None, fraction=None, seed=None):
is_withReplacement_set = \
type(withReplacement) == bool and isinstance(fraction, float)
is_withReplacement_omitted_kwargs = \
withReplacement is Non... | Returns a sampled subset of this :class:`DataFrame`.
:param withReplacement: Sample with replacement or not (default False).
:param fraction: Fraction of rows to generate, range [0.0, 1.0].
:param seed: Seed for sampling (default a random seed).
.. note:: This is not guaranteed to prov... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.