code stringlengths 26 79.6k | docstring stringlengths 1 46.9k |
|---|---|
def start_proxy(self):
self._download_sql_proxy_if_needed()
if self.sql_proxy_process:
raise AirflowException("The sql proxy is already running: {}".format(
self.sql_proxy_process))
else:
command_to_run = [self.sql_proxy_path]
command_to_run.extend(self.command_line_parameters)
try:
self.log.info("Creating directory %s",
self.cloud_sql_proxy_socket_directory)
os.makedirs(self.cloud_sql_proxy_socket_directory)
except OSError:
pass
command_to_run.extend(self._get_credential_parameters())
self.log.info("Running the command: `%s`", " ".join(command_to_run))
self.sql_proxy_process = Popen(command_to_run,
stdin=PIPE, stdout=PIPE, stderr=PIPE)
self.log.info("The pid of cloud_sql_proxy: %s", self.sql_proxy_process.pid)
while True:
line = self.sql_proxy_process.stderr.readline().decode()
return_code = self.sql_proxy_process.poll()
if line == and return_code is not None:
self.sql_proxy_process = None
raise AirflowException(
"The cloud_sql_proxy finished early with return code {}!".format(
return_code))
if line != :
self.log.info(line)
if "googleapi: Error" in line or "invalid instance name:" in line:
self.stop_proxy()
raise AirflowException(
"Error when starting the cloud_sql_proxy {}!".format(
line))
if "Ready for new connections" in line:
return | Starts Cloud SQL Proxy.
You have to remember to stop the proxy if you started it! |
def _AddStrMethod(message_descriptor, cls):
def __str__(self):
return text_format.MessageToString(self)
cls.__str__ = __str__ | Helper for _AddMessageMethods(). |
def _validated(self, value):
if value is None:
return None
try:
return self._format_num(value)
except (TypeError, ValueError):
self.fail(, input=value)
except OverflowError:
self.fail(, input=value) | Format the value or raise a :exc:`ValidationError` if an error occurs. |
def flatten_element(p):
rd = {"time": p.time}
for member in p.members:
rd[member["standard"]] = member["value"]
return rd | Convenience function to return record-style time series representation
from elements ('p') members in station element.
member['standard'] is a standard_name parameter name, typically CF based.
Ideally, member['value'] should already be floating point value,
so it's ready to use.
Useful with most pyoos collectors. |
def evaluate_model_single_recording(model_file, recording):
(preprocessing_queue, feature_list, model,
output_semantics) = load_model(model_file)
results = evaluate_model_single_recording_preloaded(preprocessing_queue,
feature_list,
model,
output_semantics,
recording)
return results | Evaluate a model for a single recording.
Parameters
----------
model_file : string
Model file (.tar)
recording :
The handwritten recording. |
def undecorate(cls, function):
if cls.is_function_validated(function):
return cls.get_function_validator(function).function
return function | Remove validator decoration from a function.
The `function` argument is the function to be cleaned from
the validator decorator. |
def _from_json(json_data):
if in json_data:
box = BoundingBox._from_json(json_data[])
else:
box = BoundingBox(Coordinate(0.,0.,0.), Coordinate(0.,0.,0.))
if in json_data and json_data[]:
quakes = list(map(Earthquake._from_json, json_data[]))
else:
quakes = []
try:
title = json_data[][]
except KeyError:
raise USGSException("No report title information returned by server")
return Report(box, quakes, title) | Creates a Report from json data.
:param json_data: The raw json data to parse
:type json_data: dict
:returns: Report |
def set_source(self, source_id):
if self.get_source_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_id(source_id):
raise errors.InvalidArgument()
self._my_map[] = str(source_id) | Sets the source.
arg: source_id (osid.id.Id): the new publisher
raise: InvalidArgument - ``source_id`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``source_id`` is ``null``
*compliance: mandatory -- This method must be implemented.* |
def value(board, who=):
w = board.winner()
if w == who:
return 1
if w == opp(who):
return -1
if board.turn == 9:
return 0
if who == board.whose_turn:
return max([value(b, who) for b in board.possible()])
else:
return min([value(b, who) for b in board.possible()]) | Returns the value of a board
>>> b = Board(); b._rows = [['x', 'x', 'x'], ['x', 'x', 'x'], ['x', 'x', 'x']]
>>> value(b)
1
>>> b = Board(); b._rows = [['o', 'o', 'o'], ['o', 'o', 'o'], ['o', 'o', 'o']]
>>> value(b)
-1
>>> b = Board(); b._rows = [['x', 'o', ' '], ['x', 'o', ' '], [' ', ' ', ' ']]
>>> value(b)
1
>>> b._rows[0][2] = 'x'
>>> value(b)
-1 |
def _init_properties(self):
self._missing = {}
for k, p in self.params.items():
if p.required:
self._missing[k] = p
if isinstance(p, Derived):
if p.loader is None:
p.loader = self.__getattribute__("_%s" % k)
elif isinstance(p.loader, str):
p.loader = self.__getattribute__(p.loader) | Loop through the list of Properties,
extract the derived and required properties and do the
appropriate book-keeping |
def check_hash(self, checker, filename, tfp):
checker.report(
self.debug,
"Validating %%s checksum for %s" % filename)
if not checker.is_valid():
tfp.close()
os.unlink(filename)
raise DistutilsError(
"%s validation failed for %s; "
"possible download problem?"
% (checker.hash.name, os.path.basename(filename))
) | checker is a ContentChecker |
def dimension_values(self, dimension, expanded=True, flat=True):
dimension = self.get_dimension(dimension, strict=True).name
return self.main.dimension_values(dimension, expanded, flat) | Return the values along the requested dimension.
Applies to the main object in the AdjointLayout.
Args:
dimension: The dimension to return values for
expanded (bool, optional): Whether to expand values
Whether to return the expanded values, behavior depends
on the type of data:
* Columnar: If false returns unique values
* Geometry: If false returns scalar values per geometry
* Gridded: If false returns 1D coordinates
flat (bool, optional): Whether to flatten array
Returns:
NumPy array of values along the requested dimension |
def update_channels(cls, installation_id, channels_to_add=set(),
channels_to_remove=set(), **kw):
installation_url = cls._get_installation_url(installation_id)
current_config = cls.GET(installation_url)
new_channels = list(set(current_config[]).union(channels_to_add).difference(channels_to_remove))
cls.PUT(installation_url, channels=new_channels) | Allow an application to manually subscribe or unsubscribe an
installation to a certain push channel in a unified operation.
this is based on:
https://www.parse.com/docs/rest#installations-updating
installation_id: the installation id you'd like to add a channel to
channels_to_add: the name of the channel you'd like to subscribe the user to
channels_to_remove: the name of the channel you'd like to unsubscribe the user from |
def get_instance(self, payload):
return UsageInstance(self._version, payload, account_sid=self._solution[], ) | Build an instance of UsageInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.usage.UsageInstance
:rtype: twilio.rest.api.v2010.account.usage.UsageInstance |
def total(self):
feats = imap(lambda name: self[name], self._counters())
return sum(chain(*map(lambda mset: map(abs, mset.values()), feats))) | Returns sum of all counts in all features that are multisets. |
def read_pixel_register(self, pix_regs=None, dcs=range(40), overwrite_config=False):
if pix_regs is None:
pix_regs = ["EnableDigInj", "Imon", "Enable", "C_High", "C_Low", "TDAC", "FDAC"]
self.register_utils.send_commands(self.register.get_commands("ConfMode"))
result = []
for pix_reg in pix_regs:
pixel_data = np.ma.masked_array(np.zeros(shape=(80, 336), dtype=np.uint32), mask=True)
for dc in dcs:
with self.readout(fill_buffer=True, callback=None, errback=None):
self.register_utils.send_commands(self.register.get_commands("RdFrontEnd", name=[pix_reg], dcs=[dc]))
data = self.read_data()
interpret_pixel_data(data, dc, pixel_data, invert=False if pix_reg == "EnableDigInj" else True)
if overwrite_config:
self.register.set_pixel_register(pix_reg, pixel_data.data)
result.append(pixel_data)
return result | The function reads the pixel register, interprets the data and returns a masked numpy arrays with the data for the chosen pixel register.
Pixels without any data are masked.
Parameters
----------
pix_regs : iterable, string
List of pixel register to read (e.g. Enable, C_High, ...).
If None all are read: "EnableDigInj", "Imon", "Enable", "C_High", "C_Low", "TDAC", "FDAC"
dcs : iterable, int
List of double columns to read.
overwrite_config : bool
The read values overwrite the config in RAM if true.
Returns
-------
list of masked numpy.ndarrays |
def flux(self, photon_energy, distance=1 * u.kpc, seed=None):
model = super(InverseCompton, self).flux(
photon_energy, distance=distance
)
if seed is not None:
if not isinstance(seed, int):
if seed not in self.seed_photon_fields:
raise ValueError(
"Provided seed photon field name is not in"
" the definition of the InverseCompton instance"
)
else:
seed = list(self.seed_photon_fields.keys()).index(seed)
elif seed > len(self.seed_photon_fields):
raise ValueError(
"Provided seed photon field number is larger"
" than the number of seed photon fields defined in the"
" InverseCompton instance"
)
if distance != 0:
distance = validate_scalar(
"distance", distance, physical_type="length"
)
dfac = 4 * np.pi * distance.to("cm") ** 2
out_unit = "1/(s cm2 eV)"
else:
dfac = 1
out_unit = "1/(s eV)"
model = (self.specic[seed] / dfac).to(out_unit)
return model | Differential flux at a given distance from the source from a single
seed photon field
Parameters
----------
photon_energy : :class:`~astropy.units.Quantity` float or array
Photon energy array.
distance : :class:`~astropy.units.Quantity` float, optional
Distance to the source. If set to 0, the intrinsic luminosity will
be returned. Default is 1 kpc.
seed : int, str or None
Number or name of seed photon field for which the IC contribution
is required. If set to None it will return the sum of all
contributions (default). |
def cli(execute, region, aws_access_key_id, aws_secret_access_key,
s3_staging_dir, athenaclirc, profile, database):
if (athenaclirc == ATHENACLIRC) and (not os.path.exists(os.path.expanduser(ATHENACLIRC))):
err_msg = % ATHENACLIRC
print(err_msg)
write_default_config(DEFAULT_CONFIG_FILE, ATHENACLIRC)
sys.exit(1)
if profile != :
os.environ[] = profile
athenacli = AthenaCli(
region=region,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key= aws_secret_access_key,
s3_staging_dir=s3_staging_dir,
athenaclirc=athenaclirc,
profile=profile,
database=database
)
if execute:
if os.path.exists(execute):
with open(execute) as f:
query = f.read()
else:
query = execute
try:
athenacli.formatter.format_name =
athenacli.run_query(query)
exit(0)
except Exception as e:
click.secho(str(e), err=True, fg=)
exit(1)
athenacli.run_cli() | A Athena terminal client with auto-completion and syntax highlighting.
\b
Examples:
- athenacli
- athenacli my_database |
def upload_object(bucket_path, bucket, content=,
metadata=None, acl=None, cache_control=None,
content_type=None):
obj = bucket.Object(bucket_path)
args = {}
if metadata is not None and len(metadata) > 0:
args[] = metadata
if acl is not None:
args[] = acl
if cache_control is not None:
args[] = cache_control
if content_type is not None:
args[] = content_type
obj.put(Body=content, **args) | Upload an arbitrary object to an S3 bucket.
Parameters
----------
bucket_path : `str`
Destination path (also known as the key name) of the file in the
S3 bucket.
content : `str` or `bytes`, optional
Object content.
bucket : boto3 Bucket instance
S3 bucket.
metadata : `dict`, optional
Header metadata values. These keys will appear in headers as
``x-amz-meta-*``.
acl : `str`, optional
A pre-canned access control list. See
https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
Default is `None`, meaning that no ACL is applied to the object.
cache_control : `str`, optional
The cache-control header value. For example, ``'max-age=31536000'``.
content_type : `str`, optional
The object's content type (such as ``text/html``). If left unset,
no MIME type is passed to boto3 (which defaults to
``binary/octet-stream``). |
def insert(self, key, minhash, check_duplication=True):
s recommended to not change the default, but
if you want to avoid the overhead during insert
you can set `check_duplication = False`.
'
self._insert(key, minhash, check_duplication=check_duplication, buffer=False) | Insert a key to the index, together
with a MinHash (or weighted MinHash) of the set referenced by
the key.
:param str key: The identifier of the set.
:param datasketch.MinHash minhash: The MinHash of the set.
:param bool check_duplication: To avoid duplicate keys in the storage (`default=True`).
It's recommended to not change the default, but
if you want to avoid the overhead during insert
you can set `check_duplication = False`. |
def check(self, values, namespace):
is_tuplish_type = (issubclass(self._cls, tg.Tuple) or
issubclass(type(values), self._cls))
if (not _is_sequence(values) or not is_tuplish_type or
len(values) != len(self._checks)):
return False
for thischeck, thisvalue in zip(self._checks, values):
if not thischeck(thisvalue, namespace):
return False
return True | specifying a plain tuple allows arguments that are tuples or lists;
specifying a specialized (subclassed) tuple allows only that type;
specifying a list allows only that list type. |
def _consolidate_slices(slices):
result = []
last_slice = slice(None)
for slice_ in slices:
if not isinstance(slice_, slice):
raise ValueError( % slice_)
if (result and last_slice.stop == slice_.start and
_is_one_or_none(last_slice.step) and
_is_one_or_none(slice_.step)):
last_slice = slice(last_slice.start, slice_.stop, slice_.step)
result[-1] = last_slice
else:
result.append(slice_)
last_slice = slice_
return result | Consolidate adjacent slices in a list of slices. |
def _colorize(self, depth_im, color_im):
pc_data = pc_color.data.T
dists = np.linalg.norm(pc_data, axis=1)
order = np.argsort(dists)
pc_data = pc_data[order]
pc_color = PointCloud(pc_data.T, frame=self._webcam.color_intrinsics.frame)
sorted_dists = dists[order]
sorted_depths = depth_im.data.flatten()[order]
icds = self._webcam.color_intrinsics.project(pc_color).data.T
rounded_icds = np.array(icds / 3.0, dtype=np.uint32)
unique_icds, unique_inds, unique_inv = np.unique(rounded_icds, axis=0, return_index=True, return_inverse=True)
icd_depths = sorted_dists[unique_inds]
min_depths_pp = icd_depths[unique_inv]
depth_delta_mask = np.abs(min_depths_pp - sorted_dists) < 5e-3
valid_mask = np.logical_and(np.logical_and(icds[:,0] >= 0, icds[:,0] < self._webcam.color_intrinsics.width),
np.logical_and(icds[:,1] >= 0, icds[:,1] < self._webcam.color_intrinsics.height))
valid_mask = np.logical_and(valid_mask, sorted_depths != 0.0)
valid_mask = np.logical_and(valid_mask, depth_delta_mask)
valid_icds = icds[valid_mask]
colors = color_im.data[valid_icds[:,1],valid_icds[:,0],:]
color_im_data = np.zeros((target_shape[0] * target_shape[1], target_shape[2]), dtype=np.uint8)
color_im_data[valid_mask] = colors
color_im_data[order] = color_im_data.copy()
color_im_data = color_im_data.reshape(target_shape)
return ColorImage(color_im_data, frame=self._frame) | Colorize a depth image from the PhoXi using a color image from the webcam.
Parameters
----------
depth_im : DepthImage
The PhoXi depth image.
color_im : ColorImage
Corresponding color image.
Returns
-------
ColorImage
A colorized image corresponding to the PhoXi depth image. |
def imshow(data, photometric=None, planarconfig=None, bitspersample=None,
interpolation=None, cmap=None, vmin=None, vmax=None,
figure=None, title=None, dpi=96, subplot=None, maxdim=None,
**kwargs):
if photometric is None:
photometric =
if maxdim is None:
maxdim = 2**16
isrgb = photometric in (, )
if data.dtype == :
data = data.astype()
if data.dtype.kind == :
isrgb = False
if isrgb and not (data.shape[-1] in (3, 4) or (
data.ndim > 2 and data.shape[-3] in (3, 4))):
isrgb = False
photometric =
data = data.squeeze()
if photometric in (, , None):
data = reshape_nd(data, 2)
else:
data = reshape_nd(data, 3)
dims = data.ndim
if dims < 2:
raise ValueError()
if dims == 2:
dims = 0
isrgb = False
else:
if isrgb and data.shape[-3] in (3, 4):
data = numpy.swapaxes(data, -3, -2)
data = numpy.swapaxes(data, -2, -1)
elif not isrgb and (data.shape[-1] < data.shape[-2] // 8 and
data.shape[-1] < data.shape[-3] // 8 and
data.shape[-1] < 5):
data = numpy.swapaxes(data, -3, -1)
data = numpy.swapaxes(data, -2, -1)
isrgb = isrgb and data.shape[-1] in (3, 4)
dims -= 3 if isrgb else 2
if interpolation is None:
threshold = 512
elif isinstance(interpolation, int):
threshold = interpolation
else:
threshold = 0
if isrgb:
data = data[..., :maxdim, :maxdim, :maxdim]
if threshold:
if (data.shape[-2] > threshold or data.shape[-3] > threshold):
interpolation =
else:
interpolation =
else:
data = data[..., :maxdim, :maxdim]
if threshold:
if (data.shape[-1] > threshold or data.shape[-2] > threshold):
interpolation =
else:
interpolation =
if photometric == and isrgb:
datamax = data.max()
if datamax > 255:
data = data >> 8
data = data.astype()
elif data.dtype.kind in :
if not (isrgb and data.dtype.itemsize <= 1) or bitspersample is None:
try:
bitspersample = int(math.ceil(math.log(data.max(), 2)))
except Exception:
bitspersample = data.dtype.itemsize * 8
elif not isinstance(bitspersample, inttypes):
bitspersample = data.dtype.itemsize * 8
datamax = 2**bitspersample
if isrgb:
if bitspersample < 8:
data = data << (8 - bitspersample)
elif bitspersample > 8:
data = data >> (bitspersample - 8)
data = data.astype()
elif data.dtype.kind == :
datamax = data.max()
if isrgb and datamax > 1.0:
if data.dtype.char == :
data = data.astype()
data /= datamax
else:
data = data / datamax
elif data.dtype.kind == :
datamax = 1
elif data.dtype.kind == :
data = numpy.absolute(data)
datamax = data.max()
if isrgb:
vmin = 0
else:
if vmax is None:
vmax = datamax
if vmin is None:
if data.dtype.kind == :
dtmin = numpy.iinfo(data.dtype).min
vmin = numpy.min(data)
if vmin == dtmin:
vmin = numpy.min(data[data > dtmin])
elif data.dtype.kind == :
dtmin = numpy.finfo(data.dtype).min
vmin = numpy.min(data)
if vmin == dtmin:
vmin = numpy.min(data[data > dtmin])
else:
vmin = 0
pyplot = sys.modules[]
if figure is None:
pyplot.rc(, family=, weight=, size=8)
figure = pyplot.figure(dpi=dpi, figsize=(10.3, 6.3), frameon=True,
facecolor=, edgecolor=)
try:
figure.canvas.manager.window.title(title)
except Exception:
pass
size = len(title.splitlines()) if title else 1
pyplot.subplots_adjust(bottom=0.03*(dims+2), top=0.98-size*0.03,
left=0.1, right=0.95, hspace=0.05, wspace=0.0)
if subplot is None:
subplot = 111
subplot = pyplot.subplot(subplot)
subplot.set_facecolor((0, 0, 0))
if title:
try:
title = unicode(title, )
except TypeError:
pass
pyplot.title(title, size=11)
if cmap is None:
if data.dtype.char == :
cmap =
elif data.dtype.kind in or vmin == 0:
cmap =
else:
cmap =
if photometric == :
cmap +=
image = pyplot.imshow(numpy.atleast_2d(data[(0,) * dims].squeeze()),
vmin=vmin, vmax=vmax, cmap=cmap,
interpolation=interpolation, **kwargs)
if not isrgb:
pyplot.colorbar()
def format_coord(x, y):
x = int(x + 0.5)
y = int(y + 0.5)
try:
if dims:
return % (
curaxdat[1][y, x], current, y, x)
return % (data[y, x], y, x)
except IndexError:
return
def none(event):
return
subplot.format_coord = format_coord
image.get_cursor_data = none
image.format_cursor_data = none
if dims:
current = list((0,) * dims)
curaxdat = [0, data[tuple(current)].squeeze()]
sliders = [pyplot.Slider(
pyplot.axes([0.125, 0.03*(axis+1), 0.725, 0.025]),
% axis, 0, data.shape[axis]-1, 0, facecolor=,
valfmt= % data.shape[axis]) for axis in range(dims)]
for slider in sliders:
slider.drawon = False
def set_image(current, sliders=sliders, data=data):
curaxdat[1] = data[tuple(current)].squeeze()
image.set_data(curaxdat[1])
for ctrl, index in zip(sliders, current):
ctrl.eventson = False
ctrl.set_val(index)
ctrl.eventson = True
figure.canvas.draw()
def on_changed(index, axis, data=data, current=current):
index = int(round(index))
curaxdat[0] = axis
if index == current[axis]:
return
if index >= data.shape[axis]:
index = 0
elif index < 0:
index = data.shape[axis] - 1
current[axis] = index
set_image(current)
def on_keypressed(event, data=data, current=current):
key = event.key
axis = curaxdat[0]
if str(key) in :
on_changed(key, axis)
elif key == :
on_changed(current[axis] + 1, axis)
elif key == :
on_changed(current[axis] - 1, axis)
elif key == :
curaxdat[0] = 0 if axis == len(data.shape)-1 else axis + 1
elif key == :
curaxdat[0] = len(data.shape)-1 if axis == 0 else axis - 1
elif key == :
on_changed(data.shape[axis] - 1, axis)
elif key == :
on_changed(0, axis)
figure.canvas.mpl_connect(, on_keypressed)
for axis, ctrl in enumerate(sliders):
ctrl.on_changed(lambda k, a=axis: on_changed(k, a))
return figure, subplot, image | Plot n-dimensional images using matplotlib.pyplot.
Return figure, subplot and plot axis.
Requires pyplot already imported C{from matplotlib import pyplot}.
Parameters
----------
data : nd array
The image data.
photometric : {'MINISWHITE', 'MINISBLACK', 'RGB', or 'PALETTE'}
The color space of the image data.
planarconfig : {'CONTIG' or 'SEPARATE'}
Defines how components of each pixel are stored.
bitspersample : int
Number of bits per channel in integer RGB images.
interpolation : str
The image interpolation method used in matplotlib.imshow. By default,
'nearest' will be used for image dimensions <= 512, else 'bilinear'.
cmap : str or matplotlib.colors.Colormap
The colormap maps non-RGBA scalar data to colors.
vmin, vmax : scalar
Data range covered by the colormap. By default, the complete
range of the data is covered.
figure : matplotlib.figure.Figure
Matplotlib figure to use for plotting.
title : str
Window and subplot title.
subplot : int
A matplotlib.pyplot.subplot axis.
maxdim : int
Maximum image width and length.
kwargs : dict
Additional arguments for matplotlib.pyplot.imshow. |
def update_note(note, **kwargs):
note_i = _get_note(note.id)
if note.ref_key != note_i.ref_key:
raise HydraError("Cannot convert a %s note to a %s note. Please create a new note instead."%(note_i.ref_key, note.ref_key))
note_i.set_ref(note.ref_key, note.ref_id)
note_i.value = note.value
db.DBSession.flush()
return note_i | Update a note |
def analyse_text(text):
if re.match(r, text, re.IGNORECASE):
return 1.0
elif re.search(r, text, re.IGNORECASE):
return 0.5 | Check if code contains REBOL header and so it probably not R code |
def pageassert(func):
@wraps(func)
def wrapper(*args, **kwargs):
if args[0] < 1 or args[0] > 40:
raise ValueError()
return func(*args, **kwargs)
return wrapper | Decorator that assert page number |
def build_locator(selector):
if type(selector) is tuple:
return selector
if not isinstance(selector, six.string_types):
raise InvalidSelectorException("Invalid locator values passed in")
s = selector.strip()
for test, by, index in selectors:
if test(s):
return by, s[index:]
raise InvalidSelectorException("Invalid locator values passed in: {}".format(selector)) | - ID = "#valid_id"
- CLASS_NAME = ".valid_class_name"
- TAG_NAME = "valid_tag_name"
- XPATH = start with "./" or "//" or "$x:"
- LINK_TEXT = start with "$link_text:"
- PARTIAL_LINK_TEXT = start with "$partial_link_text:"
- NAME = "@valid_name_attribute_value"
CSS_SELECTOR = all other that starts with *|.|#|[\w-]|\[|:
:type selector: str|tuple
:param selector:
:rtype: tuple[selenium.webdriver.common.by.By, str]
:return: |
def parse_text_to_table(txt):
res = []
delim = identify_delim(txt)
print(, txt, ,delim)
if delim == or delim == :
fixed_split = identify_col_pos(txt)
if fixed_split == []:
res = []
else:
res = parse_text_by_col_pos(txt, fixed_split)
else:
res = parse_text_by_delim(txt, delim)
return res | takes a blob of text and finds delimiter OR guesses
the column positions to parse into a table.
input: txt = blob of text, lines separated by \n
output: res = table of text |
def close(self):
with self.halting:
if not self.finished:
self.finished = True
while True:
with self.lock:
assert not self._pa._streams
self._pa.terminate() | Destructor for this audio interface. Waits the threads to finish their
streams, if desired. |
def allow_origins(self, *origins, methods=None, max_age=None, credentials=None, headers=None, **overrides):
response_headers = {}
if origins:
@hug.response_middleware()
def process_data(request, response, resource):
if in request.headers:
origin = request.headers[]
if origin in origins:
response.set_header(, origin)
else:
response_headers[] =
if methods:
response_headers[] = .join(methods)
if max_age:
response_headers[] = max_age
if credentials:
response_headers[] = str(credentials).lower()
if headers:
response_headers[] = headers
return self.add_response_headers(response_headers, **overrides) | Convenience method for quickly allowing other resources to access this one |
def tag_del(self, item, tag):
tags = list(self.item(item, "tags"))
if tag in tags:
tags.remove(tag)
self.item(item, tags=tuple(tags)) | Remove tag from the tags of item.
:param item: item identifier
:type item: str
:param tag: tag name
:type tag: str |
def get_record(self):
self.recid = self.get_recid()
self.remove_controlfields()
self.update_system_numbers()
self.add_systemnumber("Inspire", recid=self.recid)
self.add_control_number("003", "SzGeCERN")
self.update_collections()
self.update_languages()
self.update_reportnumbers()
self.update_authors()
self.update_journals()
self.update_subject_categories("INSPIRE", "SzGeCERN", "categories_cds")
self.update_pagenumber()
self.update_notes()
self.update_experiments()
self.update_isbn()
self.update_dois()
self.update_links_and_ffts()
self.update_date()
self.update_date_year()
self.update_hidden_notes()
self.update_oai_info()
self.update_cnum()
self.update_conference_info()
self.fields_list = [
"909", "541", "961",
"970", "690", "695",
"981",
]
self.strip_fields()
if "ANNOUNCEMENT" in self.collections:
self.update_conference_111()
self.update_conference_links()
record_add_field(self.record, "690", ind1="C", subfields=[("a", "CONFERENCE")])
if "THESIS" in self.collections:
self.update_thesis_information()
self.update_thesis_supervisors()
if "PROCEEDINGS" in self.collections:
self.update_title_to_proceeding()
self.update_author_to_proceeding()
record_add_field(self.record, "690", ind1="C", subfields=[("a", "CONFERENCE")])
if self.tag_as_cern:
record_add_field(self.record, "690", ind1="C", subfields=[("a", "CERN")])
return self.record | Override the base. |
def guard_activate(analysis_service):
calculation = analysis_service.getCalculation()
if not calculation:
return True
if not api.is_active(calculation):
return False
dependencies = calculation.getDependentServices()
for dependency in dependencies:
if not api.is_active(dependency):
return False
return True | Returns whether the transition activate can be performed for the
analysis service passed in |
def get_sections(self, s, base,
sections=[, ]):
params = self.params
s = self._remove_summary(s)
for section in sections:
key = % (base, section.lower().replace(, ))
params[key] = self._get_section(s, section)
return s | Method that extracts the specified sections out of the given string if
(and only if) the docstring follows the numpy documentation guidelines
[1]_. Note that the section either must appear in the
:attr:`param_like_sections` or the :attr:`text_sections` attribute.
Parameters
----------
s: str
Docstring to split
base: str
base to use in the :attr:`sections` attribute
sections: list of str
sections to look for. Each section must be followed by a newline
character ('\\n') and a bar of '-' (following the numpy (napoleon)
docstring conventions).
Returns
-------
str
The replaced string
References
----------
.. [1] https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
See Also
--------
delete_params, keep_params, delete_types, keep_types, delete_kwargs:
For manipulating the docstring sections
save_docstring:
for saving an entire docstring |
def _sync(self, timeout_ms=30000):
response = self.api.sync(self.sync_token, timeout_ms)
prev_sync_token = self.sync_token
self.sync_token = response["next_batch"]
if self._handle_thread is not None:
self._handle_thread.get()
is_first_sync = (prev_sync_token is None)
self._handle_thread = gevent.Greenlet(self._handle_response, response, is_first_sync)
self._handle_thread.name = (
f
)
self._handle_thread.link_exception(lambda g: self.sync_thread.kill(g.exception))
self._handle_thread.start()
if self._post_hook_func is not None:
self._post_hook_func(self.sync_token) | Reimplements MatrixClient._sync, add 'account_data' support to /sync |
def __purge():
global __receivers
newreceivers = collections.defaultdict(list)
for signal, receivers in six.iteritems(__receivers):
alive = [x for x in receivers if not __is_dead(x)]
newreceivers[signal] = alive
__receivers = newreceivers | Remove all dead signal receivers from the global receivers collection.
Note:
It is assumed that the caller holds the __lock. |
def is_F_hypergraph(self):
for hyperedge_id in self._hyperedge_attributes:
tail = self.get_hyperedge_tail(hyperedge_id)
if len(tail) > 1:
return False
return True | Indicates whether the hypergraph is an F-hypergraph.
In an F-hypergraph, all hyperedges are F-hyperedges -- that is, every
hyperedge has exactly one node in the tail.
:returns: bool -- True iff the hypergraph is an F-hypergraph. |
def authenticate(
self, end_user_ip, personal_number=None, requirement=None, **kwargs
):
data = {"endUserIp": end_user_ip}
if personal_number:
data["personalNumber"] = personal_number
if requirement and isinstance(requirement, dict):
data["requirement"] = requirement
data.update(kwargs)
response = self.client.post(self._auth_endpoint, json=data)
if response.status_code == 200:
return response.json()
else:
raise get_json_error_class(response) | Request an authentication order. The :py:meth:`collect` method
is used to query the status of the order.
Note that personal number is not needed when authentication is to
be done on the same device, provided that the returned
``autoStartToken`` is used to open the BankID Client.
Example data returned:
.. code-block:: json
{
"orderRef":"131daac9-16c6-4618-beb0-365768f37288",
"autoStartToken":"7c40b5c9-fa74-49cf-b98c-bfe651f9a7c6"
}
:param end_user_ip: IP address of the user requesting
the authentication.
:type end_user_ip: str
:param personal_number: The Swedish personal number in
format YYYYMMDDXXXX.
:type personal_number: str
:param requirement: An optional dictionary stating how the signature
must be created and verified. See BankID Relying Party Guidelines,
section 13.5 for more details.
:type requirement: dict
:return: The order response.
:rtype: dict
:raises BankIDError: raises a subclass of this error
when error has been returned from server. |
def _DoCopyFile(source_filename, target_filename, copy_symlink=True):
    """
    Copy a single file, dispatching on the protocol of the source and target URLs.

    Only local -> local copies are implemented in this path; every other
    protocol combination raises NotImplementedProtocol.

    :param unicode source_filename: Source path/URL (schemas: local, ftp, http).
    :param unicode target_filename: Target path/URL (schemas: local, ftp).
    :param copy_symlink: Passed straight through to _CopyFileLocal.
    :raises FileNotFoundError: If source_filename does not exist.
    :raises NotImplementedProtocol: For any non-local protocol combination.

    NOTE(review): the scheme-list string literals were stripped from this
    extract (the empty lists below are artifacts); confirm the actual
    protocol names against the original source.
    """
    from six.moves.urllib.parse import urlparse
    source_url = urlparse(source_filename)
    target_url = urlparse(target_filename)
    if _UrlIsLocal(source_url):
        # Local source must exist before any copy is attempted.
        if not Exists(source_filename):
            from ._exceptions import FileNotFoundError
            raise FileNotFoundError(source_filename)
        if _UrlIsLocal(target_url):
            # local -> local: the only fully supported combination.
            _CopyFileLocal(source_filename, target_filename, copy_symlink=copy_symlink)
        elif target_url.scheme in []:
            from ._exceptions import NotImplementedProtocol
            raise NotImplementedProtocol(target_url.scheme)
        else:
            from ._exceptions import NotImplementedProtocol
            raise NotImplementedProtocol(target_url.scheme)
    elif source_url.scheme in [, , ]:
        # Remote source (presumably ftp/http/https -- literals stripped).
        if _UrlIsLocal(target_url):
            from ._exceptions import NotImplementedProtocol
            raise NotImplementedProtocol(target_url.scheme)
        else:
            from ._exceptions import NotImplementedProtocol
            raise NotImplementedProtocol(target_url.scheme)
    else:
        # Unknown source protocol.
        from ._exceptions import NotImplementedProtocol
        raise NotImplementedProtocol(source_url.scheme) | :param unicode source_filename:
The source filename.
Schemas: local, ftp, http
:param unicode target_filename:
Target filename.
Schemas: local, ftp
:param copy_symlink:
@see _CopyFileLocal
:raises FileNotFoundError:
If source_filename does not exist |
def truncate(self, size):
    """Change the size of this remote file, like ``truncate()`` on a local
    Python file object: the file is grown or shrunk to ``size`` bytes.

    :param size: the new size of the file
    """
    self.sftp._log(
        DEBUG, "truncate({}, {!r})".format(hexlify(self.handle), size)
    )
    new_attr = SFTPAttributes()
    new_attr.st_size = size
    # FSETSTAT with only st_size set asks the server to resize the file.
    self.sftp._request(CMD_FSETSTAT, self.handle, new_attr)
or shrinks the size of the file, just like the ``truncate()`` method on
Python file objects.
:param size: the new size of the file |
def apply_cats(df, trn):
    """Change any columns of strings in ``df`` into categorical variables,
    using ``trn`` as a template for the category codes.

    Only columns whose counterpart in ``trn`` is categorical are converted;
    the categories (and their ordering) are copied from ``trn`` so that the
    same string maps to the same category code in both frames.

    Parameters
    ----------
    df : pandas.DataFrame
        Frame whose matching columns are converted in place.
    trn : pandas.DataFrame
        Template frame providing the category codes.
    """
    for n, c in df.items():
        if (n in trn.columns) and (trn[n].dtype.name == 'category'):
            # Assign the result instead of using set_categories(...,
            # inplace=True): the inplace keyword was deprecated and then
            # removed in pandas 2.0.
            df[n] = c.astype('category').cat.set_categories(
                trn[n].cat.categories, ordered=True)
a template for the category codes.
Parameters:
-----------
df: A pandas dataframe. Any columns of strings will be changed to
categorical values. The category codes are determined by trn.
trn: A pandas dataframe. When creating a category for df, it looks up the
what the category's code were in trn and makes those the category codes
for df.
Examples:
---------
>>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})
>>> df
col1 col2
0 1 a
1 2 b
2 3 a
note the type of col2 is string
>>> train_cats(df)
>>> df
col1 col2
0 1 a
1 2 b
2 3 a
now the type of col2 is category {a : 1, b : 2}
>>> df2 = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['b', 'a', 'a']})
>>> apply_cats(df2, df)
col1 col2
0 1 b
1 2 a
2 3 a
now the type of col is category {a : 1, b : 2} |
def mul_table(self, other):
    """Fast scalar multiplication using an LWNAF precomputation table.

    The table is built lazily on first use and cached on the instance, so
    repeated multiplications of the same point amortize the setup cost.
    Returns NotImplemented when ``other`` cannot be coerced to a big int,
    letting Python fall back to the reflected operation.
    """
    other = coerceBigInt(other)
    if not other:
        return NotImplemented
    # Reduce the scalar modulo the G2 group order before multiplying.
    other %= orderG2()
    if not self._table:
        # Lazily build and cache the precomputation table (C call via ctypes).
        self._table = lwnafTable()
        librelic.ep2_mul_pre_lwnaf(byref(self._table), byref(self))
    result = G2Element()
    librelic.ep2_mul_fix_lwnaf(byref(result), byref(self._table),
        byref(other))
    return result | Fast multiplication using a the LWNAF precomputation table.
def to_dataframe(self, dtypes=None):
    """Create a :class:`pandas.DataFrame` from the rows in this page.

    Requires the pandas library.  Each row is treated as a mapping from
    column name to value; columns listed in ``dtypes`` are built as
    :class:`pandas.Series` with the requested dtype, the rest use pandas
    defaults.  Column order follows ``self._column_names``.

    :param dtypes: optional mapping of column name -> pandas dtype
    :return: a data frame of all rows in the page
    :raises ImportError: if pandas is not installed
    """
    if pandas is None:
        raise ImportError(_PANDAS_REQUIRED)
    if dtypes is None:
        dtypes = {}
    # Gather per-column value lists from the row mappings.
    gathered = collections.defaultdict(list)
    for record in self:
        for name in record:
            gathered[name].append(record[name])
    # Apply any explicitly requested dtypes.
    for name, dtype in dtypes.items():
        gathered[name] = pandas.Series(gathered[name], dtype=dtype)
    return pandas.DataFrame(gathered, columns=self._column_names)
This method requires the pandas libary to create a data frame and the
fastavro library to parse row blocks.
.. warning::
DATETIME columns are not supported. They are currently parsed as
strings in the fastavro library.
Args:
dtypes ( \
Map[str, Union[str, pandas.Series.dtype]] \
):
Optional. A dictionary of column names pandas ``dtype``s. The
provided ``dtype`` is used when constructing the series for
the column specified. Otherwise, the default pandas behavior
is used.
Returns:
pandas.DataFrame:
A data frame of all rows in the stream. |
def wrap_hvac(msg):
    """Decorator factory wrapping a Vault API interaction with uniform
    error handling.

    ``msg`` is interpolated into the permission-denied message.  hvac
    InvalidRequest/Forbidden errors whose first error text indicates a
    permission problem are re-raised as AomiCredentials; everything else
    propagates unchanged.
    """
    def wrap_call(func):
        def func_wrapper(self, vault_client):
            try:
                return func(self, vault_client)
            except (hvac.exceptions.InvalidRequest,
                    hvac.exceptions.Forbidden) as vault_exception:
                # NOTE(review): the comparison literal was stripped from this
                # extract -- presumably Vault's "permission denied" string;
                # confirm against the original source.
                if vault_exception.errors[0] == :
                    emsg = "Permission denied %s from %s" % (msg, self.path)
                    raise aomi.exceptions.AomiCredentials(emsg)
                else:
                    raise
        return func_wrapper
    return wrap_call | Error catching Vault API wrapper
This decorator wraps API interactions with Vault. It will
catch and return appropriate error output on common
problems. Do we even need this now that we extend the
hvac class? |
def summarize_variable(self, variable = None, use_baseline = False, weighted = False, force_compute = False):
    """Print a summary of a variable, including its memory usage and
    per-period statistics (mean/min/max/median, default-value share).

    :param str variable: the variable being summarized
    :param bool use_baseline: summarize against the baseline simulation
        instead of the reform simulation
    :param bool weighted: weight the statistics by the entity weights
    :param bool force_compute: compute the variable first if it has not
        been computed yet

    NOTE(review): several dict-key string literals (memory-usage info keys,
    weight column key) were stripped from this extract; confirm them
    against the original source.
    """
    if use_baseline:
        simulation = self.baseline_simulation
    else:
        simulation = self.simulation
    tax_benefit_system = simulation.tax_benefit_system
    assert variable in tax_benefit_system.variables, "{} is not a valid variable".format(variable)
    variable_instance = tax_benefit_system.variables[variable]
    default_value = variable_instance.default_value
    value_type = variable_instance.value_type
    if weighted:
        # Weights come from the entity-specific weight column.
        weight_variable = self.weight_column_name_by_entity[variable_instance.entity.key]
        weights = simulation.calculate(weight_variable, simulation.period)
    infos = simulation.get_memory_usage(variables = [variable])[].get(variable)
    if not infos:
        # Variable not computed yet: optionally force computation and recurse.
        if force_compute:
            self.calculate_variable(variable = variable, period = simulation.period, use_baseline = use_baseline)
            self.summarize_variable(variable = variable, use_baseline = use_baseline, weighted = weighted)
            return
        else:
            print("{} is not computed yet. Use keyword argument force_compute = True".format(variable))
            return
    header_line = "{}: {} periods * {} cells * item size {} ({}, default = {}) = {}".format(
        variable,
        infos[],
        infos[],
        infos[],
        infos[],
        default_value,
        humanize.naturalsize(infos[], gnu = True),
        )
    print("")
    print(header_line)
    print("Details:")
    holder = simulation.get_holder(variable)
    if holder is not None:
        if holder.variable.definition_period == ETERNITY:
            # ETERNITY variables have a single, permanent array.
            array = holder.get_array(ETERNITY)
            print("permanent: mean = {}, min = {}, max = {}, median = {}, default = {:.1%}".format(
                array.mean() if not weighted else np.average(array, weights = weights),
                array.min(),
                array.max(),
                np.median(array),
                (
                    (array == default_value).sum() / len(array)
                    if not weighted
                    else ((array == default_value) * weights).sum() / weights.sum()
                    )
                ))
        else:
            # One block of statistics per known period.
            for period in sorted(holder.get_known_periods()):
                array = holder.get_array(period)
                if array.shape == ():
                    # Scalar (0-d) array: a single constant value.
                    print("{}: always = {}".format(period, array))
                    continue
                if value_type == Enum:
                    # Enum variables: report weighted share per category.
                    possible_values = variable_instance.possible_values
                    categories_by_index = dict(zip(
                        range(len(possible_values._member_names_)),
                        possible_values._member_names_
                        ))
                    categories_type = pd.api.types.CategoricalDtype(categories = possible_values._member_names_, ordered = True)
                    df = pd.DataFrame({variable: array}).replace(categories_by_index).astype(categories_type)
                    df[] = weights if weighted else 1
                    groupby = df.groupby(variable)[].sum()
                    total = groupby.sum()
                    expr = [" {} = {:.2e} ({:.1%})".format(index, row, row / total) for index, row in groupby.iteritems()]
                    print("{}:{}.".format(period, ",".join(expr)))
                    continue
                print("{}: mean = {}, min = {}, max = {}, mass = {:.2e}, default = {:.1%}, median = {}".format(
                    period,
                    array.mean() if not weighted else np.average(array, weights = weights),
                    array.min(),
                    array.max(),
                    array.sum() if not weighted else np.sum(array * weights),
                    (
                        (array == default_value).sum() / len(array)
                        if not weighted
                        else ((array == default_value) * weights).sum() / weights.sum()
                        ),
                    np.median(array),
                    )) | Prints a summary of a variable including its memory usage.
:param string variable: the variable being summarized
:param bool use_baseline: the tax-benefit-system considered
:param bool weighted: whether the produced statistics should be weigthted or not
:param bool force_compute: whether the computation of the variable should be forced
Example:
>>> from openfisca_survey_manager.tests.test_scenario import create_randomly_initialized_survey_scenario
>>> survey_scenario = create_randomly_initialized_survey_scenario()
>>> survey_scenario.summarize_variable(variable = "housing_occupancy_status", force_compute = True)
<BLANKLINE>
housing_occupancy_status: 1 periods * 5 cells * item size 2 (<type 'numpy.int16'>, default = HousingOccupancyStatus.tenant) = 10B
Details:
2017-01: owner = 0.00e+00 (0.0%), tenant = 5.00e+00 (100.0%), free_lodger = 0.00e+00 (0.0%), homeless = 0.00e+00 (0.0%).
>>> survey_scenario.summarize_variable(variable = "rent", force_compute = True)
<BLANKLINE>
rent: 2 periods * 5 cells * item size 4 (<type 'numpy.float32'>, default = 0) = 40B
Details:
2017-01: mean = 562.385070801, min = 156.01864624, max = 950.714294434, mass = 2.81e+03, default = 0.0%, median = 598.658508301
2018-01: mean = 562.385070801, min = 156.01864624, max = 950.714294434, mass = 2.81e+03, default = 0.0%, median = 598.658508301 |
def main(self):
    """Run the necessary analysis methods in the correct order.

    If the GDCS report file does not exist yet, run the sipping analysis
    and generate the report; otherwise just parse the existing report.
    """
    if not os.path.isfile(self.gdcs_report):
        # NOTE(review): the log format string was stripped from this extract.
        logging.info(.format(self.analysistype))
        ShortKSippingMethods(self, self.cutoff)
        self.reporter()
    else:
        # Report already exists: reuse it instead of recomputing.
        self.report_parse() | Run the necessary methods in the correct order
def __GetAuthorizationTokenUsingMasterKey(verb,
                                          resource_id_or_fullname,
                                          resource_type,
                                          headers,
                                          master_key):
    """Compute the Cosmos DB authorization token from the master key.

    Builds the canonical string-to-sign from the verb, resource type,
    resource id, and date headers, HMAC-SHA256 signs it with the base64
    decoded master key, and returns the URL-ready token string.

    :param str verb: HTTP verb of the request.
    :param str resource_id_or_fullname: resource id or full name.
    :param str resource_type: resource type segment.
    :param dict headers: request headers (XDate / HttpDate are signed).
    :param str master_key: base64-encoded account master key.
    :return: the authorization token.

    NOTE(review): the string-to-sign template and the token literals were
    stripped from this extract; confirm the exact format against the
    original source / Cosmos DB REST auth documentation.
    """
    key = base64.b64decode(master_key)
    text = .format(
        verb=(verb.lower() or ),
        resource_type=(resource_type.lower() or ),
        resource_id_or_fullname=(resource_id_or_fullname or ),
        x_date=headers.get(http_constants.HttpHeaders.XDate, ).lower(),
        http_date=headers.get(http_constants.HttpHeaders.HttpDate, ).lower())
    if six.PY2:
        body = text.decode()
        digest = hmac.new(key, body, sha256).digest()
        signature = digest.encode()
    else:
        # Python 3: sign the encoded body and base64 the digest.
        body = text.encode()
        digest = hmac.new(key, body, sha256).digest()
        signature = base64.encodebytes(digest).decode()
    master_token =
    token_version =
    # signature[:-1] drops the trailing newline added by encodebytes.
    return .format(type=master_token,
                   ver=token_version,
                   sig=signature[:-1]) | Gets the authorization token using `master_key.
:param str verb:
:param str resource_id_or_fullname:
:param str resource_type:
:param dict headers:
:param str master_key:
:return:
The authorization token.
:rtype: dict |
def makedbthreads(self, folder):
    """Set up worker threads and queue allele files for BLAST db creation.

    One daemon thread per folder is started running ``self.makeblastdb``;
    allele files found in each folder are pushed onto ``self.dqueue`` and
    the method blocks until the queue is fully processed.

    :param folder: folders with sequence files from which to create blast
        databases
    """
    for i in range(len(folder)):
        # Daemon threads so they die with the main process.
        threads = Thread(target=self.makeblastdb, args=())
        threads.setDaemon(True)
        threads.start()
    for alleledir in folder:
        # NOTE(review): the glob pattern literal was stripped from this
        # extract; confirm the file pattern against the original source.
        allelefiles = glob(.format(alleledir))
        for allelefile in allelefiles:
            self.dqueue.put(allelefile)
    self.dqueue.join() | Setup and create threads for class
:param folder: folder with sequence files with which to create blast databases |
def sampleLocationFromFeature(self, feature):
    """Sample a location from one specific feature of the object.

    Supported features are "face", "edge", "vertex", and "random" (which
    falls back to uniform sampling).  Only supported in three dimensions.

    :raises NameError: if the feature name is not recognized
    """
    samplers = {
        "face": self._sampleFromFaces,
        "edge": self._sampleFromEdges,
        "vertex": self._sampleFromVertices,
        "random": self.sampleLocation,
    }
    sampler = samplers.get(feature)
    if sampler is None:
        raise NameError("No such feature in {}: {}".format(self, feature))
    return sampler()
This is only supported with three dimensions. |
def related_lua_args(self):
    """Generator of load_related arguments.

    For every select_related field on the query element, yields
    ``(field_name, data)`` where ``data`` describes the related lookup
    (attribute name, field type, base key, and the fields to load).

    NOTE(review): several string literals (empty-string default for ``bk``,
    the fallback field name, and the data dict keys) were stripped from
    this extract; confirm them against the original source.
    """
    related = self.queryelem.select_related
    if related:
        meta = self.meta
        for rel in related:
            field = meta.dfields[rel]
            relmodel = field.relmodel
            bk = self.backend.basekey(relmodel._meta) if relmodel else
            fields = list(related[rel])
            # Primary key is implicit; never request it explicitly.
            if meta.pkname() in fields:
                fields.remove(meta.pkname())
                if not fields:
                    fields.append()
            ftype = field.type if field in meta.multifields else
            data = {: field.attname, : ftype,
                    : bk, : fields}
            yield field.name, data | Generator of load_related arguments
def generate_output_network(self, json_data=None, hr=True, show_name=False,
                            colorize=True):
    """Generate CLI output for RDAP network results.

    Iterates the network section of ``json_data`` and dispatches each key
    to the appropriate list/notices/events formatter, falling back to a
    generic key/value line for everything else.

    :param dict json_data: the data to process (defaults to empty).
    :param bool hr: enable human readable key translations.
    :param bool show_name: show the long human readable name, not just the
        short one.
    :param bool colorize: colorize console output with ANSI colors.
    :return: the generated output string.

    NOTE(review): the key-name string literals in the membership tests and
    the HR_RDAP lookups were stripped from this extract; confirm them
    against the original source.
    """
    if json_data is None:
        json_data = {}
    # Parent header line for the network section.
    output = generate_output(
        line=,
        short=HR_RDAP[][] if hr else ,
        name=HR_RDAP[][] if (hr and show_name) else None,
        is_parent=True,
        colorize=colorize
    )
    for key, val in json_data[].items():
        if key in [, ]:
            # List-valued keys.
            output += self.generate_output_list(
                source=,
                key=key,
                val=val,
                line=,
                hr=hr,
                show_name=show_name,
                colorize=colorize
            )
        elif key in [, ]:
            # Notices/remarks style keys.
            output += self.generate_output_notices(
                source=,
                key=key,
                val=val,
                line=,
                hr=hr,
                show_name=show_name,
                colorize=colorize
            )
        elif key == :
            # Events key.
            output += self.generate_output_events(
                source=,
                key=key,
                val=val,
                line=,
                hr=hr,
                show_name=show_name,
                colorize=colorize
            )
        elif key not in []:
            # Generic scalar key/value output.
            output += generate_output(
                line=,
                short=HR_RDAP[][key][] if hr else key,
                name=HR_RDAP[][key][] if (
                    hr and show_name) else None,
                value=val,
                colorize=colorize
            )
    return output | The function for generating CLI output RDAP network results.
Args:
json_data (:obj:`dict`): The data to process. Defaults to None.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output. |
def reset(self, iface=None, client_mac=None, xid=None, scriptfile=None):
    """Reset the client's attributes for the INIT state.

    Resolves the interface and client MAC (reading the hardware address
    from the interface when not supplied), rebuilds the DHCPCAP helper,
    optionally attaches a client script, and clears all per-session
    counters/state back to STATE_PREINIT.
    """
    # NOTE(review): the debug message literal was stripped from this extract.
    logger.debug()
    if iface is None:
        iface = conf.iface
    if client_mac is None:
        # Derive the MAC from the interface's raw hardware address.
        tempmac = get_if_raw_hwaddr(iface)
        if isinstance(tempmac, tuple) and len(tempmac) == 2:
            # Some scapy versions return (family, address) tuples.
            mac = tempmac[1]
        else:
            mac = tempmac
        client_mac = str2mac(mac)
    self.client = DHCPCAP(iface=iface, client_mac=client_mac, xid=xid)
    if scriptfile is not None:
        self.script = ClientScript(scriptfile)
    else:
        self.script = None
    # Clear per-session state.
    self.time_sent_request = None
    self.discover_attempts = 0
    self.request_attempts = 0
    self.current_state = STATE_PREINIT
    self.offers = list() | Reset object attributes when state is INIT.
def remove_writer(self, address):
    """Remove a writer address from the routing table, if present.

    Uses ``set.discard`` so a missing address is a no-op.

    NOTE(review): both log message strings are truncated in this extract
    ('"[' ...); confirm them against the original source.
    """
    log_debug("[
    self.routing_table.writers.discard(address)
    log_debug("[ | Remove a writer address from the routing table, if present.
def get_rmse(self, data_x=None, data_y=None):
    """Return the root-mean-square error of ``self.bestfit_func``.

    args:
        data_x: array-like, default=np.array(self.args["x"])
            x values at which the best-fit function is evaluated
        data_y: array-like, default=np.array(self.args["y"])
            observed y values compared against the fit

    raises:
        ValueError: if data_x and data_y have different lengths
    """
    xs = np.array(self.args["x"]) if data_x is None else data_x
    ys = np.array(self.args["y"]) if data_y is None else data_y
    if len(xs) != len(ys):
        raise ValueError("Lengths of data_x and data_y are different")
    residuals = self.bestfit_func(xs) - ys
    return np.sqrt(np.mean(residuals ** 2))
self.bestfit_func
args:
data_x: array-like, default=np.array(self.args["x"])
x values at which the best-fit function is evaluated
data_y: array-like, default=np.array(self.args["y"])
observed y values compared against the fit; must have the
same length as data_x, otherwise a ValueError is raised
def PROFILE_SDRAUTIAN(sg0,GamD,Gam0,Gam2,Shift0,Shift2,anuVC,sg):
    # Speed-dependent Rautian profile: the Hartmann-Tran profile (pcqsdhc)
    # with the partial-correlation parameter fixed to zero (cZero).
    #   sg0    : unperturbed line position (cm-1)
    #   GamD   : Doppler HWHM (cm-1)
    #   Gam0   : speed-averaged line width (cm-1)
    #   Gam2   : speed dependence of the line width (cm-1)
    #   Shift0 : speed-averaged line shift (cm-1)
    #   Shift2 : speed dependence of the line shift (cm-1)
    #   anuVC  : velocity-changing frequency (cm-1)
    #   sg     : wavenumber grid of the computation (cm-1)
    return pcqsdhc(sg0,GamD,Gam0,Gam2,Shift0,Shift2,anuVC,cZero,sg) | # Speed dependent Rautian profile based on HTP.
# Input parameters:
# sg0 : Unperturbed line position in cm-1 (Input).
# GamD : Doppler HWHM in cm-1 (Input)
# Gam0 : Speed-averaged line-width in cm-1 (Input).
# Gam2 : Speed dependence of the line-width in cm-1 (Input).
# anuVC : Velocity-changing frequency in cm-1 (Input).
# Shift0 : Speed-averaged line-shift in cm-1 (Input).
# Shift2 : Speed dependence of the line-shift in cm-1 (Input)
# sg : Current WaveNumber of the Computation in cm-1 (Input). |
def find_by_user(self, user, params=None, **options):
    """Return the compact records for all teams to which ``user`` is assigned.

    Parameters
    ----------
    user : str
        Identifier for the user: an email address, the globally unique
        user id, or the keyword ``me`` for the current user.
    params : dict, optional
        Parameters for the request, e.g. ``organization`` (workspace or
        organization id to filter teams on).  Defaults to an empty dict.
        (Changed from a shared mutable ``{}`` default, which is unsafe if
        the request layer ever mutates it.)
    """
    path = "/users/%s/teams" % (user)
    return self.client.get_collection(
        path, params if params is not None else {}, **options)
Parameters
----------
user : {String} An identifier for the user. Can be one of an email address,
the globally unique identifier for the user, or the keyword `me`
to indicate the current user making the request.
[params] : {Object} Parameters for the request
- [organization] : {Id} The workspace or organization to filter teams on. |
def start(self):
    """Start the actual syndic daemon, then shut down on exit.

    NOTE(review): this extract is garbled -- docstring text ("don't ever
    forget to run super(YourSubClass, self).start()", start-up/stop log
    strings) has leaked into the code column, and the original start-up
    logic is not recoverable here.  Only the trailing ``self.shutdown()``
    is clearly code; confirm against the original source before editing.
    """
    t **ever** forget to run:
    super(YourSubClass, self).start()
    NOTE: Run any required code before calling `super()`.
    userStarting upStopping')
    self.shutdown() | Start the actual syndic.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).start()
NOTE: Run any required code before calling `super()`. |
def assume(self, cond):
    """Emit an optimizer hint telling LLVM that *cond* is always true.

    Declares the ``llvm.assume`` intrinsic on the module (idempotent) and
    emits a call to it with ``cond`` as the sole argument.
    """
    intrinsic = self.module.declare_intrinsic("llvm.assume")
    return self.call(intrinsic, [cond])
def _forward_request(transaction, destination, path):
    """Forward a CoAP request to another server and copy the answer back
    into the transaction's response.

    A fresh request is built from the original one with the proxy/block/
    observe options removed, re-targeted at ``destination``/``path``.  If
    no response arrives the transaction's response code is set to
    SERVICE_UNAVAILABLE.

    :type transaction: Transaction
    :param transaction: the transaction that owns the request
    :param destination: the destination of the request (IP, port)
    :param path: the path of the request
    :rtype: Transaction
    :return: the edited transaction
    """
    client = HelperClient(destination)
    request = Request()
    # Copy the original options, then strip everything that must not be
    # forwarded (block transfer state, proxy options, observe).
    request.options = copy.deepcopy(transaction.request.options)
    del request.block2
    del request.block1
    del request.uri_path
    del request.proxy_uri
    del request.proxy_schema
    del request.observe
    request.uri_path = path
    request.destination = destination
    request.payload = transaction.request.payload
    request.code = transaction.request.code
    response = client.send_request(request)
    client.stop()
    if response is not None:
        # Mirror the upstream answer onto the original transaction.
        transaction.response.payload = response.payload
        transaction.response.code = response.code
        transaction.response.options = response.options
    else:
        transaction.response.code = defines.Codes.SERVICE_UNAVAILABLE.number
    return transaction | Forward requests.
:type transaction: Transaction
:param transaction: the transaction that owns the request
:param destination: the destination of the request (IP, port)
:param path: the path of the request.
:rtype : Transaction
:return: the edited transaction |
def fromML(mat):
    """Convert a matrix from the new ml-local representation to the mllib
    representation.

    This does NOT copy the underlying data; references are shared.

    :param mat: a :py:class:`pyspark.ml.linalg.Matrix` (dense or sparse)
    :return: the equivalent :py:class:`pyspark.mllib.linalg.Matrix`
    :raises TypeError: for any unsupported matrix type
    """
    if isinstance(mat, newlinalg.DenseMatrix):
        return DenseMatrix(mat.numRows, mat.numCols, mat.values, mat.isTransposed)
    elif isinstance(mat, newlinalg.SparseMatrix):
        return SparseMatrix(mat.numRows, mat.numCols, mat.colPtrs, mat.rowIndices,
                            mat.values, mat.isTransposed)
    else:
        raise TypeError("Unsupported matrix type %s" % type(mat)) | Convert a matrix from the new mllib-local representation.
This does NOT copy the data; it copies references.
:param mat: a :py:class:`pyspark.ml.linalg.Matrix`
:return: a :py:class:`pyspark.mllib.linalg.Matrix`
.. versionadded:: 2.0.0 |
def _RunMethod(self, method_config, request, global_params=None,
               upload=None, upload_config=None, download=None):
    """Execute an API method call, optionally as an upload or download.

    Prepares the HTTP request, lets a download or upload object take over
    the transfer when given, otherwise issues the request through the
    client's http object with the client's retry settings, and finally
    processes the HTTP response.

    NOTE(review): the option-dict key literals were stripped from this
    extract (num_retries / max_retry_wait / check_response / retry, per the
    values assigned); confirm against the original source.
    """
    if upload is not None and download is not None:
        # Simultaneous upload+download is not supported.
        raise exceptions.NotYetImplementedError(
            )
    http_request = self.PrepareHttpRequest(
        method_config, request, global_params, upload, upload_config,
        download)
    if download is not None:
        # Downloads manage their own transfer; nothing more to do here.
        download.InitializeDownload(http_request, client=self.client)
        return
    http_response = None
    if upload is not None:
        # Simple uploads may already yield the final response here.
        http_response = upload.InitializeUpload(
            http_request, client=self.client)
    if http_response is None:
        http = self.__client.http
        if upload and upload.bytes_http:
            # Resumable uploads can use a dedicated bytes transport.
            http = upload.bytes_http
        opts = {
            : self.__client.num_retries,
            : self.__client.max_retry_wait,
        }
        if self.__client.check_response_func:
            opts[] = self.__client.check_response_func
        if self.__client.retry_func:
            opts[] = self.__client.retry_func
        http_response = http_wrapper.MakeRequest(
            http, http_request, **opts)
    return self.ProcessHttpResponse(method_config, http_response, request) | Call this method with request.
def make_datastore_api(client):
    """Create an instance of the GAPIC Datastore API.

    Chooses a secure gRPC channel when the client's base URL is https,
    otherwise an insecure one, and attaches the library version info.

    :type client: :class:`~google.cloud.datastore.client.Client`
    :param client: The client that holds configuration details.
    :rtype: :class:`.datastore.v1.datastore_client.DatastoreClient`
    :returns: A datastore API instance with the proper credentials.
    """
    parsed = six.moves.urllib_parse.urlparse(client._base_url)
    host = parsed.netloc
    if parsed.scheme == "https":
        channel = make_secure_channel(client._credentials, DEFAULT_USER_AGENT, host)
    else:
        channel = insecure_channel(host)
    info = client_info.ClientInfo(
        client_library_version=__version__, gapic_version=__version__
    )
    return datastore_client.DatastoreClient(channel=channel, client_info=info)
:type client: :class:`~google.cloud.datastore.client.Client`
:param client: The client that holds configuration details.
:rtype: :class:`.datastore.v1.datastore_client.DatastoreClient`
:returns: A datastore API instance with the proper credentials. |
def set_learning_rate(self, lr):
    """Set a new learning rate on the wrapped optimizer.

    Parameters
    ----------
    lr : float
        The new learning rate of the optimizer.

    Raises
    ------
    UserWarning
        If no optimizer has been defined yet.
    """
    # Guard clause: an optimizer must exist before its LR can be mutated.
    if not isinstance(self._optimizer, opt.Optimizer):
        raise UserWarning("Optimizer has to be defined before its learning "
                          "rate is mutated.")
    self._optimizer.set_learning_rate(lr)
Parameters
----------
lr : float
The new learning rate of the optimizer. |
def write(self, data: Union[bytes, memoryview]) -> "Future[None]":
    """Asynchronously write ``data`` to this stream.

    Returns a `.Future` that resolves with ``None`` once the write has
    completed.  ``data`` may be `bytes` or `memoryview`.

    :raises StreamBufferFullError: if the buffered data would exceed
        ``max_write_buffer_size``.
    """
    self._check_closed()
    if data:
        if (
            self.max_write_buffer_size is not None
            and len(self._write_buffer) + len(data) > self.max_write_buffer_size
        ):
            raise StreamBufferFullError("Reached maximum write buffer size")
        self._write_buffer.append(data)
        # _total_write_index tracks cumulative bytes queued; futures below
        # are resolved once the stream has flushed past their index.
        self._total_write_index += len(data)
    future = Future()
    # Consume the exception so an abandoned future doesn't log a warning.
    future.add_done_callback(lambda f: f.exception())
    self._write_futures.append((self._total_write_index, future))
    if not self._connecting:
        # Try to flush immediately; register for WRITE events if data is left.
        self._handle_write()
        if self._write_buffer:
            self._add_io_state(self.io_loop.WRITE)
        self._maybe_add_error_listener()
    return future | Asynchronously write the given data to this stream.
This method returns a `.Future` that resolves (with a result
of ``None``) when the write has been completed.
The ``data`` argument may be of type `bytes` or `memoryview`.
.. versionchanged:: 4.0
Now returns a `.Future` if no callback is given.
.. versionchanged:: 4.5
Added support for `memoryview` arguments.
.. versionchanged:: 6.0
The ``callback`` argument was removed. Use the returned
`.Future` instead. |
def autoencoder_residual():
    """Residual autoencoder model: autoregressive base hparams plus
    residual-block settings."""
    hparams = autoencoder_autoregressive()
    hparams.optimizer = "Adafactor"
    hparams.clip_grad_norm = 1.0
    hparams.learning_rate_constant = 0.5
    hparams.learning_rate_warmup_steps = 500
    hparams.learning_rate_schedule = "constant * linear_warmup * rsqrt_decay"
    hparams.num_hidden_layers = 5
    hparams.hidden_size = 64
    hparams.max_hidden_size = 1024
    # New residual-specific hyperparameters.
    for name, value in (
        ("num_residual_layers", 2),
        ("residual_kernel_height", 3),
        ("residual_kernel_width", 3),
        ("residual_filter_multiplier", 2.0),
        ("residual_dropout", 0.2),
        ("residual_use_separable_conv", int(True)),
        ("kl_beta", 1.0),
    ):
        hparams.add_hparam(name, value)
    return hparams
def mapkeys(function, dict_):
    """Return a new dictionary whose keys are ``function`` applied to the
    keys of ``dict_`` (values are kept as-is).

    If ``function`` returns the same value for more than one key, it is
    undefined which key wins in the result.

    :param function: callable taking a dictionary key, or None for the
        identity function
    """
    ensure_mapping(dict_)
    if function is None:
        function = identity()
    else:
        function = ensure_callable(function)
    return dict_.__class__(
        (function(key), value) for key, value in iteritems(dict_))
to the keys of given dictionary.
.. warning::
If ``function`` returns the same value for more than one key,
it is undefined which key will be chosen for the resulting dictionary.
:param function: Function taking a dictionary key,
or None (corresponding to identity function)
.. versionadded:: 0.0.2 |
def _ensure_patient_group_is_ok(patient_object, patient_name=None):
    """Validate that a samples patient group is formatted properly.

    :param set|dict patient_object: the values passed for the patient group
    :param str patient_name: optional name for the group (used in errors)
    :raises ParameterError: if a required entry is missing or inconsistent

    NOTE(review): nearly all string literals in this block (assert message,
    key names, error texts, regex patterns) were stripped from this
    extract, so the exact membership keys and patterns being checked cannot
    be recovered here -- confirm every check against the original source.
    """
    from protect.addons.common import TCGAToGTEx
    assert isinstance(patient_object, (set, dict)), % (patient_object, patient_name)
    test_set = set(patient_object)
    if not in patient_object:
        raise ParameterError(( % patient_name) +
                             )
    elif patient_object[] not in TCGAToGTEx:
        raise ParameterError(( % patient_name) +
                             )
    if {, , }.issubset(test_set):
        # All required entries present: nothing more to check.
        pass
    else:
        if not in test_set:
            raise ParameterError(( % patient_name) +
                                 )
        if (({re.search(, x) for x in test_set} == {None} or
             {re.search(, x) for x in test_set} == {None}) and
                ( not in test_set and not in test_set)):
            raise ParameterError(( % patient_name) +
                                 )
        if {re.search(, x) for x in test_set} == {None}:
            if not in test_set and in test_set:
                pass
            else:
                raise ParameterError(( % patient_name) +
                                     )
        if in test_set and not in test_set:
            if not in test_set:
                raise ParameterError(( % patient_name +
                                     )) | Ensure that the provided entries for the patient groups is formatted properly.
:param set|dict patient_object: The values passed to the samples patient group
:param str patient_name: Optional name for the set
:raises ParameterError: If required entry doesnt exist |
def calculate_lstm_output_shapes(operator):
    """Propagate shapes for an LSTM operator: see LSTM's conversion
    function for the derivation of its output shapes.

    Copies ``state_shape`` onto the optional initial-state inputs (Y_h,
    Y_c) and sets the main output to ``output_shape`` plus the state
    shapes on the optional state outputs.

    NOTE(review): this extract is garbled -- the lines computing
    ``state_shape`` and ``output_shape`` have been replaced by leaked
    docstring text, so those definitions must be recovered from the
    original source.
    """
    s conversion function for its output shapes.
    Input must be a 2-D tensorNoneNones _fix_shapes function
    if len(operator.inputs) > 1:
        # Optional initial hidden state input.
        Y_h_in = operator.inputs[1]
        Y_h_in.type.shape = state_shape
    if len(operator.inputs) > 2:
        # Optional initial cell state input.
        Y_c_in = operator.inputs[2]
        Y_c_in.type.shape = state_shape
    operator.outputs[0].type.shape = output_shape
    if len(operator.outputs) > 1:
        operator.outputs[1].type.shape = state_shape
    if len(operator.outputs) > 2:
        operator.outputs[2].type.shape = state_shape | See LSTM's conversion function for its output shapes.
def construct_start_message(self):
    """Collect preliminary run info at the start of the DFK.

    Usernames and hostnames are anonymized (first 10 hex chars of their
    SHA-256 hashes) before being included.

    Returns:
        Message dict dumped as a json string, ready for UDP.

    NOTE(review): the message dict key literals were stripped from this
    extract; confirm them against the original source.
    """
    # Anonymize the local username and hostname.
    uname = getpass.getuser().encode()
    hashed_username = hashlib.sha256(uname).hexdigest()[0:10]
    hname = socket.gethostname().encode()
    hashed_hostname = hashlib.sha256(hname).hexdigest()[0:10]
    message = {: self.uuid,
               : hashed_username,
               : hashed_hostname,
               : self.test_mode,
               : self.parsl_version,
               : self.python_version,
               : platform.system(),
               : platform.release(),
               : time.time()}
    return json.dumps(message) | Collect preliminary run info at the start of the DFK.
Returns :
- Message dict dumped as json string, ready for UDP |
def get_output_files(self):
    """Return the list of output files for this DAG node, plus those of
    its job when the job is a CondorDAGJob."""
    files = list(self.__output_files)
    if isinstance(self.job(), CondorDAGJob):
        # Extend our copy with the job-level outputs.
        files += self.job().get_output_files()
    return files
def matchTypes(accept_types, have_types):
    """Given a parsed Accept: header and the available MIME types, return
    the acceptable types with their quality markdowns.

    Result is sorted by descending quality, ties broken by the order of
    ``have_types``.  Type signature:
    ([(str, str, float)], [str]) -> [(str, float)]

    NOTE(review): the wildcard literals ('*' in the main/sub comparisons)
    and the '/' separator in ``mtype.split`` were stripped from this
    extract; confirm against the original source.
    """
    if not accept_types:
        # Accept absent: everything is acceptable at full quality.
        default = 1
    else:
        default = 0
    match_main = {}
    match_sub = {}
    for (main, sub, q) in accept_types:
        if main == :
            # */* wildcard raises the default quality.
            default = max(default, q)
            continue
        elif sub == :
            # main/* wildcard.
            match_main[main] = max(match_main.get(main, 0), q)
        else:
            match_sub[(main, sub)] = max(match_sub.get((main, sub), 0), q)
    accepted_list = []
    order_maintainer = 0
    for mtype in have_types:
        main, sub = mtype.split()
        if (main, sub) in match_sub:
            q = match_sub[(main, sub)]
        else:
            q = match_main.get(main, default)
        if q:
            # (1 - q) sorts by descending quality; order_maintainer keeps
            # the original ordering stable among equal qualities.
            accepted_list.append((1 - q, order_maintainer, q, mtype))
            order_maintainer += 1
    accepted_list.sort()
    return [(mtype, q) for (_, _, q, mtype) in accepted_list] | Given the result of parsing an Accept: header, and the
available MIME types, return the acceptable types with their
quality markdowns.
For example:
>>> acceptable = parseAcceptHeader('text/html, text/plain; q=0.5')
>>> matchTypes(acceptable, ['text/plain', 'text/html', 'image/jpeg'])
[('text/html', 1.0), ('text/plain', 0.5)]
Type signature: ([(str, str, float)], [str]) -> [(str, float)] |
def _respond(self, channel, text):
    """Respond to a message on the current socket.

    Formats the message for the channel and sends it if formatting
    produced a result.

    Args:
        channel (:py:class:`str`): The channel to send to.
        text (:py:class:`str`): The message text to send.
    """
    result = self._format_message(channel, text)
    if result is not None:
        # NOTE(review): the log format string was stripped from this extract.
        logger.info(
            ,
            truncate(result, max_len=50),
        )
        self.socket.send_str(result) | Respond to a message on the current socket.
Args:
channel (:py:class:`str`): The channel to send to.
text (:py:class:`str`): The message text to send. |
def set_or_clear_breakpoint(self):
    """Set/Clear breakpoint in the current editor stack."""
    stack = self.get_current_editorstack()
    if stack is None:
        # No editor stack available: nothing to toggle.
        return
    self.switch_to_plugin()
    stack.set_or_clear_breakpoint()
def start(client, container, interactive=True, stdout=None, stderr=None, stdin=None, logs=None):
    """Present the PTY of the container inside the current process.

    Thin convenience wrapper: builds a RunOperation for the container and
    hands it to PseudoTerminal(client, operation).start().
    """
    operation = RunOperation(
        client,
        container,
        interactive=interactive,
        stdout=stdout,
        stderr=stderr,
        stdin=stdin,
        logs=logs,
    )
    terminal = PseudoTerminal(client, operation)
    terminal.start()
This is just a wrapper for PseudoTerminal(client, container).start() |
def ensure_directory(path):
    """Ensure that the parent directory of ``path`` exists (creating it,
    and any intermediate directories, if necessary)."""
    parent = os.path.dirname(path)
    # exist_ok avoids racing with a concurrent creator.
    py31compat.makedirs(parent, exist_ok=True)
def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
    """Look for a redundant trailing semicolon after a block.

    Args:
        filename: The name of the current file.
        clean_lines: A CleansedLines instance containing the file.
        linenum: The number of the line to check.
        error: The function to call with any errors found.

    NOTE(review): every regex literal in this block was stripped from this
    extract (each ``r`` below stood for a raw-string pattern), and the body
    of the final ``if not match:`` is truncated -- confirm all patterns and
    the reporting logic against the original source (cpplint).
    """
    line = clean_lines.elided[linenum]
    match = Match(r, line)
    if match:
        closing_brace_pos = match.group(1).rfind()
        # Walk back to the expression that opens the matching parenthesis.
        opening_parenthesis = ReverseCloseExpression(
            clean_lines, linenum, closing_brace_pos)
        if opening_parenthesis[2] > -1:
            line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
            macro = Search(r, line_prefix)
            func = Match(r, line_prefix)
            # Whitelist macros / function-like contexts where a trailing
            # semicolon after the brace is legitimate.
            if ((macro and
                 macro.group(1) not in (
                     , , , , ,
                     , ,
                     , )) or
                    (func and not Search(r, func.group(1))) or
                    Search(r, line_prefix) or
                    Search(r, line_prefix) or
                    Search(r, line_prefix)):
                match = None
            if (match and
                    opening_parenthesis[1] > 1 and
                    Search(r, clean_lines.elided[opening_parenthesis[1] - 1])):
                match = None
        else:
            match = Match(r, line)
    if not match:
 | Looks for redundant trailing semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found. |
def del_tag(
        self,
        tag):
    """*Delete a tag from this taskpaper object.*

    **Key Arguments:**
        - ``tag`` -- the tag to delete from the object (with or without
          the leading ``@``)

    **Usage:**

    .. code-block:: python

        aTask.del_tag("@due")
    """
    # Normalize the tag: stored tags carry no leading '@'.
    bareTag = tag.replace("@", "")
    if bareTag not in self.tags:
        return
    self.refresh
    oldContent = self.to_string(indentLevel=1)
    # BUG FIX: the original filtered a freshly-created empty list, so every
    # call wiped *all* tags.  Filter the object's current tags instead,
    # dropping only those containing the requested tag.
    newTags = [n for n in self.tags if bareTag not in n]
    self.tags = newTags
    newContent = self.to_string(indentLevel=1)
    self.parent._update_document_tree(
        oldContent=oldContent,
        newContent=newContent
    )
    self.refresh
    return None
**Key Arguments:**
- ``tag`` -- the tag to delete to the object
**Usage:**
.. code-block:: python
aTask.del_tag("@due") |
def insert_pattern(self, pattern, index):
    """Insert the given pattern into the Model at ``index``.

    Any existing occurrence of the pattern is removed first, then a
    PatternNode is inserted under the root node with the proper Qt
    begin/end row bookkeeping, and ``pattern_inserted`` is emitted.

    :param pattern: Pattern (unicode).
    :param index: Insertion index (int).
    :return: Method success (bool).
    """
    # NOTE(review): the debug format string is partially stripped here.
    LOGGER.debug("> Inserting at index.".format(pattern, index))
    # Avoid duplicates: drop any existing instance of this pattern first.
    self.remove_pattern(pattern)
    self.beginInsertRows(self.get_node_index(self.root_node), index, index)
    pattern_node = PatternNode(name=pattern)
    self.root_node.insert_child(pattern_node, index)
    self.endInsertRows()
    self.pattern_inserted.emit(pattern_node)
    return True | Inserts given pattern into the Model.
:param pattern: Pattern.
:type pattern: unicode
:param index: Insertion index.
:type index: int
:return: Method success.
:rtype: bool |
def temp_file_context(raw_dump_path, logger=None):
    """Context manager yielding ``raw_dump_path`` and conditionally
    deleting it on exit.

    The file is unlinked only when its pathname marks it as a temp file
    (per the docstring, by containing the word 'TEMPORARY'); deletion
    failures are logged as warnings, never raised.

    NOTE(review): the membership literal and the warning format string
    were stripped from this extract; also the @contextmanager decorator is
    not visible here -- confirm against the original source.
    """
    try:
        yield raw_dump_path
    finally:
        if in raw_dump_path:
            try:
                os.unlink(raw_dump_path)
            except OSError:
                # Best effort: log the failure instead of raising.
                if logger is None:
                    logger = FakeLogger()
                logger.warning(
                    ,
                    raw_dump_path,
                    exc_info=True
                ) | this contextmanager implements conditionally deleting a pathname
at the end of a context if the pathname indicates that it is a temp
file by having the word 'TEMPORARY' embedded in it. |
def RobotFactory(path, parent=None):
    """Return an instance of SuiteFolder, SuiteFile, or ResourceFile.

    Folders become SuiteFolder; files become SuiteFile when they contain a
    testcase table, otherwise ResourceFile (decided by rebinding the
    RobotFile instance's __class__).

    NOTE(review): lines two through five of this extract are leaked
    docstring text, not code; confirm against the original source.
    """
    s a file or
    folder, and if a file, the contents of the file. If there is a
    testcase table, this will return an instance of SuiteFile,
    otherwise it will return an instance of ResourceFile.
    '
    if os.path.isdir(path):
        return SuiteFolder(path, parent)
    else:
        rf = RobotFile(path, parent)
        for table in rf.tables:
            if isinstance(table, TestcaseTable):
                # Has test cases: treat as a suite file.
                rf.__class__ = SuiteFile
                return rf
        rf.__class__ = ResourceFile
        return rf | Return an instance of SuiteFile, ResourceFile, SuiteFolder
Exactly which is returned depends on whether it's a file or
folder, and if a file, the contents of the file. If there is a
testcase table, this will return an instance of SuiteFile,
otherwise it will return an instance of ResourceFile. |
def get_singleton(cls, annotators=None, **options):
    """Get or create a corenlp parser with the given annotators and options.

    Note: multiple parsers with the same annotators but different options
    are not supported -- options only apply on first construction.
    """
    # Normalize to a hashable cache key (tuple, or None).
    key = tuple(annotators) if annotators is not None else None
    try:
        return cls._singletons[key]
    except KeyError:
        instance = cls(key, **options)
        cls._singletons[key] = instance
        return instance
Note: multiple parsers with the same annotator and different options
are not supported. |
def set_code_exprs(self, codes):
    """Convenience: set all the code expressions at once.

    Resets ``code_objs`` and ``_codes`` and feeds each expression through
    ``append_code_expr``, which does the per-expression work.
    """
    self.code_objs = {}
    self._codes = []
    for expression in codes:
        self.append_code_expr(expression)
def _query_entities(self, table_name, filter=None, select=None, max_results=None,
                    marker=None, accept=TablePayloadFormat.JSON_MINIMAL_METADATA,
                    property_resolver=None, timeout=None, _context=None):
    """Issue a single list request for entities in the given table.

    Used internally by the public query_entities method; builds a GET
    request carrying the filter/select/pagination parameters and parses
    the JSON response into a list of entities (with a next_marker when the
    listing is incomplete).

    :param str table_name: the table to query.
    :param str filter: $filter expression (max 15 discrete comparisons).
    :param str select: properties to return.
    :param int max_results: maximum number of entities to return.
    :param obj marker: continuation token from a previous call.
    :param str accept: accepted content type of the response payload.
    :param property_resolver: func(pk, rk, prop_name, prop_value,
        service_edm_type) -> EdmType, used mainly with JSON_NO_METADATA.
    :param int timeout: server timeout in seconds.
    :return: a list of entities, potentially with a next_marker property.

    NOTE(review): the validation labels, path suffix, header name, and
    query-parameter key literals were stripped from this extract; confirm
    them against the original source.
    """
    _validate_not_none(, table_name)
    _validate_not_none(, accept)
    # Continuation markers carry separate partition/row key components.
    next_partition_key = None if marker is None else marker.get()
    next_row_key = None if marker is None else marker.get()
    request = HTTPRequest()
    request.method =
    # Listing may be served from the secondary endpoint.
    request.host_locations = self._get_host_locations(secondary=True)
    request.path = + _to_str(table_name) +
    request.headers = {: _to_str(accept)}
    request.query = {
        : _to_str(filter),
        : _to_str(select),
        : _int_to_str(max_results),
        : _to_str(next_partition_key),
        : _to_str(next_row_key),
        : _int_to_str(timeout),
    }
    return self._perform_request(request, _convert_json_response_to_entities,
                                 [property_resolver, self.require_encryption,
                                  self.key_encryption_key, self.key_resolver_function],
                                 operation_context=_context) | Returns a list of entities under the specified table. Makes a single list
request to the service. Used internally by the query_entities method.
:param str table_name:
The name of the table to query.
:param str filter:
Returns only entities that satisfy the specified filter. Note that
no more than 15 discrete comparisons are permitted within a $filter
string. See http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx
for more information on constructing filters.
:param str select:
Returns only the desired properties of an entity from the set.
:param int max_results:
The maximum number of entities to return.
:param obj marker:
A dictionary which identifies the portion of the query to be
returned with the next query operation. The operation returns a
next_marker element within the response body if the list returned
was not complete. This value may then be used as a query parameter
in a subsequent call to request the next portion of the list of
table. The marker value is opaque to the client.
:param str accept:
Specifies the accepted content type of the response payload. See
:class:`~azure.storage.table.models.TablePayloadFormat` for possible
values.
:param property_resolver:
A function which given the partition key, row key, property name,
property value, and the property EdmType if returned by the service,
returns the EdmType of the property. Generally used if accept is set
to JSON_NO_METADATA.
:type property_resolver: func(pk, rk, prop_name, prop_value, service_edm_type)
:param int timeout:
The server timeout, expressed in seconds.
:return: A list of entities, potentially with a next_marker property.
:rtype: list(:class:`~azure.storage.table.models.Entity`) |
def do_trace(self, arg):
    """t / trace - single-step (trace) at the current assembly instruction.

    :param arg: must be empty; the command takes no arguments.
    :raises CmdError: if arguments were given or no current thread is set.
    :return: True to signal the debugger loop to resume execution.
    """
    if arg:
        raise CmdError("too many arguments")
    event = self.lastEvent
    if event is None:
        raise CmdError("no current thread set")
    # Set the trap flag on the current thread so it breaks after one instruction.
    event.get_thread().set_tf()
    return True
def parseExternalSubset(self, ExternalID, SystemID):
    """Parse markup declarations from an external subset.

    [30] extSubset ::= textDecl? extSubsetDecl
    [31] extSubsetDecl ::= (markupdecl | conditionalSect | PEReference | S)*
    """
    # Delegate to the C-level libxml2 binding on the underlying parser context.
    libxml2mod.xmlParseExternalSubset(self._o, ExternalID, SystemID)
def read(self, size=None):
    """Read up to `size` bytes, draining the internal buffer first.

    With no `size`, return everything left in the buffer followed by the
    remainder of the underlying open file.
    """
    if size is None:
        return self.buf.read() + self.open_file.read()
    data = self.buf.read(size)
    shortfall = size - len(data)
    if shortfall > 0:
        # Buffer ran dry; top up from the underlying file.
        data += self.open_file.read(shortfall)
    return data
def _preSynapticTRNCells(self, i, j):
xmin = max(i - 1, 0)
xmax = min(i + 2, self.trnWidth)
ymin = max(j - 1, 0)
ymax = min(j + 2, self.trnHeight)
trnCells = [
(x, y) for x in range(xmin, xmax) for y in range(ymin, ymax)
]
return trnCells | Given a relay cell at the given coordinate, return a list of the (x,y)
coordinates of all TRN cells that project to it. This assumes a 3X3 fan-in.
:param i, j: relay cell Coordinates
:return: |
def open(self, url):
    """Open a document at the specified URL.

    The document URL need not contain a protocol identifier, and if it
    does, that protocol identifier is ignored when looking up the store
    content.

    Missing documents referenced using the internal 'suds' protocol are
    reported by raising an exception. For other protocols, None is
    returned instead.

    @param url: A document URL.
    @type url: str
    @return: Document content or None if not found.
    @rtype: bytes
    """
    protocol, location = self.__split(url)
    content = self.__find(location)
    # NOTE(review): the 'suds' literal and message were reconstructed from
    # the docstring (stripped in the source) -- confirm against upstream.
    if protocol == 'suds' and content is None:
        raise Exception('location "%s" not in document store' % location)
    return content
def _expand(dat, counts, start, end):
for pos in range(start, end):
for s in counts:
dat[s][pos] += counts[s]
return dat | expand the same counts from start to end |
def _abort_all_transfers(self, exception):
    """Abort any ongoing transfers and clear all buffers.

    Every outstanding transfer is failed with `exception`. If the failure
    is a transfer error, the probe may still have responses queued, so one
    read is drained per command that was pending before the reset.
    """
    pending_reads = len(self._commands_to_read)
    # Propagate the error to everyone waiting on an outstanding transfer.
    for xfer in self._transfer_list:
        xfer.add_error(exception)
    # Drop all queued commands/transfers and start from a clean slate.
    self._init_deferred_buffers()
    if isinstance(exception, DAPAccessIntf.TransferError):
        for _ in range(pending_reads):
            self._interface.read()
def build_ast(expression, debug=False):
    """Build an AST from an Excel formula expression in reverse polish notation.

    :param expression: iterable of parsed formula tokens (OperatorNode,
        FunctionNode or operand nodes) in RPN order.
    :param debug: kept for interface compatibility; not used here.
    :return: tuple (G, root) -- the DiGraph of argument edges and the root node.
    :raises Exception: if a function node finds too few arguments on the stack.
    """
    G = DiGraph()
    stack = []
    for n in expression:
        if isinstance(n, OperatorNode):
            if n.ttype == "operator-infix":
                arg2 = stack.pop()
                arg1 = stack.pop()
                # A range operator (':') whose right operand lacks a sheet
                # prefix inherits the sheet from the left operand, so
                # Sheet1!A1:B2 links Sheet1!A1 with Sheet1!B2.
                if n.tvalue == ':':
                    if ('!' in arg1.tvalue and arg2.ttype == 'operand'
                            and '!' not in arg2.tvalue):
                        arg2.tvalue = arg1.tvalue.split('!')[0] + '!' + arg2.tvalue
                G.add_node(arg1, pos=1)
                G.add_node(arg2, pos=2)
                G.add_edge(arg1, n)
                G.add_edge(arg2, n)
            else:
                # Prefix/postfix operators take a single argument.
                arg1 = stack.pop()
                G.add_node(arg1, pos=1)
                G.add_edge(arg1, n)
        elif isinstance(n, FunctionNode):
            args = []
            for _ in range(n.num_args):
                try:
                    args.append(stack.pop())
                except IndexError:
                    # Was a bare `except` raising an empty Exception; narrowed
                    # and given a diagnostic message.
                    raise Exception(
                        "too few arguments on stack for function %s" % n.tvalue)
            # Stack pops arguments in reverse order; restore call order.
            args.reverse()
            for i, a in enumerate(args):
                G.add_node(a, pos=i)
                G.add_edge(a, n)
        else:
            # Plain operand: just register it; edges are added by consumers.
            G.add_node(n, pos=0)
        stack.append(n)
    return G, stack.pop()
def translate_url(context, language):
    """Translates the current URL for the given language code, eg:

        {% translate_url de %}

    Falls back through: reversing the resolved view function, then the
    (possibly namespaced) URL name, then the admin-prefixed URL name.
    Returns "" when no request is available in the template context.
    """
    try:
        request = context["request"]
    except KeyError:
        return ""
    view = resolve(request.path)
    current_language = translation.get_language()
    # Temporarily switch languages so reverse() emits the translated URL.
    translation.activate(language)
    try:
        url = reverse(view.func, args=view.args, kwargs=view.kwargs)
    except NoReverseMatch:
        try:
            url_name = (view.url_name if not view.namespace
                        else "%s:%s" % (view.namespace, view.url_name))
            url = reverse(url_name, args=view.args, kwargs=view.kwargs)
        except NoReverseMatch:
            url_name = "admin:" + view.url_name
            url = reverse(url_name, args=view.args, kwargs=view.kwargs)
    translation.activate(current_language)
    # Preserve the original query string on the translated URL.
    query_string = request.META["QUERY_STRING"]
    if query_string:
        url += "?" + query_string
    return url
def commit(self):
    """Makes a ``ReadModifyWriteRow`` API request.

    Commits modifications made by ``append_cell_value`` and
    ``increment_cell_value`` atomically, then resets the local rule list.
    If no modifications were made, makes no API request and returns ``{}``.

    :rtype: dict
    :returns: The new contents of all modified cells, as a dictionary of
        column families, each holding a dictionary of columns, each column
        a list of (value, timestamp) cell tuples.
    :raises: :class:`ValueError <exceptions.ValueError>` if the number of
        mutations exceeds :data:`MAX_MUTATIONS`.
    """
    pending = len(self._rule_pb_list)
    if not pending:
        # Nothing accumulated -- skip the round trip entirely.
        return {}
    if pending > MAX_MUTATIONS:
        raise ValueError(
            "%d total append mutations exceed the maximum "
            "allowable %d." % (pending, MAX_MUTATIONS)
        )
    data_client = self._table._instance._client.table_data_client
    response = data_client.read_modify_write_row(
        table_name=self._table.name,
        row_key=self._row_key,
        rules=self._rule_pb_list,
    )
    # Reset accumulated mutations only after a successful request.
    self.clear()
    return _parse_rmw_row_response(response)
def most_recent_submission(project, group):
    """Return the most recent submission for the given group and project."""
    submissions = Submission.query_by(project=project, group=group)
    newest_first = submissions.order_by(Submission.created_at.desc())
    return newest_first.first()
def bkg_subtract(self, analyte, bkg, ind=None, focus_stage='despiked'):
    """Subtract provided background from signal (focus stage).

    Result is saved in the new 'bkgsub' focus stage.

    Parameters
    ----------
    analyte : str
        Analyte whose focus-stage data the background is subtracted from.
    bkg : array_like
        Background values to subtract (broadcast against the data).
    ind : array_like or None
        Optional index; matching samples are set to NaN after subtraction.
    focus_stage : str
        Key of the focus stage to read from.
        NOTE(review): default reconstructed as 'despiked' (stripped in
        source) -- confirm against upstream.

    Returns
    -------
    None
    """
    if 'bkgsub' not in self.data:
        self.data['bkgsub'] = Bunch()
    self.data['bkgsub'][analyte] = self.data[focus_stage][analyte] - bkg
    if ind is not None:
        # Mask out excluded samples in the subtracted trace.
        self.data['bkgsub'][analyte][ind] = np.nan
    return
def sink_update(
    self, project, sink_name, filter_, destination, unique_writer_identity=False
):
    """API call: update a sink resource.

    See
    https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/update

    :type project: str
    :param project: ID of the project containing the sink.
    :type sink_name: str
    :param sink_name: the name of the sink.
    :type filter_: str
    :param filter_: advanced logs filter expression defining the entries
        exported by the sink.
    :type destination: str
    :param destination: destination URI for the entries exported by the sink.
    :type unique_writer_identity: bool
    :param unique_writer_identity: (Optional) determines the kind of IAM
        identity returned as writer_identity in the new sink.
    :rtype: dict
    :returns: The returned (updated) resource.
    """
    path = "/projects/%s/sinks/%s" % (project, sink_name)
    payload = {
        "name": sink_name,
        "filter": filter_,
        "destination": destination,
    }
    params = {"uniqueWriterIdentity": unique_writer_identity}
    return self.api_request(
        method="PUT", path=path, query_params=params, data=payload
    )
def parse_authentication_request(self, request_body, http_headers=None):
    """Parses and verifies an authentication request.

    :param request_body: urlencoded authentication request
    :param http_headers: http headers (accepted for interface
        compatibility; not used by this implementation)
    :return: the deserialized, validated AuthorizationRequest
    :raises: whatever the configured request validators raise on
        invalid requests
    """
    auth_req = AuthorizationRequest().deserialize(request_body)
    # Each validator raises on failure; all must pass for the request
    # to be accepted.
    for validator in self.authentication_request_validators:
        validator(auth_req)
    # NOTE(review): log format string was stripped in the source;
    # reconstructed -- confirm against upstream.
    logger.debug('parsed authentication_request: %s', auth_req)
    return auth_req
def walk_dir(path, args, state):
    """Check all files in `path` to see if there are any requests that
    we should send out on the bus.

    Returns False as soon as processing fails or `state` asks to quit;
    True once the whole tree has been walked.
    """
    if args.debug:
        sys.stderr.write("Walking %s\n" % path)
    for root, _dirs, files in os.walk(path):
        processed_ok = safe_process_files(root, files, args, state)
        # Short-circuit preserves the original order: should_quit() is only
        # consulted after a successful processing pass.
        if not processed_ok or state.should_quit():
            return False
    return True
def train_set_producer(socket, train_archive, patch_archive, wnid_map):
    """Load/send images from the training set TAR file or patch images.

    Parameters
    ----------
    socket : :class:`zmq.Socket`
        PUSH socket on which to send loaded images.
    train_archive : str or file-like object
        Filename or file handle for the TAR archive of training images.
    patch_archive : str or file-like object
        Filename or file handle for the TAR archive of patch images.
    wnid_map : dict
        A dictionary that maps WordNet IDs to 0-based class indices.
        Used to decode the filenames of the inner TAR files.

    Raises
    ------
    ValueError
        If not every patch image in the archive was consumed.
    """
    # NOTE(review): the 'train' split argument, the '.' split separator and
    # the ValueError message were stripped in the source; reconstructed --
    # confirm against upstream.
    patch_images = extract_patch_images(patch_archive, 'train')
    num_patched = 0
    with tar_open(train_archive) as tar:
        for inner_tar_info in tar:
            with tar_open(tar.extractfile(inner_tar_info.name)) as inner:
                # Inner archives are named like '<wnid>.tar'.
                wnid = inner_tar_info.name.split('.')[0]
                class_index = wnid_map[wnid]
                filenames = sorted(info.name for info in inner
                                   if info.isfile())
                images_gen = (load_from_tar_or_patch(inner, filename,
                                                     patch_images)
                              for filename in filenames)
                pathless_filenames = (os.path.split(fn)[-1]
                                      for fn in filenames)
                stream = equizip(pathless_filenames, images_gen)
                for image_fn, (image_data, patched) in stream:
                    if patched:
                        num_patched += 1
                    # Two-part message: (filename, label) header then raw bytes.
                    socket.send_pyobj((image_fn, class_index), zmq.SNDMORE)
                    socket.send(image_data)
    if num_patched != len(patch_images):
        raise ValueError('not all patch images were used')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.