code stringlengths 26 79.6k | docstring stringlengths 1 46.9k |
|---|---|
def wait(self, **kwargs):
return self.client.api.wait(self.id, **kwargs) | Block until the container stops, then return its exit code. Similar to
the ``docker wait`` command.
Args:
timeout (int): Request timeout
condition (str): Wait until a container state reaches the given
condition, either ``not-running`` (default), ``next-exit``,
... |
def search(lines, pattern):
p = pattern.replace("*", ".*")
test = re.compile(p)
result = []
for l in lines:
if test.search(l):
result.append(l)
return result | return all lines that match the pattern
#TODO: we need an example
:param lines:
:param pattern:
:return: |
def register(adapter):
if adapter.model and adapter.model not in adapter_catalog:
adapter_catalog[adapter.model] = adapter
post_save.connect(reindex_model_on_save, sender=adapter.model)
post_delete.connect(unindex_model_on_delete, sender=adapter.model)
return adapter | Register a search adapter |
def rec_new(self, val):
if val not in self.things:
for child in val.children():
self.rec_new(child)
self.new(val)
return val | Recursively add a new value and its children to me.
Args:
val (LispVal): The value to be added.
Returns:
LispVal: The added value. |
def write_results(self, filename):
with self.io(filename, ) as fp:
fp.write_samples(self.samples, self.model.variable_params,
last_iteration=self.niterations)
fp.write_samples(self.model_stats,
l... | Writes samples, model stats, acceptance fraction, and random state
to the given file.
Parameters
-----------
filename : str
The file to write to. The file is opened using the ``io`` class
in an an append state. |
def _wrap_users(users, request):
result = set()
for u in users:
if u is SELF and is_authenticated(request):
result.add(request.user.get_username())
else:
result.add(u)
return result | Returns a list with the given list of users and/or the currently logged in user, if the list
contains the magic item SELF. |
def restore(self, filename):
matfile = loadmat(filename)
if matfile[] == 1:
matfile[] = matfile[][0, :]
self.elapsed_time = matfile[][0, 0]
self.solution = matfile[]
return self | Restore object from mat-file. TODO: determine format specification |
def _compute_H(self, t, index, t2, index2, update_derivatives=False, stationary=False):
if stationary:
raise NotImplementedError, "Error, stationary version of this covariance not yet implemented."
Decay = self.decay[index]
Decay2 = self.decay[index2]
t_mat... | Helper function for computing part of the ode1 covariance function.
:param t: first time input.
:type t: array
:param index: Indices of first output.
:type index: array of int
:param t2: second time input.
:type t2: array
:param index2: Indices of second output.
... |
def load_geo_adwords(filename=):
df = pd.read_csv(filename, header=0, index_col=0, low_memory=False)
df.columns = [c.replace(, ).lower() for c in df.columns]
canonical = pd.DataFrame([list(row) for row in df.canonical_name.str.split().values])
def cleaner(row):
cleaned = pd.np.array(
... | WARN: Not a good source of city names. This table has many errors, even after cleaning |
def buildIcon(icon):
if icon is None:
return QIcon()
if type(icon) == buffer:
try:
icon = QIcon(projexui.generatePixmap(icon))
except:
icon = QIcon()
else:
try:
icon = QI... | Builds an icon from the inputed information.
:param icon | <variant> |
def update(self, *data, **kwargs) -> :
logger.debug(f)
model_cls = repo_factory.get_model(self.__class__)
repository = repo_factory.get_repository(self.__class__)
try:
self._update_data(*data, **kwargs)
self._validate... | Update a Record in the repository.
Also performs unique validations before creating the entity.
Supports both dictionary and keyword argument updates to the entity::
dog.update({'age': 10})
dog.update(age=10)
:param data: Dictionary of values to be updated for the en... |
def _recursive_bezier(self, x1, y1, x2, y2, x3, y3, attr, row, level=0):
m_approximation_scale = 10.0
m_distance_tolerance = (0.5 / m_approximation_scale) ** 2
m_angle_tolerance = 1 * 2*math.pi/360
curve_angle_tolerance_epsilon = 0.01
curve_recursion_limit = 32
... | from http://www.antigrain.com/research/adaptive_bezier/ |
def run_validators(self, value):
value = self.to_python(value)
value = self.value_to_string(value)
return super(RegexField, self).run_validators(value) | Make sure value is a string so it can run through django validators |
def _accept_reflected_fn(simplex,
objective_values,
worst_index,
reflected,
objective_at_reflected):
def _replace_worst_with_reflected():
next_simplex = _replace_at_index(simplex, worst_index, reflected)
nex... | Creates the condition function pair for a reflection to be accepted. |
def are_equal_or_superset(superset_tree, base_tree):
try:
_compare_attr(superset_tree, base_tree)
_compare_text(superset_tree, base_tree)
except CompareError as e:
logger.debug(str(e))
return False
return True | Return True if ``superset_tree`` is equal to or a superset of ``base_tree``
- Checks that all elements and attributes in ``superset_tree`` are present and
contain the same values as in ``base_tree``. For elements, also checks that the
order is the same.
- Can be used for checking if one XML documen... |
def download(self, download_key, raise_exception_on_failure=False):
query = {"output": "json", "user_credentials": self.api_key}
resp = requests.get(
"%sdownload/%s" % (self._url, download_key),
params=query,
timeout=self._timeout,
)
if raise... | Download the file represented by the download_key. |
def _getTempFile(self, jobStoreID=None):
if jobStoreID != None:
self._checkJobStoreId(jobStoreID)
return tempfile.mkstemp(suffix=".tmp",
dir=os.path.join(self._getAbsPath(jobStoreID), "g"))
else:
retur... | :rtype : file-descriptor, string, string is the absolute path to a temporary file within
the given job's (referenced by jobStoreID's) temporary file directory. The file-descriptor
is integer pointing to open operating system file handle. Should be closed using os.close()
after writing some mater... |
def parse_blob_snapshot_parameter(url):
if blob_is_snapshot(url):
tmp = url.split()
if len(tmp) == 2:
return tmp[0], tmp[1]
return None | Retrieves the blob snapshot parameter from a url
:param url str: blob url
:rtype: str
:return: snapshot parameter |
def show_filetypes(extensions):
for item in extensions.items():
val = item[1]
if type(item[1]) == list:
val = ", ".join(str(x) for x in item[1])
print("{0:4}: {1}".format(val, item[0])) | function to show valid file extensions |
def get_pickling_errors(obj, seen=None):
if seen == None:
seen = []
if hasattr(obj, "__getstate__"):
state = obj.__getstate__()
else:
return None
if state == None:
return
if isinstance(state,tuple):
if not isinstance(st... | Investigate pickling errors. |
def destroyTempDir(self, tempDir):
assert os.path.isdir(tempDir)
assert os.path.commonprefix((self.rootDir, tempDir)) == self.rootDir
self.tempFilesDestroyed += 1
try:
os.rmdir(tempDir)
except OSError:
shutil.rmtree(tem... | Removes a temporary directory in the temp file dir, checking its in the temp file tree.
The dir will be removed regardless of if it is empty. |
def difference(self, instrument1, instrument2, bounds, data_labels,
cost_function):
labels = [dl1 for dl1, dl2 in data_labels] + [+b[0] for b in bounds] + [+b[1] for b in bounds] + []
data = {label: [] for label in labels}
inst1 = instrume... | Calculates the difference in signals from multiple
instruments within the given bounds.
Parameters
----------
instrument1 : Instrument
Information must already be loaded into the
instrument.
instrument2 : Instrument
Information must already b... |
def chunks(items, chunksize):
items = iter(items)
for first in items:
chunk = chain((first,), islice(items, chunksize - 1))
yield chunk
deque(chunk, 0) | Turn generator sequence into sequence of chunks. |
def add(self, item):
if not item.startswith(self.prefix):
item = os.path.join(self.base, item)
self.files.add(os.path.normpath(item)) | Add a file to the manifest.
:param item: The pathname to add. This can be relative to the base. |
def save(self, path):
with open(path, ) as f:
f.write(self.contents()) | Save svg as file(.svg)
Args:
path (str): destination to save file |
def request(self, send_terminator = False):
self.m_a_crc = False
start_context = self.getContext()
self.setContext("request[v3A]")
try:
self.m_serial_port.write("2f3f".decode("hex") +
self.m_meter_address +
... | Required request() override for v3 and standard method to read meter.
Args:
send_terminator (bool): Send termination string at end of read.
Returns:
bool: CRC request flag result from most recent read |
def get_user(self, login):
return youtrack.User(self._get("/admin/user/" + urlquote(login.encode())), self) | http://confluence.jetbrains.net/display/YTD2/GET+user |
def iterator(self):
for item in self.query.results():
obj = self.resource(**item)
yield obj | An iterator over the results from applying this QuerySet to the api. |
def load_febrl4(return_links=False):
df_a = _febrl_load_data()
df_b = _febrl_load_data()
if return_links:
links = pandas.MultiIndex.from_arrays([
["rec-{}-org".format(i) for i in range(0, 5000)],
["rec-{}-dup-0".format(i) for i in range(0, 5000)]]
)
ret... | Load the FEBRL 4 datasets.
The Freely Extensible Biomedical Record Linkage (Febrl) package is
distributed with a dataset generator and four datasets generated
with the generator. This function returns the fourth Febrl dataset
as a :class:`pandas.DataFrame`.
*"Generated as one data set with... |
def GT(classical_reg1, classical_reg2, classical_reg3):
classical_reg1, classical_reg2, classical_reg3 = prepare_ternary_operands(classical_reg1,
classical_reg2,
... | Produce an GT instruction.
:param classical_reg1: Memory address to which to store the comparison result.
:param classical_reg2: Left comparison operand.
:param classical_reg3: Right comparison operand.
:return: A ClassicalGreaterThan instance. |
def _domain_event_io_error_cb(conn, domain, srcpath, devalias, action, reason, opaque):
_salt_send_domain_event(opaque, conn, domain, opaque[], {
: srcpath,
: devalias,
: _get_libvirt_enum_string(, action),
: reason
}) | Domain I/O Error events handler |
def func_timeout(timeout, func, args=(), kwargs=None):
just work
if not kwargs:
kwargs = {}
if not args:
args = ()
ret = []
exception = []
isStopped = False
def funcwrap(args2, kwargs2):
try:
ret.append( func(*args2, **kwargs2) )
except Function... | func_timeout - Runs the given function for up to #timeout# seconds.
Raises any exceptions #func# would raise, returns what #func# would return (unless timeout is exceeded), in which case it raises FunctionTimedOut
@param timeout <float> - Maximum number of seconds to run #func# before terminating
... |
def register(self, category):
def decorator(func):
name = func.__name__
key = f
self._metrics[key] = func
return func
return decorator | Usage:
@metrics.register('finance')
def approved_funds(pronac, data):
return metric_from_data_and_pronac_number(data, pronac) |
def handleError(self, test, err):
want_error = self._handle_test_error_or_failure(test, err)
if not want_error and id(test) in self._tests_that_reran:
self._nose_result.addError(test, err)
return want_error or None | Baseclass override. Called when a test raises an exception.
If the test isn't going to be rerun again, then report the error
to the nose test result.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test`
:param err:
... |
def get_voltage_at_bus_bar(grid, tree):
r_mv_grid, x_mv_grid = get_mv_impedance(grid)
r_trafo = sum([tr.r for tr in grid._station._transformers])
x_trafo = sum([tr.x for tr in grid._station._transformers])
cos_phi_load = cfg_ding0.get(, )
cos_phi_feedin = cfg_ding0.get(, )
v_nom = c... | Determine voltage level at bus bar of MV-LV substation
Parameters
----------
grid : LVGridDing0
Ding0 grid object
tree : :networkx:`NetworkX Graph Obj< >`
Tree of grid topology:
Returns
-------
:any:`list`
Voltage at bus bar. First item refers to load case, second i... |
def _visited_callback(self, state, pc, instr):
pc = state.platform.current.PC
with self.locked_context(, dict) as ctx:
ctx[pc] = ctx.get(pc, 0) + 1 | Maintain our own copy of the visited set |
def main( gpu:Param("GPU to run on", str)=None ):
path = Path()
tot_epochs,size,bs,lr = 60,224,256,3e-1
dirname =
gpu = setup_distrib(gpu)
if gpu is None: bs *= torch.cuda.device_count()
n_gpus = num_distrib() or 1
workers = min(12, num_cpus()//n_gpus)
data = get_data(path/dirname... | Distributed training of Imagenet. Fastest speed is if you run with: python -m fastai.launch |
def tables_in_schema(self, schema):
sql =
return [t[0] for t in self.query(sql, (schema,)).fetchall()] | Get a listing of all tables in given schema |
def LoadSNPs(self, snps=[]):
for snp in snps:
bounds = snp.split("-")
if len(bounds) == 1:
if bounds[0] != "":
self.target_rs.append(bounds[0])
else:
raise InvalidBoundarySpec(snp) | Define the SNP inclusions (by RSID). This overrides true boundary \
definition.
:param snps: array of RSIDs
:return: None
This doesn't define RSID ranges, so it throws InvalidBoundarySpec if it
encounters what appears to be a range (SNP contains a "-") |
def _read_mesafile(filename,data_rows=0,only=):
f=open(filename,)
vv=[]
v=[]
lines = []
line =
for i in range(0,6):
line = f.readline()
lines.extend([line])
hval = lines[2].split()
hlist = lines[1].split()
header_attr = {}
for a,b in zip(hlist,hval):
... | private routine that is not directly called by the user |
def to_native(self, value):
if isinstance(value, dict):
return value
elif isinstance(value, six.string_types):
native_value = json.loads(value)
if isinstance(native_value, dict):
return native_value
else:
raise Conv... | Return the value as a dict, raising error if conversion to dict is not possible |
def decompose_once_with_qubits(val: Any,
qubits: Iterable[],
default=RaiseTypeErrorIfNotProvided):
return decompose_once(val, default, qubits=tuple(qubits)) | Decomposes a value into operations on the given qubits.
This method is used when decomposing gates, which don't know which qubits
they are being applied to unless told. It decomposes the gate exactly once,
instead of decomposing it and then continuing to decomposing the decomposed
operations recursivel... |
def is_imap(self, model):
from pgmpy.models import BayesianModel
if not isinstance(model, BayesianModel):
raise TypeError("model must be an instance of BayesianModel")
factors = [cpd.to_factor() for cpd in model.get_cpds()]
factor_prod = six.moves.reduce(mul, factors... | Checks whether the given BayesianModel is Imap of JointProbabilityDistribution
Parameters
-----------
model : An instance of BayesianModel Class, for which you want to
check the Imap
Returns
--------
boolean : True if given bayesian model is Imap for Joint P... |
def distance(a, b):
R = 3963
lat1, lon1 = math.radians(a[0]), math.radians(a[1])
lat2, lon2 = math.radians(b[0]), math.radians(b[1])
return math.acos(math.sin(lat1) * math.sin(lat2) +
math.cos(lat1) * math.cos(lat2) * math.cos(lon1 - lon2)) * R | Calculates distance between two latitude-longitude coordinates. |
def _parse_json(cls, resources, exactly_one=True):
if not len(resources[]):
return None
if exactly_one:
return cls.parse_resource(resources[][0])
else:
return [cls.parse_resource(resource) for resource
in resources[]] | Parse display name, latitude, and longitude from a JSON response. |
def get_next_objective(self):
try:
next_object = next(self)
except StopIteration:
raise IllegalState()
except Exception:
raise OperationFailed()
else:
return next_object | Gets the next Objective in this list.
return: (osid.learning.Objective) - the next Objective in this
list. The has_next() method should be used to test that
a next Objective is available before calling this
method.
raise: IllegalState - no more elements ... |
def refweights(self):
return numpy.full(self.shape, 1./self.shape[0], dtype=float) | A |numpy| |numpy.ndarray| with equal weights for all segment
junctions..
>>> from hydpy.models.hstream import *
>>> parameterstep('1d')
>>> states.qjoints.shape = 5
>>> states.qjoints.refweights
array([ 0.2, 0.2, 0.2, 0.2, 0.2]) |
def load_file(folder_path, idx, corpus):
xml_path = os.path.join(folder_path, .format(idx))
wav_paths = glob.glob(os.path.join(folder_path, .format(idx)))
if len(wav_paths) == 0:
return []
xml_file = open(xml_path, , encoding=)
soup = BeautifulSoup(xml_file... | Load speaker, file, utterance, labels for the file with the given id. |
def response(self, response_data):
if "address" not in response_data:
return None, "address missing from response_data payload"
if "function" not in response_data:
return None, "method missing from response_data payload"
if "params" not in response_data:
... | called by the event handler with the result data
:param response_data: result data
:return: |
def generate_psk(self, security_key):
if not self._psk:
existing_psk_id = self._psk_id
self._psk_id =
self._psk = security_key
self._psk = self.request(Gateway().generate_psk(existing_psk_id))
... | Generate and set a psk from the security key. |
def listTasks(self, opts={}, queryOpts={}):
opts[] = True
data = yield self.call(, opts, queryOpts)
tasks = []
for tdata in data:
task = Task.fromDict(tdata)
task.connection = self
tasks.append(task)
defer.returnValue(tasks) | Get information about all Koji tasks.
Calls "listTasks" XML-RPC.
:param dict opts: Eg. {'state': [task_states.OPEN]}
:param dict queryOpts: Eg. {'order' : 'priority,create_time'}
:returns: deferred that when fired returns a list of Task objects. |
def handle(client, request):
formaters = request.get(, None)
if not formaters:
formaters = [{: }]
logging.debug( + json.dumps(formaters, indent=4))
data = request.get(, None)
if not isinstance(data, str):
return send(client, , None)
max_line_length = None
for formater i... | Handle format request
request struct:
{
'data': 'data_need_format',
'formaters': [
{
'name': 'formater_name',
'config': {} # None or dict
},
... # formaters
]
}
if no f... |
def add_element(self, elt):
if not isinstance(elt, Element):
raise TypeError("argument should be a subclass of Element")
self.elements[elt.get_name()] = elt
return elt | Helper to add a element to the current section. The Element name
will be used as an identifier. |
def sunrise(self, date=None, local=True, use_elevation=True):
if local and self.timezone is None:
raise ValueError("Local time requested but Location has no timezone set.")
if self.astral is None:
self.astral = Astral()
if date is None:
date = date... | Return sunrise time.
Calculates the time in the morning when the sun is a 0.833 degrees
below the horizon. This is to account for refraction.
:param date: The date for which to calculate the sunrise time.
If no date is specified then the current date will be used.
... |
def _writable(method):
@wraps(method)
def wrapper(self, *args, **kwargs):
if self.bucket.locked or self.bucket.deleted:
raise InvalidOperationError()
return method(self, *args, **kwargs)
return wrapper | Check that record is in defined status.
:param method: Method to be decorated.
:returns: Function decorated. |
def type_object_attrgetter(obj, attr, *defargs):
for base in obj.__mro__:
if attr in base.__dict__:
if isinstance(base.__dict__[attr], property):
return base.__dict__[attr]
break
return getattr(obj, attr, *... | This implements an improved attrgetter for type objects (i.e. classes)
that can handle class attributes that are implemented as properties on
a metaclass.
Normally `getattr` on a class with a `property` (say, "foo"), would return
the `property` object itself. However, if the class has a metaclass whic... |
def setCurrentRegItem(self, regItem):
rowIndex = self.model().indexFromItem(regItem)
if not rowIndex.isValid():
logger.warn("Can't select {!r} in table".format(regItem))
self.setCurrentIndex(rowIndex) | Sets the current registry item. |
def recipe_status(self, kitchen, recipe, local_dir=None):
rc = DKReturnCode()
if kitchen is None or isinstance(kitchen, basestring) is False:
rc.set(rc.DK_FAIL, )
return rc
if recipe is None or isinstance(recipe, basestring) is False:
rc.set(rc.DK_FAI... | gets the status of a recipe
:param self: DKCloudAPI
:param kitchen: string
:param recipe: string
:param local_dir: string --
:rtype: dict |
async def add(self, setname, ip, timeout):
if timeout > 0:
to_ban = "{{ {0} timeout {1}s }}".format(ip, timeout)
else:
to_ban = "{{ {0} }}".format(ip)
args = [, , self.table_family, self.table_name, setname, to_ban]
return await self.start(__cl... | Adds the given IP address to the specified set.
If timeout is specified, the IP will stay in the set for the given
duration. Else it will stay in the set during the set default timeout.
timeout must be given in seconds.
The resulting command looks like this:
``nft add element... |
def small_abc_image_recognition():
images = [];
images += IMAGE_SYMBOL_SAMPLES.LIST_IMAGES_SYMBOL_A;
images += IMAGE_SYMBOL_SAMPLES.LIST_IMAGES_SYMBOL_B;
images += IMAGE_SYMBOL_SAMPLES.LIST_IMAGES_SYMBOL_C;
template_recognition_image(images, 250, 25); | !
@brief Trains network using letters 'A', 'B', 'C', and recognize each of them with and without noise. |
def post_refresh_system_metadata(request):
d1_gmn.app.views.assert_db.post_has_mime_parts(
request,
(
(, ),
(, ),
(, ),
),
)
d1_gmn.app.views.assert_db.is_existing_object(request.POST[])
d1_gmn.app.models.sysmeta_refresh_queue(
req... | MNStorage.systemMetadataChanged(session, did, serialVersion,
dateSysMetaLastModified) → boolean. |
def mul(left, right):
from .mv_mul import MvMul
length = max(left, right)
if length == 1:
return Mul(left, right)
return MvMul(left, right) | Distribution multiplication.
Args:
left (Dist, numpy.ndarray) : left hand side.
right (Dist, numpy.ndarray) : right hand side. |
def borrow_readwrite_instance(cls, working_dir, block_number, expected_snapshots={}):
global blockstack_db, blockstack_db_lastblock, blockstack_db_lock
import virtualchain_hooks
db_path = virtualchain.get_db_filename(virtualchain_hooks, working_dir)
blockstack_db_lock... | Get a read/write database handle to the blockstack db.
At most one such handle can exist within the program.
When the caller is done with the handle, it should call release_readwrite_instance()
Returns the handle on success
Returns None if we can't set up the db.
Aborts if ther... |
def parse(self, input):
result = self._parseIso8601(input)
if not result:
result = self._parseSimple(input)
if result is not None:
return result
else:
raise ParameterException("Invalid time delta - could not parse %s" % input) | Parses a time delta from the input.
See :py:class:`TimeDeltaParameter` for details on supported formats. |
def _eval_xpath(self, xpath):
if isinstance(xpath, etree.XPath):
result = xpath(self._dataObject)
else:
result = self._dataObject.xpath(xpath,namespaces=self._namespaces)
return result | Evaluates xpath expressions.
Either string or XPath object. |
def setup_client_rpc(self):
self.clnt = rpc.DfaRpcClient(self._url, constants.DFA_SERVER_QUEUE,
exchange=constants.DFA_EXCHANGE) | Setup RPC client for dfa agent. |
def query_form_data(self):
form_data = {}
slice_id = request.args.get()
if slice_id:
slc = db.session.query(models.Slice).filter_by(id=slice_id).one_or_none()
if slc:
form_data = slc.form_data.copy()
update_time_range(form_data)
... | Get the formdata stored in the database for existing slice.
params: slice_id: integer |
def _parse_conf(conf_file=None, in_mem=False, family=):
if _conf() and not conf_file and not in_mem:
conf_file = _conf(family)
rules =
if conf_file:
with salt.utils.files.fopen(conf_file, ) as ifile:
rules = ifile.read()
elif in_mem:
cmd = . format(_iptables_c... | If a file is not passed in, and the correct one for this OS is not
detected, return False |
def is_isomorphic_to(self, other):
return (isinstance(other, self.__class__)
and
len(self.fields) == len(other.fields)
and
all(a.is_isomorphic_to(b) for a, b in zip(self.fields,
oth... | Returns true if all fields of other struct are isomorphic to this
struct's fields |
def can_add_new_content(self, block, file_info):
return ((self._max_files_per_container == 0 or self._max_files_per_container > len(block.content_file_infos))
and (self.does_content_fit(file_info, block)
or
... | new content from file_info can be added into block iff
- file count limit hasn't been reached for the block
- there is enough space to completely fit the info into the block
- OR the info can be split and some info can fit into the block |
def validate_arg(f,
arg_name,
*validation_func,
**kwargs
):
return decorate_with_validation(f, arg_name, *validation_func, **kwargs) | A decorator to apply function input validation for the given argument name, with the provided base validation
function(s). You may use several such decorators on a given function as long as they are stacked on top of each
other (no external decorator in the middle)
:param arg_name:
:param validation_fu... |
def read_geo(fid, key):
dsid = GEO_NAMES[key.name]
add_epoch = False
if "time" in key.name:
days = fid["/L1C/" + dsid["day"]].value
msecs = fid["/L1C/" + dsid["msec"]].value
data = _form_datetimes(days, msecs)
add_epoch = True
dtype = np.float64
else:
... | Read geolocation and related datasets. |
def last_index_of(self, item):
check_not_none(item, "Value can't be None")
item_data = self._to_data(item)
return self._encode_invoke(list_last_index_of_codec, value=item_data) | Returns the last index of specified items's occurrences in this list. If specified item is not present in this
list, returns -1.
:param item: (object), the specified item to be searched for.
:return: (int), the last index of specified items's occurrences, -1 if item is not present in this list. |
def fix_surrogates(text):
if SURROGATE_RE.search(text):
text = SURROGATE_PAIR_RE.sub(convert_surrogate_pair, text)
text = SURROGATE_RE.sub(, text)
return text | Replace 16-bit surrogate codepoints with the characters they represent
(when properly paired), or with \ufffd otherwise.
>>> high_surrogate = chr(0xd83d)
>>> low_surrogate = chr(0xdca9)
>>> print(fix_surrogates(high_surrogate + low_surrogate))
💩
>>> print(fix_surrogates(low... |
def parse_time(time_input):
if isinstance(time_input, datetime.date):
return time_input.isoformat()
if len(time_input) < 8:
raise ValueError(
.format(time_input))
time = dateutil.parser.parse(time_input)
if len(time_input) <= 10:
return time.date(... | Parse input time/date string into ISO 8601 string
:param time_input: time/date to parse
:type time_input: str or datetime.date or datetime.datetime
:return: parsed string in ISO 8601 format
:rtype: str |
def _render_val_with_prev(self, w, n, current_val, symbol_len):
sl = symbol_len-1
if len(w) > 1:
out = self._revstart
if current_val != self.prior_val:
out += self._x + hex(current_val).rstrip().ljust(sl)[:sl]
elif n == 0:
out ... | Return a string encoding the given value in a waveform.
:param w: The WireVector we are rendering to a waveform
:param n: An integer from 0 to segment_len-1
:param current_val: the value to be rendered
:param symbol_len: and integer for how big to draw the current value
Returns... |
def no_counterpart_found(string, options, rc_so_far):
logger.debug("options.else_action: %s", options.else_action)
if options.else_action == "passthrough":
format_list = [string]
output_fd = sys.stdout
elif options.else_action == "exception":
raise KeyError("No counterpart found... | Takes action determined by options.else_action. Unless told to
raise an exception, this function returns the errno that is supposed
to be returned in this case.
:param string: The lookup string.
:param options: ArgumentParser or equivalent to provide
options.else_action, options.else_errno, op... |
def ensure_workspace(self, target):
gopath = self.get_gopath(target)
for d in (, , ):
safe_mkdir(os.path.join(gopath, d))
required_links = set()
for dep in target.closure():
if not isinstance(dep, GoTarget):
continue
if self.is_remote_lib(dep):
self._symlink_remote... | Ensures that an up-to-date Go workspace exists for the given target.
Creates any necessary symlinks to source files based on the target and its transitive
dependencies, and removes any symlinks which do not correspond to any needed dep. |
def timezone(self, value):
self._timezone = (value if isinstance(value, datetime.tzinfo)
else tz.gettz(value)) | Set the timezone. |
def constraint_from_parent_conflicts(self):
from pipenv.vendor.packaging.specifiers import Specifier
parent_dependencies = set()
has_mismatch = False
can_use_original = True
for p in self.parent_deps:
if p.is_updated:
con... | Given a resolved entry with multiple parent dependencies with different
constraints, searches for the resolution that satisfies all of the parent
constraints.
:return: A new **InstallRequirement** satisfying all parent constraints
:raises: :exc:`~pipenv.exceptions.DependencyConflict` if... |
def get_interface_detail_input_request_type_get_next_request_last_rcvd_interface_interface_name(self, **kwargs):
config = ET.Element("config")
get_interface_detail = ET.Element("get_interface_detail")
config = get_interface_detail
input = ET.SubElement(get_interface_detail, "inp... | Auto Generated Code |
def get_shots(self):
shots = self.response.json()[][0][]
headers = self.response.json()[][0][]
return pd.DataFrame(shots, columns=headers) | Returns the shot chart data as a pandas DataFrame. |
def save_state(state, output_dir, keep=False):
params_file = os.path.join(output_dir, "model.pkl")
with gfile.GFile(params_file, "wb") as f:
pickle.dump((state.params, state.step, state.history), f)
if keep:
params_file = os.path.join(output_dir, "model_{}.pkl".format(state.step))
with gfile.GFile(... | Save State and optionally gin config. |
def get_dataframe(self, tickers,
startDate=None, endDate=None, metric_name=None, frequency=):
valid_columns = [, , , , , , , ,
, , , ]
if metric_name is not None and metric_name not in valid_columns:
raise APIColumnNameError( + str(v... | Return a pandas.DataFrame of historical prices for one or more ticker symbols.
By default, return latest EOD Composite Price for a list of stock tickers.
On average, each feed contains 3 data sources.
Supported tickers + Available Day Ranges are here:
https://apimedia.t... |
def _longestCommonPrefix(seq1, seq2, start1=0, start2=0):
len1 = len(seq1) - start1
len2 = len(seq2) - start2
if len1 < len2:
seq1, seq2 = seq2, seq1
start1, start2 = start2, start1
len1, len2 = len2, len1
if len2 == 0:
return 0
i = 0
pos2 = sta... | Returns the length of the longest common prefix of seq1
starting at offset start1 and seq2 starting at offset start2.
>>> _longestCommonPrefix("abcdef", "abcghj")
3
>>> _longestCommonPrefix("abcghj", "abcdef")
3
>>> _longestCommonPrefix("miss", "")
0
>>> _longestCommonPrefix("", "mr"... |
def compressBWTPoolProcess(tup):
inputFN = tup[0]
startIndex = tup[1]
endIndex = tup[2]
tempFN = tup[3]
whereSol = np.add(startIndex+1, np.where(bwt[startIndex:endIndex-1] != bwt[startIndex+1:endIndex])[0])
deltas = np.zeros(dtype=, shape=(whereSol.shape[0]+1,))
... | During compression, each available process will calculate a subportion of the BWT independently using this
function. This process takes the chunk and rewrites it into a given filename using the technique described
in the compressBWT(...) function header |
def flush_one(self, process_name, ignore_priority=False):
q = self.reprocess_uows[process_name]
self._flush_queue(q, ignore_priority) | method iterates over the reprocessing queue for the given process
and re-submits UOW whose waiting time has expired |
def record(self):
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError()
rec = struct.pack(self.FMT, b * 16,
self.vol_desc_seqnum, self.desc_num,
self.vol_ident, 1, 1, 2, self.max_interchange_level, 1, 1,... | A method to generate the string representing this UDF Primary Volume
Descriptor.
Parameters:
None.
Returns:
A string representing this UDF Primary Volume Descriptor. |
def do_worker(self, arg):
if arg[]:
self.worker_approve(arg[], arg[], arg[], arg[], arg[])
elif arg[]:
self.amt_services_wrapper.worker_reject(arg[], arg[])
elif arg[]:
self.amt_services_wrapper.worker_unreject(arg[], arg[])
elif arg[]:
... | Usage:
worker approve (--all | --hit <hit_id> ... | <assignment_id> ...) [--all-studies] [--force]
worker reject (--hit <hit_id> | <assignment_id> ...)
worker unreject (--hit <hit_id> | <assignment_id> ...)
worker bonus (--amount <amount> | --auto) (--hit <hit_id> | <assignment_... |
def serializeG1(x, compress=True):
    """Convert G1 element *x* into an array of bytes.

    If *compress* is True, the point will be compressed, resulting in a
    much shorter string of bytes.

    :param x: a ``G1Element`` (enforced via ``assertType``)
    :param compress: whether to emit the compressed point encoding
    :return: the serialized bytes produced by ``_serialize``
    """
    assertType(x, G1Element)
    size_fn = librelic.g1_size_bin_abi
    write_fn = librelic.g1_write_bin_abi
    return _serialize(x, compress, size_fn, write_fn)
def parse_quantitationesultsline(self, line):
if line == :
return 0
if line.startswith():
self._end_header = True
self._quantitationresultsheader = [token.strip() for token
in line.split(self.COMMAS)
... | Parses quantitation result lines
Please see samples/GC-MS output.txt
[MS Quantitative Results] section |
def respond_list_directory(self, dir_path, query=None):
del query
try:
dir_contents = os.listdir(dir_path)
except os.error:
self.respond_not_found()
return
if os.path.normpath(dir_path) != self.__config[]:
dir_contents.append()
dir_contents.sort(key=lambda a: a.lower())
displaypath = html.esc... | Respond to the client with an HTML page listing the contents of
the specified directory.
:param str dir_path: The path of the directory to list the contents of. |
def read_list_from_csv(filepath, dict_form=False, headers=None, **kwargs):
    """Read a list of rows in dict or list form from a csv.

    :param filepath: path of the csv file to read
    :param dict_form: when True, return each row as a dict keyed by the
        headers; otherwise return each row as a list
    :param headers: either a row number or a list of row numbers (in case
        of multi-line headers) to be considered as headers (rows start
        counting at 1), or the actual headers defined as a list of
        strings; if not set, all rows are treated as content
    :param kwargs: extra keyword arguments passed through to ``Stream``
    :return: list of rows (dicts when *dict_form* is True, else lists)
    """
    stream = Stream(filepath, headers=headers, **kwargs)
    stream.open()
    try:
        # Ensure the stream is always closed, even when read() raises —
        # the original leaked the open stream on a read failure.
        return stream.read(keyed=dict_form)
    finally:
        stream.close()
def compute(datetimes, to_np=None):
if not isinstance(datetimes, (list, tuple, np.ndarray)):
raise TypeError()
if isinstance(datetimes[0], numbers.Number):
items = len(datetimes)
elif isinstance(datetimes[0], (list, tuple, np.ndarray)):
items = le... | Computes the provided date/time components into CDF epoch value(s).
For CDF_EPOCH:
For computing into CDF_EPOCH value, each date/time elements should
have exactly seven (7) components, as year, month, day, hour, minute,
second and millisecond, in a list. For exam... |
def register_model(self, key, *models, **kwargs):
    """Register a cache group with this manager.

    Convenience for the simple case where all models share the same
    parameters: every positional argument is a model added to a single
    :class:`CacheGroup`, and all keyword arguments are forwarded to the
    group's ``register`` call for each model.

    :param key: identifier for the new cache group
    :param models: models to register in the group
    :param kwargs: options applied uniformly to every model
    """
    group = CacheGroup(key)
    for model in models:
        group.register(model, **kwargs)
    self.register_cache(group)
def save(self, obj):
if not obj.id:
obj.id = uuid()
stored_data = {
: obj.id,
: obj.to_data()
}
index_vals = obj.indexes() or {}
for key in obj.__class__.index_names() or []:
val = index_vals.get(key, )
stored... | Required functionality. |
def close(self) -> None:
    """Close the server and terminate connections with close code 1001.

    This method is idempotent: the closing task is scheduled only on the
    first call; subsequent calls keep the already-created task.
    """
    if self.close_task is not None:
        return
    self.close_task = self.loop.create_task(self._close())
def _run_atexit():
    """Invoke the registered atexit callbacks in LIFO order, then clear them.

    Hook frameworks must invoke this after the main hook body has
    successfully completed. Do not invoke it if the hook fails.
    """
    global _atexit
    # Most-recently registered callbacks run first.
    for callback, args, kwargs in reversed(_atexit):
        callback(*args, **kwargs)
    # Empty the list in place only after every callback has run.
    _atexit.clear()
def dispatch(self, *args, **kwargs):
    """Dispatch the request via the parent view's ``dispatch``.

    Only staff members can access this view — NOTE(review): the access
    control itself is presumably enforced by a decorator applied to this
    method in the original file (not visible here); verify against the
    full source.
    """
    return super(GetAppListJsonView, self).dispatch(*args, **kwargs)
def _calculate_optimal_column_widths(self):
columns = len(self.data[0])
str_labels = [parse_colorama(str(l)) for l in
self.labels]
str_data = [[parse_colorama(str(col)) for col in row] for row in
self.data]
widths = [0] * c... | Calculates widths of columns
:return: Length of longest data in each column (labels and data) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.