code stringlengths 70 11.9k | docstring stringlengths 4 7.08k | text stringlengths 128 15k |
|---|---|---|
def requires_swimlane_version(min_version=None, max_version=None):
if min_version is None and max_version is None:
raise ValueError()
if min_version and max_version and compare_versions(min_version, max_version) < 0:
raise ValueError(.format(min_version, max_version))
def decorator(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
swimlane = self._swimlane
if min_version and compare_versions(min_version, swimlane.build_version, True) < 0:
raise InvalidSwimlaneBuildVersion(swimlane, min_version, max_version)
if max_version and compare_versions(swimlane.build_version, max_version, True) < 0:
raise InvalidSwimlaneBuildVersion(swimlane, min_version, max_version)
return func(self, *args, **kwargs)
return wrapper
return decorator | Decorator for SwimlaneResolver methods verifying Swimlane server build version is within a given inclusive range
Raises:
InvalidVersion: Raised before decorated method call if Swimlane server version is out of provided range
ValueError: If neither min_version or max_version were provided, or if those values conflict (2.15 < 2.14) | ### Input:
Decorator for SwimlaneResolver methods verifying Swimlane server build version is within a given inclusive range
Raises:
InvalidVersion: Raised before decorated method call if Swimlane server version is out of provided range
ValueError: If neither min_version or max_version were provided, or if those values conflict (2.15 < 2.14)
### Response:
def requires_swimlane_version(min_version=None, max_version=None):
if min_version is None and max_version is None:
raise ValueError()
if min_version and max_version and compare_versions(min_version, max_version) < 0:
raise ValueError(.format(min_version, max_version))
def decorator(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
swimlane = self._swimlane
if min_version and compare_versions(min_version, swimlane.build_version, True) < 0:
raise InvalidSwimlaneBuildVersion(swimlane, min_version, max_version)
if max_version and compare_versions(swimlane.build_version, max_version, True) < 0:
raise InvalidSwimlaneBuildVersion(swimlane, min_version, max_version)
return func(self, *args, **kwargs)
return wrapper
return decorator |
def _x10_command(self, house_code, unit_number, state):
log = default_logger
if state.startswith() or state.startswith() or state.startswith():
raise NotImplementedError( % ((house_code, unit_num, state), ))
if unit_number is not None:
house_and_unit = % (house_code, unit_number)
else:
raise NotImplementedError( % ((house_code, unit_number, state), ))
house_and_unit = house_code
house_and_unit = to_bytes(house_and_unit)
state = to_bytes(state)
mochad_cmd = self.default_type + b + house_and_unit + b + state + b
log.debug(, mochad_cmd)
mochad_host, mochad_port = self.device_address
result = netcat(mochad_host, mochad_port, mochad_cmd)
log.debug(, result) | Real implementation | ### Input:
Real implementation
### Response:
def _x10_command(self, house_code, unit_number, state):
log = default_logger
if state.startswith() or state.startswith() or state.startswith():
raise NotImplementedError( % ((house_code, unit_num, state), ))
if unit_number is not None:
house_and_unit = % (house_code, unit_number)
else:
raise NotImplementedError( % ((house_code, unit_number, state), ))
house_and_unit = house_code
house_and_unit = to_bytes(house_and_unit)
state = to_bytes(state)
mochad_cmd = self.default_type + b + house_and_unit + b + state + b
log.debug(, mochad_cmd)
mochad_host, mochad_port = self.device_address
result = netcat(mochad_host, mochad_port, mochad_cmd)
log.debug(, result) |
def datetime_to_http_date(the_datetime):
timeval = calendar.timegm(the_datetime.utctimetuple())
return formatdate(timeval=timeval,
localtime=False,
usegmt=True) | >>> datetime_to_http_date(datetime.datetime(2013, 12, 26, 9, 50, 10))
'Thu, 26 Dec 2013 09:50:10 GMT'
# Verify inverses
>>> x = 'Thu, 26 Dec 2013 09:50:10 GMT'
>>> datetime_to_http_date(http_date_to_datetime(x)) == x
True | ### Input:
>>> datetime_to_http_date(datetime.datetime(2013, 12, 26, 9, 50, 10))
'Thu, 26 Dec 2013 09:50:10 GMT'
# Verify inverses
>>> x = 'Thu, 26 Dec 2013 09:50:10 GMT'
>>> datetime_to_http_date(http_date_to_datetime(x)) == x
True
### Response:
def datetime_to_http_date(the_datetime):
timeval = calendar.timegm(the_datetime.utctimetuple())
return formatdate(timeval=timeval,
localtime=False,
usegmt=True) |
def get_start_and_end_time(self, ref=None):
now = time.localtime(ref)
if self.syear == 0:
self.syear = now.tm_year
month_start_id = now.tm_mon
day_start = find_day_by_weekday_offset(self.syear,
month_start_id, self.swday, self.swday_offset)
start_time = get_start_of_day(self.syear, month_start_id, day_start)
if self.eyear == 0:
self.eyear = now.tm_year
month_end_id = now.tm_mon
day_end = find_day_by_weekday_offset(self.eyear, month_end_id, self.ewday,
self.ewday_offset)
end_time = get_end_of_day(self.eyear, month_end_id, day_end)
if start_time > end_time:
month_end_id += 1
if month_end_id > 12:
month_end_id = 1
self.eyear += 1
day_end = find_day_by_weekday_offset(self.eyear,
month_end_id, self.ewday, self.ewday_offset)
end_time = get_end_of_day(self.eyear, month_end_id, day_end)
now_epoch = time.mktime(now)
if end_time < now_epoch:
month_end_id += 1
month_start_id += 1
if month_end_id > 12:
month_end_id = 1
self.eyear += 1
if month_start_id > 12:
month_start_id = 1
self.syear += 1
day_start = find_day_by_weekday_offset(self.syear,
month_start_id, self.swday, self.swday_offset)
start_time = get_start_of_day(self.syear, month_start_id, day_start)
day_end = find_day_by_weekday_offset(self.eyear,
month_end_id, self.ewday, self.ewday_offset)
end_time = get_end_of_day(self.eyear, month_end_id, day_end)
return (start_time, end_time) | Specific function to get start time and end time for WeekDayDaterange
:param ref: time in seconds
:type ref: int
:return: tuple with start and end time
:rtype: tuple (int, int) | ### Input:
Specific function to get start time and end time for WeekDayDaterange
:param ref: time in seconds
:type ref: int
:return: tuple with start and end time
:rtype: tuple (int, int)
### Response:
def get_start_and_end_time(self, ref=None):
now = time.localtime(ref)
if self.syear == 0:
self.syear = now.tm_year
month_start_id = now.tm_mon
day_start = find_day_by_weekday_offset(self.syear,
month_start_id, self.swday, self.swday_offset)
start_time = get_start_of_day(self.syear, month_start_id, day_start)
if self.eyear == 0:
self.eyear = now.tm_year
month_end_id = now.tm_mon
day_end = find_day_by_weekday_offset(self.eyear, month_end_id, self.ewday,
self.ewday_offset)
end_time = get_end_of_day(self.eyear, month_end_id, day_end)
if start_time > end_time:
month_end_id += 1
if month_end_id > 12:
month_end_id = 1
self.eyear += 1
day_end = find_day_by_weekday_offset(self.eyear,
month_end_id, self.ewday, self.ewday_offset)
end_time = get_end_of_day(self.eyear, month_end_id, day_end)
now_epoch = time.mktime(now)
if end_time < now_epoch:
month_end_id += 1
month_start_id += 1
if month_end_id > 12:
month_end_id = 1
self.eyear += 1
if month_start_id > 12:
month_start_id = 1
self.syear += 1
day_start = find_day_by_weekday_offset(self.syear,
month_start_id, self.swday, self.swday_offset)
start_time = get_start_of_day(self.syear, month_start_id, day_start)
day_end = find_day_by_weekday_offset(self.eyear,
month_end_id, self.ewday, self.ewday_offset)
end_time = get_end_of_day(self.eyear, month_end_id, day_end)
return (start_time, end_time) |
def loadState(self, state):
self._duration = state[]
self._intensity = state[]
self._risefall = state[] | Loads previously saved values to this component.
:param state: return value from `stateDict`
:type state: dict | ### Input:
Loads previously saved values to this component.
:param state: return value from `stateDict`
:type state: dict
### Response:
def loadState(self, state):
self._duration = state[]
self._intensity = state[]
self._risefall = state[] |
def create(self):
input_params = {
"type": self.type,
"data": self.data,
"name": self.name,
"priority": self.priority,
"port": self.port,
"ttl": self.ttl,
"weight": self.weight,
"flags": self.flags,
"tags": self.tags
}
data = self.get_data(
"domains/%s/records" % (self.domain),
type=POST,
params=input_params,
)
if data:
self.id = data[][] | Creates a new record for a domain.
Args:
type (str): The type of the DNS record (e.g. A, CNAME, TXT).
name (str): The host name, alias, or service being defined by the
record.
data (int): Variable data depending on record type.
priority (int): The priority for SRV and MX records.
port (int): The port for SRV records.
ttl (int): The time to live for the record, in seconds.
weight (int): The weight for SRV records.
flags (int): An unsigned integer between 0-255 used for CAA records.
tags (string): The parameter tag for CAA records. Valid values are
"issue", "wildissue", or "iodef" | ### Input:
Creates a new record for a domain.
Args:
type (str): The type of the DNS record (e.g. A, CNAME, TXT).
name (str): The host name, alias, or service being defined by the
record.
data (int): Variable data depending on record type.
priority (int): The priority for SRV and MX records.
port (int): The port for SRV records.
ttl (int): The time to live for the record, in seconds.
weight (int): The weight for SRV records.
flags (int): An unsigned integer between 0-255 used for CAA records.
tags (string): The parameter tag for CAA records. Valid values are
"issue", "wildissue", or "iodef"
### Response:
def create(self):
input_params = {
"type": self.type,
"data": self.data,
"name": self.name,
"priority": self.priority,
"port": self.port,
"ttl": self.ttl,
"weight": self.weight,
"flags": self.flags,
"tags": self.tags
}
data = self.get_data(
"domains/%s/records" % (self.domain),
type=POST,
params=input_params,
)
if data:
self.id = data[][] |
def forms(self):
forms = MultiDict()
for name, item in self.POST.iterallitems():
if not hasattr(item, ):
forms[name] = item
return forms | POST form values parsed into an instance of :class:`MultiDict`.
This property contains form values parsed from an `url-encoded`
or `multipart/form-data` encoded POST request bidy. The values are
native strings. | ### Input:
POST form values parsed into an instance of :class:`MultiDict`.
This property contains form values parsed from an `url-encoded`
or `multipart/form-data` encoded POST request bidy. The values are
native strings.
### Response:
def forms(self):
forms = MultiDict()
for name, item in self.POST.iterallitems():
if not hasattr(item, ):
forms[name] = item
return forms |
def _valid_packet(raw_packet):
if raw_packet[0:1] != b:
return False
if len(raw_packet) != 19:
return False
checksum = 0
for i in range(1, 17):
checksum += raw_packet[i]
if checksum != raw_packet[18]:
return False
return True | Validate incoming packet. | ### Input:
Validate incoming packet.
### Response:
def _valid_packet(raw_packet):
if raw_packet[0:1] != b:
return False
if len(raw_packet) != 19:
return False
checksum = 0
for i in range(1, 17):
checksum += raw_packet[i]
if checksum != raw_packet[18]:
return False
return True |
def on_error(e):
exname = {: , : }
sys.stderr.write(.format(exname[e.__class__.__name__], str(e)))
sys.stderr.write()
sys.exit(1) | Error handler
RuntimeError or ValueError exceptions raised by commands will be handled
by this function. | ### Input:
Error handler
RuntimeError or ValueError exceptions raised by commands will be handled
by this function.
### Response:
def on_error(e):
exname = {: , : }
sys.stderr.write(.format(exname[e.__class__.__name__], str(e)))
sys.stderr.write()
sys.exit(1) |
def add_edge_end_unused(intersection, duplicates, intersections):
found = None
for other in intersections:
if (
intersection.index_first == other.index_first
and intersection.index_second == other.index_second
):
if intersection.s == 0.0 and other.s == 0.0:
found = other
break
if intersection.t == 0.0 and other.t == 0.0:
found = other
break
if found is not None:
intersections.remove(found)
duplicates.append(found)
intersections.append(intersection) | Add intersection that is ``COINCIDENT_UNUSED`` but on an edge end.
This is a helper for :func:`~._surface_intersection.add_intersection`.
It assumes that
* ``intersection`` will have at least one of ``s == 0.0`` or ``t == 0.0``
* A "misclassified" intersection in ``intersections`` that matches
``intersection`` will be the "same" if it matches both ``index_first``
and ``index_second`` and if it matches the start index exactly
Args:
intersection (.Intersection): An intersection to be added.
duplicates (List[.Intersection]): List of duplicate intersections.
intersections (List[.Intersection]): List of "accepted" (i.e.
non-duplicate) intersections. | ### Input:
Add intersection that is ``COINCIDENT_UNUSED`` but on an edge end.
This is a helper for :func:`~._surface_intersection.add_intersection`.
It assumes that
* ``intersection`` will have at least one of ``s == 0.0`` or ``t == 0.0``
* A "misclassified" intersection in ``intersections`` that matches
``intersection`` will be the "same" if it matches both ``index_first``
and ``index_second`` and if it matches the start index exactly
Args:
intersection (.Intersection): An intersection to be added.
duplicates (List[.Intersection]): List of duplicate intersections.
intersections (List[.Intersection]): List of "accepted" (i.e.
non-duplicate) intersections.
### Response:
def add_edge_end_unused(intersection, duplicates, intersections):
found = None
for other in intersections:
if (
intersection.index_first == other.index_first
and intersection.index_second == other.index_second
):
if intersection.s == 0.0 and other.s == 0.0:
found = other
break
if intersection.t == 0.0 and other.t == 0.0:
found = other
break
if found is not None:
intersections.remove(found)
duplicates.append(found)
intersections.append(intersection) |
def get_host(self, domain_part):
if self.map.host_matching:
if domain_part is None:
return self.server_name
return to_unicode(domain_part, )
subdomain = domain_part
if subdomain is None:
subdomain = self.subdomain
else:
subdomain = to_unicode(subdomain, )
return (subdomain and subdomain + u or u) + self.server_name | Figures out the full host name for the given domain part. The
domain part is a subdomain in case host matching is disabled or
a full host name. | ### Input:
Figures out the full host name for the given domain part. The
domain part is a subdomain in case host matching is disabled or
a full host name.
### Response:
def get_host(self, domain_part):
if self.map.host_matching:
if domain_part is None:
return self.server_name
return to_unicode(domain_part, )
subdomain = domain_part
if subdomain is None:
subdomain = self.subdomain
else:
subdomain = to_unicode(subdomain, )
return (subdomain and subdomain + u or u) + self.server_name |
def train(self, params):
if params[]:
n = params[]
self.net = buildNetwork(n, params[], n,
hiddenclass=LSTMLayer,
bias=True,
outputbias=params[],
recurrent=True)
self.net.reset()
ds = SequentialDataSet(params[], params[])
history = self.window(self.history, params)
resets = self.window(self.resets, params)
for i in xrange(1, len(history)):
if not resets[i - 1]:
ds.addSample(self.encoder.encode(history[i - 1]),
self.encoder.encode(history[i]))
if resets[i]:
ds.newSequence()
print "Train LSTM network on buffered dataset of length ", len(history)
if params[] > 1:
trainer = RPropMinusTrainer(self.net,
dataset=ds,
verbose=params[] > 0)
if len(history) > 1:
trainer.trainEpochs(params[])
self.net.reset()
for i in xrange(len(history) - 1):
symbol = history[i]
output = self.net.activate(self.encoder.encode(symbol))
self.encoder.classify(output, num=params[])
if resets[i]:
self.net.reset()
else:
self.trainer.setData(ds)
self.trainer.train()
self.net.reset()
for i in xrange(len(history) - 1):
symbol = history[i]
output = self.net.activate(self.encoder.encode(symbol))
self.encoder.classify(output, num=params[])
if resets[i]:
self.net.reset() | Train LSTM network on buffered dataset history
After training, run LSTM on history[:-1] to get the state correct
:param params:
:return: | ### Input:
Train LSTM network on buffered dataset history
After training, run LSTM on history[:-1] to get the state correct
:param params:
:return:
### Response:
def train(self, params):
if params[]:
n = params[]
self.net = buildNetwork(n, params[], n,
hiddenclass=LSTMLayer,
bias=True,
outputbias=params[],
recurrent=True)
self.net.reset()
ds = SequentialDataSet(params[], params[])
history = self.window(self.history, params)
resets = self.window(self.resets, params)
for i in xrange(1, len(history)):
if not resets[i - 1]:
ds.addSample(self.encoder.encode(history[i - 1]),
self.encoder.encode(history[i]))
if resets[i]:
ds.newSequence()
print "Train LSTM network on buffered dataset of length ", len(history)
if params[] > 1:
trainer = RPropMinusTrainer(self.net,
dataset=ds,
verbose=params[] > 0)
if len(history) > 1:
trainer.trainEpochs(params[])
self.net.reset()
for i in xrange(len(history) - 1):
symbol = history[i]
output = self.net.activate(self.encoder.encode(symbol))
self.encoder.classify(output, num=params[])
if resets[i]:
self.net.reset()
else:
self.trainer.setData(ds)
self.trainer.train()
self.net.reset()
for i in xrange(len(history) - 1):
symbol = history[i]
output = self.net.activate(self.encoder.encode(symbol))
self.encoder.classify(output, num=params[])
if resets[i]:
self.net.reset() |
def random_crop(src, size):
h, w, _ = src.shape
new_w, new_h = scale_down((w, h), size)
x0 = random.randint(0, w - new_w)
y0 = random.randint(0, h - new_h)
out = fixed_crop(src, x0, y0, new_w, new_h, size)
return out, (x0, y0, new_w, new_h) | Randomly crop src with size. Upsample result if src is smaller than size | ### Input:
Randomly crop src with size. Upsample result if src is smaller than size
### Response:
def random_crop(src, size):
h, w, _ = src.shape
new_w, new_h = scale_down((w, h), size)
x0 = random.randint(0, w - new_w)
y0 = random.randint(0, h - new_h)
out = fixed_crop(src, x0, y0, new_w, new_h, size)
return out, (x0, y0, new_w, new_h) |
def write_data(data, out_file):
with open(out_file, ) as handle_out:
handle_out.write(json.dumps([data], skipkeys=True, indent=2)) | write json file from seqcluster cluster | ### Input:
write json file from seqcluster cluster
### Response:
def write_data(data, out_file):
with open(out_file, ) as handle_out:
handle_out.write(json.dumps([data], skipkeys=True, indent=2)) |
def get_header(self, header_name):
if header_name in self.headers:
return self.headers[header_name]
return self.add_header_name(header_name) | Returns a header with that name, creates it if it does not exist. | ### Input:
Returns a header with that name, creates it if it does not exist.
### Response:
def get_header(self, header_name):
if header_name in self.headers:
return self.headers[header_name]
return self.add_header_name(header_name) |
def builder(func):
import copy
def _copy(self, *args, **kwargs):
self_copy = copy.copy(self)
result = func(self_copy, *args, **kwargs)
if result is None:
return self_copy
return result
return _copy | Decorator for wrapper "builder" functions. These are functions on the Query class or other classes used for
building queries which mutate the query and return self. To make the build functions immutable, this decorator is
used which will deepcopy the current instance. This decorator will return the return value of the inner function
or the new copy of the instance. The inner function does not need to return self. | ### Input:
Decorator for wrapper "builder" functions. These are functions on the Query class or other classes used for
building queries which mutate the query and return self. To make the build functions immutable, this decorator is
used which will deepcopy the current instance. This decorator will return the return value of the inner function
or the new copy of the instance. The inner function does not need to return self.
### Response:
def builder(func):
import copy
def _copy(self, *args, **kwargs):
self_copy = copy.copy(self)
result = func(self_copy, *args, **kwargs)
if result is None:
return self_copy
return result
return _copy |
def get_generated_vcl_html(self, service_id, version_number):
content = self._fetch("/service/%s/version/%d/generated_vcl/content" % (service_id, version_number))
return content.get("content", None) | Display the content of generated VCL with HTML syntax highlighting. | ### Input:
Display the content of generated VCL with HTML syntax highlighting.
### Response:
def get_generated_vcl_html(self, service_id, version_number):
content = self._fetch("/service/%s/version/%d/generated_vcl/content" % (service_id, version_number))
return content.get("content", None) |
def G(w, Xs):
n = len(Xs)
P = projection_matrix(w)
Ys = [np.dot(P, X) for X in Xs]
A = calc_A(Ys)
A_hat = calc_A_hat(A, skew_matrix(w))
u = sum(np.dot(Y, Y) for Y in Ys) / n
v = np.dot(A_hat, sum(np.dot(Y, Y) * Y for Y in Ys)) / np.trace(np.dot(A_hat, A))
return sum((np.dot(Y, Y) - u - 2 * np.dot(Y, v)) ** 2 for Y in Ys) | Calculate the G function given a cylinder direction w and a
list of data points Xs to be fitted. | ### Input:
Calculate the G function given a cylinder direction w and a
list of data points Xs to be fitted.
### Response:
def G(w, Xs):
n = len(Xs)
P = projection_matrix(w)
Ys = [np.dot(P, X) for X in Xs]
A = calc_A(Ys)
A_hat = calc_A_hat(A, skew_matrix(w))
u = sum(np.dot(Y, Y) for Y in Ys) / n
v = np.dot(A_hat, sum(np.dot(Y, Y) * Y for Y in Ys)) / np.trace(np.dot(A_hat, A))
return sum((np.dot(Y, Y) - u - 2 * np.dot(Y, v)) ** 2 for Y in Ys) |
def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port,
public_port, expected, expected_num_eps=3):
self.log.debug()
self.log.debug(.format(repr(endpoints)))
found = []
for ep in endpoints:
self.log.debug(.format(repr(ep)))
if ((admin_port in ep.url and ep.interface == ) or
(internal_port in ep.url and ep.interface == ) or
(public_port in ep.url and ep.interface == )):
found.append(ep.interface)
actual = {: ep.id,
: ep.region,
: ep.region_id,
: self.not_null,
: ep.url,
: ep.service_id, }
ret = self._validate_dict_data(expected, actual)
if ret:
return .format(ret)
if len(found) != expected_num_eps:
return | Validate keystone v3 endpoint data.
Validate the v3 endpoint data which has changed from v2. The
ports are used to find the matching endpoint.
The new v3 endpoint data looks like:
[<Endpoint enabled=True,
id=0432655fc2f74d1e9fa17bdaa6f6e60b,
interface=admin,
links={u'self': u'<RESTful URL of this endpoint>'},
region=RegionOne,
region_id=RegionOne,
service_id=17f842a0dc084b928e476fafe67e4095,
url=http://10.5.6.5:9312>,
<Endpoint enabled=True,
id=6536cb6cb92f4f41bf22b079935c7707,
interface=admin,
links={u'self': u'<RESTful url of this endpoint>'},
region=RegionOne,
region_id=RegionOne,
service_id=72fc8736fb41435e8b3584205bb2cfa3,
url=http://10.5.6.6:35357/v3>,
... ] | ### Input:
Validate keystone v3 endpoint data.
Validate the v3 endpoint data which has changed from v2. The
ports are used to find the matching endpoint.
The new v3 endpoint data looks like:
[<Endpoint enabled=True,
id=0432655fc2f74d1e9fa17bdaa6f6e60b,
interface=admin,
links={u'self': u'<RESTful URL of this endpoint>'},
region=RegionOne,
region_id=RegionOne,
service_id=17f842a0dc084b928e476fafe67e4095,
url=http://10.5.6.5:9312>,
<Endpoint enabled=True,
id=6536cb6cb92f4f41bf22b079935c7707,
interface=admin,
links={u'self': u'<RESTful url of this endpoint>'},
region=RegionOne,
region_id=RegionOne,
service_id=72fc8736fb41435e8b3584205bb2cfa3,
url=http://10.5.6.6:35357/v3>,
... ]
### Response:
def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port,
public_port, expected, expected_num_eps=3):
self.log.debug()
self.log.debug(.format(repr(endpoints)))
found = []
for ep in endpoints:
self.log.debug(.format(repr(ep)))
if ((admin_port in ep.url and ep.interface == ) or
(internal_port in ep.url and ep.interface == ) or
(public_port in ep.url and ep.interface == )):
found.append(ep.interface)
actual = {: ep.id,
: ep.region,
: ep.region_id,
: self.not_null,
: ep.url,
: ep.service_id, }
ret = self._validate_dict_data(expected, actual)
if ret:
return .format(ret)
if len(found) != expected_num_eps:
return |
def nextCmd(snmpDispatcher, authData, transportTarget,
*varBinds, **options):
def cbFun(*args, **kwargs):
response[:] = args + (kwargs.get(, ()),)
options[] = cbFun
lexicographicMode = options.pop(, True)
maxRows = options.pop(, 0)
maxCalls = options.pop(, 0)
initialVarBinds = VB_PROCESSOR.makeVarBinds(snmpDispatcher.cache, varBinds)
totalRows = totalCalls = 0
errorIndication, errorStatus, errorIndex, varBindTable = None, 0, 0, ()
response = []
while True:
if not varBinds:
yield (errorIndication, errorStatus, errorIndex,
varBindTable and varBindTable[0] or [])
return
cmdgen.nextCmd(snmpDispatcher, authData, transportTarget,
*[(x[0], Null()) for x in varBinds], **options)
snmpDispatcher.transportDispatcher.runDispatcher()
errorIndication, errorStatus, errorIndex, varBindTable, varBinds = response
if errorIndication:
yield (errorIndication, errorStatus, errorIndex,
varBindTable and varBindTable[0] or [])
return
elif errorStatus:
if errorStatus == 2:
errorStatus = errorStatus.clone(0)
errorIndex = errorIndex.clone(0)
yield (errorIndication, errorStatus, errorIndex,
varBindTable and varBindTable[0] or [])
return
else:
varBindRow = varBindTable and varBindTable[-1]
if not lexicographicMode:
for idx, varBind in enumerate(varBindRow):
name, val = varBind
if not isinstance(val, Null):
if initialVarBinds[idx][0].isPrefixOf(name):
break
else:
return
for varBindRow in varBindTable:
nextVarBinds = (yield errorIndication, errorStatus, errorIndex, varBindRow)
if nextVarBinds:
initialVarBinds = varBinds = VB_PROCESSOR.makeVarBinds(snmpDispatcher.cache, nextVarBinds)
totalRows += 1
totalCalls += 1
if maxRows and totalRows >= maxRows:
return
if maxCalls and totalCalls >= maxCalls:
return | Create a generator to perform one or more SNMP GETNEXT queries.
On each iteration, new SNMP GETNEXT request is send
(:RFC:`1905#section-4.2.2`). The iterator blocks waiting for response
to arrive or error to occur.
Parameters
----------
snmpDispatcher : :py:class:`~pysnmp.hlapi.snmpDispatcher`
Class instance representing SNMP engine.
authData : :py:class:`~pysnmp.hlapi.CommunityData` or :py:class:`~pysnmp.hlapi.UsmUserData`
Class instance representing SNMP credentials.
transportTarget : :py:class:`~pysnmp.hlapi.asyncore.UdpTransportTarget` or :py:class:`~pysnmp.hlapi.asyncore.Udp6TransportTarget`
Class instance representing transport type along with SNMP peer address.
\*varBinds : :py:class:`~pysnmp.smi.rfc1902.ObjectType`
One or more class instances representing MIB variables to place
into SNMP request.
Other Parameters
----------------
\*\*options :
Request options:
* `lookupMib` - load MIB and resolve response MIB variables at
the cost of slightly reduced performance. Default is `True`.
Default is `True`.
* `lexicographicMode` - walk SNMP agent's MIB till the end (if `True`),
otherwise (if `False`) stop iteration when all response MIB
variables leave the scope of initial MIB variables in
`varBinds`. Default is `True`.
* `ignoreNonIncreasingOid` - continue iteration even if response
MIB variables (OIDs) are not greater then request MIB variables.
Be aware that setting it to `True` may cause infinite loop between
SNMP management and agent applications. Default is `False`.
* `maxRows` - stop iteration once this generator instance processed
`maxRows` of SNMP conceptual table. Default is `0` (no limit).
* `maxCalls` - stop iteration once this generator instance processed
`maxCalls` responses. Default is 0 (no limit).
Yields
------
errorIndication: str
True value indicates SNMP engine error.
errorStatus: str
True value indicates SNMP PDU error.
errorIndex: int
Non-zero value refers to `varBinds[errorIndex-1]`
varBindTable: tuple
A 2-dimensional array of :py:class:`~pysnmp.smi.rfc1902.ObjectType` class
instances representing a table of MIB variables returned in SNMP response.
Raises
------
PySnmpError
Or its derivative indicating that an error occurred while
performing SNMP operation.
Notes
-----
The `nextCmd` generator will be exhausted on any of the following
conditions:
* SNMP engine error occurs thus `errorIndication` is `True`
* SNMP PDU `errorStatus` is reported as `True`
* SNMP :py:class:`~pysnmp.proto.rfc1905.EndOfMibView` values
(also known as *SNMP exception values*) are reported for all
MIB variables in `varBinds`
* *lexicographicMode* option is `True` and SNMP agent reports
end-of-mib or *lexicographicMode* is `False` and all
response MIB variables leave the scope of `varBinds`
At any moment a new sequence of `varBinds` could be send back into
running generator (supported since Python 2.6).
Examples
--------
>>> from pysnmp.hlapi.v1arch import *
>>>
>>> g = nextCmd(snmpDispatcher(),
>>> CommunityData('public'),
>>> UdpTransportTarget(('demo.snmplabs.com', 161)),
>>> ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr')))
>>> next(g)
(None, 0, 0, [[ObjectType(ObjectIdentity(ObjectName('1.3.6.1.2.1.1.1.0')), DisplayString('SunOS zeus.snmplabs.com 4.1.3_U1 1 sun4m'))]])
>>> g.send([ObjectType(ObjectIdentity('IF-MIB', 'ifInOctets'))])
(None, 0, 0, [(ObjectName('1.3.6.1.2.1.2.2.1.10.1'), Counter32(284817787))]) | ### Input:
Create a generator to perform one or more SNMP GETNEXT queries.
On each iteration, new SNMP GETNEXT request is send
(:RFC:`1905#section-4.2.2`). The iterator blocks waiting for response
to arrive or error to occur.
Parameters
----------
snmpDispatcher : :py:class:`~pysnmp.hlapi.snmpDispatcher`
Class instance representing SNMP engine.
authData : :py:class:`~pysnmp.hlapi.CommunityData` or :py:class:`~pysnmp.hlapi.UsmUserData`
Class instance representing SNMP credentials.
transportTarget : :py:class:`~pysnmp.hlapi.asyncore.UdpTransportTarget` or :py:class:`~pysnmp.hlapi.asyncore.Udp6TransportTarget`
Class instance representing transport type along with SNMP peer address.
\*varBinds : :py:class:`~pysnmp.smi.rfc1902.ObjectType`
One or more class instances representing MIB variables to place
into SNMP request.
Other Parameters
----------------
\*\*options :
Request options:
* `lookupMib` - load MIB and resolve response MIB variables at
the cost of slightly reduced performance. Default is `True`.
Default is `True`.
* `lexicographicMode` - walk SNMP agent's MIB till the end (if `True`),
otherwise (if `False`) stop iteration when all response MIB
variables leave the scope of initial MIB variables in
`varBinds`. Default is `True`.
* `ignoreNonIncreasingOid` - continue iteration even if response
MIB variables (OIDs) are not greater then request MIB variables.
Be aware that setting it to `True` may cause infinite loop between
SNMP management and agent applications. Default is `False`.
* `maxRows` - stop iteration once this generator instance processed
`maxRows` of SNMP conceptual table. Default is `0` (no limit).
* `maxCalls` - stop iteration once this generator instance processed
`maxCalls` responses. Default is 0 (no limit).
Yields
------
errorIndication: str
True value indicates SNMP engine error.
errorStatus: str
True value indicates SNMP PDU error.
errorIndex: int
Non-zero value refers to `varBinds[errorIndex-1]`
varBindTable: tuple
A 2-dimensional array of :py:class:`~pysnmp.smi.rfc1902.ObjectType` class
instances representing a table of MIB variables returned in SNMP response.
Raises
------
PySnmpError
Or its derivative indicating that an error occurred while
performing SNMP operation.
Notes
-----
The `nextCmd` generator will be exhausted on any of the following
conditions:
* SNMP engine error occurs thus `errorIndication` is `True`
* SNMP PDU `errorStatus` is reported as `True`
* SNMP :py:class:`~pysnmp.proto.rfc1905.EndOfMibView` values
(also known as *SNMP exception values*) are reported for all
MIB variables in `varBinds`
* *lexicographicMode* option is `True` and SNMP agent reports
end-of-mib or *lexicographicMode* is `False` and all
response MIB variables leave the scope of `varBinds`
At any moment a new sequence of `varBinds` could be send back into
running generator (supported since Python 2.6).
Examples
--------
>>> from pysnmp.hlapi.v1arch import *
>>>
>>> g = nextCmd(snmpDispatcher(),
>>> CommunityData('public'),
>>> UdpTransportTarget(('demo.snmplabs.com', 161)),
>>> ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr')))
>>> next(g)
(None, 0, 0, [[ObjectType(ObjectIdentity(ObjectName('1.3.6.1.2.1.1.1.0')), DisplayString('SunOS zeus.snmplabs.com 4.1.3_U1 1 sun4m'))]])
>>> g.send([ObjectType(ObjectIdentity('IF-MIB', 'ifInOctets'))])
(None, 0, 0, [(ObjectName('1.3.6.1.2.1.2.2.1.10.1'), Counter32(284817787))])
### Response:
def nextCmd(snmpDispatcher, authData, transportTarget,
*varBinds, **options):
def cbFun(*args, **kwargs):
response[:] = args + (kwargs.get(, ()),)
options[] = cbFun
lexicographicMode = options.pop(, True)
maxRows = options.pop(, 0)
maxCalls = options.pop(, 0)
initialVarBinds = VB_PROCESSOR.makeVarBinds(snmpDispatcher.cache, varBinds)
totalRows = totalCalls = 0
errorIndication, errorStatus, errorIndex, varBindTable = None, 0, 0, ()
response = []
while True:
if not varBinds:
yield (errorIndication, errorStatus, errorIndex,
varBindTable and varBindTable[0] or [])
return
cmdgen.nextCmd(snmpDispatcher, authData, transportTarget,
*[(x[0], Null()) for x in varBinds], **options)
snmpDispatcher.transportDispatcher.runDispatcher()
errorIndication, errorStatus, errorIndex, varBindTable, varBinds = response
if errorIndication:
yield (errorIndication, errorStatus, errorIndex,
varBindTable and varBindTable[0] or [])
return
elif errorStatus:
if errorStatus == 2:
errorStatus = errorStatus.clone(0)
errorIndex = errorIndex.clone(0)
yield (errorIndication, errorStatus, errorIndex,
varBindTable and varBindTable[0] or [])
return
else:
varBindRow = varBindTable and varBindTable[-1]
if not lexicographicMode:
for idx, varBind in enumerate(varBindRow):
name, val = varBind
if not isinstance(val, Null):
if initialVarBinds[idx][0].isPrefixOf(name):
break
else:
return
for varBindRow in varBindTable:
nextVarBinds = (yield errorIndication, errorStatus, errorIndex, varBindRow)
if nextVarBinds:
initialVarBinds = varBinds = VB_PROCESSOR.makeVarBinds(snmpDispatcher.cache, nextVarBinds)
totalRows += 1
totalCalls += 1
if maxRows and totalRows >= maxRows:
return
if maxCalls and totalCalls >= maxCalls:
return |
def update(self, obj, matches=None, mt=None, lt=None, eq=None):
updated = False
objects = list()
for _obj in self.get(obj.__class__):
if self.__criteria(_obj, matches=matches, mt=mt, lt=lt, eq=eq):
objects.append(obj)
updated = True
else:
objects.append(_obj)
self.flush(obj._TABLE)
self.create_table_from_object(obj)
for obj in objects:
self.store(obj)
return updated | Update object(s) in the database.
:param obj:
:param matches:
:param mt:
:param lt:
:param eq:
:return: | ### Input:
Update object(s) in the database.
:param obj:
:param matches:
:param mt:
:param lt:
:param eq:
:return:
### Response:
def update(self, obj, matches=None, mt=None, lt=None, eq=None):
updated = False
objects = list()
for _obj in self.get(obj.__class__):
if self.__criteria(_obj, matches=matches, mt=mt, lt=lt, eq=eq):
objects.append(obj)
updated = True
else:
objects.append(_obj)
self.flush(obj._TABLE)
self.create_table_from_object(obj)
for obj in objects:
self.store(obj)
return updated |
def process_ether_frame(self,
id=None,
msg=None):
df = json_normalize(msg)
dt = json.loads(df.to_json())
flat_msg = {}
for k in dt:
new_key = "eth_{}".format(k)
flat_msg[new_key] = dt[k]["0"]
if new_key not in self.eth_keys:
self.eth_keys[new_key] = k
dt["eth_id"] = id
self.all_eth.append(dt)
log.debug("ETHER data updated:")
log.debug(self.eth_keys)
log.debug(self.all_eth)
log.debug("")
return flat_msg | process_ether_frame
Convert a complex nested json dictionary
to a flattened dictionary and capture
all unique keys for table construction
:param id: key for this msg
:param msg: ether frame for packet | ### Input:
process_ether_frame
Convert a complex nested json dictionary
to a flattened dictionary and capture
all unique keys for table construction
:param id: key for this msg
:param msg: ether frame for packet
### Response:
def process_ether_frame(self,
id=None,
msg=None):
df = json_normalize(msg)
dt = json.loads(df.to_json())
flat_msg = {}
for k in dt:
new_key = "eth_{}".format(k)
flat_msg[new_key] = dt[k]["0"]
if new_key not in self.eth_keys:
self.eth_keys[new_key] = k
dt["eth_id"] = id
self.all_eth.append(dt)
log.debug("ETHER data updated:")
log.debug(self.eth_keys)
log.debug(self.all_eth)
log.debug("")
return flat_msg |
def _encode_multipart(**kw):
boundary = % hex(int(time.time() * 1000))
data = []
for k, v in kw.iteritems():
data.append( % boundary)
if hasattr(v, ):
filename = getattr(v, , )
content = v.read()
data.append( % k)
data.append( % len(content))
data.append( % _guess_content_type(filename))
data.append(content)
else:
data.append( % k)
data.append(v.encode() if isinstance(v, unicode) else v)
data.append( % boundary)
return .join(data), boundary | build a multipart/form-data body with randomly generated boundary | ### Input:
build a multipart/form-data body with randomly generated boundary
### Response:
def _encode_multipart(**kw):
boundary = % hex(int(time.time() * 1000))
data = []
for k, v in kw.iteritems():
data.append( % boundary)
if hasattr(v, ):
filename = getattr(v, , )
content = v.read()
data.append( % k)
data.append( % len(content))
data.append( % _guess_content_type(filename))
data.append(content)
else:
data.append( % k)
data.append(v.encode() if isinstance(v, unicode) else v)
data.append( % boundary)
return .join(data), boundary |
def load_class(full_class_string, verbose=False, silent=False):
if not isinstance(full_class_string, str):
if not silent:
print("Error, loadClass: input not a string: %s" % str(full_class_string))
return None
if len(full_class_string) == 0:
if not silent:
print("Error, loadClass: empty string")
return None
class_data = full_class_string.split(".")
module_path = ".".join(class_data[:-1])
class_str = class_data[-1]
try:
module_ = importlib.import_module(module_path)
return getattr(module_, class_str)
except AttributeError:
return None
except Exception:
raise | dynamically load a class from a string | ### Input:
dynamically load a class from a string
### Response:
def load_class(full_class_string, verbose=False, silent=False):
if not isinstance(full_class_string, str):
if not silent:
print("Error, loadClass: input not a string: %s" % str(full_class_string))
return None
if len(full_class_string) == 0:
if not silent:
print("Error, loadClass: empty string")
return None
class_data = full_class_string.split(".")
module_path = ".".join(class_data[:-1])
class_str = class_data[-1]
try:
module_ = importlib.import_module(module_path)
return getattr(module_, class_str)
except AttributeError:
return None
except Exception:
raise |
def visitNegation(self, ctx):
return -conversions.to_decimal(self.visit(ctx.expression()), self._eval_context) | expression: MINUS expression | ### Input:
expression: MINUS expression
### Response:
def visitNegation(self, ctx):
return -conversions.to_decimal(self.visit(ctx.expression()), self._eval_context) |
def sort(self, key_or_list, direction=None):
self.__check_okay_to_chain()
keys = helpers._index_list(key_or_list, direction)
self.__ordering = helpers._index_document(keys)
return self | Sorts this cursor's results.
Pass a field name and a direction, either
:data:`~pymongo.ASCENDING` or :data:`~pymongo.DESCENDING`::
for doc in collection.find().sort('field', pymongo.ASCENDING):
print(doc)
To sort by multiple fields, pass a list of (key, direction) pairs::
for doc in collection.find().sort([
('field1', pymongo.ASCENDING),
('field2', pymongo.DESCENDING)]):
print(doc)
Beginning with MongoDB version 2.6, text search results can be
sorted by relevance::
cursor = db.test.find(
{'$text': {'$search': 'some words'}},
{'score': {'$meta': 'textScore'}})
# Sort by 'score' field.
cursor.sort([('score', {'$meta': 'textScore'})])
for doc in cursor:
print(doc)
Raises :class:`~pymongo.errors.InvalidOperation` if this cursor has
already been used. Only the last :meth:`sort` applied to this
cursor has any effect.
:Parameters:
- `key_or_list`: a single key or a list of (key, direction)
pairs specifying the keys to sort on
- `direction` (optional): only used if `key_or_list` is a single
key, if not given :data:`~pymongo.ASCENDING` is assumed | ### Input:
Sorts this cursor's results.
Pass a field name and a direction, either
:data:`~pymongo.ASCENDING` or :data:`~pymongo.DESCENDING`::
for doc in collection.find().sort('field', pymongo.ASCENDING):
print(doc)
To sort by multiple fields, pass a list of (key, direction) pairs::
for doc in collection.find().sort([
('field1', pymongo.ASCENDING),
('field2', pymongo.DESCENDING)]):
print(doc)
Beginning with MongoDB version 2.6, text search results can be
sorted by relevance::
cursor = db.test.find(
{'$text': {'$search': 'some words'}},
{'score': {'$meta': 'textScore'}})
# Sort by 'score' field.
cursor.sort([('score', {'$meta': 'textScore'})])
for doc in cursor:
print(doc)
Raises :class:`~pymongo.errors.InvalidOperation` if this cursor has
already been used. Only the last :meth:`sort` applied to this
cursor has any effect.
:Parameters:
- `key_or_list`: a single key or a list of (key, direction)
pairs specifying the keys to sort on
- `direction` (optional): only used if `key_or_list` is a single
key, if not given :data:`~pymongo.ASCENDING` is assumed
### Response:
def sort(self, key_or_list, direction=None):
self.__check_okay_to_chain()
keys = helpers._index_list(key_or_list, direction)
self.__ordering = helpers._index_document(keys)
return self |
def retrieve_interval(self, start_time, end_time, compute_missing=False):
events = self._compute_buckets(start_time, end_time,
compute_missing=compute_missing)
for event in events:
yield event | Return the results for `query_function` on every `bucket_width`
time period between `start_time` and `end_time`. Look for
previously cached results to avoid recomputation.
:param start_time: A datetime for the beginning of the range,
aligned with `bucket_width`.
:param end_time: A datetime for the end of the range, aligned with
`bucket_width`.
:param compute_missing: A boolean that, if True, will compute any
non-cached results. | ### Input:
Return the results for `query_function` on every `bucket_width`
time period between `start_time` and `end_time`. Look for
previously cached results to avoid recomputation.
:param start_time: A datetime for the beginning of the range,
aligned with `bucket_width`.
:param end_time: A datetime for the end of the range, aligned with
`bucket_width`.
:param compute_missing: A boolean that, if True, will compute any
non-cached results.
### Response:
def retrieve_interval(self, start_time, end_time, compute_missing=False):
events = self._compute_buckets(start_time, end_time,
compute_missing=compute_missing)
for event in events:
yield event |
def pvT(self,vT,R,z,gl=True,ngl=_DEFAULTNGL2,nsigma=4.):
sigmaR1= self._sr*numpy.exp((self._refr-R)/self._hsr)
sigmaz1= self._sz*numpy.exp((self._refr-R)/self._hsz)
if gl:
if ngl % 2 == 1:
raise ValueError("ngl must be even")
if ngl == _DEFAULTNGL:
glx, glw= self._glxdef, self._glwdef
glx12, glw12= self._glxdef12, self._glwdef12
elif ngl == _DEFAULTNGL2:
glx, glw= self._glxdef2, self._glwdef2
glx12, glw12= self._glxdef, self._glwdef
else:
glx, glw= numpy.polynomial.legendre.leggauss(ngl)
glx12, glw12= numpy.polynomial.legendre.leggauss(ngl//2)
if isinstance(self._aA,(actionAngle.actionAngleAdiabatic,
actionAngle.actionAngleAdiabaticGrid)):
vRgl= nsigma*sigmaR1/2.*(glx+1.)
vzgl= nsigma*sigmaz1/2.*(glx+1.)
vRglw= glw
vzglw= glw
vRfac= nsigma*sigmaR1
vzfac= nsigma*sigmaz1
else:
vRgl= nsigma*sigmaR1/2.*(glx12+1.)
vRgl= list(vRgl)
vRgl.extend(-nsigma*sigmaR1/2.*(glx12+1.))
vRgl= numpy.array(vRgl)
vzgl= nsigma*sigmaz1/2.*(glx12+1.)
vzgl= list(vzgl)
vzgl.extend(-nsigma*sigmaz1/2.*(glx12+1.))
vzgl= numpy.array(vzgl)
vRglw= glw12
vRglw= list(vRglw)
vRglw.extend(glw12)
vRglw= numpy.array(vRglw)
vzglw= glw12
vzglw= list(vzglw)
vzglw.extend(glw12)
vzglw= numpy.array(vzglw)
vRfac = 0.5*nsigma*sigmaR1
vzfac = 0.5*nsigma*sigmaz1
vRgl= numpy.tile(vRgl,(ngl,1)).T
vzgl= numpy.tile(vzgl,(ngl,1))
vRglw= numpy.tile(vRglw,(ngl,1)).T
vzglw= numpy.tile(vzglw,(ngl,1))
logqeval= numpy.reshape(self(R+numpy.zeros(ngl*ngl),
vRgl.flatten(),
vT+numpy.zeros(ngl*ngl),
z+numpy.zeros(ngl*ngl),
vzgl.flatten(),
log=True,
use_physical=False),
(ngl,ngl))
return numpy.sum(numpy.exp(logqeval)*vRglw*vzglw*vRfac*vzfac) | NAME:
pvT
PURPOSE:
calculate the marginalized vT probability at this location (NOT normalized by the density)
INPUT:
vT - tangential velocity (can be Quantity)
R - radius (can be Quantity)
z - height (can be Quantity)
gl - use Gauss-Legendre integration (True, currently the only option)
ngl - order of Gauss-Legendre integration
nsigma - sets integration limits to [-1,+1]*nsigma*sigma(R) for integration over vz and vR (default: 4)
OUTPUT:
p(vT,R,z)
HISTORY:
2012-12-22 - Written - Bovy (IAS)
2018-01-12 - Added Gauss-Legendre integration prefactor nsigma^2/4 - Trick (MPA) | ### Input:
NAME:
pvT
PURPOSE:
calculate the marginalized vT probability at this location (NOT normalized by the density)
INPUT:
vT - tangential velocity (can be Quantity)
R - radius (can be Quantity)
z - height (can be Quantity)
gl - use Gauss-Legendre integration (True, currently the only option)
ngl - order of Gauss-Legendre integration
nsigma - sets integration limits to [-1,+1]*nsigma*sigma(R) for integration over vz and vR (default: 4)
OUTPUT:
p(vT,R,z)
HISTORY:
2012-12-22 - Written - Bovy (IAS)
2018-01-12 - Added Gauss-Legendre integration prefactor nsigma^2/4 - Trick (MPA)
### Response:
def pvT(self,vT,R,z,gl=True,ngl=_DEFAULTNGL2,nsigma=4.):
sigmaR1= self._sr*numpy.exp((self._refr-R)/self._hsr)
sigmaz1= self._sz*numpy.exp((self._refr-R)/self._hsz)
if gl:
if ngl % 2 == 1:
raise ValueError("ngl must be even")
if ngl == _DEFAULTNGL:
glx, glw= self._glxdef, self._glwdef
glx12, glw12= self._glxdef12, self._glwdef12
elif ngl == _DEFAULTNGL2:
glx, glw= self._glxdef2, self._glwdef2
glx12, glw12= self._glxdef, self._glwdef
else:
glx, glw= numpy.polynomial.legendre.leggauss(ngl)
glx12, glw12= numpy.polynomial.legendre.leggauss(ngl//2)
if isinstance(self._aA,(actionAngle.actionAngleAdiabatic,
actionAngle.actionAngleAdiabaticGrid)):
vRgl= nsigma*sigmaR1/2.*(glx+1.)
vzgl= nsigma*sigmaz1/2.*(glx+1.)
vRglw= glw
vzglw= glw
vRfac= nsigma*sigmaR1
vzfac= nsigma*sigmaz1
else:
vRgl= nsigma*sigmaR1/2.*(glx12+1.)
vRgl= list(vRgl)
vRgl.extend(-nsigma*sigmaR1/2.*(glx12+1.))
vRgl= numpy.array(vRgl)
vzgl= nsigma*sigmaz1/2.*(glx12+1.)
vzgl= list(vzgl)
vzgl.extend(-nsigma*sigmaz1/2.*(glx12+1.))
vzgl= numpy.array(vzgl)
vRglw= glw12
vRglw= list(vRglw)
vRglw.extend(glw12)
vRglw= numpy.array(vRglw)
vzglw= glw12
vzglw= list(vzglw)
vzglw.extend(glw12)
vzglw= numpy.array(vzglw)
vRfac = 0.5*nsigma*sigmaR1
vzfac = 0.5*nsigma*sigmaz1
vRgl= numpy.tile(vRgl,(ngl,1)).T
vzgl= numpy.tile(vzgl,(ngl,1))
vRglw= numpy.tile(vRglw,(ngl,1)).T
vzglw= numpy.tile(vzglw,(ngl,1))
logqeval= numpy.reshape(self(R+numpy.zeros(ngl*ngl),
vRgl.flatten(),
vT+numpy.zeros(ngl*ngl),
z+numpy.zeros(ngl*ngl),
vzgl.flatten(),
log=True,
use_physical=False),
(ngl,ngl))
return numpy.sum(numpy.exp(logqeval)*vRglw*vzglw*vRfac*vzfac) |
def page(self, priority=values.unset, assignment_status=values.unset,
workflow_sid=values.unset, workflow_name=values.unset,
task_queue_sid=values.unset, task_queue_name=values.unset,
evaluate_task_attributes=values.unset, ordering=values.unset,
has_addons=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
params = values.of({
: priority,
: serialize.map(assignment_status, lambda e: e),
: workflow_sid,
: workflow_name,
: task_queue_sid,
: task_queue_name,
: evaluate_task_attributes,
: ordering,
: has_addons,
: page_token,
: page_number,
: page_size,
})
response = self._version.page(
,
self._uri,
params=params,
)
return TaskPage(self._version, response, self._solution) | Retrieve a single page of TaskInstance records from the API.
Request is executed immediately
:param unicode priority: Retrieve the list of all Tasks in the workspace with the specified priority.
:param unicode assignment_status: Returns the list of all Tasks in the workspace with the specified AssignmentStatus.
:param unicode workflow_sid: Returns the list of Tasks that are being controlled by the Workflow with the specified Sid value.
:param unicode workflow_name: Returns the list of Tasks that are being controlled by the Workflow with the specified FriendlyName value.
:param unicode task_queue_sid: Returns the list of Tasks that are currently waiting in the TaskQueue identified by the Sid specified.
:param unicode task_queue_name: Returns the list of Tasks that are currently waiting in the TaskQueue identified by the FriendlyName specified.
:param unicode evaluate_task_attributes: Provide a task attributes expression, and this will return tasks which match the attributes.
:param unicode ordering: Use this parameter to control the order of the Tasks returned.
:param bool has_addons: The has_addons
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of TaskInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task.TaskPage | ### Input:
Retrieve a single page of TaskInstance records from the API.
Request is executed immediately
:param unicode priority: Retrieve the list of all Tasks in the workspace with the specified priority.
:param unicode assignment_status: Returns the list of all Tasks in the workspace with the specified AssignmentStatus.
:param unicode workflow_sid: Returns the list of Tasks that are being controlled by the Workflow with the specified Sid value.
:param unicode workflow_name: Returns the list of Tasks that are being controlled by the Workflow with the specified FriendlyName value.
:param unicode task_queue_sid: Returns the list of Tasks that are currently waiting in the TaskQueue identified by the Sid specified.
:param unicode task_queue_name: Returns the list of Tasks that are currently waiting in the TaskQueue identified by the FriendlyName specified.
:param unicode evaluate_task_attributes: Provide a task attributes expression, and this will return tasks which match the attributes.
:param unicode ordering: Use this parameter to control the order of the Tasks returned.
:param bool has_addons: The has_addons
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of TaskInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task.TaskPage
### Response:
def page(self, priority=values.unset, assignment_status=values.unset,
workflow_sid=values.unset, workflow_name=values.unset,
task_queue_sid=values.unset, task_queue_name=values.unset,
evaluate_task_attributes=values.unset, ordering=values.unset,
has_addons=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
params = values.of({
: priority,
: serialize.map(assignment_status, lambda e: e),
: workflow_sid,
: workflow_name,
: task_queue_sid,
: task_queue_name,
: evaluate_task_attributes,
: ordering,
: has_addons,
: page_token,
: page_number,
: page_size,
})
response = self._version.page(
,
self._uri,
params=params,
)
return TaskPage(self._version, response, self._solution) |
def dir_on_bezier_curve(P=[(0.0, 0.0)], t=0.5):
s right shoulder.
'
assert isinstance(P, list)
assert len(P) > 0
if not len(P) > 1:
return None
for p in P:
assert isinstance(p, tuple)
for i in p:
assert len(p) > 1
assert isinstance(i, float)
assert isinstance(t, float)
assert 0 <= t <= 1
O = len(P) - 1
Q = P
while O > 1:
Q = [pt_between_pts(Q[l], Q[l+1], t) for l in range(O)]
O -= 1
assert len(Q) == 2
q0 = Q[0]
q1 = Q[1]
return dir_between_pts(q0, q1) | Return direction at t on bezier curve defined by control points P.
List of vectors per pair of dimensions are returned in radians.
E.g. Where X is "right", Y is "up", Z is "in" on a computer screen, and
returned value is [pi/4, -pi/4], then the vector will be coming out the
screen over the viewer's right shoulder. | ### Input:
Return direction at t on bezier curve defined by control points P.
List of vectors per pair of dimensions are returned in radians.
E.g. Where X is "right", Y is "up", Z is "in" on a computer screen, and
returned value is [pi/4, -pi/4], then the vector will be coming out the
screen over the viewer's right shoulder.
### Response:
def dir_on_bezier_curve(P=[(0.0, 0.0)], t=0.5):
s right shoulder.
'
assert isinstance(P, list)
assert len(P) > 0
if not len(P) > 1:
return None
for p in P:
assert isinstance(p, tuple)
for i in p:
assert len(p) > 1
assert isinstance(i, float)
assert isinstance(t, float)
assert 0 <= t <= 1
O = len(P) - 1
Q = P
while O > 1:
Q = [pt_between_pts(Q[l], Q[l+1], t) for l in range(O)]
O -= 1
assert len(Q) == 2
q0 = Q[0]
q1 = Q[1]
return dir_between_pts(q0, q1) |
def get_periodicfeatures(
pfpickle,
lcbasedir,
outdir,
fourierorder=5,
transitparams=(-0.01,0.1,0.1),
ebparams=(-0.2,0.3,0.7,0.5),
pdiff_threshold=1.0e-4,
sidereal_threshold=1.0e-4,
sampling_peak_multiplier=5.0,
sampling_startp=None,
sampling_endp=None,
starfeatures=None,
timecols=None,
magcols=None,
errcols=None,
lcformat=,
lcformatdir=None,
sigclip=10.0,
verbose=True,
raiseonfail=False
):
bestbests
normalized periodogram peak over the sampling periodogram peak at the
same period required to accept the period as possibly real.
sampling_startp, sampling_endp : float
If the `pgramlist` doesns light curve and phase it with
this objectve stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file thatasymmetrics LC.
t figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can.gzlcfbasenameobjectidkwargskwargst exist, use the defaults from the lcformat def
if kwargs and in kwargs and timecols is None:
timecols = kwargs[]
elif not kwargs and not timecols:
timecols = dtimecols
if kwargs and in kwargs and magcols is None:
magcols = kwargs[]
elif not kwargs and not magcols:
magcols = dmagcols
if kwargs and in kwargs and errcols is None:
errcols = kwargs[]
elif not kwargs and not errcols:
errcols = derrcols
if not os.path.exists(lcfile):
LOGERROR("canrbclosestnbrlcfnameclosestnbrlcfnames not there, check for this file at the full LC location
elif os.path.exists(nbr_full_lcf):
nbrlcf = nbr_full_lcf
"its original directory: %s, or in this objectperiodicfeatures-%s.pkl -......winmethodbestperiodperiodicfeatures-%spfmethodsnot enough finite measurements in magcol: %s, for pfpickle: %s, skipping this magcolperiodicfeatures-%speriodicfeatures-%s.pkl -wbfailed to run for pf: %s, lcfile: %s' %
(pfpickle, lcfile))
if raiseonfail:
raise
else:
return None | This gets all periodic features for the object.
Parameters
----------
pfpickle : str
The period-finding result pickle containing period-finder results to use
for the calculation of LC fit, periodogram, and phased LC features.
lcbasedir : str
The base directory where the light curve for the current object is
located.
outdir : str
The output directory where the results will be written.
fourierorder : int
The Fourier order to use to generate sinusoidal function and fit that to
the phased light curve.
transitparams : list of floats
The transit depth, duration, and ingress duration to use to generate a
trapezoid planet transit model fit to the phased light curve. The period
used is the one provided in `period`, while the epoch is automatically
obtained from a spline fit to the phased light curve.
ebparams : list of floats
The primary eclipse depth, eclipse duration, the primary-secondary depth
ratio, and the phase of the secondary eclipse to use to generate an
eclipsing binary model fit to the phased light curve. The period used is
the one provided in `period`, while the epoch is automatically obtained
from a spline fit to the phased light curve.
pdiff_threshold : float
This is the max difference between periods to consider them the same.
sidereal_threshold : float
This is the max difference between any of the 'best' periods and the
sidereal day periods to consider them the same.
sampling_peak_multiplier : float
This is the minimum multiplicative factor of a 'best' period's
normalized periodogram peak over the sampling periodogram peak at the
same period required to accept the 'best' period as possibly real.
sampling_startp, sampling_endp : float
If the `pgramlist` doesn't have a time-sampling Lomb-Scargle
periodogram, it will be obtained automatically. Use these kwargs to
control the minimum and maximum period interval to be searched when
generating this periodogram.
starfeatures : str or None
If not None, this should be the filename of the
`starfeatures-<objectid>.pkl` created by
:py:func:`astrobase.lcproc.lcsfeatures.get_starfeatures` for this
object. This is used to get the neighbor's light curve and phase it with
this object's period to see if this object is blended.
timecols : list of str or None
The timecol keys to use from the lcdict in calculating the features.
magcols : list of str or None
The magcol keys to use from the lcdict in calculating the features.
errcols : list of str or None
The errcol keys to use from the lcdict in calculating the features.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
If this is provided, gives the path to a directory when you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
verbose : bool
If True, will indicate progress while working.
raiseonfail : bool
If True, will raise an Exception if something goes wrong.
Returns
-------
str
Returns a filename for the output pickle containing all of the periodic
features for the input object's LC. | ### Input:
This gets all periodic features for the object.
Parameters
----------
pfpickle : str
The period-finding result pickle containing period-finder results to use
for the calculation of LC fit, periodogram, and phased LC features.
lcbasedir : str
The base directory where the light curve for the current object is
located.
outdir : str
The output directory where the results will be written.
fourierorder : int
The Fourier order to use to generate sinusoidal function and fit that to
the phased light curve.
transitparams : list of floats
The transit depth, duration, and ingress duration to use to generate a
trapezoid planet transit model fit to the phased light curve. The period
used is the one provided in `period`, while the epoch is automatically
obtained from a spline fit to the phased light curve.
ebparams : list of floats
The primary eclipse depth, eclipse duration, the primary-secondary depth
ratio, and the phase of the secondary eclipse to use to generate an
eclipsing binary model fit to the phased light curve. The period used is
the one provided in `period`, while the epoch is automatically obtained
from a spline fit to the phased light curve.
pdiff_threshold : float
This is the max difference between periods to consider them the same.
sidereal_threshold : float
This is the max difference between any of the 'best' periods and the
sidereal day periods to consider them the same.
sampling_peak_multiplier : float
This is the minimum multiplicative factor of a 'best' period's
normalized periodogram peak over the sampling periodogram peak at the
same period required to accept the 'best' period as possibly real.
sampling_startp, sampling_endp : float
If the `pgramlist` doesn't have a time-sampling Lomb-Scargle
periodogram, it will be obtained automatically. Use these kwargs to
control the minimum and maximum period interval to be searched when
generating this periodogram.
starfeatures : str or None
If not None, this should be the filename of the
`starfeatures-<objectid>.pkl` created by
:py:func:`astrobase.lcproc.lcsfeatures.get_starfeatures` for this
object. This is used to get the neighbor's light curve and phase it with
this object's period to see if this object is blended.
timecols : list of str or None
The timecol keys to use from the lcdict in calculating the features.
magcols : list of str or None
The magcol keys to use from the lcdict in calculating the features.
errcols : list of str or None
The errcol keys to use from the lcdict in calculating the features.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
If this is provided, gives the path to a directory when you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
verbose : bool
If True, will indicate progress while working.
raiseonfail : bool
If True, will raise an Exception if something goes wrong.
Returns
-------
str
Returns a filename for the output pickle containing all of the periodic
features for the input object's LC.
### Response:
def get_periodicfeatures(
pfpickle,
lcbasedir,
outdir,
fourierorder=5,
transitparams=(-0.01,0.1,0.1),
ebparams=(-0.2,0.3,0.7,0.5),
pdiff_threshold=1.0e-4,
sidereal_threshold=1.0e-4,
sampling_peak_multiplier=5.0,
sampling_startp=None,
sampling_endp=None,
starfeatures=None,
timecols=None,
magcols=None,
errcols=None,
lcformat=,
lcformatdir=None,
sigclip=10.0,
verbose=True,
raiseonfail=False
):
bestbests
normalized periodogram peak over the sampling periodogram peak at the
same period required to accept the period as possibly real.
sampling_startp, sampling_endp : float
If the `pgramlist` doesns light curve and phase it with
this objectve stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file thatasymmetrics LC.
t figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can.gzlcfbasenameobjectidkwargskwargst exist, use the defaults from the lcformat def
if kwargs and in kwargs and timecols is None:
timecols = kwargs[]
elif not kwargs and not timecols:
timecols = dtimecols
if kwargs and in kwargs and magcols is None:
magcols = kwargs[]
elif not kwargs and not magcols:
magcols = dmagcols
if kwargs and in kwargs and errcols is None:
errcols = kwargs[]
elif not kwargs and not errcols:
errcols = derrcols
if not os.path.exists(lcfile):
LOGERROR("canrbclosestnbrlcfnameclosestnbrlcfnames not there, check for this file at the full LC location
elif os.path.exists(nbr_full_lcf):
nbrlcf = nbr_full_lcf
"its original directory: %s, or in this objectperiodicfeatures-%s.pkl -......winmethodbestperiodperiodicfeatures-%spfmethodsnot enough finite measurements in magcol: %s, for pfpickle: %s, skipping this magcolperiodicfeatures-%speriodicfeatures-%s.pkl -wbfailed to run for pf: %s, lcfile: %s' %
(pfpickle, lcfile))
if raiseonfail:
raise
else:
return None |
def cleanup(arg):
    """Clean up the input variable: coerce *arg* to a 2-D numpy array.

    Scalar or 1-D input is reshaped into a single (n, 1) column; 2-D
    input is returned unchanged.

    :raises ValueError: if ``arg`` has more than two dimensions
    """
    arg = numpy.asarray(arg)
    # Promote scalars/vectors to an (n, 1) column so callers can rely on 2-D.
    if len(arg.shape) <= 1:
        arg = arg.reshape(arg.size, 1)
    elif len(arg.shape) > 2:
        raise ValueError("shapes must be smaller than 3")
    # NOTE(review): the trailing "| ..." text below is dataset-table residue,
    # not Python code; preserved verbatim.
    return arg | Clean up the input variable. | ### Input:
Clean up the input variable.
### Response:
def cleanup(arg):
    """Clean up the input variable.

    Coerces *arg* to a numpy array and guarantees a 2-D result: scalars
    and 1-D input become a single (n, 1) column, 2-D input is returned
    unchanged, and higher-rank input is rejected.

    :raises ValueError: if the input has more than two dimensions
    """
    arr = numpy.asarray(arg)
    # Reject rank-3+ input up front; everything else can be normalized.
    if arr.ndim > 2:
        raise ValueError("shapes must be smaller than 3")
    if arr.ndim <= 1:
        arr = arr.reshape(arr.size, 1)
    return arr
def start_node(self):
    """Get start node.

    Resolves the node stored under ``self._start_node_id`` via a Cypher
    query. NOTE(review): the id is interpolated into the query text with
    ``str.format``; assumed safe because it is an internal integer id,
    but a bound query parameter would be preferable — confirm.

    :return: StructuredNode
    """
    # cypher_query returns (results, meta); [0][0][0] picks the first
    # column of the first row of the result set.
    return db.cypher_query("MATCH (aNode) "
                           "WHERE id(aNode)={nodeid} "
                           "RETURN aNode".format(nodeid=self._start_node_id),
                           resolve_objects = True)[0][0][0] | Get start node
:return: StructuredNode | ### Input:
Get start node
:return: StructuredNode
### Response:
def start_node(self):
    """Fetch and return the relationship's start node.

    :return: StructuredNode
    """
    # Adjacent string literals concatenate into one query before .format
    # interpolates the stored node id.
    query = ("MATCH (aNode) "
             "WHERE id(aNode)={nodeid} "
             "RETURN aNode").format(nodeid=self._start_node_id)
    # cypher_query yields (rows, meta); take the first column of the
    # first result row.
    rows = db.cypher_query(query, resolve_objects=True)[0]
    return rows[0][0]
def neighbor_gaia_features(objectinfo,
lclist_kdtree,
neighbor_radius_arcsec,
gaia_matchdist_arcsec=3.0,
verbose=True,
gaia_submit_timeout=10.0,
gaia_submit_tries=3,
gaia_max_timeout=180.0,
gaia_mirror=None,
complete_query_later=True,
search_simbad=False):
s light curve. This must
contain at least the following keys::
{: the right ascension of the object,
: the declination of the object}
lclist_kdtree : scipy.spatial.cKDTree object
This is a KD-Tree built on the Cartesian xyz coordinates from (ra, dec)
of all objects in the same field as this object. It is similar to that
produced by :py:func:`astrobase.lcproc.catalogs.make_lclist`, and is
used to carry out the spatial search required to find neighbors for this
object.
neighbor_radius_arcsec : float
The maximum radius in arcseconds around this object to search for
neighbors in both the light curve catalog and in the GAIA DR2 catalog.
gaia_matchdist_arcsec : float
The maximum distance in arcseconds to use for a GAIA cross-match to this
object.
verbose : bool
If True, indicates progress and warns of problems.
gaia_submit_timeout : float
Sets the timeout in seconds to use when submitting a request to look up
the objects information. If `fast_mode` is set, this is
ignored, and the services will be contacted only once (meaning that a
failure to respond will be silently ignored and no GAIA data will be
added to the checkplots information. Note that if
`fast_mode` is set, this is ignored.
gaia_mirror : str
This sets the GAIA mirror to use. This is a key in the
`services.gaia.GAIA_URLS` dict which defines the URLs to hit for each
mirror.
search_simbad : bool
If this is True, searches for objects in SIMBAD at this objects SIMBAD main ID, type, and stellar
classification if available.
Returns
-------
dict
Returns a dict with neighbor, GAIA, and SIMBAD features.
radeclradeclradeclneighborsnbrindicesdistarcsecclosestdistarcsecclosestdistnbrindsearchradarcsecneighborsnbrindicesdistarcsecclosestdistarcsecclosestdistnbrindsearchradarcsect get observed neighbors")
resultdict = {
:np.nan,
:np.array([]),
:np.array([]),
:np.nan,
:np.array([]),
:neighbor_radius_arcsec,
}
if ( in objectinfo and in objectinfo and
objectinfo[] is not None and objectinfo[] is not None):
gaia_result = gaia.objectlist_conesearch(
objectinfo[],
objectinfo[],
neighbor_radius_arcsec,
verbose=verbose,
timeout=gaia_submit_timeout,
maxtimeout=gaia_max_timeout,
maxtries=gaia_submit_tries,
gaia_mirror=gaia_mirror,
complete_query_later=complete_query_later
)
if gaia_result:
gaia_objlistf = gaia_result[]
with gzip.open(gaia_objlistf,) as infd:
try:
gaia_objlist = np.genfromtxt(
infd,
names=True,
delimiter=,
dtype=,
usecols=(0,1,2,3,4,5,6,7,8,9,10,11,12)
)
except Exception as e:
gaia_objlist = []
gaia_objlist = np.atleast_1d(gaia_objlist)
if gaia_objlist.size > 0:
stampres = skyview.get_stamp(objectinfo[],
objectinfo[])
if (stampres and
in stampres and
stampres[] is not None and
os.path.exists(stampres[])):
stampwcs = WCS(stampres[])
gaia_xypos = stampwcs.all_world2pix(
np.column_stack((gaia_objlist[],
gaia_objlist[])),
1
)
else:
gaia_xypos = None
if gaia_objlist[][0] < gaia_matchdist_arcsec:
if gaia_objlist.size > 1:
gaia_nneighbors = gaia_objlist[1:].size
gaia_status = (
%
gaia_nneighbors
)
gaia_ids = gaia_objlist[]
gaia_mags = gaia_objlist[]
gaia_parallaxes = gaia_objlist[]
gaia_parallax_errs = gaia_objlist[]
gaia_pmra = gaia_objlist[]
gaia_pmra_err = gaia_objlist[]
gaia_pmdecl = gaia_objlist[]
gaia_pmdecl_err = gaia_objlist[]
gaia_absolute_mags = magnitudes.absolute_gaia_magnitude(
gaia_mags, gaia_parallaxes
)
if ( in objectinfo and
objectinfo[] is not None and
np.isfinite(objectinfo[])):
gaiak_colors = gaia_mags - objectinfo[]
else:
gaiak_colors = None
gaia_dists = gaia_objlist[]
gaia_closest_distarcsec = gaia_objlist[][1]
gaia_closest_gmagdiff = (
gaia_objlist[][0] -
gaia_objlist[][1]
)
else:
LOGWARNING(
% (objectinfo[],
objectinfo[]))
gaia_nneighbors = 0
gaia_status = (
)
gaia_ids = gaia_objlist[]
gaia_mags = gaia_objlist[]
gaia_parallaxes = gaia_objlist[]
gaia_parallax_errs = gaia_objlist[]
gaia_pmra = gaia_objlist[]
gaia_pmra_err = gaia_objlist[]
gaia_pmdecl = gaia_objlist[]
gaia_pmdecl_err = gaia_objlist[]
gaia_absolute_mags = magnitudes.absolute_gaia_magnitude(
gaia_mags, gaia_parallaxes
)
if ( in objectinfo and
objectinfo[] is not None and
np.isfinite(objectinfo[])):
gaiak_colors = gaia_mags - objectinfo[]
else:
gaiak_colors = None
gaia_dists = gaia_objlist[]
gaia_closest_distarcsec = np.nan
gaia_closest_gmagdiff = np.nan
resultdict.update(
{:,
:np.nan,
:None,
:None,
:None,
:None,
:None,
:None,
:None,
:None,
:None,
:None,
:None,
:None,
:np.nan,
:np.nan}
)
if ( in objectinfo and in objectinfo and
objectinfo[] is not None and objectinfo[] is not None and
search_simbad):
simbad_result = simbad.objectnames_conesearch(
objectinfo[],
objectinfo[],
neighbor_radius_arcsec,
verbose=verbose,
timeout=gaia_submit_timeout,
maxtimeout=gaia_max_timeout,
maxtries=gaia_submit_tries,
complete_query_later=complete_query_later
)
else:
simbad_result = None
if (simbad_result and
simbad_result[] and
os.path.exists(simbad_result[])):
with gzip.open(simbad_result[],) as infd:
try:
simbad_objectnames = np.genfromtxt(
infd,
names=True,
delimiter=,
dtype=,
usecols=(0,1,2,3,4,5,6,7,8),
comments=,
)
except Exception as e:
simbad_objectnames = []
simbad_objectnames = np.atleast_1d(simbad_objectnames)
if simbad_objectnames.size > 0:
simbad_mainid = simbad_objectnames[].tolist()
simbad_allids = simbad_objectnames[].tolist()
simbad_objtype = simbad_objectnames[].tolist()
simbad_distarcsec = simbad_objectnames[].tolist()
simbad_nmatches = len(simbad_mainid)
simbad_mainid = [x.replace(,) for x in simbad_mainid]
simbad_allids = [x.replace(,) for x in simbad_allids]
simbad_objtype = [x.replace(,) for x in simbad_objtype]
resultdict.update({
:simbad_nmatches,
:simbad_mainid,
:simbad_objtype,
:simbad_allids,
:simbad_distarcsec
})
if simbad_nmatches > 1:
resultdict[] = (
)
else:
resultdict[] =
if simbad_distarcsec[0] < gaia_matchdist_arcsec:
resultdict.update({
:simbad_mainid[0],
:simbad_objtype[0],
:simbad_allids[0],
:simbad_distarcsec[0],
:
})
else:
LOGWARNING(
%
(gaia_matchdist_arcsec,
objectinfo[],
objectinfo[],
simbad_mainid[0],
simbad_distarcsec[0]))
simbad_status = (
%
(gaia_matchdist_arcsec,
simbad_distarcsec[0]))
resultdict.update({
:None,
:None,
:None,
:None,
:simbad_status
})
else:
resultdict.update({
:,
:None,
:None,
:None,
:None,
:None,
:None,
:None,
:None,
:None,
})
else:
if search_simbad:
simbad_status =
else:
simbad_status =
resultdict.update({
:simbad_status,
:None,
:None,
:None,
:None,
:None,
:None,
:None,
:None,
:None,
})
return resultdict | Gets several neighbor, GAIA, and SIMBAD features:
From the KD-Tree in the given light curve catalog the object is in:
`lclist_kdtree`:
- distance to closest neighbor in arcsec
- total number of all neighbors within 2 x `neighbor_radius_arcsec`
From the GAIA DR2 catalog:
- distance to closest neighbor in arcsec
- total number of all neighbors within 2 x `neighbor_radius_arcsec`
- gets the parallax for the object and neighbors
- calculates the absolute GAIA mag and `G-K` color for use in CMDs
- gets the proper motion in RA/Dec if available
From the SIMBAD catalog:
- the name of the object
- the type of the object
Parameters
----------
objectinfo : dict
This is the objectinfo dict from an object's light curve. This must
contain at least the following keys::
{'ra': the right ascension of the object,
'decl': the declination of the object}
lclist_kdtree : scipy.spatial.cKDTree object
This is a KD-Tree built on the Cartesian xyz coordinates from (ra, dec)
of all objects in the same field as this object. It is similar to that
produced by :py:func:`astrobase.lcproc.catalogs.make_lclist`, and is
used to carry out the spatial search required to find neighbors for this
object.
neighbor_radius_arcsec : float
The maximum radius in arcseconds around this object to search for
neighbors in both the light curve catalog and in the GAIA DR2 catalog.
gaia_matchdist_arcsec : float
The maximum distance in arcseconds to use for a GAIA cross-match to this
object.
verbose : bool
If True, indicates progress and warns of problems.
gaia_submit_timeout : float
Sets the timeout in seconds to use when submitting a request to look up
the object's information to the GAIA service. Note that if `fast_mode`
is set, this is ignored.
gaia_submit_tries : int
Sets the maximum number of times the GAIA services will be contacted to
obtain this object's information. If `fast_mode` is set, this is
ignored, and the services will be contacted only once (meaning that a
failure to respond will be silently ignored and no GAIA data will be
added to the checkplot's objectinfo dict).
gaia_max_timeout : float
Sets the timeout in seconds to use when waiting for the GAIA service to
respond to our request for the object's information. Note that if
`fast_mode` is set, this is ignored.
gaia_mirror : str
This sets the GAIA mirror to use. This is a key in the
`services.gaia.GAIA_URLS` dict which defines the URLs to hit for each
mirror.
search_simbad : bool
If this is True, searches for objects in SIMBAD at this object's
location and gets the object's SIMBAD main ID, type, and stellar
classification if available.
Returns
-------
dict
Returns a dict with neighbor, GAIA, and SIMBAD features. | ### Input:
Gets several neighbor, GAIA, and SIMBAD features:
From the KD-Tree in the given light curve catalog the object is in:
`lclist_kdtree`:
- distance to closest neighbor in arcsec
- total number of all neighbors within 2 x `neighbor_radius_arcsec`
From the GAIA DR2 catalog:
- distance to closest neighbor in arcsec
- total number of all neighbors within 2 x `neighbor_radius_arcsec`
- gets the parallax for the object and neighbors
- calculates the absolute GAIA mag and `G-K` color for use in CMDs
- gets the proper motion in RA/Dec if available
From the SIMBAD catalog:
- the name of the object
- the type of the object
Parameters
----------
objectinfo : dict
This is the objectinfo dict from an object's light curve. This must
contain at least the following keys::
{'ra': the right ascension of the object,
'decl': the declination of the object}
lclist_kdtree : scipy.spatial.cKDTree object
This is a KD-Tree built on the Cartesian xyz coordinates from (ra, dec)
of all objects in the same field as this object. It is similar to that
produced by :py:func:`astrobase.lcproc.catalogs.make_lclist`, and is
used to carry out the spatial search required to find neighbors for this
object.
neighbor_radius_arcsec : float
The maximum radius in arcseconds around this object to search for
neighbors in both the light curve catalog and in the GAIA DR2 catalog.
gaia_matchdist_arcsec : float
The maximum distance in arcseconds to use for a GAIA cross-match to this
object.
verbose : bool
If True, indicates progress and warns of problems.
gaia_submit_timeout : float
Sets the timeout in seconds to use when submitting a request to look up
the object's information to the GAIA service. Note that if `fast_mode`
is set, this is ignored.
gaia_submit_tries : int
Sets the maximum number of times the GAIA services will be contacted to
obtain this object's information. If `fast_mode` is set, this is
ignored, and the services will be contacted only once (meaning that a
failure to respond will be silently ignored and no GAIA data will be
added to the checkplot's objectinfo dict).
gaia_max_timeout : float
Sets the timeout in seconds to use when waiting for the GAIA service to
respond to our request for the object's information. Note that if
`fast_mode` is set, this is ignored.
gaia_mirror : str
This sets the GAIA mirror to use. This is a key in the
`services.gaia.GAIA_URLS` dict which defines the URLs to hit for each
mirror.
search_simbad : bool
If this is True, searches for objects in SIMBAD at this object's
location and gets the object's SIMBAD main ID, type, and stellar
classification if available.
Returns
-------
dict
Returns a dict with neighbor, GAIA, and SIMBAD features.
### Response:
def neighbor_gaia_features(objectinfo,
lclist_kdtree,
neighbor_radius_arcsec,
gaia_matchdist_arcsec=3.0,
verbose=True,
gaia_submit_timeout=10.0,
gaia_submit_tries=3,
gaia_max_timeout=180.0,
gaia_mirror=None,
complete_query_later=True,
search_simbad=False):
s light curve. This must
contain at least the following keys::
{: the right ascension of the object,
: the declination of the object}
lclist_kdtree : scipy.spatial.cKDTree object
This is a KD-Tree built on the Cartesian xyz coordinates from (ra, dec)
of all objects in the same field as this object. It is similar to that
produced by :py:func:`astrobase.lcproc.catalogs.make_lclist`, and is
used to carry out the spatial search required to find neighbors for this
object.
neighbor_radius_arcsec : float
The maximum radius in arcseconds around this object to search for
neighbors in both the light curve catalog and in the GAIA DR2 catalog.
gaia_matchdist_arcsec : float
The maximum distance in arcseconds to use for a GAIA cross-match to this
object.
verbose : bool
If True, indicates progress and warns of problems.
gaia_submit_timeout : float
Sets the timeout in seconds to use when submitting a request to look up
the objects information. If `fast_mode` is set, this is
ignored, and the services will be contacted only once (meaning that a
failure to respond will be silently ignored and no GAIA data will be
added to the checkplots information. Note that if
`fast_mode` is set, this is ignored.
gaia_mirror : str
This sets the GAIA mirror to use. This is a key in the
`services.gaia.GAIA_URLS` dict which defines the URLs to hit for each
mirror.
search_simbad : bool
If this is True, searches for objects in SIMBAD at this objects SIMBAD main ID, type, and stellar
classification if available.
Returns
-------
dict
Returns a dict with neighbor, GAIA, and SIMBAD features.
radeclradeclradeclneighborsnbrindicesdistarcsecclosestdistarcsecclosestdistnbrindsearchradarcsecneighborsnbrindicesdistarcsecclosestdistarcsecclosestdistnbrindsearchradarcsect get observed neighbors")
resultdict = {
:np.nan,
:np.array([]),
:np.array([]),
:np.nan,
:np.array([]),
:neighbor_radius_arcsec,
}
if ( in objectinfo and in objectinfo and
objectinfo[] is not None and objectinfo[] is not None):
gaia_result = gaia.objectlist_conesearch(
objectinfo[],
objectinfo[],
neighbor_radius_arcsec,
verbose=verbose,
timeout=gaia_submit_timeout,
maxtimeout=gaia_max_timeout,
maxtries=gaia_submit_tries,
gaia_mirror=gaia_mirror,
complete_query_later=complete_query_later
)
if gaia_result:
gaia_objlistf = gaia_result[]
with gzip.open(gaia_objlistf,) as infd:
try:
gaia_objlist = np.genfromtxt(
infd,
names=True,
delimiter=,
dtype=,
usecols=(0,1,2,3,4,5,6,7,8,9,10,11,12)
)
except Exception as e:
gaia_objlist = []
gaia_objlist = np.atleast_1d(gaia_objlist)
if gaia_objlist.size > 0:
stampres = skyview.get_stamp(objectinfo[],
objectinfo[])
if (stampres and
in stampres and
stampres[] is not None and
os.path.exists(stampres[])):
stampwcs = WCS(stampres[])
gaia_xypos = stampwcs.all_world2pix(
np.column_stack((gaia_objlist[],
gaia_objlist[])),
1
)
else:
gaia_xypos = None
if gaia_objlist[][0] < gaia_matchdist_arcsec:
if gaia_objlist.size > 1:
gaia_nneighbors = gaia_objlist[1:].size
gaia_status = (
%
gaia_nneighbors
)
gaia_ids = gaia_objlist[]
gaia_mags = gaia_objlist[]
gaia_parallaxes = gaia_objlist[]
gaia_parallax_errs = gaia_objlist[]
gaia_pmra = gaia_objlist[]
gaia_pmra_err = gaia_objlist[]
gaia_pmdecl = gaia_objlist[]
gaia_pmdecl_err = gaia_objlist[]
gaia_absolute_mags = magnitudes.absolute_gaia_magnitude(
gaia_mags, gaia_parallaxes
)
if ( in objectinfo and
objectinfo[] is not None and
np.isfinite(objectinfo[])):
gaiak_colors = gaia_mags - objectinfo[]
else:
gaiak_colors = None
gaia_dists = gaia_objlist[]
gaia_closest_distarcsec = gaia_objlist[][1]
gaia_closest_gmagdiff = (
gaia_objlist[][0] -
gaia_objlist[][1]
)
else:
LOGWARNING(
% (objectinfo[],
objectinfo[]))
gaia_nneighbors = 0
gaia_status = (
)
gaia_ids = gaia_objlist[]
gaia_mags = gaia_objlist[]
gaia_parallaxes = gaia_objlist[]
gaia_parallax_errs = gaia_objlist[]
gaia_pmra = gaia_objlist[]
gaia_pmra_err = gaia_objlist[]
gaia_pmdecl = gaia_objlist[]
gaia_pmdecl_err = gaia_objlist[]
gaia_absolute_mags = magnitudes.absolute_gaia_magnitude(
gaia_mags, gaia_parallaxes
)
if ( in objectinfo and
objectinfo[] is not None and
np.isfinite(objectinfo[])):
gaiak_colors = gaia_mags - objectinfo[]
else:
gaiak_colors = None
gaia_dists = gaia_objlist[]
gaia_closest_distarcsec = np.nan
gaia_closest_gmagdiff = np.nan
resultdict.update(
{:,
:np.nan,
:None,
:None,
:None,
:None,
:None,
:None,
:None,
:None,
:None,
:None,
:None,
:None,
:np.nan,
:np.nan}
)
if ( in objectinfo and in objectinfo and
objectinfo[] is not None and objectinfo[] is not None and
search_simbad):
simbad_result = simbad.objectnames_conesearch(
objectinfo[],
objectinfo[],
neighbor_radius_arcsec,
verbose=verbose,
timeout=gaia_submit_timeout,
maxtimeout=gaia_max_timeout,
maxtries=gaia_submit_tries,
complete_query_later=complete_query_later
)
else:
simbad_result = None
if (simbad_result and
simbad_result[] and
os.path.exists(simbad_result[])):
with gzip.open(simbad_result[],) as infd:
try:
simbad_objectnames = np.genfromtxt(
infd,
names=True,
delimiter=,
dtype=,
usecols=(0,1,2,3,4,5,6,7,8),
comments=,
)
except Exception as e:
simbad_objectnames = []
simbad_objectnames = np.atleast_1d(simbad_objectnames)
if simbad_objectnames.size > 0:
simbad_mainid = simbad_objectnames[].tolist()
simbad_allids = simbad_objectnames[].tolist()
simbad_objtype = simbad_objectnames[].tolist()
simbad_distarcsec = simbad_objectnames[].tolist()
simbad_nmatches = len(simbad_mainid)
simbad_mainid = [x.replace(,) for x in simbad_mainid]
simbad_allids = [x.replace(,) for x in simbad_allids]
simbad_objtype = [x.replace(,) for x in simbad_objtype]
resultdict.update({
:simbad_nmatches,
:simbad_mainid,
:simbad_objtype,
:simbad_allids,
:simbad_distarcsec
})
if simbad_nmatches > 1:
resultdict[] = (
)
else:
resultdict[] =
if simbad_distarcsec[0] < gaia_matchdist_arcsec:
resultdict.update({
:simbad_mainid[0],
:simbad_objtype[0],
:simbad_allids[0],
:simbad_distarcsec[0],
:
})
else:
LOGWARNING(
%
(gaia_matchdist_arcsec,
objectinfo[],
objectinfo[],
simbad_mainid[0],
simbad_distarcsec[0]))
simbad_status = (
%
(gaia_matchdist_arcsec,
simbad_distarcsec[0]))
resultdict.update({
:None,
:None,
:None,
:None,
:simbad_status
})
else:
resultdict.update({
:,
:None,
:None,
:None,
:None,
:None,
:None,
:None,
:None,
:None,
})
else:
if search_simbad:
simbad_status =
else:
simbad_status =
resultdict.update({
:simbad_status,
:None,
:None,
:None,
:None,
:None,
:None,
:None,
:None,
:None,
})
return resultdict |
def _need_update(self, project_name, updatetime=None, md5sum=None):
if project_name not in self.projects:
return True
elif md5sum and md5sum != self.projects[project_name][].get():
return True
elif updatetime and updatetime > self.projects[project_name][].get(, 0):
return True
elif time.time() - self.projects[project_name][] > self.RELOAD_PROJECT_INTERVAL:
return True
return False | Check if project_name need update | ### Input:
Check if project_name need update
### Response:
def _need_update(self, project_name, updatetime=None, md5sum=None):
if project_name not in self.projects:
return True
elif md5sum and md5sum != self.projects[project_name][].get():
return True
elif updatetime and updatetime > self.projects[project_name][].get(, 0):
return True
elif time.time() - self.projects[project_name][] > self.RELOAD_PROJECT_INTERVAL:
return True
return False |
def save(self, path=None):
    """Serialize this context to JSON and write it to *path*.

    :type path: str
    :param path: destination file; defaults to
        ``self._PATH_API_CONTEXT_DEFAULT`` when None
    :rtype: None
    """
    if path is None:
        path = self._PATH_API_CONTEXT_DEFAULT
    # File mode comes from the class constant (presumably a write mode —
    # confirm against the class definition).
    with open(path, self._FILE_MODE_WRITE) as file_:
        file_.write(self.to_json()) | :type path: str
:rtype: None | ### Input:
:type path: str
:rtype: None
### Response:
def save(self, path=None):
    """Write this context's JSON representation to a file.

    :type path: str
    :param path: target file; ``self._PATH_API_CONTEXT_DEFAULT`` is used
        when omitted
    :rtype: None
    """
    target = self._PATH_API_CONTEXT_DEFAULT if path is None else path
    # The context manager guarantees the handle is closed after writing.
    with open(target, self._FILE_MODE_WRITE) as out:
        out.write(self.to_json())
def map_element(self, obj, name, event):
    """Handles mapping elements to diagram components.

    Trait-change handler: for each element added to a containment trait,
    lay out a matching graphviz node and add it to the diagram canvas;
    for each removed element, delete its canvas component.
    """
    canvas = self.diagram.diagram_canvas
    parser = XDotParser()
    for element in event.added:
        logger.debug("Mapping new element [%s] to diagram node" % element)
        for node_mapping in self.nodes:
            # Drop the last 6 chars of the trait name (presumably the
            # "_items" suffix of a list-event trait) — confirm.
            ct = name[:-6]
            if node_mapping.containment_trait == ct:
                dot_attrs = node_mapping.dot_node
                dot = Dot()
                # Use the element's id() as a unique graph-node name.
                graph_node = Node(str(id(element)))
                self._style_node(graph_node, dot_attrs)
                dot.add_node(graph_node)
                # Run graphviz to get xdot output carrying layout info.
                xdot = graph_from_dot_data(dot.create(self.program,"xdot"))
                diagram_nodes = parser.parse_nodes(xdot)
                for dn in diagram_nodes:
                    if dn is not None:
                        dn.element = element
                        for tool in node_mapping.tools:
                            dn.tools.append(tool(dn))
                        canvas.add(dn)
                canvas.request_redraw()
    for element in event.removed:
        logger.debug("Unmapping element [%s] from diagram" % element)
        for component in canvas.components:
            if element == component.element:
                canvas.remove(component)
                canvas.request_redraw()
                # Each element maps to at most one canvas component.
                break | Handles mapping elements to diagram components
Handles mapping elements to diagram components
### Response:
def map_element(self, obj, name, event):
    """Handles mapping elements to diagram components.

    Reacts to a Traits container event: added elements are rendered via
    graphviz and placed on the canvas; removed elements have their
    matching component deleted.
    """
    canvas = self.diagram.diagram_canvas
    parser = XDotParser()
    for element in event.added:
        logger.debug("Mapping new element [%s] to diagram node" % element)
        for node_mapping in self.nodes:
            # Strip the final 6 characters of the trait name (assumed to
            # be "_items" — TODO confirm).
            ct = name[:-6]
            if node_mapping.containment_trait == ct:
                dot_attrs = node_mapping.dot_node
                dot = Dot()
                # id() of the element serves as the dot node identifier.
                graph_node = Node(str(id(element)))
                self._style_node(graph_node, dot_attrs)
                dot.add_node(graph_node)
                # graphviz produces xdot data containing positions.
                xdot = graph_from_dot_data(dot.create(self.program,"xdot"))
                diagram_nodes = parser.parse_nodes(xdot)
                for dn in diagram_nodes:
                    if dn is not None:
                        dn.element = element
                        for tool in node_mapping.tools:
                            dn.tools.append(tool(dn))
                        canvas.add(dn)
                canvas.request_redraw()
    for element in event.removed:
        logger.debug("Unmapping element [%s] from diagram" % element)
        for component in canvas.components:
            if element == component.element:
                canvas.remove(component)
                canvas.request_redraw()
                # Only one component per element is expected.
                break |
def get(self):
self._user = self._client.get(
url=self._client.get_full_url(
self.get_path(
, realm=self._realm_name, user_id=self._user_id
)
)
)
self._user_id = self.user["id"]
return self._user | Return registered user with the given user id.
http://www.keycloak.org/docs-api/3.4/rest-api/index.html#_users_resource | ### Input:
Return registered user with the given user id.
http://www.keycloak.org/docs-api/3.4/rest-api/index.html#_users_resource
### Response:
def get(self):
self._user = self._client.get(
url=self._client.get_full_url(
self.get_path(
, realm=self._realm_name, user_id=self._user_id
)
)
)
self._user_id = self.user["id"]
return self._user |
def set_attributes(self, **kwargs):
    """Set the resource attributes from the kwargs.

    Only sets items in the ``self.Meta.attributes`` white list.
    Subresource keys are delegated to ``set_subresources`` and removed
    before plain attribute assignment.

    :param kwargs: keyword arguments passed into the init of this class
    """
    if self._subresource_map:
        self.set_subresources(**kwargs)
        # Subresources were handled above; don't also set them as attrs.
        for key in self._subresource_map.keys():
            kwargs.pop(key, None)
    for field, value in kwargs.items():
        if field in self.Meta.attributes:
            setattr(self, field, value) | Set the resource attributes from the kwargs.
Only sets items in the `self.Meta.attributes` white list.
Subclass this method to customise attributes.
Args:
kwargs: Keyword arguments passed into the init of this class
Set the resource attributes from the kwargs.
Only sets items in the `self.Meta.attributes` white list.
Subclass this method to customise attributes.
Args:
kwargs: Keyword arguments passed into the init of this class
### Response:
def set_attributes(self, **kwargs):
    """Assign whitelisted resource attributes from keyword arguments.

    Keys present in ``self._subresource_map`` are forwarded to
    ``set_subresources`` and stripped out; the remaining keys are set as
    attributes only when listed in ``self.Meta.attributes``.

    :param kwargs: keyword arguments passed into the init of this class
    """
    if self._subresource_map:
        self.set_subresources(**kwargs)
        # Remove subresource keys so they are not set twice.
        for key in self._subresource_map:
            kwargs.pop(key, None)
    allowed = self.Meta.attributes
    for name, value in kwargs.items():
        if name in allowed:
            setattr(self, name, value)
def _parse_organism_tag(self):
organism_name_types = UniProtACEntry.organism_name_types
self.organisms = []
organism_tags = [child for child in self.entry_tag.childNodes if child.nodeType == child.ELEMENT_NODE and child.tagName == ]
assert(len(organism_tags) == 1)
for organism_tag in organism_tags:
names = dict.fromkeys(organism_name_types, None)
for name_tag in [child for child in organism_tag.childNodes if child.nodeType == child.ELEMENT_NODE and child.tagName == ]:
name_type = name_tag.getAttribute("type")
assert(name_type in organism_name_types)
names[name_type] = name_tag.firstChild.nodeValue.strip()
assert(names.get())
self.organisms.append(names) | Parses the protein tag to get the names and EC numbers. | ### Input:
Parses the protein tag to get the names and EC numbers.
### Response:
def _parse_organism_tag(self):
organism_name_types = UniProtACEntry.organism_name_types
self.organisms = []
organism_tags = [child for child in self.entry_tag.childNodes if child.nodeType == child.ELEMENT_NODE and child.tagName == ]
assert(len(organism_tags) == 1)
for organism_tag in organism_tags:
names = dict.fromkeys(organism_name_types, None)
for name_tag in [child for child in organism_tag.childNodes if child.nodeType == child.ELEMENT_NODE and child.tagName == ]:
name_type = name_tag.getAttribute("type")
assert(name_type in organism_name_types)
names[name_type] = name_tag.firstChild.nodeValue.strip()
assert(names.get())
self.organisms.append(names) |
def getOpenFileName(*args):
    """Normalizes the getOpenFileName method between the different Qt
    wrappers.

    :return: (<str> filename, <bool> accepted)
    """
    result = QtGui.QFileDialog.getOpenFileName(*args)
    # Some wrappers return a bare filename string; wrap it so callers
    # always get a 2-tuple. An empty filename means the dialog was
    # cancelled, hence bool(result) as the "accepted" flag.
    if type(result) is not tuple:
        return result, bool(result)
    else:
        # NOTE(review): tuple results are passed through unchanged — the
        # second item may be the selected filter rather than a bool;
        # confirm against the Qt wrapper in use.
        return result | Normalizes the getOpenFileName method between the different Qt
wrappers.
:return (<str> filename, <bool> accepted) | ### Input:
Normalizes the getOpenFileName method between the different Qt
wrappers.
:return (<str> filename, <bool> accepted)
### Response:
def getOpenFileName(*args):
    """Normalize ``QFileDialog.getOpenFileName`` across Qt wrappers.

    Wrappers that return a plain filename string are padded out to a
    2-tuple; wrappers that already return a tuple are passed through.

    :return: (<str> filename, <bool> accepted)
    """
    result = QtGui.QFileDialog.getOpenFileName(*args)
    # Exact type check on purpose: mirror the wrapper's raw return shape.
    if type(result) is tuple:
        return result
    # An empty filename signals a cancelled dialog.
    return result, bool(result)
def execute(self, bounds=None, jacobian=None, hessian=None, constraints=None, **minimize_options):
    """Calls the wrapped algorithm via :func:`scipy.optimize.minimize`.

    :param bounds: bounds for the parameters, or None.
    :param jacobian: callable Jacobian passed as ``jac``, or None.
    :param hessian: callable Hessian passed as ``hess``, or None.
    :param constraints: optimizer constraints, or None.
    :param minimize_options: extra keywords forwarded to
        :func:`scipy.optimize.minimize`.
    :return: the result of ``self._pack_output`` on the raw scipy answer.
    """
    ans = minimize(
        self.objective,
        self.initial_guesses,
        method=self.method_name(),
        bounds=bounds,
        constraints=constraints,
        jac=jacobian,
        hess=hessian,
        **minimize_options
    )
    return self._pack_output(ans) | Calls the wrapped algorithm.
:param bounds: The bounds for the parameters. Usually filled by
:class:`~symfit.core.minimizers.BoundedMinimizer`.
:param jacobian: The Jacobian. Usually filled by
:class:`~symfit.core.minimizers.ScipyGradientMinimize`.
:param \*\*minimize_options: Further keywords to pass to
:func:`scipy.optimize.minimize`. Note that your `method` will
usually be filled by a specific subclass. | ### Input:
Calls the wrapped algorithm.
:param bounds: The bounds for the parameters. Usually filled by
:class:`~symfit.core.minimizers.BoundedMinimizer`.
:param jacobian: The Jacobian. Usually filled by
:class:`~symfit.core.minimizers.ScipyGradientMinimize`.
:param \*\*minimize_options: Further keywords to pass to
:func:`scipy.optimize.minimize`. Note that your `method` will
usually be filled by a specific subclass.
### Response:
def execute(self, bounds=None, jacobian=None, hessian=None, constraints=None, **minimize_options):
    """Run the wrapped :func:`scipy.optimize.minimize` algorithm.

    :param bounds: parameter bounds, or None.
    :param jacobian: Jacobian callable forwarded as ``jac``, or None.
    :param hessian: Hessian callable forwarded as ``hess``, or None.
    :param constraints: optimizer constraints, or None.
    :param minimize_options: additional keywords forwarded verbatim to
        :func:`scipy.optimize.minimize`.
    :return: ``self._pack_output`` applied to scipy's raw result.
    """
    fit_result = minimize(
        self.objective,
        self.initial_guesses,
        method=self.method_name(),
        jac=jacobian,
        hess=hessian,
        bounds=bounds,
        constraints=constraints,
        **minimize_options
    )
    return self._pack_output(fit_result)
def _retrieve_users(self):
users_url = self._build_url()
response = self._request(, users_url)
users = response.json()
return users | Retrieve user objects of the entire administration.
:return: list of dictionary with users information
:rtype: list(dict)
------- | ### Input:
Retrieve user objects of the entire administration.
:return: list of dictionary with users information
:rtype: list(dict)
-------
### Response:
def _retrieve_users(self):
users_url = self._build_url()
response = self._request(, users_url)
users = response.json()
return users |
def detect_proc():
pid = os.getpid()
for name in (, ):
if os.path.exists(os.path.join(, str(pid), name)):
return name
raise ProcFormatError() | Detect /proc filesystem style.
This checks the /proc/{pid} directory for possible formats. Returns one of
the followings as str:
* `stat`: Linux-style, i.e. ``/proc/{pid}/stat``.
* `status`: BSD-style, i.e. ``/proc/{pid}/status``. | ### Input:
Detect /proc filesystem style.
This checks the /proc/{pid} directory for possible formats. Returns one of
the followings as str:
* `stat`: Linux-style, i.e. ``/proc/{pid}/stat``.
* `status`: BSD-style, i.e. ``/proc/{pid}/status``.
### Response:
def detect_proc():
pid = os.getpid()
for name in (, ):
if os.path.exists(os.path.join(, str(pid), name)):
return name
raise ProcFormatError() |
def home(request):
"Simple homepage view."
context = {}
if request.user.is_authenticated():
try:
access = request.user.accountaccess_set.all()[0]
except IndexError:
access = None
else:
client = access.api_client
context[] = client.get_profile_info(raw_token=access.access_token)
return render(request, , context) | Simple homepage view. | ### Input:
Simple homepage view.
### Response:
def home(request):
"Simple homepage view."
context = {}
if request.user.is_authenticated():
try:
access = request.user.accountaccess_set.all()[0]
except IndexError:
access = None
else:
client = access.api_client
context[] = client.get_profile_info(raw_token=access.access_token)
return render(request, , context) |
def getfigsize(plotman):
xmin = plotman.grid.grid[].min()
xmax = plotman.grid.grid[].max()
zmin = plotman.grid.grid[].min()
zmax = plotman.grid.grid[].max()
if np.abs(zmax - zmin) < np.abs(xmax - xmin):
sizex = 10 / 2.54
sizez = 1.2 * sizex * (np.abs(zmax - zmin) / np.abs(xmax - xmin))
else:
sizez = 10 / 2.54
sizex = sizez * (np.abs(xmax - xmin) / np.abs(zmax - zmin))
sizex += 1.3
sizez += 1
return sizex, sizez | calculate appropriate sizes for the subfigures | ### Input:
calculate appropriate sizes for the subfigures
### Response:
def getfigsize(plotman):
xmin = plotman.grid.grid[].min()
xmax = plotman.grid.grid[].max()
zmin = plotman.grid.grid[].min()
zmax = plotman.grid.grid[].max()
if np.abs(zmax - zmin) < np.abs(xmax - xmin):
sizex = 10 / 2.54
sizez = 1.2 * sizex * (np.abs(zmax - zmin) / np.abs(xmax - xmin))
else:
sizez = 10 / 2.54
sizex = sizez * (np.abs(xmax - xmin) / np.abs(zmax - zmin))
sizex += 1.3
sizez += 1
return sizex, sizez |
def get_overrides_filename(variable):
filename = os.environ.get(variable)
if filename is None:
msg = .format(variable)
raise EnvironmentError(msg)
return filename | Get the name of the file containing configuration overrides
from the provided environment variable. | ### Input:
Get the name of the file containing configuration overrides
from the provided environment variable.
### Response:
def get_overrides_filename(variable):
filename = os.environ.get(variable)
if filename is None:
msg = .format(variable)
raise EnvironmentError(msg)
return filename |
def absent(name, auth=None):
ret = {: name,
: {},
: True,
: }
__salt__[](auth)
domain = __salt__[](name=name)
if domain:
if __opts__[] is True:
ret[] = None
ret[] = {: name}
ret[] = .format(name)
return ret
__salt__[](name=domain)
ret[][] = domain.id
ret[] =
return ret | Ensure domain does not exist
name
Name of the domain | ### Input:
Ensure domain does not exist
name
Name of the domain
### Response:
def absent(name, auth=None):
ret = {: name,
: {},
: True,
: }
__salt__[](auth)
domain = __salt__[](name=name)
if domain:
if __opts__[] is True:
ret[] = None
ret[] = {: name}
ret[] = .format(name)
return ret
__salt__[](name=domain)
ret[][] = domain.id
ret[] =
return ret |
def import_experience(self, experiences):
if isinstance(experiences, dict):
if self.unique_state:
experiences[] = dict(state=experiences[])
if self.unique_action:
experiences[] = dict(action=experiences[])
self.model.import_experience(**experiences)
else:
if self.unique_state:
states = dict(state=list())
else:
states = {name: list() for name in experiences[0][]}
internals = [list() for _ in experiences[0][]]
if self.unique_action:
actions = dict(action=list())
else:
actions = {name: list() for name in experiences[0][]}
terminal = list()
reward = list()
for experience in experiences:
if self.unique_state:
states[].append(experience[])
else:
for name in sorted(states):
states[name].append(experience[][name])
for n, internal in enumerate(internals):
internal.append(experience[][n])
if self.unique_action:
actions[].append(experience[])
else:
for name in sorted(actions):
actions[name].append(experience[][name])
terminal.append(experience[])
reward.append(experience[])
self.model.import_experience(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward
) | Imports experiences.
Args:
experiences: | ### Input:
Imports experiences.
Args:
experiences:
### Response:
def import_experience(self, experiences):
if isinstance(experiences, dict):
if self.unique_state:
experiences[] = dict(state=experiences[])
if self.unique_action:
experiences[] = dict(action=experiences[])
self.model.import_experience(**experiences)
else:
if self.unique_state:
states = dict(state=list())
else:
states = {name: list() for name in experiences[0][]}
internals = [list() for _ in experiences[0][]]
if self.unique_action:
actions = dict(action=list())
else:
actions = {name: list() for name in experiences[0][]}
terminal = list()
reward = list()
for experience in experiences:
if self.unique_state:
states[].append(experience[])
else:
for name in sorted(states):
states[name].append(experience[][name])
for n, internal in enumerate(internals):
internal.append(experience[][n])
if self.unique_action:
actions[].append(experience[])
else:
for name in sorted(actions):
actions[name].append(experience[][name])
terminal.append(experience[])
reward.append(experience[])
self.model.import_experience(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward
) |
def _check_value(self, ovsrec_row, column_value):
column, value_json = column_value
column_schema = ovsrec_row._table.columns[column]
value = ovs.db.data.Datum.from_json(
column_schema.type, value_json).to_python(ovs.db.idl._uuid_to_row)
datum = getattr(ovsrec_row, column)
if column_schema.type.is_map():
for k, v in value.items():
if k in datum and datum[k] == v:
return True
elif datum == value:
return True
return False | :type column_value: tuple of column and value_json | ### Input:
:type column_value: tuple of column and value_json
### Response:
def _check_value(self, ovsrec_row, column_value):
column, value_json = column_value
column_schema = ovsrec_row._table.columns[column]
value = ovs.db.data.Datum.from_json(
column_schema.type, value_json).to_python(ovs.db.idl._uuid_to_row)
datum = getattr(ovsrec_row, column)
if column_schema.type.is_map():
for k, v in value.items():
if k in datum and datum[k] == v:
return True
elif datum == value:
return True
return False |
def throws(self, exception=Exception):
def exception_function(*args, **kwargs):
raise exception
self._conditions["default"] = exception_function
return self | Customizes the stub function to raise an exception. If conditions like withArgs or onCall
were specified, then the return value will only be returned when the conditions are met.
Args: exception (by default=Exception, it could be any customized exception)
Return: a SinonStub object (able to be chained) | ### Input:
Customizes the stub function to raise an exception. If conditions like withArgs or onCall
were specified, then the return value will only be returned when the conditions are met.
Args: exception (by default=Exception, it could be any customized exception)
Return: a SinonStub object (able to be chained)
### Response:
def throws(self, exception=Exception):
def exception_function(*args, **kwargs):
raise exception
self._conditions["default"] = exception_function
return self |
def get_learned_skills(self, lang):
skills = [skill for skill in
self.user_data.language_data[lang][]]
self._compute_dependency_order(skills)
return [skill for skill in
sorted(skills, key=lambda skill: skill[])
if skill[]] | Return the learned skill objects sorted by the order they were learned
in. | ### Input:
Return the learned skill objects sorted by the order they were learned
in.
### Response:
def get_learned_skills(self, lang):
skills = [skill for skill in
self.user_data.language_data[lang][]]
self._compute_dependency_order(skills)
return [skill for skill in
sorted(skills, key=lambda skill: skill[])
if skill[]] |
async def create_wallet(config: str,
credentials: str) -> None:
logger = logging.getLogger(__name__)
logger.debug("create_wallet: >>> config: %r, credentials: %r",
config,
credentials)
if not hasattr(create_wallet, "cb"):
logger.debug("create_wallet: Creating callback")
create_wallet.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32))
c_config = c_char_p(config.encode())
c_credentials = c_char_p(credentials.encode())
await do_call(,
c_config,
c_credentials,
create_wallet.cb)
logger.debug("create_wallet: <<<") | Creates a new secure wallet with the given unique name.
:param config: Wallet configuration json.
{
"id": string, Identifier of the wallet.
Configured storage uses this identifier to lookup exact wallet data placement.
"storage_type": optional<string>, Type of the wallet storage. Defaults to 'default'.
'Default' storage type allows to store wallet data in the local file.
Custom storage types can be registered with indy_register_wallet_storage call.
"storage_config": optional<object>, Storage configuration json. Storage type defines set of supported keys.
Can be optional if storage supports default configuration.
For 'default' storage type configuration is:
{
"path": optional<string>, Path to the directory with wallet files.
Defaults to $HOME/.indy_client/wallet.
Wallet will be stored in the file {path}/{id}/sqlite.db
}
}
:param credentials: Wallet credentials json
{
"key": string, Key or passphrase used for wallet key derivation.
Look to key_derivation_method param for information about supported key derivation methods.
"storage_credentials": optional<object> Credentials for wallet storage. Storage type defines set of supported keys.
Can be optional if storage supports default configuration.
For 'default' storage type should be empty.
"key_derivation_method": optional<string> Algorithm to use for wallet key derivation:
ARGON2I_MOD - derive secured wallet master key (used by default)
ARGON2I_INT - derive secured wallet master key (less secured but faster)
RAW - raw wallet key master provided (skip derivation).
RAW keys can be generated with generate_wallet_key call
}
:return: Error code | ### Input:
Creates a new secure wallet with the given unique name.
:param config: Wallet configuration json.
{
"id": string, Identifier of the wallet.
Configured storage uses this identifier to lookup exact wallet data placement.
"storage_type": optional<string>, Type of the wallet storage. Defaults to 'default'.
'Default' storage type allows to store wallet data in the local file.
Custom storage types can be registered with indy_register_wallet_storage call.
"storage_config": optional<object>, Storage configuration json. Storage type defines set of supported keys.
Can be optional if storage supports default configuration.
For 'default' storage type configuration is:
{
"path": optional<string>, Path to the directory with wallet files.
Defaults to $HOME/.indy_client/wallet.
Wallet will be stored in the file {path}/{id}/sqlite.db
}
}
:param credentials: Wallet credentials json
{
"key": string, Key or passphrase used for wallet key derivation.
Look to key_derivation_method param for information about supported key derivation methods.
"storage_credentials": optional<object> Credentials for wallet storage. Storage type defines set of supported keys.
Can be optional if storage supports default configuration.
For 'default' storage type should be empty.
"key_derivation_method": optional<string> Algorithm to use for wallet key derivation:
ARGON2I_MOD - derive secured wallet master key (used by default)
ARGON2I_INT - derive secured wallet master key (less secured but faster)
RAW - raw wallet key master provided (skip derivation).
RAW keys can be generated with generate_wallet_key call
}
:return: Error code
### Response:
async def create_wallet(config: str,
credentials: str) -> None:
logger = logging.getLogger(__name__)
logger.debug("create_wallet: >>> config: %r, credentials: %r",
config,
credentials)
if not hasattr(create_wallet, "cb"):
logger.debug("create_wallet: Creating callback")
create_wallet.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32))
c_config = c_char_p(config.encode())
c_credentials = c_char_p(credentials.encode())
await do_call(,
c_config,
c_credentials,
create_wallet.cb)
logger.debug("create_wallet: <<<") |
def update_result(result):
try:
result_id = result.id
result = _forbidden_attributes(result)
res = _pybossa_req(, , result_id, payload=result.data)
if res.get():
return Result(res)
else:
return res
except:
raise | Update a result for a given result ID.
:param result: PYBOSSA result | ### Input:
Update a result for a given result ID.
:param result: PYBOSSA result
### Response:
def update_result(result):
try:
result_id = result.id
result = _forbidden_attributes(result)
res = _pybossa_req(, , result_id, payload=result.data)
if res.get():
return Result(res)
else:
return res
except:
raise |
def reset (self):
super(FtpUrl, self).reset()
self.files = []
self.filename = None
self.filename_encoding = | Initialize FTP url data. | ### Input:
Initialize FTP url data.
### Response:
def reset (self):
super(FtpUrl, self).reset()
self.files = []
self.filename = None
self.filename_encoding = |
def t0_perpass_supconj(b, orbit, solve_for=None, **kwargs):
orbit_ps = _get_system_ps(b, orbit)
metawargs = orbit_ps.meta
metawargs.pop()
| Create a constraint for t0_perpass in an orbit - allowing translating between
t0_perpass and t0_supconj.
:parameter b: the :class:`phoebe.frontend.bundle.Bundle`
:parameter str orbit: the label of the orbit in which this
constraint should be built
:parameter str solve_for: if 't0_perpass' should not be the derived/constrained
parameter, provide which other parameter should be derived
(ie 't0_supconj', 'per0', 'period')
:returns: lhs (Parameter), rhs (ConstraintParameter), args (list of arguments
that were passed to this function) | ### Input:
Create a constraint for t0_perpass in an orbit - allowing translating between
t0_perpass and t0_supconj.
:parameter b: the :class:`phoebe.frontend.bundle.Bundle`
:parameter str orbit: the label of the orbit in which this
constraint should be built
:parameter str solve_for: if 't0_perpass' should not be the derived/constrained
parameter, provide which other parameter should be derived
(ie 't0_supconj', 'per0', 'period')
:returns: lhs (Parameter), rhs (ConstraintParameter), args (list of arguments
that were passed to this function)
### Response:
def t0_perpass_supconj(b, orbit, solve_for=None, **kwargs):
orbit_ps = _get_system_ps(b, orbit)
metawargs = orbit_ps.meta
metawargs.pop()
|
def get_bbox(self):
width = self.width
height = self.height
cos_angle = cos(self.angle)
sin_angle = sin(self.angle)
x_diff = 0.5 * width
y_diff = 0.5 * height
c_x_diff = cos_angle * x_diff
s_x_diff = sin_angle * x_diff
c_y_diff = cos_angle * y_diff
s_y_diff = sin_angle * y_diff
if cos_angle > 0:
if sin_angle > 0:
x_max = c_x_diff + s_y_diff
x_min = -x_max
y_max = c_y_diff + s_x_diff
y_min = -y_max
else:
x_max = c_x_diff - s_y_diff
x_min = -x_max
y_max = c_y_diff - s_x_diff
y_min = -y_max
else:
if sin_angle > 0:
x_min = c_x_diff - s_y_diff
x_max = -x_min
y_min = c_y_diff - s_x_diff
y_max = -y_min
else:
x_min = c_x_diff + s_y_diff
x_max = -x_min
y_min = c_y_diff + s_x_diff
y_max = -y_min
return x_min, x_max, y_min, y_max | Returns bounding box (xmin, xmax, ymin, ymax) | ### Input:
Returns bounding box (xmin, xmax, ymin, ymax)
### Response:
def get_bbox(self):
width = self.width
height = self.height
cos_angle = cos(self.angle)
sin_angle = sin(self.angle)
x_diff = 0.5 * width
y_diff = 0.5 * height
c_x_diff = cos_angle * x_diff
s_x_diff = sin_angle * x_diff
c_y_diff = cos_angle * y_diff
s_y_diff = sin_angle * y_diff
if cos_angle > 0:
if sin_angle > 0:
x_max = c_x_diff + s_y_diff
x_min = -x_max
y_max = c_y_diff + s_x_diff
y_min = -y_max
else:
x_max = c_x_diff - s_y_diff
x_min = -x_max
y_max = c_y_diff - s_x_diff
y_min = -y_max
else:
if sin_angle > 0:
x_min = c_x_diff - s_y_diff
x_max = -x_min
y_min = c_y_diff - s_x_diff
y_max = -y_min
else:
x_min = c_x_diff + s_y_diff
x_max = -x_min
y_min = c_y_diff + s_x_diff
y_max = -y_min
return x_min, x_max, y_min, y_max |
def requirements_for_changes(self, changes):
requirements = []
reqs_set = set()
if isinstance(changes, str):
changes = changes.split()
if not changes or changes[0].startswith():
return requirements
for line in changes:
line = line.strip()
if not line:
continue
match = IS_REQUIREMENTS_RE2.search(line)
if match:
for match in REQUIREMENTS_RE.findall(match.group(1)):
if match[1]:
version = + match[2] if match[1].startswith() else match[1]
req_str = match[0] + version
else:
req_str = match[0]
if req_str not in reqs_set:
reqs_set.add(req_str)
try:
requirements.append(pkg_resources.Requirement.parse(req_str))
except Exception as e:
log.warn(, req_str, e)
return requirements | Parse changes for requirements
:param list changes: | ### Input:
Parse changes for requirements
:param list changes:
### Response:
def requirements_for_changes(self, changes):
requirements = []
reqs_set = set()
if isinstance(changes, str):
changes = changes.split()
if not changes or changes[0].startswith():
return requirements
for line in changes:
line = line.strip()
if not line:
continue
match = IS_REQUIREMENTS_RE2.search(line)
if match:
for match in REQUIREMENTS_RE.findall(match.group(1)):
if match[1]:
version = + match[2] if match[1].startswith() else match[1]
req_str = match[0] + version
else:
req_str = match[0]
if req_str not in reqs_set:
reqs_set.add(req_str)
try:
requirements.append(pkg_resources.Requirement.parse(req_str))
except Exception as e:
log.warn(, req_str, e)
return requirements |
def auth_with_token(self, token, tenant_id=None, tenant_name=None):
main_resp, main_body = self._call_token_auth(token, tenant_id,
tenant_name)
roles = main_body["access"]["user"]["roles"]
ostore = [role for role in roles
if role["name"] == "object-store:default"]
if ostore:
ostore_tenant_id = ostore[0]["tenantId"]
ostore_resp, ostore_body = self._call_token_auth(token,
ostore_tenant_id, None)
ostore_cat = ostore_body["access"]["serviceCatalog"]
main_cat = main_body["access"]["serviceCatalog"]
main_cat.extend(ostore_cat)
self._parse_response(main_body)
self.authenticated = True | If a valid token is already known, this call will use it to generate
the service catalog. | ### Input:
If a valid token is already known, this call will use it to generate
the service catalog.
### Response:
def auth_with_token(self, token, tenant_id=None, tenant_name=None):
main_resp, main_body = self._call_token_auth(token, tenant_id,
tenant_name)
roles = main_body["access"]["user"]["roles"]
ostore = [role for role in roles
if role["name"] == "object-store:default"]
if ostore:
ostore_tenant_id = ostore[0]["tenantId"]
ostore_resp, ostore_body = self._call_token_auth(token,
ostore_tenant_id, None)
ostore_cat = ostore_body["access"]["serviceCatalog"]
main_cat = main_body["access"]["serviceCatalog"]
main_cat.extend(ostore_cat)
self._parse_response(main_body)
self.authenticated = True |
def get_unspents(self, address):
addresses = []
addresses.append(str(address))
min_confirmations = 0
max_confirmation = 2000000000
unspents = self.obj.listunspent(min_confirmations, max_confirmation,
addresses)
return self.format_unspents(unspents) | Get the spendable transaction outputs, also known as UTXOs or
unspent transaction outputs.
NOTE: this will only return unspents if the address provided is
present in the bitcoind server. Use the chain, blockchain,
or blockcypher API to grab the unspents for arbitrary addresses. | ### Input:
Get the spendable transaction outputs, also known as UTXOs or
unspent transaction outputs.
NOTE: this will only return unspents if the address provided is
present in the bitcoind server. Use the chain, blockchain,
or blockcypher API to grab the unspents for arbitrary addresses.
### Response:
def get_unspents(self, address):
addresses = []
addresses.append(str(address))
min_confirmations = 0
max_confirmation = 2000000000
unspents = self.obj.listunspent(min_confirmations, max_confirmation,
addresses)
return self.format_unspents(unspents) |
def template(args):
" Add or remove templates from site. "
site = Site(args.PATH)
if args.ACTION == "add":
return site.add_template(args.TEMPLATE)
return site.remove_template(args.TEMPLATE) | Add or remove templates from site. | ### Input:
Add or remove templates from site.
### Response:
def template(args):
" Add or remove templates from site. "
site = Site(args.PATH)
if args.ACTION == "add":
return site.add_template(args.TEMPLATE)
return site.remove_template(args.TEMPLATE) |
def register(explicit=True):
global _WARN
if explicit:
_WARN = False
pairs = get_pairs()
for type_, cls in pairs:
converter = cls()
if type_ in units.registry:
previous = units.registry[type_]
_mpl_units[type_] = previous
units.registry[type_] = converter | Register Pandas Formatters and Converters with matplotlib
This function modifies the global ``matplotlib.units.registry``
dictionary. Pandas adds custom converters for
* pd.Timestamp
* pd.Period
* np.datetime64
* datetime.datetime
* datetime.date
* datetime.time
See Also
--------
deregister_matplotlib_converter | ### Input:
Register Pandas Formatters and Converters with matplotlib
This function modifies the global ``matplotlib.units.registry``
dictionary. Pandas adds custom converters for
* pd.Timestamp
* pd.Period
* np.datetime64
* datetime.datetime
* datetime.date
* datetime.time
See Also
--------
deregister_matplotlib_converters
### Response:
def register(explicit=True):
global _WARN
if explicit:
_WARN = False
pairs = get_pairs()
for type_, cls in pairs:
converter = cls()
if type_ in units.registry:
previous = units.registry[type_]
_mpl_units[type_] = previous
units.registry[type_] = converter |
def ingredient_positions(self):
positions = defaultdict(list)
for y, row in enumerate(self.in_shape):
for x, (item_id, metadata, amount) in enumerate(row):
positions[(item_id, metadata)].append((x, y, amount))
return positions | Returns:
dict: In the form { (item_id, metadata) -> [(x, y, amount), ...] } | ### Input:
Returns:
dict: In the form { (item_id, metadata) -> [(x, y, amount), ...] }
### Response:
def ingredient_positions(self):
positions = defaultdict(list)
for y, row in enumerate(self.in_shape):
for x, (item_id, metadata, amount) in enumerate(row):
positions[(item_id, metadata)].append((x, y, amount))
return positions |
def set_executable(filename):
st = os.stat(filename)
os.chmod(filename, st.st_mode | stat.S_IEXEC) | Set the exectuable bit on the given filename | ### Input:
Set the executable bit on the given filename
### Response:
def set_executable(filename):
st = os.stat(filename)
os.chmod(filename, st.st_mode | stat.S_IEXEC) |
def encrypt(s, base64 = False):
e = _cipher().encrypt(s)
return base64 and b64encode(e) or e | 对称加密函数 | ### Input:
对称加密函数
### Response:
def encrypt(s, base64 = False):
e = _cipher().encrypt(s)
return base64 and b64encode(e) or e |
def run(self, *args):
params = self.parser.parse_args(args)
code = self.unify(params.matching, params.sources,
params.fast_matching, params.no_strict,
params.interactive, params.recovery)
return code | Merge unique identities using a matching algorithm. | ### Input:
Merge unique identities using a matching algorithm.
### Response:
def run(self, *args):
params = self.parser.parse_args(args)
code = self.unify(params.matching, params.sources,
params.fast_matching, params.no_strict,
params.interactive, params.recovery)
return code |
def toil_make_tool(toolpath_object, loading_context):
if isinstance(toolpath_object, Mapping) \
and toolpath_object.get("class") == "CommandLineTool":
return ToilCommandLineTool(toolpath_object, loading_context)
return cwltool.workflow.default_make_tool(toolpath_object, loading_context) | Factory function passed to load_tool() which creates instances of the
custom ToilCommandLineTool. | ### Input:
Factory function passed to load_tool() which creates instances of the
custom ToilCommandLineTool.
### Response:
def toil_make_tool(toolpath_object, loading_context):
if isinstance(toolpath_object, Mapping) \
and toolpath_object.get("class") == "CommandLineTool":
return ToilCommandLineTool(toolpath_object, loading_context)
return cwltool.workflow.default_make_tool(toolpath_object, loading_context) |
def dedicate(rh):
rh.printSysLog("Enter changeVM.dedicate")
parms = [
"-T", rh.userid,
"-v", rh.parms[],
"-r", rh.parms[],
"-R", rh.parms[]]
hideList = []
results = invokeSMCLI(rh,
"Image_Device_Dedicate_DM",
parms,
hideInLog=hideList)
if results[] != 0:
rh.printLn("ES", results[])
rh.updateResults(results)
if results[] == 0:
results = isLoggedOn(rh, rh.userid)
if (results[] == 0 and results[] == 0):
parms = [
"-T", rh.userid,
"-v", rh.parms[],
"-r", rh.parms[],
"-R", rh.parms[]]
results = invokeSMCLI(rh, "Image_Device_Dedicate", parms)
if results[] == 0:
rh.printLn("N", "Dedicated device " + rh.parms[] +
" to the active configuration.")
else:
rh.printLn("ES", results[])
rh.updateResults(results)
rh.printSysLog("Exit changeVM.dedicate, rc: " +
str(rh.results[]))
return rh.results[] | Dedicate device.
Input:
Request Handle with the following properties:
function - 'CHANGEVM'
subfunction - 'DEDICATEDM'
userid - userid of the virtual machine
parms['vaddr'] - Virtual address
parms['raddr'] - Real address
parms['mode'] - Read only mode or not.
Output:
Request Handle updated with the results.
Return code - 0: ok, non-zero: error | ### Input:
Dedicate device.
Input:
Request Handle with the following properties:
function - 'CHANGEVM'
subfunction - 'DEDICATEDM'
userid - userid of the virtual machine
parms['vaddr'] - Virtual address
parms['raddr'] - Real address
parms['mode'] - Read only mode or not.
Output:
Request Handle updated with the results.
Return code - 0: ok, non-zero: error
### Response:
def dedicate(rh):
rh.printSysLog("Enter changeVM.dedicate")
parms = [
"-T", rh.userid,
"-v", rh.parms[],
"-r", rh.parms[],
"-R", rh.parms[]]
hideList = []
results = invokeSMCLI(rh,
"Image_Device_Dedicate_DM",
parms,
hideInLog=hideList)
if results[] != 0:
rh.printLn("ES", results[])
rh.updateResults(results)
if results[] == 0:
results = isLoggedOn(rh, rh.userid)
if (results[] == 0 and results[] == 0):
parms = [
"-T", rh.userid,
"-v", rh.parms[],
"-r", rh.parms[],
"-R", rh.parms[]]
results = invokeSMCLI(rh, "Image_Device_Dedicate", parms)
if results[] == 0:
rh.printLn("N", "Dedicated device " + rh.parms[] +
" to the active configuration.")
else:
rh.printLn("ES", results[])
rh.updateResults(results)
rh.printSysLog("Exit changeVM.dedicate, rc: " +
str(rh.results[]))
return rh.results[] |
def create_token(self, data, token_valid_for=180) -> str:
jwt_token = jwt.encode({
: data,
: datetime.utcnow() + timedelta(seconds=token_valid_for)},
self.app_secret)
return Security.encrypt(jwt_token) | Create encrypted JWT | ### Input:
Create encrypted JWT
### Response:
def create_token(self, data, token_valid_for=180) -> str:
jwt_token = jwt.encode({
: data,
: datetime.utcnow() + timedelta(seconds=token_valid_for)},
self.app_secret)
return Security.encrypt(jwt_token) |
def __update_clusters(self):
clusters = [[] for _ in range(len(self.__centers))]
dataset_differences = self.__calculate_dataset_difference(len(clusters))
optimum_indexes = numpy.argmin(dataset_differences, axis=0)
for index_point in range(len(optimum_indexes)):
index_cluster = optimum_indexes[index_point]
clusters[index_cluster].append(index_point)
clusters = [cluster for cluster in clusters if len(cluster) > 0]
return clusters | !
@brief Calculate distance (in line with specified metric) to each point from each cluster. Nearest points
are captured by the corresponding clusters and, as a result, the clusters are updated.
@return (list) Updated clusters as list of clusters. Each cluster contains indexes of objects from data. | ### Input:
!
@brief Calculate distance (in line with specified metric) to each point from each cluster. Nearest points
are captured by the corresponding clusters and, as a result, the clusters are updated.
@return (list) Updated clusters as list of clusters. Each cluster contains indexes of objects from data.
### Response:
def __update_clusters(self):
clusters = [[] for _ in range(len(self.__centers))]
dataset_differences = self.__calculate_dataset_difference(len(clusters))
optimum_indexes = numpy.argmin(dataset_differences, axis=0)
for index_point in range(len(optimum_indexes)):
index_cluster = optimum_indexes[index_point]
clusters[index_cluster].append(index_point)
clusters = [cluster for cluster in clusters if len(cluster) > 0]
return clusters |
def result_report_class_wise_average(self):
results = self.results_class_wise_average_metrics()
output = self.ui.section_header(, indent=2) +
output += self.ui.line(, indent=2) +
output += self.ui.data(field=,
value=float(results[][]) * 100, unit=, indent=4) +
return output | Report class-wise averages
Returns
-------
str
result report in string format | ### Input:
Report class-wise averages
Returns
-------
str
result report in string format
### Response:
def result_report_class_wise_average(self):
results = self.results_class_wise_average_metrics()
output = self.ui.section_header(, indent=2) +
output += self.ui.line(, indent=2) +
output += self.ui.data(field=,
value=float(results[][]) * 100, unit=, indent=4) +
return output |
def add_scripts(self):
common_script = self.node(self.defs, , type=)
def get_js_dict():
return dict(
(k, getattr(self.graph.state, k))
for k in dir(self.graph.config)
if not k.startswith() and hasattr(self.graph.state, k)
and not hasattr(getattr(self.graph.state, k), )
)
def json_default(o):
if isinstance(o, (datetime, date)):
return o.isoformat()
if hasattr(o, ):
return o.to_dict()
return json.JSONEncoder().default(o)
dct = get_js_dict()
dct[] = [
l.get() if isinstance(l, dict) else l
for l in self.graph._legends + self.graph._secondary_legends
]
common_js =
common_js +=
if self.graph.no_prefix:
common_js +=
else:
common_js += % self.graph.uuid
common_script.text = common_js + json.dumps(dct, default=json_default)
for js in self.graph.js:
if js.startswith():
script = self.node(self.defs, , type=)
with io.open(js[len():], encoding=) as f:
script.text = f.read()
else:
if js.startswith() and self.graph.force_uri_protocol:
js = % (self.graph.force_uri_protocol, js)
self.node(self.defs, , type=, href=js) | Add the js to the svg | ### Input:
Add the js to the svg
### Response:
def add_scripts(self):
common_script = self.node(self.defs, , type=)
def get_js_dict():
return dict(
(k, getattr(self.graph.state, k))
for k in dir(self.graph.config)
if not k.startswith() and hasattr(self.graph.state, k)
and not hasattr(getattr(self.graph.state, k), )
)
def json_default(o):
if isinstance(o, (datetime, date)):
return o.isoformat()
if hasattr(o, ):
return o.to_dict()
return json.JSONEncoder().default(o)
dct = get_js_dict()
dct[] = [
l.get() if isinstance(l, dict) else l
for l in self.graph._legends + self.graph._secondary_legends
]
common_js =
common_js +=
if self.graph.no_prefix:
common_js +=
else:
common_js += % self.graph.uuid
common_script.text = common_js + json.dumps(dct, default=json_default)
for js in self.graph.js:
if js.startswith():
script = self.node(self.defs, , type=)
with io.open(js[len():], encoding=) as f:
script.text = f.read()
else:
if js.startswith() and self.graph.force_uri_protocol:
js = % (self.graph.force_uri_protocol, js)
self.node(self.defs, , type=, href=js) |
def forward(self, channel, date_s, fragment):
time_s, sep, nick = fragment.rpartition()
time = datetime.datetime.strptime(time_s, )
date = datetime.datetime.strptime(date_s, )
dt = datetime.datetime.combine(date, time.time())
loc_dt = self.timezone.localize(dt)
utc_dt = loc_dt.astimezone(pytz.utc)
url_tmpl =
url = url_tmpl.format(
target_date=utc_dt.date().isoformat(),
target_time=utc_dt.time().strftime(),
**locals()
)
raise cherrypy.HTTPRedirect(url, 301) | Given an HREF in the legacy timezone, redirect to an href for UTC. | ### Input:
Given an HREF in the legacy timezone, redirect to an href for UTC.
### Response:
def forward(self, channel, date_s, fragment):
time_s, sep, nick = fragment.rpartition()
time = datetime.datetime.strptime(time_s, )
date = datetime.datetime.strptime(date_s, )
dt = datetime.datetime.combine(date, time.time())
loc_dt = self.timezone.localize(dt)
utc_dt = loc_dt.astimezone(pytz.utc)
url_tmpl =
url = url_tmpl.format(
target_date=utc_dt.date().isoformat(),
target_time=utc_dt.time().strftime(),
**locals()
)
raise cherrypy.HTTPRedirect(url, 301) |
def publish(namespace, name, version, description_file, tar_file, readme_file,
readme_file_ext, registry=None):
registry = registry or Registry_Base_URL
url = % (
registry,
namespace,
name,
version
)
if readme_file_ext == :
readme_section_name =
elif readme_file_ext == :
readme_section_name =
else:
raise ValueError( % readme_file_ext)
body = OrderedDict([(, (None, description_file.read(),)),
(,(, tar_file)),
(readme_section_name, (readme_section_name, readme_file))])
headers = _headersForRegistry(registry)
response = requests.put(url, headers=headers, files=body)
response.raise_for_status()
return None | Publish a tarblob to the registry, if the request fails, an exception
is raised, which either triggers re-authentication, or is turned into a
return value by the decorators. (If successful, the decorated function
returns None) | ### Input:
Publish a tarblob to the registry, if the request fails, an exception
is raised, which either triggers re-authentication, or is turned into a
return value by the decorators. (If successful, the decorated function
returns None)
### Response:
def publish(namespace, name, version, description_file, tar_file, readme_file,
readme_file_ext, registry=None):
registry = registry or Registry_Base_URL
url = % (
registry,
namespace,
name,
version
)
if readme_file_ext == :
readme_section_name =
elif readme_file_ext == :
readme_section_name =
else:
raise ValueError( % readme_file_ext)
body = OrderedDict([(, (None, description_file.read(),)),
(,(, tar_file)),
(readme_section_name, (readme_section_name, readme_file))])
headers = _headersForRegistry(registry)
response = requests.put(url, headers=headers, files=body)
response.raise_for_status()
return None |
def inc_version():
new_version = version.__version__
values = list(map(lambda x: int(x), new_version.split()))
values[2] += 1
with open(, ) as f:
f.write(.format(values[0], values[1], values[2]))
with open(, ) as f:
f.write(.format(values[0], values[1], values[2]))
importlib.reload(version)
print(.format(version.__version__))
return values | Increment micro release version (in 'major.minor.micro') in version.py and re-import it.
Major and minor versions must be incremented manually in version.py.
:return: list with current version numbers, e.g., [0,1,23]. | ### Input:
Increment micro release version (in 'major.minor.micro') in version.py and re-import it.
Major and minor versions must be incremented manually in version.py.
:return: list with current version numbers, e.g., [0,1,23].
### Response:
def inc_version():
new_version = version.__version__
values = list(map(lambda x: int(x), new_version.split()))
values[2] += 1
with open(, ) as f:
f.write(.format(values[0], values[1], values[2]))
with open(, ) as f:
f.write(.format(values[0], values[1], values[2]))
importlib.reload(version)
print(.format(version.__version__))
return values |
def get_effective_domain_id(request):
default_domain = get_default_domain(request)
domain_id = default_domain.get()
domain_name = default_domain.get()
return None if domain_name == DEFAULT_DOMAIN else domain_id | Gets the id of the default domain.
If the request's default domain is the same as DEFAULT_DOMAIN,
return None. | ### Input:
Gets the id of the default domain.
If the request's default domain is the same as DEFAULT_DOMAIN,
return None.
### Response:
def get_effective_domain_id(request):
default_domain = get_default_domain(request)
domain_id = default_domain.get()
domain_name = default_domain.get()
return None if domain_name == DEFAULT_DOMAIN else domain_id |
def advance_job_status(namespace: str, job: Job, duration: float,
err: Optional[Exception]):
duration = human_duration(duration)
if not err:
job.status = JobStatus.SUCCEEDED
logger.info(, job, duration)
return
if job.should_retry:
job.status = JobStatus.NOT_SET
job.retries += 1
if isinstance(err, RetryException) and err.at is not None:
job.at = err.at
else:
job.at = (datetime.now(timezone.utc) +
exponential_backoff(job.retries))
signals.job_schedule_retry.send(namespace, job=job, err=err)
log_args = (
job.retries, job.max_retries + 1, job, duration,
human_duration(
(job.at - datetime.now(tz=timezone.utc)).total_seconds()
)
)
if isinstance(err, RetryException):
logger.info(
, *log_args)
else:
logger.warning(
, *log_args)
return
job.status = JobStatus.FAILED
signals.job_failed.send(namespace, job=job, err=err)
logger.error(
,
job.max_retries + 1, job.max_retries + 1, job, duration,
exc_info=err
) | Advance the status of a job depending on its execution.
This function is called after a job has been executed. It calculates its
next status and calls the appropriate signals. | ### Input:
Advance the status of a job depending on its execution.
This function is called after a job has been executed. It calculates its
next status and calls the appropriate signals.
### Response:
def advance_job_status(namespace: str, job: Job, duration: float,
err: Optional[Exception]):
duration = human_duration(duration)
if not err:
job.status = JobStatus.SUCCEEDED
logger.info(, job, duration)
return
if job.should_retry:
job.status = JobStatus.NOT_SET
job.retries += 1
if isinstance(err, RetryException) and err.at is not None:
job.at = err.at
else:
job.at = (datetime.now(timezone.utc) +
exponential_backoff(job.retries))
signals.job_schedule_retry.send(namespace, job=job, err=err)
log_args = (
job.retries, job.max_retries + 1, job, duration,
human_duration(
(job.at - datetime.now(tz=timezone.utc)).total_seconds()
)
)
if isinstance(err, RetryException):
logger.info(
, *log_args)
else:
logger.warning(
, *log_args)
return
job.status = JobStatus.FAILED
signals.job_failed.send(namespace, job=job, err=err)
logger.error(
,
job.max_retries + 1, job.max_retries + 1, job, duration,
exc_info=err
) |
def do_session(self, args):
filename = if self.__session.filename is None \
else self.__session.filename
print(.format(, filename)) | Print current session information | ### Input:
Print current session information
### Response:
def do_session(self, args):
filename = if self.__session.filename is None \
else self.__session.filename
print(.format(, filename)) |
def occultquad(z,u1,u2,p0,return_components=False):
z = np.atleast_1d(z)
nz = np.size(z)
lambdad = np.zeros(nz)
etad = np.zeros(nz)
lambdae = np.zeros(nz)
omega=1.-u1/3.-u2/6.
tol = 1e-14
p = np.absolute(p0)
z = np.where(np.absolute(p-z) < tol,p,z)
z = np.where(np.absolute((p-1)-z) < tol,p-1.,z)
z = np.where(np.absolute((1-p)-z) < tol,1.-p,z)
z = np.where(z < tol,0.,z)
x1=(p-z)**2.
x2=(p+z)**2.
x3=p**2.-z**2.
def finish(p,z,u1,u2,lambdae,lambdad,etad):
omega = 1. - u1/3. - u2/6.
if p0 > 0:
muo1 = 1 - ((1-u1-2*u2)*lambdae+(u1+2*u2)*(lambdad+2./3*(p > z)) + u2*etad)/omega
mu0 = 1 - lambdae
else:
muo1 = 1 + ((1-u1-2*u2)*lambdae+(u1+2*u2)*(lambdad+2./3*(p > z)) + u2*etad)/omega
mu0 = 1 + lambdae
if return_components:
return muo1,(mu0,lambdad,etad)
else:
return muo1
if p <= 0.:
return finish(p,z,u1,u2,lambdae,lambdad,etad)
notusedyet = np.where( z < (1. + p) )[0]
if np.size(notusedyet) == 0:
return finish(p,z,u1,u2,lambdae,lambdad,etad)
if p >= 1.:
cond = z[notusedyet] <= p-1.
occulted = np.where(cond)
notused2 = np.where(~cond)
if np.size(occulted) != 0:
ndxuse = notusedyet[occulted]
etad[ndxuse] = 0.5
lambdae[ndxuse] = 1.
if np.size(notused2) == 0:
return finish(p,z,u1,u2,lambdae,lambdad,etad)
notusedyet = notusedyet[notused2]
inegressuni = np.where((z[notusedyet] >= np.absolute(1.-p)) & (z[notusedyet] < 1.+p))
if np.size(inegressuni) != 0:
ndxuse = notusedyet[inegressuni]
tmp = (1.-p**2.+z[ndxuse]**2.)/2./z[ndxuse]
tmp = np.where(tmp > 1.,1.,tmp)
tmp = np.where(tmp < -1.,-1.,tmp)
kap1 = np.arccos(tmp)
tmp = (p**2.+z[ndxuse]**2-1.)/2./p/z[ndxuse]
tmp = np.where(tmp > 1.,1.,tmp)
tmp = np.where(tmp < -1.,-1.,tmp)
kap0 = np.arccos(tmp)
tmp = 4.*z[ndxuse]**2-(1.+z[ndxuse]**2-p**2)**2
tmp = np.where(tmp < 0,0,tmp)
lambdae[ndxuse] = (p**2*kap0+kap1 - 0.5*np.sqrt(tmp))/np.pi
etad[ndxuse] = 1./2./np.pi*(kap1+p**2*(p**2+2.*z[ndxuse]**2)*kap0- \
(1.+5.*p**2+z[ndxuse]**2)/4.*np.sqrt((1.-x1[ndxuse])*(x2[ndxuse]-1.)))
cond = z[notusedyet] == p
ocltor = np.where(cond)
notused3 = np.where(~cond)
t = np.where(z[notusedyet] == p)
if np.size(ocltor) != 0:
ndxuse = notusedyet[ocltor]
if p < 0.5:
q=2.*p
Ek,Kk = ellke(q)
lambdad[ndxuse] = 1./3.+2./9./np.pi*(4.*(2.*p**2-1.)*Ek+\
(1.-4.*p**2)*Kk)
etad[ndxuse] = p**2/2.*(p**2+2.*z[ndxuse]**2)
lambdae[ndxuse] = p**2
elif p > 0.5:
q=0.5/p
Ek,Kk = ellke(q)
lambdad[ndxuse] = 1./3.+16.*p/9./np.pi*(2.*p**2-1.)*Ek-\
(32.*p**4-20.*p**2+3.)/9./np.pi/p*Kk
else:
lambdad[ndxuse] = 1./3.-4./np.pi/9.
etad[ndxuse] = 3./32.
if np.size(notused3) == 0:
return finish(p,z,u1,u2,lambdae,lambdad,etad)
notusedyet = notusedyet[notused3]
cond = ((z[notusedyet] > 0.5+np.absolute(p-0.5)) & \
(z[notusedyet] < 1.+p)) | \
( (p > 0.5) & (z[notusedyet] > np.absolute(1.-p)) & \
(z[notusedyet] < p))
inegress = np.where(cond)
notused4 = np.where(~cond)
if np.size(inegress) != 0:
ndxuse = notusedyet[inegress]
q=np.sqrt((1.-x1[ndxuse])/(x2[ndxuse]-x1[ndxuse]))
Ek,Kk = ellke(q)
n=1./x1[ndxuse]-1.
lambdad[ndxuse]=2./9./np.pi/np.sqrt(x2[ndxuse]-x1[ndxuse])*\
(((1.-x2[ndxuse])*(2.*x2[ndxuse]+x1[ndxuse]-3.)-\
3.*x3[ndxuse]*(x2[ndxuse]-2.))*Kk+(x2[ndxuse]-\
x1[ndxuse])*(z[ndxuse]**2+7.*p**2-4.)*Ek-\
3.*x3[ndxuse]/x1[ndxuse]*ellpic_bulirsch(n,q))
if np.size(notused4) == 0:
return finish(p,z,u1,u2,lambdae,lambdad,etad)
notusedyet = notusedyet[notused4]
if p < 1.:
cond = z[notusedyet] <= (1.-p)
inside = np.where(cond)
notused5 = np.where(~cond)
if np.size(inside) != 0:
ndxuse = notusedyet[inside]
etad[ndxuse] = p**2/2.*(p**2+2.*z[ndxuse]**2)
lambdae[ndxuse] = p**2
edge = np.where(z[ndxuse] == 1.-p)
if np.size(edge[0]) != 0:
lambdad[ndxuse[edge]] = 2./3./np.pi*np.arccos(1.-2.*p)-\
4./9./np.pi*np.sqrt(p*(1.-p))*(3.+2.*p-8.*p**2)
if p > 0.5:
lambdad[ndxuse[edge]] -= 2./3.
notused6 = np.where(z[ndxuse] != 1.-p)
if np.size(notused6) == 0:
return finish(p,z,u1,u2,lambdae,lambdad,etad)
ndxuse = ndxuse[notused6[0]]
origin = np.where(z[ndxuse] == 0)
if np.size(origin) != 0:
lambdad[ndxuse[origin]] = -2./3.*(1.-p**2)**1.5
notused7 = np.where(z[ndxuse] != 0)
if np.size(notused7) == 0:
return finish(p,z,u1,u2,lambdae,lambdad,etad)
ndxuse = ndxuse[notused7[0]]
q=np.sqrt((x2[ndxuse]-x1[ndxuse])/(1.-x1[ndxuse]))
n=x2[ndxuse]/x1[ndxuse]-1.
Ek,Kk = ellke(q)
lambdad[ndxuse] = 2./9./np.pi/np.sqrt(1.-x1[ndxuse])*\
((1.-5.*z[ndxuse]**2+p**2+x3[ndxuse]**2)*Kk+\
(1.-x1[ndxuse])*(z[ndxuse]**2+7.*p**2-4.)*Ek-\
3.*x3[ndxuse]/x1[ndxuse]*ellpic_bulirsch(n,q))
return finish(p,z,u1,u2,lambdae,lambdad,etad) | #### Mandel-Agol code:
# Python translation of IDL code.
# This routine computes the lightcurve for occultation of a
# quadratically limb-darkened source without microlensing. Please
# cite Mandel & Agol (2002) and Eastman & Agol (2008) if you make use
# of this routine in your research. Please report errors or bugs to
# jdeast@astronomy.ohio-state.edu
.. note::
Should probably wrap the Fortran code at some point.
(This particular part of the code was put together awhile ago.) | ### Input:
#### Mandel-Agol code:
# Python translation of IDL code.
# This routine computes the lightcurve for occultation of a
# quadratically limb-darkened source without microlensing. Please
# cite Mandel & Agol (2002) and Eastman & Agol (2008) if you make use
# of this routine in your research. Please report errors or bugs to
# jdeast@astronomy.ohio-state.edu
.. note::
Should probably wrap the Fortran code at some point.
(This particular part of the code was put together awhile ago.)
### Response:
def occultquad(z,u1,u2,p0,return_components=False):
z = np.atleast_1d(z)
nz = np.size(z)
lambdad = np.zeros(nz)
etad = np.zeros(nz)
lambdae = np.zeros(nz)
omega=1.-u1/3.-u2/6.
tol = 1e-14
p = np.absolute(p0)
z = np.where(np.absolute(p-z) < tol,p,z)
z = np.where(np.absolute((p-1)-z) < tol,p-1.,z)
z = np.where(np.absolute((1-p)-z) < tol,1.-p,z)
z = np.where(z < tol,0.,z)
x1=(p-z)**2.
x2=(p+z)**2.
x3=p**2.-z**2.
def finish(p,z,u1,u2,lambdae,lambdad,etad):
omega = 1. - u1/3. - u2/6.
if p0 > 0:
muo1 = 1 - ((1-u1-2*u2)*lambdae+(u1+2*u2)*(lambdad+2./3*(p > z)) + u2*etad)/omega
mu0 = 1 - lambdae
else:
muo1 = 1 + ((1-u1-2*u2)*lambdae+(u1+2*u2)*(lambdad+2./3*(p > z)) + u2*etad)/omega
mu0 = 1 + lambdae
if return_components:
return muo1,(mu0,lambdad,etad)
else:
return muo1
if p <= 0.:
return finish(p,z,u1,u2,lambdae,lambdad,etad)
notusedyet = np.where( z < (1. + p) )[0]
if np.size(notusedyet) == 0:
return finish(p,z,u1,u2,lambdae,lambdad,etad)
if p >= 1.:
cond = z[notusedyet] <= p-1.
occulted = np.where(cond)
notused2 = np.where(~cond)
if np.size(occulted) != 0:
ndxuse = notusedyet[occulted]
etad[ndxuse] = 0.5
lambdae[ndxuse] = 1.
if np.size(notused2) == 0:
return finish(p,z,u1,u2,lambdae,lambdad,etad)
notusedyet = notusedyet[notused2]
inegressuni = np.where((z[notusedyet] >= np.absolute(1.-p)) & (z[notusedyet] < 1.+p))
if np.size(inegressuni) != 0:
ndxuse = notusedyet[inegressuni]
tmp = (1.-p**2.+z[ndxuse]**2.)/2./z[ndxuse]
tmp = np.where(tmp > 1.,1.,tmp)
tmp = np.where(tmp < -1.,-1.,tmp)
kap1 = np.arccos(tmp)
tmp = (p**2.+z[ndxuse]**2-1.)/2./p/z[ndxuse]
tmp = np.where(tmp > 1.,1.,tmp)
tmp = np.where(tmp < -1.,-1.,tmp)
kap0 = np.arccos(tmp)
tmp = 4.*z[ndxuse]**2-(1.+z[ndxuse]**2-p**2)**2
tmp = np.where(tmp < 0,0,tmp)
lambdae[ndxuse] = (p**2*kap0+kap1 - 0.5*np.sqrt(tmp))/np.pi
etad[ndxuse] = 1./2./np.pi*(kap1+p**2*(p**2+2.*z[ndxuse]**2)*kap0- \
(1.+5.*p**2+z[ndxuse]**2)/4.*np.sqrt((1.-x1[ndxuse])*(x2[ndxuse]-1.)))
cond = z[notusedyet] == p
ocltor = np.where(cond)
notused3 = np.where(~cond)
t = np.where(z[notusedyet] == p)
if np.size(ocltor) != 0:
ndxuse = notusedyet[ocltor]
if p < 0.5:
q=2.*p
Ek,Kk = ellke(q)
lambdad[ndxuse] = 1./3.+2./9./np.pi*(4.*(2.*p**2-1.)*Ek+\
(1.-4.*p**2)*Kk)
etad[ndxuse] = p**2/2.*(p**2+2.*z[ndxuse]**2)
lambdae[ndxuse] = p**2
elif p > 0.5:
q=0.5/p
Ek,Kk = ellke(q)
lambdad[ndxuse] = 1./3.+16.*p/9./np.pi*(2.*p**2-1.)*Ek-\
(32.*p**4-20.*p**2+3.)/9./np.pi/p*Kk
else:
lambdad[ndxuse] = 1./3.-4./np.pi/9.
etad[ndxuse] = 3./32.
if np.size(notused3) == 0:
return finish(p,z,u1,u2,lambdae,lambdad,etad)
notusedyet = notusedyet[notused3]
cond = ((z[notusedyet] > 0.5+np.absolute(p-0.5)) & \
(z[notusedyet] < 1.+p)) | \
( (p > 0.5) & (z[notusedyet] > np.absolute(1.-p)) & \
(z[notusedyet] < p))
inegress = np.where(cond)
notused4 = np.where(~cond)
if np.size(inegress) != 0:
ndxuse = notusedyet[inegress]
q=np.sqrt((1.-x1[ndxuse])/(x2[ndxuse]-x1[ndxuse]))
Ek,Kk = ellke(q)
n=1./x1[ndxuse]-1.
lambdad[ndxuse]=2./9./np.pi/np.sqrt(x2[ndxuse]-x1[ndxuse])*\
(((1.-x2[ndxuse])*(2.*x2[ndxuse]+x1[ndxuse]-3.)-\
3.*x3[ndxuse]*(x2[ndxuse]-2.))*Kk+(x2[ndxuse]-\
x1[ndxuse])*(z[ndxuse]**2+7.*p**2-4.)*Ek-\
3.*x3[ndxuse]/x1[ndxuse]*ellpic_bulirsch(n,q))
if np.size(notused4) == 0:
return finish(p,z,u1,u2,lambdae,lambdad,etad)
notusedyet = notusedyet[notused4]
if p < 1.:
cond = z[notusedyet] <= (1.-p)
inside = np.where(cond)
notused5 = np.where(~cond)
if np.size(inside) != 0:
ndxuse = notusedyet[inside]
etad[ndxuse] = p**2/2.*(p**2+2.*z[ndxuse]**2)
lambdae[ndxuse] = p**2
edge = np.where(z[ndxuse] == 1.-p)
if np.size(edge[0]) != 0:
lambdad[ndxuse[edge]] = 2./3./np.pi*np.arccos(1.-2.*p)-\
4./9./np.pi*np.sqrt(p*(1.-p))*(3.+2.*p-8.*p**2)
if p > 0.5:
lambdad[ndxuse[edge]] -= 2./3.
notused6 = np.where(z[ndxuse] != 1.-p)
if np.size(notused6) == 0:
return finish(p,z,u1,u2,lambdae,lambdad,etad)
ndxuse = ndxuse[notused6[0]]
origin = np.where(z[ndxuse] == 0)
if np.size(origin) != 0:
lambdad[ndxuse[origin]] = -2./3.*(1.-p**2)**1.5
notused7 = np.where(z[ndxuse] != 0)
if np.size(notused7) == 0:
return finish(p,z,u1,u2,lambdae,lambdad,etad)
ndxuse = ndxuse[notused7[0]]
q=np.sqrt((x2[ndxuse]-x1[ndxuse])/(1.-x1[ndxuse]))
n=x2[ndxuse]/x1[ndxuse]-1.
Ek,Kk = ellke(q)
lambdad[ndxuse] = 2./9./np.pi/np.sqrt(1.-x1[ndxuse])*\
((1.-5.*z[ndxuse]**2+p**2+x3[ndxuse]**2)*Kk+\
(1.-x1[ndxuse])*(z[ndxuse]**2+7.*p**2-4.)*Ek-\
3.*x3[ndxuse]/x1[ndxuse]*ellpic_bulirsch(n,q))
return finish(p,z,u1,u2,lambdae,lambdad,etad) |
def get(self, request, slug):
matching_datasets = self.generate_matching_datasets(slug)
if matching_datasets is None:
raise Http404("Datasets meeting these criteria do not exist.")
base_context = {
: matching_datasets,
: matching_datasets.count(),
: self.generate_page_title(slug),
}
additional_context = self.generate_additional_context(
matching_datasets
)
base_context.update(additional_context)
context = base_context
return render(
request,
self.template_path,
context
) | Basic functionality for GET request to view. | ### Input:
Basic functionality for GET request to view.
### Response:
def get(self, request, slug):
matching_datasets = self.generate_matching_datasets(slug)
if matching_datasets is None:
raise Http404("Datasets meeting these criteria do not exist.")
base_context = {
: matching_datasets,
: matching_datasets.count(),
: self.generate_page_title(slug),
}
additional_context = self.generate_additional_context(
matching_datasets
)
base_context.update(additional_context)
context = base_context
return render(
request,
self.template_path,
context
) |
def create(self, identity, role_sid=values.unset, attributes=values.unset,
friendly_name=values.unset):
data = values.of({
: identity,
: role_sid,
: attributes,
: friendly_name,
})
payload = self._version.create(
,
self._uri,
data=data,
)
return UserInstance(self._version, payload, service_sid=self._solution[], ) | Create a new UserInstance
:param unicode identity: The `identity` value that identifies the new resource's User
:param unicode role_sid: The SID of the Role assigned to this user
:param unicode attributes: A valid JSON string that contains application-specific data
:param unicode friendly_name: A string to describe the new resource
:returns: Newly created UserInstance
:rtype: twilio.rest.chat.v2.service.user.UserInstance | ### Input:
Create a new UserInstance
:param unicode identity: The `identity` value that identifies the new resource's User
:param unicode role_sid: The SID of the Role assigned to this user
:param unicode attributes: A valid JSON string that contains application-specific data
:param unicode friendly_name: A string to describe the new resource
:returns: Newly created UserInstance
:rtype: twilio.rest.chat.v2.service.user.UserInstance
### Response:
def create(self, identity, role_sid=values.unset, attributes=values.unset,
friendly_name=values.unset):
data = values.of({
: identity,
: role_sid,
: attributes,
: friendly_name,
})
payload = self._version.create(
,
self._uri,
data=data,
)
return UserInstance(self._version, payload, service_sid=self._solution[], ) |
def get_alerts_for(self, trigger):
assert trigger is not None
assert isinstance(trigger.id, str), "Value must be a string"
status, data = self.http_client.get_json(
ALERTS_URI % trigger.id,
params={: self.API_key},
headers={: })
return [self.alert_parser.parse_dict(item) for item in data] | Retrieves all of the alerts that were fired for the specified Trigger
:param trigger: the trigger
:type trigger: `pyowm.alertapi30.trigger.Trigger`
:return: list of `pyowm.alertapi30.alert.Alert` objects | ### Input:
Retrieves all of the alerts that were fired for the specified Trigger
:param trigger: the trigger
:type trigger: `pyowm.alertapi30.trigger.Trigger`
:return: list of `pyowm.alertapi30.alert.Alert` objects
### Response:
def get_alerts_for(self, trigger):
assert trigger is not None
assert isinstance(trigger.id, str), "Value must be a string"
status, data = self.http_client.get_json(
ALERTS_URI % trigger.id,
params={: self.API_key},
headers={: })
return [self.alert_parser.parse_dict(item) for item in data] |
def add_transitions_from_selected_state_to_parent():
task_string = "create transition"
sub_task_string = "to parent state"
selected_state_m, msg = get_selected_single_state_model_and_check_for_its_parent()
if selected_state_m is None:
logger.warning("Can not {0} {1}: {2}".format(task_string, sub_task_string, msg))
return
logger.debug("Check to {0} {1} ...".format(task_string, sub_task_string))
state = selected_state_m.state
parent_state = state.parent
from_outcomes = get_all_outcomes_except_of_abort_and_preempt(state)
possible_oc_ids = [oc_id for oc_id in state.parent.outcomes.keys() if oc_id >= 0]
possible_oc_ids.sort()
to_outcome = state.parent.outcomes[possible_oc_ids[0]]
oc_connected_to_parent = [oc for oc in from_outcomes if is_outcome_connect_to_state(oc, parent_state.state_id)]
oc_not_connected = [oc for oc in from_outcomes if not state.parent.get_transition_for_outcome(state, oc)]
if all(oc in oc_connected_to_parent for oc in from_outcomes):
logger.info("Remove transition {0} because all outcomes are connected to it.".format(sub_task_string))
for from_outcome in oc_connected_to_parent:
transition = parent_state.get_transition_for_outcome(state, from_outcome)
parent_state.remove(transition)
elif oc_not_connected:
logger.debug("Create transition {0} ... ".format(sub_task_string))
for from_outcome in from_outcomes:
parent_state.add_transition(state.state_id, from_outcome.outcome_id,
parent_state.state_id, to_outcome.outcome_id)
else:
if remove_transitions_if_target_is_the_same(from_outcomes):
logger.info("Removed transitions origin from outcomes of selected state {0}"
"because all point to the same target.".format(sub_task_string))
return add_transitions_from_selected_state_to_parent()
logger.info("Will not create transition {0}: Not clear situation of connected transitions."
"There will be no transitions to other states be touched.".format(sub_task_string))
return True | Generates the default success transition of a state to its parent success port
:return: | ### Input:
Generates the default success transition of a state to its parent success port
:return:
### Response:
def add_transitions_from_selected_state_to_parent():
task_string = "create transition"
sub_task_string = "to parent state"
selected_state_m, msg = get_selected_single_state_model_and_check_for_its_parent()
if selected_state_m is None:
logger.warning("Can not {0} {1}: {2}".format(task_string, sub_task_string, msg))
return
logger.debug("Check to {0} {1} ...".format(task_string, sub_task_string))
state = selected_state_m.state
parent_state = state.parent
from_outcomes = get_all_outcomes_except_of_abort_and_preempt(state)
possible_oc_ids = [oc_id for oc_id in state.parent.outcomes.keys() if oc_id >= 0]
possible_oc_ids.sort()
to_outcome = state.parent.outcomes[possible_oc_ids[0]]
oc_connected_to_parent = [oc for oc in from_outcomes if is_outcome_connect_to_state(oc, parent_state.state_id)]
oc_not_connected = [oc for oc in from_outcomes if not state.parent.get_transition_for_outcome(state, oc)]
if all(oc in oc_connected_to_parent for oc in from_outcomes):
logger.info("Remove transition {0} because all outcomes are connected to it.".format(sub_task_string))
for from_outcome in oc_connected_to_parent:
transition = parent_state.get_transition_for_outcome(state, from_outcome)
parent_state.remove(transition)
elif oc_not_connected:
logger.debug("Create transition {0} ... ".format(sub_task_string))
for from_outcome in from_outcomes:
parent_state.add_transition(state.state_id, from_outcome.outcome_id,
parent_state.state_id, to_outcome.outcome_id)
else:
if remove_transitions_if_target_is_the_same(from_outcomes):
logger.info("Removed transitions origin from outcomes of selected state {0}"
"because all point to the same target.".format(sub_task_string))
return add_transitions_from_selected_state_to_parent()
logger.info("Will not create transition {0}: Not clear situation of connected transitions."
"There will be no transitions to other states be touched.".format(sub_task_string))
return True |
def retrieve(self, id) :
_, _, deal = self.http_client.get("/deals/{id}".format(id=id))
deal["value"] = Coercion.to_decimal(deal["value"])
return deal | Retrieve a single deal
Returns a single deal available to the user, according to the unique deal ID provided
If the specified deal does not exist, the request will return an error
:calls: ``get /deals/{id}``
:param int id: Unique identifier of a Deal.
:return: Dictionary that supports attribute-style access and represents a Deal resource.
:rtype: dict | ### Input:
Retrieve a single deal
Returns a single deal available to the user, according to the unique deal ID provided
If the specified deal does not exist, the request will return an error
:calls: ``get /deals/{id}``
:param int id: Unique identifier of a Deal.
:return: Dictionary that supports attribute-style access and represents a Deal resource.
:rtype: dict
### Response:
def retrieve(self, id) :
_, _, deal = self.http_client.get("/deals/{id}".format(id=id))
deal["value"] = Coercion.to_decimal(deal["value"])
return deal |
def eigs_s(infile="", dir_path=):
file = os.path.join(dir_path, infile)
eigs_data = np.loadtxt(file)
Ss = []
for ind in range(eigs_data.shape[0]):
tau, Vdirs = [], []
for k in range(0, 9, 3):
tau.append(eigs_data[ind][k])
Vdirs.append([eigs_data[ind][k+1], eigs_data[ind][k+2]])
s = list(pmag.doeigs_s(tau, Vdirs))
Ss.append(s)
return Ss | Converts eigenparamters format data to s format
Parameters
___________________
Input:
file : input file name with eigenvalues (tau) and eigenvectors (V) with format:
tau_1 V1_dec V1_inc tau_2 V2_dec V2_inc tau_3 V3_dec V3_inc
Output
the six tensor elements as a nested array
[[x11,x22,x33,x12,x23,x13],....] | ### Input:
Converts eigenparameters format data to s format
Parameters
___________________
Input:
file : input file name with eigenvalues (tau) and eigenvectors (V) with format:
tau_1 V1_dec V1_inc tau_2 V2_dec V2_inc tau_3 V3_dec V3_inc
Output
the six tensor elements as a nested array
[[x11,x22,x33,x12,x23,x13],....]
### Response:
def eigs_s(infile="", dir_path=):
file = os.path.join(dir_path, infile)
eigs_data = np.loadtxt(file)
Ss = []
for ind in range(eigs_data.shape[0]):
tau, Vdirs = [], []
for k in range(0, 9, 3):
tau.append(eigs_data[ind][k])
Vdirs.append([eigs_data[ind][k+1], eigs_data[ind][k+2]])
s = list(pmag.doeigs_s(tau, Vdirs))
Ss.append(s)
return Ss |
def get_extra_vehicle_info(self, authentication_info):
import requests
base_url = "https://secure.ritassist.nl/GenericServiceJSONP.ashx"
query = "?f=CheckExtraVehicleInfo" \
"&token={token}" \
"&equipmentId={identifier}" \
"&lastHash=null&padding=false"
parameters = {
: authentication_info.access_token,
: str(self.identifier)
}
response = requests.get(base_url + query.format(**parameters))
json = response.json()
self.malfunction_light = json[]
self.fuel_level = json[]
self.coolant_temperature = json[]
self.power_voltage = json[] | Get extra data from the API. | ### Input:
Get extra data from the API.
### Response:
def get_extra_vehicle_info(self, authentication_info):
import requests
base_url = "https://secure.ritassist.nl/GenericServiceJSONP.ashx"
query = "?f=CheckExtraVehicleInfo" \
"&token={token}" \
"&equipmentId={identifier}" \
"&lastHash=null&padding=false"
parameters = {
: authentication_info.access_token,
: str(self.identifier)
}
response = requests.get(base_url + query.format(**parameters))
json = response.json()
self.malfunction_light = json[]
self.fuel_level = json[]
self.coolant_temperature = json[]
self.power_voltage = json[] |
def change_object_content_type(self, container, obj, new_ctype,
guess=False):
cname = utils.get_name(container)
oname = utils.get_name(obj)
if guess and container.cdn_enabled:
obj_url = "%s/%s" % (container.cdn_uri, oname)
new_ctype = mimetypes.guess_type(obj_url)[0]
return self.copy_object(container, obj, container,
content_type=new_ctype) | Copies object to itself, but applies a new content-type. The guess
feature requires the container to be CDN-enabled. If not, then the
content-type must be supplied. If using guess with a CDN-enabled
container, new_ctype can be set to None. Failure during the put will
result in an exception. | ### Input:
Copies object to itself, but applies a new content-type. The guess
feature requires the container to be CDN-enabled. If not, then the
content-type must be supplied. If using guess with a CDN-enabled
container, new_ctype can be set to None. Failure during the put will
result in an exception.
### Response:
def change_object_content_type(self, container, obj, new_ctype,
guess=False):
cname = utils.get_name(container)
oname = utils.get_name(obj)
if guess and container.cdn_enabled:
obj_url = "%s/%s" % (container.cdn_uri, oname)
new_ctype = mimetypes.guess_type(obj_url)[0]
return self.copy_object(container, obj, container,
content_type=new_ctype) |
def get_rlz(self, rlzstr):
r
mo = re.match(r, rlzstr)
if not mo:
return
return self.realizations[int(mo.group(1))] | r"""
Get a Realization instance for a string of the form 'rlz-\d+' | ### Input:
r"""
Get a Realization instance for a string of the form 'rlz-\d+'
### Response:
def get_rlz(self, rlzstr):
r
mo = re.match(r, rlzstr)
if not mo:
return
return self.realizations[int(mo.group(1))] |
def read_tess_fitslc(lcfits,
headerkeys=LCHEADERKEYS,
datakeys=LCDATAKEYS,
sapkeys=LCSAPKEYS,
pdckeys=LCPDCKEYS,
topkeys=LCTOPKEYS,
apkeys=LCAPERTUREKEYS,
normalize=False,
appendto=None,
filterqualityflags=False,
nanfilter=None,
timestoignore=None):
s SAP_FLUX and PDCSAP_FLUX measurements
will be normalized to 1.0 by dividing out the median flux for the
component light curve.
appendto : lcdict or None
If appendto is an `lcdict`, will append measurements of this `lcdict` to
that `lcdict`. This is used for consolidating light curves for the same
object across different files (sectors/cameras/CCDs?). The appending
does not care about the time order. To consolidate light curves in time
order, use `consolidate_tess_fitslc` below.
filterqualityflags : bool
If True, will remove any measurements that have non-zero quality flags
present. This usually indicates an issue with the instrument or
spacecraft.
nanfilter : {,,} or None
Indicates the flux measurement type(s) to apply the filtering to.
timestoignore : list of tuples or None
This is of the form::
[(time1_start, time1_end), (time2_start, time2_end), ...]
and indicates the start and end times to mask out of the final
lcdict. Use this to remove anything that wasn
hdulist = pyfits.open(lcfits)
lchdr, lcdata = hdulist[1].header, hdulist[1].data
lctophdr, lcaperturehdr, lcaperturedata = (hdulist[0].header,
hdulist[2].header,
hdulist[2].data)
hdulist.close()
hdrinfo = {}
for key in headerkeys:
if key in lchdr and lchdr[key] is not None:
hdrinfo[key.lower()] = lchdr[key]
else:
hdrinfo[key.lower()] = None
ndet = lchdr[]
for key in topkeys:
if key in lctophdr and lctophdr[key] is not None:
hdrinfo[key.lower()] = lctophdr[key]
else:
hdrinfo[key.lower()] = None
for key in lcaperturehdr:
if key in lcaperturehdr and lcaperturehdr[key] is not None:
hdrinfo[key.lower()] = lcaperturehdr[key]
else:
hdrinfo[key.lower()] = None
if (filterqualityflags is not False or
nanfilter is not None or
timestoignore is not None):
lcdict = filter_tess_lcdict(lcdict,
filterqualityflags,
nanfilter=nanfilter,
timestoignore=timestoignore)
return lcdict | This extracts the light curve from a single TESS .lc.fits file.
This works on the light curves available at MAST.
TODO: look at:
https://archive.stsci.edu/missions/tess/doc/EXP-TESS-ARC-ICD-TM-0014.pdf
for details on the column descriptions and to fill in any other info we
need.
Parameters
----------
lcfits : str
The filename of a MAST Kepler/K2 light curve FITS file.
headerkeys : list
A list of FITS header keys that will be extracted from the FITS light
curve file. These describe the observations. The default value for this
is given in `LCHEADERKEYS` above.
datakeys : list
A list of FITS column names that correspond to the auxiliary
measurements in the light curve. The default is `LCDATAKEYS` above.
sapkeys : list
A list of FITS column names that correspond to the SAP flux
measurements in the light curve. The default is `LCSAPKEYS` above.
pdckeys : list
A list of FITS column names that correspond to the PDC flux
measurements in the light curve. The default is `LCPDCKEYS` above.
topkeys : list
A list of FITS header keys that describe the object in the light
curve. The default is `LCTOPKEYS` above.
apkeys : list
A list of FITS header keys that describe the flux measurement apertures
used by the TESS pipeline. The default is `LCAPERTUREKEYS` above.
normalize : bool
If True, then the light curve's SAP_FLUX and PDCSAP_FLUX measurements
will be normalized to 1.0 by dividing out the median flux for the
component light curve.
appendto : lcdict or None
If appendto is an `lcdict`, will append measurements of this `lcdict` to
that `lcdict`. This is used for consolidating light curves for the same
object across different files (sectors/cameras/CCDs?). The appending
does not care about the time order. To consolidate light curves in time
order, use `consolidate_tess_fitslc` below.
filterqualityflags : bool
If True, will remove any measurements that have non-zero quality flags
present. This usually indicates an issue with the instrument or
spacecraft.
nanfilter : {'sap','pdc','sap,pdc'} or None
Indicates the flux measurement type(s) to apply the filtering to.
timestoignore : list of tuples or None
This is of the form::
[(time1_start, time1_end), (time2_start, time2_end), ...]
and indicates the start and end times to mask out of the final
lcdict. Use this to remove anything that wasn't caught by the quality
flags.
Returns
-------
lcdict
Returns an `lcdict` (this is useable by most astrobase functions for LC
processing). | ### Input:
This extracts the light curve from a single TESS .lc.fits file.
This works on the light curves available at MAST.
TODO: look at:
https://archive.stsci.edu/missions/tess/doc/EXP-TESS-ARC-ICD-TM-0014.pdf
for details on the column descriptions and to fill in any other info we
need.
Parameters
----------
lcfits : str
The filename of a MAST Kepler/K2 light curve FITS file.
headerkeys : list
A list of FITS header keys that will be extracted from the FITS light
curve file. These describe the observations. The default value for this
is given in `LCHEADERKEYS` above.
datakeys : list
A list of FITS column names that correspond to the auxiliary
measurements in the light curve. The default is `LCDATAKEYS` above.
sapkeys : list
A list of FITS column names that correspond to the SAP flux
measurements in the light curve. The default is `LCSAPKEYS` above.
pdckeys : list
A list of FITS column names that correspond to the PDC flux
measurements in the light curve. The default is `LCPDCKEYS` above.
topkeys : list
A list of FITS header keys that describe the object in the light
curve. The default is `LCTOPKEYS` above.
apkeys : list
A list of FITS header keys that describe the flux measurement apertures
used by the TESS pipeline. The default is `LCAPERTUREKEYS` above.
normalize : bool
If True, then the light curve's SAP_FLUX and PDCSAP_FLUX measurements
will be normalized to 1.0 by dividing out the median flux for the
component light curve.
appendto : lcdict or None
If appendto is an `lcdict`, will append measurements of this `lcdict` to
that `lcdict`. This is used for consolidating light curves for the same
object across different files (sectors/cameras/CCDs?). The appending
does not care about the time order. To consolidate light curves in time
order, use `consolidate_tess_fitslc` below.
filterqualityflags : bool
If True, will remove any measurements that have non-zero quality flags
present. This usually indicates an issue with the instrument or
spacecraft.
nanfilter : {'sap','pdc','sap,pdc'} or None
Indicates the flux measurement type(s) to apply the filtering to.
timestoignore : list of tuples or None
This is of the form::
[(time1_start, time1_end), (time2_start, time2_end), ...]
and indicates the start and end times to mask out of the final
lcdict. Use this to remove anything that wasn't caught by the quality
flags.
Returns
-------
lcdict
Returns an `lcdict` (this is useable by most astrobase functions for LC
processing).
### Response:
def read_tess_fitslc(lcfits,
headerkeys=LCHEADERKEYS,
datakeys=LCDATAKEYS,
sapkeys=LCSAPKEYS,
pdckeys=LCPDCKEYS,
topkeys=LCTOPKEYS,
apkeys=LCAPERTUREKEYS,
normalize=False,
appendto=None,
filterqualityflags=False,
nanfilter=None,
timestoignore=None):
s SAP_FLUX and PDCSAP_FLUX measurements
will be normalized to 1.0 by dividing out the median flux for the
component light curve.
appendto : lcdict or None
If appendto is an `lcdict`, will append measurements of this `lcdict` to
that `lcdict`. This is used for consolidating light curves for the same
object across different files (sectors/cameras/CCDs?). The appending
does not care about the time order. To consolidate light curves in time
order, use `consolidate_tess_fitslc` below.
filterqualityflags : bool
If True, will remove any measurements that have non-zero quality flags
present. This usually indicates an issue with the instrument or
spacecraft.
nanfilter : {,,} or None
Indicates the flux measurement type(s) to apply the filtering to.
timestoignore : list of tuples or None
This is of the form::
[(time1_start, time1_end), (time2_start, time2_end), ...]
and indicates the start and end times to mask out of the final
lcdict. Use this to remove anything that wasn
hdulist = pyfits.open(lcfits)
lchdr, lcdata = hdulist[1].header, hdulist[1].data
lctophdr, lcaperturehdr, lcaperturedata = (hdulist[0].header,
hdulist[2].header,
hdulist[2].data)
hdulist.close()
hdrinfo = {}
for key in headerkeys:
if key in lchdr and lchdr[key] is not None:
hdrinfo[key.lower()] = lchdr[key]
else:
hdrinfo[key.lower()] = None
ndet = lchdr[]
for key in topkeys:
if key in lctophdr and lctophdr[key] is not None:
hdrinfo[key.lower()] = lctophdr[key]
else:
hdrinfo[key.lower()] = None
for key in lcaperturehdr:
if key in lcaperturehdr and lcaperturehdr[key] is not None:
hdrinfo[key.lower()] = lcaperturehdr[key]
else:
hdrinfo[key.lower()] = None
if (filterqualityflags is not False or
nanfilter is not None or
timestoignore is not None):
lcdict = filter_tess_lcdict(lcdict,
filterqualityflags,
nanfilter=nanfilter,
timestoignore=timestoignore)
return lcdict |
def get_want_file_pos(file_list):
want_file_pos = []
print
for i in file_list:
print(os.path.join(*i[]))
while 1:
all_answer = raw_input()
if all_answer in (, ):
break
if all_answer == :
want_file_pos = range(len(file_list))
return want_file_pos
if all_answer == :
for j, tfile in enumerate(file_list):
while 1:
file_answer = raw_input(
.format(os.path.join
(*tfile[])))
if file_answer in (, ):
break
if file_answer == :
want_file_pos.append(j)
print "Here are all the files you want:"
for k in want_file_pos:
print os.path.join(*file_list[k][])
return want_file_pos | Ask the user which files in file_list he or she is interested in.
Return indices for the files inside file_list | ### Input:
Ask the user which files in file_list he or she is interested in.
Return indices for the files inside file_list
### Response:
def get_want_file_pos(file_list):
want_file_pos = []
print
for i in file_list:
print(os.path.join(*i[]))
while 1:
all_answer = raw_input()
if all_answer in (, ):
break
if all_answer == :
want_file_pos = range(len(file_list))
return want_file_pos
if all_answer == :
for j, tfile in enumerate(file_list):
while 1:
file_answer = raw_input(
.format(os.path.join
(*tfile[])))
if file_answer in (, ):
break
if file_answer == :
want_file_pos.append(j)
print "Here are all the files you want:"
for k in want_file_pos:
print os.path.join(*file_list[k][])
return want_file_pos |
def list_tokens(self, request, **kwargs):
infos = [ObfuscatedUrlInfoSerializer(info).data
for info in ObfuscatedUrlInfo.objects.filter(content=self.get_object())]
return Response(infos, status=status.HTTP_200_OK, content_type="application/json") | List all tokens for this content instance.
:param request: a WSGI request object
:param kwargs: keyword arguments (optional)
:return: `rest_framework.response.Response` | ### Input:
List all tokens for this content instance.
:param request: a WSGI request object
:param kwargs: keyword arguments (optional)
:return: `rest_framework.response.Response`
### Response:
def list_tokens(self, request, **kwargs):
infos = [ObfuscatedUrlInfoSerializer(info).data
for info in ObfuscatedUrlInfo.objects.filter(content=self.get_object())]
return Response(infos, status=status.HTTP_200_OK, content_type="application/json") |
def upsert_multi(self, keys, ttl=0, format=None, persist_to=0, replicate_to=0):
return _Base.upsert_multi(self, keys, ttl=ttl, format=format,
persist_to=persist_to,
replicate_to=replicate_to) | Write multiple items to the cluster. Multi version of :meth:`upsert`
:param dict keys: A dictionary of keys to set. The keys are the
keys as they should be on the server, and the values are the
values for the keys to be stored.
`keys` may also be a :class:`~.ItemCollection`. If using a
dictionary variant for item collections, an additional
`ignore_cas` parameter may be supplied with a boolean value.
If not specified, the operation will fail if the CAS value
on the server does not match the one specified in the
`Item`'s `cas` field.
:param int ttl: If specified, sets the expiration value
for all keys
:param int format: If specified, this is the conversion format
which will be used for _all_ the keys.
:param int persist_to: Durability constraint for persistence.
Note that it is more efficient to use :meth:`endure_multi`
on the returned :class:`~couchbase.result.MultiResult` than
using these parameters for a high volume of keys. Using
these parameters however does save on latency as the
constraint checking for each item is performed as soon as it
is successfully stored.
:param int replicate_to: Durability constraints for replication.
See notes on the `persist_to` parameter for usage.
:return: A :class:`~.MultiResult` object, which is a
`dict`-like object
The multi methods are more than just a convenience, they also
save on network performance by batch-scheduling operations,
reducing latencies. This is especially noticeable on smaller
value sizes.
.. seealso:: :meth:`upsert` | ### Input:
Write multiple items to the cluster. Multi version of :meth:`upsert`
:param dict keys: A dictionary of keys to set. The keys are the
keys as they should be on the server, and the values are the
values for the keys to be stored.
`keys` may also be a :class:`~.ItemCollection`. If using a
dictionary variant for item collections, an additional
`ignore_cas` parameter may be supplied with a boolean value.
If not specified, the operation will fail if the CAS value
on the server does not match the one specified in the
`Item`'s `cas` field.
:param int ttl: If specified, sets the expiration value
for all keys
:param int format: If specified, this is the conversion format
which will be used for _all_ the keys.
:param int persist_to: Durability constraint for persistence.
Note that it is more efficient to use :meth:`endure_multi`
on the returned :class:`~couchbase.result.MultiResult` than
using these parameters for a high volume of keys. Using
these parameters however does save on latency as the
constraint checking for each item is performed as soon as it
is successfully stored.
:param int replicate_to: Durability constraints for replication.
See notes on the `persist_to` parameter for usage.
:return: A :class:`~.MultiResult` object, which is a
`dict`-like object
The multi methods are more than just a convenience, they also
save on network performance by batch-scheduling operations,
reducing latencies. This is especially noticeable on smaller
value sizes.
.. seealso:: :meth:`upsert`
### Response:
def upsert_multi(self, keys, ttl=0, format=None, persist_to=0, replicate_to=0):
return _Base.upsert_multi(self, keys, ttl=ttl, format=format,
persist_to=persist_to,
replicate_to=replicate_to) |
def _generate_examples(self, num_examples, data_path, label_path):
images = _extract_mnist_images(data_path, num_examples)
labels = _extract_mnist_labels(label_path, num_examples)
data = list(zip(images, labels))
for image, label in data:
yield {
"image": image,
"label": label,
} | Generate MNIST examples as dicts.
Args:
num_examples (int): The number of example.
data_path (str): Path to the data files
label_path (str): Path to the labels
Yields:
Generator yielding the next examples | ### Input:
Generate MNIST examples as dicts.
Args:
num_examples (int): The number of example.
data_path (str): Path to the data files
label_path (str): Path to the labels
Yields:
Generator yielding the next examples
### Response:
def _generate_examples(self, num_examples, data_path, label_path):
images = _extract_mnist_images(data_path, num_examples)
labels = _extract_mnist_labels(label_path, num_examples)
data = list(zip(images, labels))
for image, label in data:
yield {
"image": image,
"label": label,
} |
def clean_bytecode_extension(filename):
path, extension = os.path.splitext(filename)
if extension == :
filename = % path
return filename | Replaces Python bytecode extensions (``.pyc``) with their source extension. | ### Input:
Replaces Python bytecode extensions (``.pyc``) with their source extension.
### Response:
def clean_bytecode_extension(filename):
path, extension = os.path.splitext(filename)
if extension == :
filename = % path
return filename |
def verify_permitted_to_read(gs_path):
from . import _bucket
bucket, prefix = _bucket.parse_name(gs_path)
credentials = None
if datalab.context.Context.is_signed_in():
credentials = datalab.context._utils.get_credentials()
args = {
: Api._MAX_RESULTS,
:
}
if prefix is not None:
args[] = prefix
url = Api._ENDPOINT + (Api._OBJECT_PATH % (bucket, ))
try:
datalab.utils.Http.request(url, args=args, credentials=credentials)
except datalab.utils.RequestException as e:
if e.status == 401:
raise Exception(
)
raise e | Check if the user has permissions to read from the given path.
Args:
gs_path: the GCS path to check if user is permitted to read.
Raises:
Exception if user has no permissions to read. | ### Input:
Check if the user has permissions to read from the given path.
Args:
gs_path: the GCS path to check if user is permitted to read.
Raises:
Exception if user has no permissions to read.
### Response:
def verify_permitted_to_read(gs_path):
from . import _bucket
bucket, prefix = _bucket.parse_name(gs_path)
credentials = None
if datalab.context.Context.is_signed_in():
credentials = datalab.context._utils.get_credentials()
args = {
: Api._MAX_RESULTS,
:
}
if prefix is not None:
args[] = prefix
url = Api._ENDPOINT + (Api._OBJECT_PATH % (bucket, ))
try:
datalab.utils.Http.request(url, args=args, credentials=credentials)
except datalab.utils.RequestException as e:
if e.status == 401:
raise Exception(
)
raise e |
def getOSDesc(interface, ext_list):
try:
ext_type, = {type(x) for x in ext_list}
except ValueError:
raise TypeError()
if issubclass(ext_type, OSExtCompatDesc):
wIndex = 4
kw = {
: OSDescHeaderBCount(
bCount=len(ext_list),
Reserved=0,
),
}
elif issubclass(ext_type, OSExtPropDescHead):
wIndex = 5
kw = {
: len(ext_list),
}
else:
raise TypeError()
ext_list_type = ext_type * len(ext_list)
klass = type(
,
(OSDescHeader, ),
{
: [
(, ext_list_type),
],
},
)
return klass(
interface=interface,
dwLength=ctypes.sizeof(klass),
bcdVersion=1,
wIndex=wIndex,
ext_list=ext_list_type(*ext_list),
**kw
) | Return an OS description header.
interface (int)
Related interface number.
ext_list (list of OSExtCompatDesc or OSExtPropDesc)
List of instances of extended descriptors. | ### Input:
Return an OS description header.
interface (int)
Related interface number.
ext_list (list of OSExtCompatDesc or OSExtPropDesc)
List of instances of extended descriptors.
### Response:
def getOSDesc(interface, ext_list):
try:
ext_type, = {type(x) for x in ext_list}
except ValueError:
raise TypeError()
if issubclass(ext_type, OSExtCompatDesc):
wIndex = 4
kw = {
: OSDescHeaderBCount(
bCount=len(ext_list),
Reserved=0,
),
}
elif issubclass(ext_type, OSExtPropDescHead):
wIndex = 5
kw = {
: len(ext_list),
}
else:
raise TypeError()
ext_list_type = ext_type * len(ext_list)
klass = type(
,
(OSDescHeader, ),
{
: [
(, ext_list_type),
],
},
)
return klass(
interface=interface,
dwLength=ctypes.sizeof(klass),
bcdVersion=1,
wIndex=wIndex,
ext_list=ext_list_type(*ext_list),
**kw
) |
def run_optimization(self, max_iter = 0, max_time = np.inf, eps = 1e-8, context = None, verbosity=False, save_models_parameters= True, report_file = None, evaluations_file = None, models_file=None):
if self.objective is None:
raise InvalidConfigError("Cannot run the optimization loop without the objective function")
self.verbosity = verbosity
self.save_models_parameters = save_models_parameters
self.report_file = report_file
self.evaluations_file = evaluations_file
self.models_file = models_file
self.model_parameters_iterations = None
self.context = context
if self.save_models_parameters == True:
if not (isinstance(self.model, GPyOpt.models.GPModel) or isinstance(self.model, GPyOpt.models.GPModel_MCMC)):
print()
self.save_models_parameters = False
self.eps = eps
if (max_iter is None) and (max_time is None):
self.max_iter = 0
self.max_time = np.inf
elif (max_iter is None) and (max_time is not None):
self.max_iter = np.inf
self.max_time = max_time
elif (max_iter is not None) and (max_time is None):
self.max_iter = max_iter
self.max_time = np.inf
else:
self.max_iter = max_iter
self.max_time = max_time
if self.X is not None and self.Y is None:
self.Y, cost_values = self.objective.evaluate(self.X)
if self.cost.cost_type == :
self.cost.update_cost_model(self.X, cost_values)
self.time_zero = time.time()
self.cum_time = 0
self.num_acquisitions = 0
self.suggested_sample = self.X
self.Y_new = self.Y
while (self.max_time > self.cum_time):
try:
self._update_model(self.normalization_type)
except np.linalg.linalg.LinAlgError:
break
if (self.num_acquisitions >= self.max_iter
or (len(self.X) > 1 and self._distance_last_evaluations() <= self.eps)):
break
self.suggested_sample = self._compute_next_evaluations()
self.X = np.vstack((self.X,self.suggested_sample))
self.evaluate_objective()
self.cum_time = time.time() - self.time_zero
self.num_acquisitions += 1
if verbosity:
print("num acquisition: {}, time elapsed: {:.2f}s".format(
self.num_acquisitions, self.cum_time))
self._compute_results()
if self.report_file is not None:
self.save_report(self.report_file)
if self.evaluations_file is not None:
self.save_evaluations(self.evaluations_file)
if self.models_file is not None:
self.save_models(self.models_file) | Runs Bayesian Optimization for a number 'max_iter' of iterations (after the initial exploration data)
:param max_iter: exploration horizon, or number of acquisitions. If nothing is provided optimizes the current acquisition.
:param max_time: maximum exploration horizon in seconds.
:param eps: minimum distance between two consecutive x's to keep running the model.
:param context: fixes specified variables to a particular context (values) for the optimization run (default, None).
:param verbosity: flag to print the optimization results after each iteration (default, False).
:param report_file: file to which the results of the optimization are saved (default, None).
:param evaluations_file: file to which the evalations are saved (default, None).
:param models_file: file to which the model parameters are saved (default, None). | ### Input:
Runs Bayesian Optimization for a number 'max_iter' of iterations (after the initial exploration data)
:param max_iter: exploration horizon, or number of acquisitions. If nothing is provided optimizes the current acquisition.
:param max_time: maximum exploration horizon in seconds.
:param eps: minimum distance between two consecutive x's to keep running the model.
:param context: fixes specified variables to a particular context (values) for the optimization run (default, None).
:param verbosity: flag to print the optimization results after each iteration (default, False).
:param report_file: file to which the results of the optimization are saved (default, None).
:param evaluations_file: file to which the evalations are saved (default, None).
:param models_file: file to which the model parameters are saved (default, None).
### Response:
def run_optimization(self, max_iter = 0, max_time = np.inf, eps = 1e-8, context = None, verbosity=False, save_models_parameters= True, report_file = None, evaluations_file = None, models_file=None):
if self.objective is None:
raise InvalidConfigError("Cannot run the optimization loop without the objective function")
self.verbosity = verbosity
self.save_models_parameters = save_models_parameters
self.report_file = report_file
self.evaluations_file = evaluations_file
self.models_file = models_file
self.model_parameters_iterations = None
self.context = context
if self.save_models_parameters == True:
if not (isinstance(self.model, GPyOpt.models.GPModel) or isinstance(self.model, GPyOpt.models.GPModel_MCMC)):
print()
self.save_models_parameters = False
self.eps = eps
if (max_iter is None) and (max_time is None):
self.max_iter = 0
self.max_time = np.inf
elif (max_iter is None) and (max_time is not None):
self.max_iter = np.inf
self.max_time = max_time
elif (max_iter is not None) and (max_time is None):
self.max_iter = max_iter
self.max_time = np.inf
else:
self.max_iter = max_iter
self.max_time = max_time
if self.X is not None and self.Y is None:
self.Y, cost_values = self.objective.evaluate(self.X)
if self.cost.cost_type == :
self.cost.update_cost_model(self.X, cost_values)
self.time_zero = time.time()
self.cum_time = 0
self.num_acquisitions = 0
self.suggested_sample = self.X
self.Y_new = self.Y
while (self.max_time > self.cum_time):
try:
self._update_model(self.normalization_type)
except np.linalg.linalg.LinAlgError:
break
if (self.num_acquisitions >= self.max_iter
or (len(self.X) > 1 and self._distance_last_evaluations() <= self.eps)):
break
self.suggested_sample = self._compute_next_evaluations()
self.X = np.vstack((self.X,self.suggested_sample))
self.evaluate_objective()
self.cum_time = time.time() - self.time_zero
self.num_acquisitions += 1
if verbosity:
print("num acquisition: {}, time elapsed: {:.2f}s".format(
self.num_acquisitions, self.cum_time))
self._compute_results()
if self.report_file is not None:
self.save_report(self.report_file)
if self.evaluations_file is not None:
self.save_evaluations(self.evaluations_file)
if self.models_file is not None:
self.save_models(self.models_file) |
def paste_action_callback(self, *event):
if react_to_event(self.view, self.tree_view, event):
sm_selection, _ = self.get_state_machine_selection()
if len(sm_selection.states) == 1:
global_clipboard.paste(sm_selection.get_selected_state(), limited=[, , ])
else:
logger.warning("Please select only one state to paste.")
return True | Callback method for paste action | ### Input:
Callback method for paste action
### Response:
def paste_action_callback(self, *event):
if react_to_event(self.view, self.tree_view, event):
sm_selection, _ = self.get_state_machine_selection()
if len(sm_selection.states) == 1:
global_clipboard.paste(sm_selection.get_selected_state(), limited=[, , ])
else:
logger.warning("Please select only one state to paste.")
return True |
def update_script(self, information, timeout=-1):
uri = "{}/script".format(self.data["uri"])
return self._helper.update(information, uri=uri, timeout=timeout) | Updates the configuration script of the logical enclosure and on all enclosures in the logical enclosure with
the specified ID.
Args:
information: Updated script.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Return:
Configuration script. | ### Input:
Updates the configuration script of the logical enclosure and on all enclosures in the logical enclosure with
the specified ID.
Args:
information: Updated script.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Return:
Configuration script.
### Response:
def update_script(self, information, timeout=-1):
uri = "{}/script".format(self.data["uri"])
return self._helper.update(information, uri=uri, timeout=timeout) |
def split(self, start=None, end=None, datatype=None, **kwargs):
if not self.interface.multi:
if datatype == :
obj = self.array(**kwargs)
elif datatype == :
obj = self.dframe(**kwargs)
elif datatype == :
obj = self.columns(**kwargs)
elif datatype is None:
obj = self
else:
raise ValueError("%s datatype not support" % datatype)
return [obj]
return self.interface.split(self, start, end, datatype, **kwargs) | The split method allows splitting a Path type into a list of
subpaths of the same type. A start and/or end may be supplied
to select a subset of paths. | ### Input:
The split method allows splitting a Path type into a list of
subpaths of the same type. A start and/or end may be supplied
to select a subset of paths.
### Response:
def split(self, start=None, end=None, datatype=None, **kwargs):
if not self.interface.multi:
if datatype == :
obj = self.array(**kwargs)
elif datatype == :
obj = self.dframe(**kwargs)
elif datatype == :
obj = self.columns(**kwargs)
elif datatype is None:
obj = self
else:
raise ValueError("%s datatype not support" % datatype)
return [obj]
return self.interface.split(self, start, end, datatype, **kwargs) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.