| repository_name (string, len 7-55) | func_path_in_repository (string, len 4-223) | func_name (string, len 1-134) | whole_func_string (string, len 75-104k) | language (string, 1 class) | func_code_string (string, len 75-104k) | func_code_tokens (sequence, len 19-28.4k) | func_documentation_string (string, len 1-46.9k) | func_documentation_tokens (sequence, len 1-1.97k) | split_name (string, 1 class) | func_code_url (string, len 87-315) |
|---|---|---|---|---|---|---|---|---|---|---|
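Each row pairs a real Python function with its docstring and tokenised forms. As a quick orientation, here is a minimal loading sketch; the use of the `datasets` library and the file name `train.jsonl` are assumptions for illustration, not part of this dump.

```python
# Hedged sketch: load a CodeSearchNet-style dump and inspect one row.
from datasets import load_dataset

ds = load_dataset("json", data_files="train.jsonl", split="train")  # hypothetical file
row = ds[0]
print(row["repository_name"], row["func_name"])  # repo and qualified function name
print(row["func_code_url"])                      # permalink to the source lines
# The *_tokens columns hold pre-split token sequences:
assert isinstance(row["func_code_tokens"], list)
```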
andrewda/frc-livescore | livescore/simpleocr_utils/processor.py | ProcessorStack.set_parameters | def set_parameters(self, **args):
    """sets to all wrapped processors"""
    not_used = set()
    not_given = set()
    for p in self.processors:
        nu, ng = p.set_parameters(**args)
        not_used = not_used.union(nu)
        not_given = not_given.union(ng)
    return not_used, not_given | python | *(func_code_string: identical to whole_func_string)* | [ …func_code_tokens elided: token-by-token split of the code above… ] | sets to all wrapped processors | [ "sets", "to", "all", "wrapped", "processors" ] | train | https://github.com/andrewda/frc-livescore/blob/71594cd6d2c8b6c5feb3889bb05552d09b8128b1/livescore/simpleocr_utils/processor.py#L137-L145 |
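The method fans `**args` out to every wrapped processor and unions the two bookkeeping sets each one returns. Below is a minimal sketch of the per-processor contract; `TinyProcessor` and its `PARAMS` dict are hypothetical stand-ins, not the simpleocr API.

```python
# Hypothetical processor illustrating the (not_used, not_given) return contract.
class TinyProcessor(object):
    PARAMS = {"threshold": 128}  # assumed parameter schema

    def set_parameters(self, **args):
        not_used = set(k for k in args if k not in self.PARAMS)   # unknown keys
        not_given = set(k for k in self.PARAMS if k not in args)  # keys left at defaults
        for k in self.PARAMS:
            setattr(self, k, args.get(k, self.PARAMS[k]))
        return not_used, not_given
```

A stack of such processors can then report, across the whole pipeline, which keyword arguments nobody consumed and which parameters were never supplied.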
daskol/nls | nls/animation.py | AbstractAnimation.render | def render(self, filename):
    """Perform initialization of render, set quality and size video attributes and then call template method that
    is defined in child class.
    """
    self.elapsed_time = -time()
    dpi = 100
    fig = figure(figsize=(16, 9), dpi=dpi)
    with self.writer.saving(fig, filename, dpi):
        for frame_id in xrange(self.frames + 1):
            self.renderFrame(frame_id)
            self.writer.grab_frame()
    self.elapsed_time += time() | python | *(func_code_string: identical to whole_func_string)* | [ …func_code_tokens elided: token-by-token split of the code above… ] | Perform initialization of render, set quality and size video attributes and then call template method that
is defined in child class. | [ "Perform", "initialization", "of", "render", "set", "quality", "and", "size", "video", "attributes", "and", "then", "call", "template", "method", "that", "is", "defined", "in", "child", "class", "." ] | train | https://github.com/daskol/nls/blob/00bb4555e4f56e222dc6f54faf2e286567519626/nls/animation.py#L30-L41 |
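The negative-start timing idiom (`elapsed_time = -time()` ... `elapsed_time += time()`) is just a compact way to accumulate wall-clock seconds. Standalone, using nothing beyond the standard library:

```python
from time import sleep, time

elapsed = -time()   # subtract the start timestamp up front
sleep(0.5)          # stand-in for the frame-rendering loop
elapsed += time()   # adding the end timestamp leaves the duration
print("%.2f seconds" % elapsed)
```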
daskol/nls | nls/animation.py | AbstractAnimation.report | def report(self):
    """Prints in standard output report about animation rendering. Namely, it prints seconds spent, number of
    frames and step size that is used in functional animation.
    """
    message = 'Elapsed in {0} seconds with {1} frames and {2} step.'
    print(message.format(self.elapsed_time, self.frames, self.step)) | python | *(func_code_string: identical to whole_func_string)* | [ …func_code_tokens elided: token-by-token split of the code above… ] | Prints in standard output report about animation rendering. Namely, it prints seconds spent, number of
frames and step size that is used in functional animation. | [ "Prints", "in", "standard", "output", "report", "about", "animation", "rendering", ".", "Namely", "it", "prints", "seconds", "spent", "number", "of", "frames", "and", "step", "size", "that", "is", "used", "in", "functional", "animation", "." ] | train | https://github.com/daskol/nls/blob/00bb4555e4f56e222dc6f54faf2e286567519626/nls/animation.py#L48-L53 |
pytroll/posttroll | posttroll/subscriber.py | Subscriber.add | def add(self, address, topics=None):
    """Add *address* to the subscribing list for *topics*.
    If topics is None we will subscribe to already specified topics.
    """
    with self._lock:
        if address in self.addresses:
            return False
        topics = self._magickfy_topics(topics) or self._topics
        LOGGER.info("Subscriber adding address %s with topics %s",
                    str(address), str(topics))
        subscriber = get_context().socket(SUB)
        for t__ in topics:
            subscriber.setsockopt_string(SUBSCRIBE, six.text_type(t__))
        subscriber.connect(address)
        self.sub_addr[subscriber] = address
        self.addr_sub[address] = subscriber
        if self.poller:
            self.poller.register(subscriber, POLLIN)
        return True | python | *(func_code_string: identical to whole_func_string)* | [ …func_code_tokens elided: token-by-token split of the code above… ] | Add *address* to the subscribing list for *topics*.
If topics is None we will subscribe to already specified topics. | [ "Add", "*", "address", "*", "to", "the", "subscribing", "list", "for", "*", "topics", "*", "." ] | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/subscriber.py#L90-L110 |
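Typical use, hedged: the addresses and topics below are placeholders, and a posttroll publisher must actually be running for messages to arrive.

```python
from posttroll.subscriber import Subscriber

sub = Subscriber(["tcp://localhost:9000"], topics="pytroll://")  # hypothetical endpoints
added = sub.add("tcp://localhost:9001", topics=["/new/topic"])
# add() returns False when the address is already registered:
assert sub.add("tcp://localhost:9001") is False
```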
pytroll/posttroll | posttroll/subscriber.py | Subscriber.remove | def remove(self, address):
    """Remove *address* from the subscribing list for *topics*.
    """
    with self._lock:
        try:
            subscriber = self.addr_sub[address]
        except KeyError:
            return False
        LOGGER.info("Subscriber removing address %s", str(address))
        if self.poller:
            self.poller.unregister(subscriber)
        del self.addr_sub[address]
        del self.sub_addr[subscriber]
        subscriber.close()
        return True | python | *(func_code_string: identical to whole_func_string)* | [ …func_code_tokens elided: token-by-token split of the code above… ] | Remove *address* from the subscribing list for *topics*. | [ "Remove", "*", "address", "*", "from", "the", "subscribing", "list", "for", "*", "topics", "*", "." ] | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/subscriber.py#L112-L126 |
pytroll/posttroll | posttroll/subscriber.py | Subscriber.update | def update(self, addresses):
    """Updating with a set of addresses.
    """
    if isinstance(addresses, six.string_types):
        addresses = [addresses, ]
    s0_, s1_ = set(self.addresses), set(addresses)
    sr_, sa_ = s0_.difference(s1_), s1_.difference(s0_)
    for a__ in sr_:
        self.remove(a__)
    for a__ in sa_:
        self.add(a__)
    return bool(sr_ or sa_) | python | *(func_code_string: identical to whole_func_string)* | [ …func_code_tokens elided: token-by-token split of the code above… ] | Updating with a set of addresses. | [ "Updating", "with", "a", "set", "of", "addresses", "." ] | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/subscriber.py#L128-L139 |
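The body is two set differences: drop what we have but no longer want, add what we want but do not have. Standalone:

```python
current = {"tcp://a", "tcp://b"}
wanted = {"tcp://b", "tcp://c"}
to_remove = current - wanted   # {"tcp://a"}  -> self.remove(...)
to_add = wanted - current      # {"tcp://c"}  -> self.add(...)
changed = bool(to_remove or to_add)
```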
pytroll/posttroll | posttroll/subscriber.py | Subscriber.add_hook_sub | def add_hook_sub(self, address, topics, callback):
    """Specify a *callback* in the same stream (thread) as the main receive
    loop. The callback will be called with the received messages from the
    specified subscription.
    Good for operations which are required to be done in the same thread as
    the main receive loop (e.g. operations on the underlying sockets).
    """
    LOGGER.info("Subscriber adding SUB hook %s for topics %s",
                str(address), str(topics))
    socket = get_context().socket(SUB)
    for t__ in self._magickfy_topics(topics):
        socket.setsockopt_string(SUBSCRIBE, six.text_type(t__))
    socket.connect(address)
    self._add_hook(socket, callback) | python | *(func_code_string: identical to whole_func_string)* | [ …func_code_tokens elided: token-by-token split of the code above… ] | Specify a *callback* in the same stream (thread) as the main receive
loop. The callback will be called with the received messages from the
specified subscription.
Good for operations which are required to be done in the same thread as
the main receive loop (e.g. operations on the underlying sockets). | [ "Specify", "a", "*", "callback", "*", "in", "the", "same", "stream", "(", "thread", ")", "as", "the", "main", "receive", "loop", ".", "The", "callback", "will", "be", "called", "with", "the", "received", "messages", "from", "the", "specified", "subscription", "." ] | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/subscriber.py#L141-L155 |
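A hook callback runs inside the receive thread, so it should return quickly. A hedged sketch follows; the address and topic are placeholders, and the `subject` attribute on the posttroll `Message` is an assumption here:

```python
def on_heartbeat(msg):
    # Keep this short: it blocks the main receive loop while it runs.
    print("heartbeat:", msg.subject)

# sub.add_hook_sub("tcp://localhost:9002", ["/heartbeat"], on_heartbeat)
```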
pytroll/posttroll | posttroll/subscriber.py | Subscriber.add_hook_pull | def add_hook_pull(self, address, callback):
    """Same as above, but with a PULL socket.
    (e.g. good for pushed 'inproc' messages from another thread).
    """
    LOGGER.info("Subscriber adding PULL hook %s", str(address))
    socket = get_context().socket(PULL)
    socket.connect(address)
    self._add_hook(socket, callback) | python | *(func_code_string: identical to whole_func_string)* | [ …func_code_tokens elided: token-by-token split of the code above… ] | Same as above, but with a PULL socket.
(e.g. good for pushed 'inproc' messages from another thread). | [ "Same", "as", "above", "but", "with", "a", "PULL", "socket", ".", "(", "e", ".", "g", "good", "for", "pushed", "inproc", "messages", "from", "another", "thread", ")", "." ] | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/subscriber.py#L157-L164 |
pytroll/posttroll | posttroll/subscriber.py | Subscriber._add_hook | def _add_hook(self, socket, callback):
    """Generic hook. The passed socket has to be "receive only".
    """
    self._hooks.append(socket)
    self._hooks_cb[socket] = callback
    if self.poller:
        self.poller.register(socket, POLLIN) | python | *(func_code_string: identical to whole_func_string)* | [ …func_code_tokens elided: token-by-token split of the code above… ] | Generic hook. The passed socket has to be "receive only". | [ "Generic", "hook", ".", "The", "passed", "socket", "has", "to", "be", "receive", "only", "." ] | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/subscriber.py#L166-L172 |
pytroll/posttroll | posttroll/subscriber.py | Subscriber.recv | def recv(self, timeout=None):
    """Receive, optionally with *timeout* in seconds.
    """
    if timeout:
        timeout *= 1000.
    for sub in list(self.subscribers) + self._hooks:
        self.poller.register(sub, POLLIN)
    self._loop = True
    try:
        while self._loop:
            sleep(0)
            try:
                socks = dict(self.poller.poll(timeout=timeout))
                if socks:
                    for sub in self.subscribers:
                        if sub in socks and socks[sub] == POLLIN:
                            m__ = Message.decode(sub.recv_string(NOBLOCK))
                            if not self._filter or self._filter(m__):
                                if self._translate:
                                    url = urlsplit(self.sub_addr[sub])
                                    host = url[1].split(":")[0]
                                    m__.sender = (m__.sender.split("@")[0]
                                                  + "@" + host)
                                yield m__
                    for sub in self._hooks:
                        if sub in socks and socks[sub] == POLLIN:
                            m__ = Message.decode(sub.recv_string(NOBLOCK))
                            self._hooks_cb[sub](m__)
                else:
                    # timeout
                    yield None
            except ZMQError as err:
                LOGGER.exception("Receive failed: %s", str(err))
    finally:
        for sub in list(self.subscribers) + self._hooks:
            self.poller.unregister(sub) | python | *(func_code_string: identical to whole_func_string)* | [ …func_code_tokens elided: token-by-token split of the code above… ] | Receive, optionally with *timeout* in seconds. | [ "Receive", "optionally", "with", "*", "timeout", "*", "in", "seconds", "." ] | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/subscriber.py#L186-L223 |
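`recv()` is a generator: it yields decoded messages as they arrive and yields `None` whenever the poll times out, which gives the caller a periodic hook. A hedged consumption sketch (`sub` and `handle` are placeholders):

```python
for msg in sub.recv(timeout=2):
    if msg is None:        # the 2-second poll timed out
        continue           # good place for housekeeping or a shutdown check
    handle(msg)            # hypothetical application handler
```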
pytroll/posttroll | posttroll/subscriber.py | Subscriber.close | def close(self):
    """Close the subscriber: stop it and close the local subscribers.
    """
    self.stop()
    for sub in list(self.subscribers) + self._hooks:
        try:
            sub.setsockopt(LINGER, 1)
            sub.close()
        except ZMQError:
            pass | python | *(func_code_string: identical to whole_func_string)* | [ …func_code_tokens elided: token-by-token split of the code above… ] | Close the subscriber: stop it and close the local subscribers. | [ "Close", "the", "subscriber", ":", "stop", "it", "and", "close", "the", "local", "subscribers", "." ] | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/subscriber.py#L233-L242 |
pytroll/posttroll | posttroll/subscriber.py | Subscriber._magickfy_topics | def _magickfy_topics(topics):
    """Add the magick to the topics if missing.
    """
    # If topic does not start with messages._MAGICK (pytroll:/), it will be
    # prepended.
    if topics is None:
        return None
    if isinstance(topics, six.string_types):
        topics = [topics, ]
    ts_ = []
    for t__ in topics:
        if not t__.startswith(_MAGICK):
            if t__ and t__[0] == '/':
                t__ = _MAGICK + t__
            else:
                t__ = _MAGICK + '/' + t__
        ts_.append(t__)
    return ts_ | python | *(func_code_string: identical to whole_func_string)* | [ …func_code_tokens elided: token-by-token split of the code above… ] | Add the magick to the topics if missing. | [ "Add", "the", "magick", "to", "the", "topics", "if", "missing", "." ] | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/subscriber.py#L245-L262 |
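Given the source comment that `_MAGICK` is `pytroll:/`, the normalisation behaves as below; this is a standalone restatement for illustration, not the module itself.

```python
_MAGICK = 'pytroll:/'  # per the comment in the source

def magickfy(t):
    if not t.startswith(_MAGICK):
        t = _MAGICK + t if t.startswith('/') else _MAGICK + '/' + t
    return t

assert magickfy('/my/topic') == 'pytroll://my/topic'
assert magickfy('my/topic') == 'pytroll://my/topic'
assert magickfy('pytroll://my/topic') == 'pytroll://my/topic'  # already prefixed
```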
pytroll/posttroll | posttroll/subscriber.py | NSSubscriber.start | def start(self):
    """Start the subscriber.
    """
    def _get_addr_loop(service, timeout):
        """Try to get the address of *service* for up to *timeout* seconds.
        """
        then = datetime.now() + timedelta(seconds=timeout)
        while datetime.now() < then:
            addrs = get_pub_address(service, nameserver=self._nameserver)
            if addrs:
                return [addr["URI"] for addr in addrs]
            time.sleep(1)
        return []

    # Subscribe to those services and topics.
    LOGGER.debug("Subscribing to topics %s", str(self._topics))
    self._subscriber = Subscriber(self._addresses,
                                  self._topics,
                                  translate=self._translate)
    if self._addr_listener:
        self._addr_listener = _AddressListener(self._subscriber,
                                               self._services,
                                               nameserver=self._nameserver)

    # Search for addresses corresponding to service.
    for service in self._services:
        addresses = _get_addr_loop(service, self._timeout)
        if not addresses:
            LOGGER.warning("Can't get any address for %s", service)
            continue
        else:
            LOGGER.debug("Got address for %s: %s",
                         str(service), str(addresses))
        for addr in addresses:
            self._subscriber.add(addr)
    return self._subscriber | python | *(func_code_string: identical to whole_func_string)* | [ …func_code_tokens elided: token-by-token split of the code above… ] | Start the subscriber. | [ "Start", "the", "subscriber", "." ] | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/subscriber.py#L306-L343 |
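The inner `_get_addr_loop` is a poll-until-deadline pattern. Isolated and slightly generalised (the `lookup` callable is a placeholder for the nameserver query):

```python
import time
from datetime import datetime, timedelta

def wait_for(lookup, timeout):
    deadline = datetime.now() + timedelta(seconds=timeout)
    while datetime.now() < deadline:
        result = lookup()       # e.g. a get_pub_address(...) call
        if result:
            return result
        time.sleep(1)           # back off between attempts
    return []
```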
pytroll/posttroll | posttroll/subscriber.py | _AddressListener.handle_msg | def handle_msg(self, msg):
    """handle the message *msg*.
    """
    addr_ = msg.data["URI"]
    status = msg.data.get('status', True)
    if status:
        msg_service = msg.data.get('service')
        for service in self.services:
            if not service or service in msg_service:
                LOGGER.debug("Adding address %s %s", str(addr_),
                             str(service))
                self.subscriber.add(addr_)
                break
    else:
        LOGGER.debug("Removing address %s", str(addr_))
        self.subscriber.remove(addr_) | python | *(func_code_string: identical to whole_func_string)* | [ …func_code_tokens elided: token-by-token split of the code above… ] | handle the message *msg*. | [ "handle", "the", "message", "*", "msg", "*", "." ] | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/subscriber.py#L399-L414 |
thespacedoctor/sherlock | sherlock/transient_classifier.py | transient_classifier.classify | def classify(self):
    """
    *classify the transients selected from the transient selection query in the settings file or passed in via the CL or other code*

    **Return:**
        - ``crossmatches`` -- list of dictionaries of crossmatched associated sources
        - ``classifications`` -- the classifications assigned to the transients post-crossmatches (dictionary of rank ordered list of classifications)

    See class docstring for usage.

    .. todo ::

        - update key arguments values and definitions with defaults
        - update return values and definitions
        - update usage examples and text
        - update docstring text
        - check sublime snippet exists
        - clip any useful text to docs mindmap
        - regenerate the docs and check rendering of this docstring
    """
    global theseBatches
    global crossmatchArray

    self.log.debug('starting the ``classify`` method')

    remaining = 1

    # THE COLUMN MAPS - WHICH COLUMNS IN THE CATALOGUE TABLES = RA, DEC,
    # REDSHIFT, MAG ETC
    colMaps = get_crossmatch_catalogues_column_map(
        log=self.log,
        dbConn=self.cataloguesDbConn
    )

    self._create_tables_if_not_exist()

    import time
    start_time = time.time()

    # COUNT SEARCHES
    sa = self.settings["search algorithm"]
    searchCount = 0
    brightnessFilters = ["bright", "faint", "general"]
    for search_name, searchPara in sa.iteritems():
        for bf in brightnessFilters:
            if bf in searchPara:
                searchCount += 1

    cpuCount = psutil.cpu_count()
    if searchCount > cpuCount:
        searchCount = cpuCount

    largeBatchSize = 5000
    miniBatchSize = 100

    self.largeBatchSize = largeBatchSize
    # print "mini batch size ", str(miniBatchSize)

    while remaining:

        # IF A TRANSIENT HAS NOT BEEN PASSED IN VIA THE COMMAND-LINE, THEN
        # QUERY THE TRANSIENT DATABASE
        if not self.ra and not self.dec:

            # COUNT REMAINING TRANSIENTS
            from fundamentals.mysql import readquery
            sqlQuery = self.settings["database settings"][
                "transients"]["transient count"]
            thisInt = randint(0, 100)
            if "where" in sqlQuery:
                sqlQuery = sqlQuery.replace(
                    "where", "where %(thisInt)s=%(thisInt)s and " % locals())

            if remaining == 1 or remaining < largeBatchSize:
                rows = readquery(
                    log=self.log,
                    sqlQuery=sqlQuery,
                    dbConn=self.transientsDbConn,
                )
                remaining = rows[0]["count(*)"]
            else:
                remaining = remaining - largeBatchSize

            print "%(remaining)s transient sources requiring a classification remain" % locals()

            # START THE TIMER TO TRACK CLASSIFICATION SPEED
            start_time = time.time()

            # A LIST OF DICTIONARIES OF TRANSIENT METADATA
            transientsMetadataList = self._get_transient_metadata_from_database_list()

            count = len(transientsMetadataList)
            print " now classifying the next %(count)s transient sources" % locals()

            # EXAMPLE OF TRANSIENT METADATA
            # { 'name': 'PS17gx',
            #   'alt_id': 'PS17gx',
            #   'object_classification': 'SN',
            #   'dec': '+43:25:44.1',
            #   'id': 1,
            #   'ra': '08:57:57.19'}
        # TRANSIENT PASSED VIA COMMAND-LINE
        else:
            if not self.name:
                name = "transient"
            else:
                name = self.name
            transient = {
                'name': name,
                'object_classification': None,
                'dec': self.dec,
                'id': name,
                'ra': self.ra
            }
            transientsMetadataList = [transient]
            remaining = 0

        if self.oneRun:
            remaining = 0

        if len(transientsMetadataList) == 0:
            if self.daemonMode == False:
                remaining = 0
                print "No transients need classified"
                return None, None
            else:
                print "No remaining transients need classified, will try again in 5 mins"
                time.sleep(10)

        # FROM THE LOCATIONS OF THE TRANSIENTS, CHECK IF OUR LOCAL NED DATABASE
        # NEEDS UPDATED
        if self.updateNed:
            self._update_ned_stream(
                transientsMetadataList=transientsMetadataList
            )

        # SOME TESTING SHOWED THAT 25 IS GOOD
        total = len(transientsMetadataList)
        batches = int((float(total) / float(miniBatchSize)) + 1.)

        if batches == 0:
            batches = 1

        start = 0
        end = 0
        theseBatches = []
        for i in range(batches):
            end = end + miniBatchSize
            start = i * miniBatchSize
            thisBatch = transientsMetadataList[start:end]
            theseBatches.append(thisBatch)

        print "BATCH SIZE = %(total)s" % locals()
        print "MINI BATCH SIZE = %(batches)s x %(miniBatchSize)s" % locals()

        # DEFINE AN INPUT ARRAY
        # cores = psutil.cpu_count()
        # if cores > 8:
        #     cores = 8

        start_time2 = time.time()
        print "START CROSSMATCH"

        crossmatchArray = fmultiprocess(log=self.log, function=self._crossmatch_transients_against_catalogues,
                                        inputArray=range(len(theseBatches)), poolSize=None, colMaps=colMaps)

        print "FINISH CROSSMATCH/START RANKING: %d" % (time.time() - start_time2,)
        start_time2 = time.time()

        classifications = {}
        crossmatches = []

        for sublist in crossmatchArray:
            sublist = sorted(
                sublist, key=itemgetter('transient_object_id'))

            # REORGANISE INTO INDIVIDUAL TRANSIENTS FOR RANKING AND
            # TOP-LEVEL CLASSIFICATION EXTRACTION
            batch = []
            if len(sublist) != 0:
                transientId = sublist[0]['transient_object_id']
                for s in sublist:
                    if s['transient_object_id'] != transientId:
                        # RANK TRANSIENT CROSSMATCH BATCH
                        cl, cr = self._rank_classifications(
                            batch, colMaps)
                        crossmatches.extend(cr)
                        classifications = dict(
                            classifications.items() + cl.items())

                        transientId = s['transient_object_id']
                        batch = [s]
                    else:
                        batch.append(s)

                # RANK FINAL BATCH
                cl, cr = self._rank_classifications(
                    batch, colMaps)
                classifications = dict(
                    classifications.items() + cl.items())
                crossmatches.extend(cr)

        for t in transientsMetadataList:
            if t["id"] not in classifications:
                classifications[t["id"]] = ["ORPHAN"]

        if self.cl:
            self._print_results_to_stdout(
                classifications=classifications,
                crossmatches=crossmatches
            )

        # UPDATE THE TRANSIENT DATABASE IF UPDATE REQUESTED (ADD DATA TO
        # tcs_crossmatch_table AND A CLASSIFICATION TO THE ORIGINAL TRANSIENT
        # TABLE)
        print "FINISH RANKING/START UPDATING TRANSIENT DB: %d" % (time.time() - start_time2,)
        start_time2 = time.time()
        if self.update and not self.ra:
            self._update_transient_database(
                crossmatches=crossmatches,
                classifications=classifications,
                transientsMetadataList=transientsMetadataList,
                colMaps=colMaps
            )

        print "FINISH UPDATING TRANSIENT DB/START ANNOTATING TRANSIENT DB: %d" % (time.time() - start_time2,)
        start_time2 = time.time()

        if self.ra:
            return classifications, crossmatches

        if self.updatePeakMags and self.settings["database settings"]["transients"]["transient peak magnitude query"]:
            self.update_peak_magnitudes()
        self.update_classification_annotations_and_summaries(
            self.updatePeakMags)

        print "FINISH ANNOTATING TRANSIENT DB: %d" % (time.time() - start_time2,)
        start_time2 = time.time()

        classificationRate = count / (time.time() - start_time)
        print "Sherlock is classifying at a rate of %(classificationRate)2.1f transients/sec" % locals()

    self.log.debug('completed the ``classify`` method')
    return None, None | python | *(func_code_string: identical to whole_func_string)* | [ …func_code_tokens elided: token-by-token split of the code above… ] | *classify the transients selected from the transient selection query in the settings file or passed in via the CL or other code*
**Return:**
- ``crossmatches`` -- list of dictionaries of crossmatched associated sources
- ``classifications`` -- the classifications assigned to the transients post-crossmatches (dictionary of rank ordered list of classifications)
See class docstring for usage.
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring | [ "*", "classify", "the", "transients", "selected", "from", "the", "transient", "selection", "query", "in", "the", "settings", "file", "or", "passed", "in", "via", "the", "CL", "or", "other", "code", "*" ] | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/transient_classifier.py#L191-L438 |
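The mini-batching arithmetic in `classify()` reduces to a simple slicing helper; this standalone sketch mirrors the `int(total / size) + 1` batch count and drops the trailing empty slice:

```python
def minibatches(items, size=100):
    n = int(float(len(items)) / float(size)) + 1
    batches = [items[i * size:(i + 1) * size] for i in range(n)]
    return [b for b in batches if b]  # drop the empty tail slice, if any

assert len(minibatches(list(range(250)))) == 3  # 100 + 100 + 50
assert len(minibatches(list(range(200)))) == 2  # exact multiple: no empty batch
```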
thespacedoctor/sherlock | sherlock/transient_classifier.py | transient_classifier._get_transient_metadata_from_database_list | def _get_transient_metadata_from_database_list(
        self):
    """use the transient query in the settings file to generate a list of transients to crossmatch and classify

    **Return:**
        - ``transientsMetadataList``

    .. todo ::

        - update key arguments values and definitions with defaults
        - update return values and definitions
        - update usage examples and text
        - update docstring text
        - check sublime snippet exists
        - clip any useful text to docs mindmap
        - regenerate the docs and check rendering of this docstring
    """
    self.log.debug(
        'starting the ``_get_transient_metadata_from_database_list`` method')

    sqlQuery = self.settings["database settings"][
        "transients"]["transient query"] + " limit " + str(self.largeBatchSize)

    thisInt = randint(0, 100)
    if "where" in sqlQuery:
        sqlQuery = sqlQuery.replace(
            "where", "where %(thisInt)s=%(thisInt)s and " % locals())

    transientsMetadataList = readquery(
        log=self.log,
        sqlQuery=sqlQuery,
        dbConn=self.transientsDbConn,
        quiet=False
    )

    self.log.debug(
        'completed the ``_get_transient_metadata_from_database_list`` method')
    return transientsMetadataList | python | *(func_code_string: identical to whole_func_string)* | [ …func_code_tokens elided: token-by-token split of the code above… ] | use the transient query in the settings file to generate a list of transients to crossmatch and classify
**Return:**
- ``transientsMetadataList``
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring | [ "use", "the", "transient", "query", "in", "the", "settings", "file", "to", "generate", "a", "list", "of", "transients", "to", "crossmatch", "and", "classify" ] | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/transient_classifier.py#L440-L477 |
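The `%(thisInt)s=%(thisInt)s` substitution injects a random tautology into the WHERE clause; the likely motive (a reading of intent, not something the source states) is to make repeated polls textually distinct so MySQL's query cache cannot hand back stale rows. Standalone, with a hypothetical query:

```python
from random import randint

sql = "select * from transientBucket where classification is null"  # hypothetical query
n = randint(0, 100)
sql = sql.replace("where", "where %d=%d and " % (n, n))
# e.g. "select * from transientBucket where 42=42 and classification is null"
```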
thespacedoctor/sherlock | sherlock/transient_classifier.py | transient_classifier._update_ned_stream | def _update_ned_stream(
self,
transientsMetadataList
):
""" update the NED stream within the catalogues database at the locations of the transients
**Key Arguments:**
- ``transientsMetadataList`` -- the list of transient metadata lifted from the database.
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``_update_ned_stream`` method')
coordinateList = []
for i in transientsMetadataList:
# thisList = str(i["ra"]) + " " + str(i["dec"])
thisList = (i["ra"], i["dec"])
coordinateList.append(thisList)
coordinateList = self._remove_previous_ned_queries(
coordinateList=coordinateList
)
# MINIMISE COORDINATES IN LIST TO REDUCE NUMBER OF REQUIRED NED QUERIES
coordinateList = self._consolidate_coordinateList(
coordinateList=coordinateList
)
stream = ned(
log=self.log,
settings=self.settings,
coordinateList=coordinateList,
radiusArcsec=self.settings["ned stream search radius arcec"]
)
stream.ingest()
sqlQuery = """SET session sql_mode = "";""" % locals(
)
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn
)
sqlQuery = """update tcs_cat_ned_stream set magnitude = CAST(`magnitude_filter` AS DECIMAL(5,2)) where magnitude is null;""" % locals(
)
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn
)
self.log.debug('completed the ``_update_ned_stream`` method')
return None | python | def _update_ned_stream(
self,
transientsMetadataList
):
""" update the NED stream within the catalogues database at the locations of the transients
**Key Arguments:**
- ``transientsMetadataList`` -- the list of transient metadata lifted from the database.
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``_update_ned_stream`` method')
coordinateList = []
for i in transientsMetadataList:
# thisList = str(i["ra"]) + " " + str(i["dec"])
thisList = (i["ra"], i["dec"])
coordinateList.append(thisList)
coordinateList = self._remove_previous_ned_queries(
coordinateList=coordinateList
)
# MINIMISE COORDINATES IN LIST TO REDUCE NUMBER OF REQUIRED NED QUERIES
coordinateList = self._consolidate_coordinateList(
coordinateList=coordinateList
)
stream = ned(
log=self.log,
settings=self.settings,
coordinateList=coordinateList,
radiusArcsec=self.settings["ned stream search radius arcec"]
)
stream.ingest()
sqlQuery = """SET session sql_mode = "";""" % locals(
)
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn
)
sqlQuery = """update tcs_cat_ned_stream set magnitude = CAST(`magnitude_filter` AS DECIMAL(5,2)) where magnitude is null;""" % locals(
)
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn
)
self.log.debug('completed the ``_update_ned_stream`` method')
return None | [
"def",
"_update_ned_stream",
"(",
"self",
",",
"transientsMetadataList",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_update_ned_stream`` method'",
")",
"coordinateList",
"=",
"[",
"]",
"for",
"i",
"in",
"transientsMetadataList",
":",
"# thisList = str(i[\"ra\"]) + \" \" + str(i[\"dec\"])",
"thisList",
"=",
"(",
"i",
"[",
"\"ra\"",
"]",
",",
"i",
"[",
"\"dec\"",
"]",
")",
"coordinateList",
".",
"append",
"(",
"thisList",
")",
"coordinateList",
"=",
"self",
".",
"_remove_previous_ned_queries",
"(",
"coordinateList",
"=",
"coordinateList",
")",
"# MINIMISE COORDINATES IN LIST TO REDUCE NUMBER OF REQUIRE NED QUERIES",
"coordinateList",
"=",
"self",
".",
"_consolidate_coordinateList",
"(",
"coordinateList",
"=",
"coordinateList",
")",
"stream",
"=",
"ned",
"(",
"log",
"=",
"self",
".",
"log",
",",
"settings",
"=",
"self",
".",
"settings",
",",
"coordinateList",
"=",
"coordinateList",
",",
"radiusArcsec",
"=",
"self",
".",
"settings",
"[",
"\"ned stream search radius arcec\"",
"]",
")",
"stream",
".",
"ingest",
"(",
")",
"sqlQuery",
"=",
"\"\"\"SET session sql_mode = \"\";\"\"\"",
"%",
"locals",
"(",
")",
"writequery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
")",
"sqlQuery",
"=",
"\"\"\"update tcs_cat_ned_stream set magnitude = CAST(`magnitude_filter` AS DECIMAL(5,2)) where magnitude is null;\"\"\"",
"%",
"locals",
"(",
")",
"writequery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_update_ned_stream`` method'",
")",
"return",
"None"
] | update the NED stream within the catalogues database at the locations of the transients
**Key Arguments:**
- ``transientsMetadataList`` -- the list of transient metadata lifted from the database.
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring | [
"update",
"the",
"NED",
"stream",
"within",
"the",
"catalogues",
"database",
"at",
"the",
"locations",
"of",
"the",
"transients"
] | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/transient_classifier.py#L479-L540 |
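Two details of `_update_ned_stream` worth flagging: the raw SQL cleanup statements contain no format placeholders, so the trailing `% locals()` calls in the original are no-ops; and the CAST backfill only succeeds because strict `sql_mode` is cleared first. A hedged sketch of the same cleanup, reusing the repo's `writequery` helper:

def tidy_ned_stream(log, dbConn):
    # Sketch only -- assumes `writequery` (used throughout this module)
    # is importable; the statement text is lifted from the method above.
    cleanupStatements = [
        # Relax strict mode so a lossy CAST cannot abort the session.
        'SET session sql_mode = "";',
        # Backfill numeric magnitudes from the raw filter column.
        "update tcs_cat_ned_stream "
        "set magnitude = CAST(`magnitude_filter` AS DECIMAL(5,2)) "
        "where magnitude is null;",
    ]
    for sqlQuery in cleanupStatements:
        writequery(log=log, sqlQuery=sqlQuery, dbConn=dbConn)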
thespacedoctor/sherlock | sherlock/transient_classifier.py | transient_classifier._remove_previous_ned_queries | def _remove_previous_ned_queries(
self,
coordinateList):
"""iterate through the transient locations to see if we have recent local NED coverage of that area already
**Key Arguments:**
- ``coordinateList`` -- set of coordinates to check for previous queries
**Return:**
- ``updatedCoordinateList`` -- coordinate list with previous queries removed
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``_remove_previous_ned_queries`` method')
# 1 DEGREE QUERY RADIUS
radius = 60. * 60.
updatedCoordinateList = []
keepers = []
# CALCULATE THE OLDEST RESULTS LIMIT
now = datetime.now()
td = timedelta(
days=self.settings["ned stream refresh rate in days"])
refreshLimit = now - td
refreshLimit = refreshLimit.strftime("%Y-%m-%d %H:%M:%S")
raList = []
raList[:] = [c[0] for c in coordinateList]
decList = []
decList[:] = [c[1] for c in coordinateList]
# MATCH COORDINATES AGAINST PREVIOUS NED SEARCHES
cs = conesearch(
log=self.log,
dbConn=self.cataloguesDbConn,
tableName="tcs_helper_ned_query_history",
columns="*",
ra=raList,
dec=decList,
radiusArcsec=radius,
separations=True,
distinct=True,
sqlWhere="dateQueried > '%(refreshLimit)s'" % locals(),
closest=False
)
matchIndies, matches = cs.search()
# DETERMINE WHICH COORDINATES REQUIRE A NED QUERY
curatedMatchIndices = []
curatedMatches = []
for i, m in zip(matchIndies, matches.list):
match = False
row = m
row["separationArcsec"] = row["cmSepArcsec"]
raStream = row["raDeg"]
decStream = row["decDeg"]
radiusStream = row["arcsecRadius"]
dateStream = row["dateQueried"]
angularSeparation = row["separationArcsec"]
if angularSeparation + self.settings["first pass ned search radius arcec"] < radiusStream:
curatedMatchIndices.append(i)
curatedMatches.append(m)
# NON MATCHES
for i, v in enumerate(coordinateList):
if i not in curatedMatchIndices:
updatedCoordinateList.append(v)
self.log.debug('completed the ``_remove_previous_ned_queries`` method')
return updatedCoordinateList | python | def _remove_previous_ned_queries(
self,
coordinateList):
"""iterate through the transient locations to see if we have recent local NED coverage of that area already
**Key Arguments:**
- ``coordinateList`` -- set of coordinates to check for previous queries
**Return:**
- ``updatedCoordinateList`` -- coordinate list with previous queries removed
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``_remove_previous_ned_queries`` method')
# 1 DEGREE QUERY RADIUS
radius = 60. * 60.
updatedCoordinateList = []
keepers = []
# CALCULATE THE OLDEST RESULTS LIMIT
now = datetime.now()
td = timedelta(
days=self.settings["ned stream refresh rate in days"])
refreshLimit = now - td
refreshLimit = refreshLimit.strftime("%Y-%m-%d %H:%M:%S")
raList = []
raList[:] = [c[0] for c in coordinateList]
decList = []
decList[:] = [c[1] for c in coordinateList]
# MATCH COORDINATES AGAINST PREVIOUS NED SEARCHES
cs = conesearch(
log=self.log,
dbConn=self.cataloguesDbConn,
tableName="tcs_helper_ned_query_history",
columns="*",
ra=raList,
dec=decList,
radiusArcsec=radius,
separations=True,
distinct=True,
sqlWhere="dateQueried > '%(refreshLimit)s'" % locals(),
closest=False
)
matchIndies, matches = cs.search()
# DETERMINE WHICH COORDINATES REQUIRE A NED QUERY
curatedMatchIndices = []
curatedMatches = []
for i, m in zip(matchIndies, matches.list):
match = False
row = m
row["separationArcsec"] = row["cmSepArcsec"]
raStream = row["raDeg"]
decStream = row["decDeg"]
radiusStream = row["arcsecRadius"]
dateStream = row["dateQueried"]
angularSeparation = row["separationArcsec"]
if angularSeparation + self.settings["first pass ned search radius arcec"] < radiusStream:
curatedMatchIndices.append(i)
curatedMatches.append(m)
# NON MATCHES
for i, v in enumerate(coordinateList):
if i not in curatedMatchIndices:
updatedCoordinateList.append(v)
self.log.debug('completed the ``_remove_previous_ned_queries`` method')
return updatedCoordinateList | [
"def",
"_remove_previous_ned_queries",
"(",
"self",
",",
"coordinateList",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_remove_previous_ned_queries`` method'",
")",
"# 1 DEGREE QUERY RADIUS",
"radius",
"=",
"60.",
"*",
"60.",
"updatedCoordinateList",
"=",
"[",
"]",
"keepers",
"=",
"[",
"]",
"# CALCULATE THE OLDEST RESULTS LIMIT",
"now",
"=",
"datetime",
".",
"now",
"(",
")",
"td",
"=",
"timedelta",
"(",
"days",
"=",
"self",
".",
"settings",
"[",
"\"ned stream refresh rate in days\"",
"]",
")",
"refreshLimit",
"=",
"now",
"-",
"td",
"refreshLimit",
"=",
"refreshLimit",
".",
"strftime",
"(",
"\"%Y-%m-%d %H:%M:%S\"",
")",
"raList",
"=",
"[",
"]",
"raList",
"[",
":",
"]",
"=",
"[",
"c",
"[",
"0",
"]",
"for",
"c",
"in",
"coordinateList",
"]",
"decList",
"=",
"[",
"]",
"decList",
"[",
":",
"]",
"=",
"[",
"c",
"[",
"1",
"]",
"for",
"c",
"in",
"coordinateList",
"]",
"# MATCH COORDINATES AGAINST PREVIOUS NED SEARCHES",
"cs",
"=",
"conesearch",
"(",
"log",
"=",
"self",
".",
"log",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
",",
"tableName",
"=",
"\"tcs_helper_ned_query_history\"",
",",
"columns",
"=",
"\"*\"",
",",
"ra",
"=",
"raList",
",",
"dec",
"=",
"decList",
",",
"radiusArcsec",
"=",
"radius",
",",
"separations",
"=",
"True",
",",
"distinct",
"=",
"True",
",",
"sqlWhere",
"=",
"\"dateQueried > '%(refreshLimit)s'\"",
"%",
"locals",
"(",
")",
",",
"closest",
"=",
"False",
")",
"matchIndies",
",",
"matches",
"=",
"cs",
".",
"search",
"(",
")",
"# DETERMINE WHICH COORDINATES REQUIRE A NED QUERY",
"curatedMatchIndices",
"=",
"[",
"]",
"curatedMatches",
"=",
"[",
"]",
"for",
"i",
",",
"m",
"in",
"zip",
"(",
"matchIndies",
",",
"matches",
".",
"list",
")",
":",
"match",
"=",
"False",
"row",
"=",
"m",
"row",
"[",
"\"separationArcsec\"",
"]",
"=",
"row",
"[",
"\"cmSepArcsec\"",
"]",
"raStream",
"=",
"row",
"[",
"\"raDeg\"",
"]",
"decStream",
"=",
"row",
"[",
"\"decDeg\"",
"]",
"radiusStream",
"=",
"row",
"[",
"\"arcsecRadius\"",
"]",
"dateStream",
"=",
"row",
"[",
"\"dateQueried\"",
"]",
"angularSeparation",
"=",
"row",
"[",
"\"separationArcsec\"",
"]",
"if",
"angularSeparation",
"+",
"self",
".",
"settings",
"[",
"\"first pass ned search radius arcec\"",
"]",
"<",
"radiusStream",
":",
"curatedMatchIndices",
".",
"append",
"(",
"i",
")",
"curatedMatches",
".",
"append",
"(",
"m",
")",
"# NON MATCHES",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"coordinateList",
")",
":",
"if",
"i",
"not",
"in",
"curatedMatchIndices",
":",
"updatedCoordinateList",
".",
"append",
"(",
"v",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_remove_previous_ned_queries`` method'",
")",
"return",
"updatedCoordinateList"
] | iterate through the transient locations to see if we have recent local NED coverage of that area already
**Key Arguments:**
- ``coordinateList`` -- set of coordinates to check for previous queries
**Return:**
- ``updatedCoordinateList`` -- coordinate list with previous queries removed
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring | [
"iterate",
"through",
"the",
"transient",
"locations",
"to",
"see",
"if",
"we",
"have",
"recent",
"local",
"NED",
"coverage",
"of",
"that",
"area",
"already"
] | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/transient_classifier.py#L542-L621 |
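The keep/drop rule in `_remove_previous_ned_queries` is geometric: a historical NED query still covers a new transient only if the new search footprint sits wholly inside the old one -- centre separation plus the new search radius must stay below the old query radius -- and only if the old query is younger than the refresh limit. A small sketch of both tests (function names here are illustrative, not from the codebase):

from datetime import datetime, timedelta

def stale_cutoff(refreshRateDays):
    # Queries older than this timestamp are re-issued regardless of position.
    limit = datetime.now() - timedelta(days=refreshRateDays)
    return limit.strftime("%Y-%m-%d %H:%M:%S")

def footprint_covered(separationArcsec, newRadiusArcsec, oldRadiusArcsec):
    # The old cone covers the new one only if the new circle fits
    # entirely inside it.
    return separationArcsec + newRadiusArcsec < oldRadiusArcsec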
thespacedoctor/sherlock | sherlock/transient_classifier.py | transient_classifier._crossmatch_transients_against_catalogues | def _crossmatch_transients_against_catalogues(
self,
transientsMetadataListIndex,
colMaps):
"""run the transients through the crossmatch algorithm in the settings file
**Key Arguments:**
- ``transientsMetadataListIndex`` -- index of the batch of transient metadata (within the module-level ``theseBatches``) to crossmatch.
- ``colMaps`` -- dictionary of dictionaries with the name of the database-view (e.g. `tcs_view_agn_milliquas_v4_5`) as the key and the column-name dictionary map as value (`{view_name: {columnMap}}`).
**Return:**
- ``crossmatches`` -- a list of dictionaries of the associated sources crossmatched from the catalogues database
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
global theseBatches
self.log.debug(
'starting the ``_crossmatch_transients_against_catalogues`` method')
# SETUP ALL DATABASE CONNECTIONS
transientsMetadataList = theseBatches[transientsMetadataListIndex]
dbConn = database(
log=self.log,
dbSettings=self.settings["database settings"]["static catalogues"]
).connect()
self.allClassifications = []
cm = transient_catalogue_crossmatch(
log=self.log,
dbConn=dbConn,
transients=transientsMetadataList,
settings=self.settings,
colMaps=colMaps
)
crossmatches = cm.match()
self.log.debug(
'completed the ``_crossmatch_transients_against_catalogues`` method')
return crossmatches | python | def _crossmatch_transients_against_catalogues(
self,
transientsMetadataListIndex,
colMaps):
"""run the transients through the crossmatch algorithm in the settings file
**Key Arguments:**
- ``transientsMetadataListIndex`` -- index of the batch of transient metadata (within the module-level ``theseBatches``) to crossmatch.
- ``colMaps`` -- dictionary of dictionaries with the name of the database-view (e.g. `tcs_view_agn_milliquas_v4_5`) as the key and the column-name dictionary map as value (`{view_name: {columnMap}}`).
**Return:**
- ``crossmatches`` -- a list of dictionaries of the associated sources crossmatched from the catalogues database
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
global theseBatches
self.log.debug(
'starting the ``_crossmatch_transients_against_catalogues`` method')
# SETUP ALL DATABASE CONNECTIONS
transientsMetadataList = theseBatches[transientsMetadataListIndex]
dbConn = database(
log=self.log,
dbSettings=self.settings["database settings"]["static catalogues"]
).connect()
self.allClassifications = []
cm = transient_catalogue_crossmatch(
log=self.log,
dbConn=dbConn,
transients=transientsMetadataList,
settings=self.settings,
colMaps=colMaps
)
crossmatches = cm.match()
self.log.debug(
'completed the ``_crossmatch_transients_against_catalogues`` method')
return crossmatches | [
"def",
"_crossmatch_transients_against_catalogues",
"(",
"self",
",",
"transientsMetadataListIndex",
",",
"colMaps",
")",
":",
"global",
"theseBatches",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_crossmatch_transients_against_catalogues`` method'",
")",
"# SETUP ALL DATABASE CONNECTIONS",
"transientsMetadataList",
"=",
"theseBatches",
"[",
"transientsMetadataListIndex",
"]",
"dbConn",
"=",
"database",
"(",
"log",
"=",
"self",
".",
"log",
",",
"dbSettings",
"=",
"self",
".",
"settings",
"[",
"\"database settings\"",
"]",
"[",
"\"static catalogues\"",
"]",
")",
".",
"connect",
"(",
")",
"self",
".",
"allClassifications",
"=",
"[",
"]",
"cm",
"=",
"transient_catalogue_crossmatch",
"(",
"log",
"=",
"self",
".",
"log",
",",
"dbConn",
"=",
"dbConn",
",",
"transients",
"=",
"transientsMetadataList",
",",
"settings",
"=",
"self",
".",
"settings",
",",
"colMaps",
"=",
"colMaps",
")",
"crossmatches",
"=",
"cm",
".",
"match",
"(",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_crossmatch_transients_against_catalogues`` method'",
")",
"return",
"crossmatches"
] | run the transients through the crossmatch algorithm in the settings file
**Key Arguments:**
- ``transientsMetadataListIndex`` -- index of the batch of transient metadata (within the module-level ``theseBatches``) to crossmatch.
- ``colMaps`` -- dictionary of dictionaries with the name of the database-view (e.g. `tcs_view_agn_milliquas_v4_5`) as the key and the column-name dictionary map as value (`{view_name: {columnMap}}`).
**Return:**
- ``crossmatches`` -- a list of dictionaries of the associated sources crossmatched from the catalogues database
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring | [
"run",
"the",
"transients",
"through",
"the",
"crossmatch",
"algorithm",
"in",
"the",
"settings",
"file"
] | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/transient_classifier.py#L623-L676 |
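`_crossmatch_transients_against_catalogues` takes an *index* into the module-level `theseBatches` rather than the batch itself, and opens a fresh database connection inside the call -- the usual shape for fork-based multiprocessing workers, where globals are inherited for free but large arguments and live connections do not cross the process boundary well. A hedged sketch of the pattern (everything except `theseBatches` is illustrative):

theseBatches = []  # populated by the parent process before workers start

def crossmatch_worker(batchIndex):
    # Only the integer index is pickled per task; the batch itself is
    # read from the inherited global.
    batch = theseBatches[batchIndex]
    # Each worker opens its own connection -- connections must not be
    # shared across forked processes.
    dbConn = open_catalogue_connection()  # hypothetical helper
    return run_crossmatch(dbConn, batch)  # hypothetical helper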
thespacedoctor/sherlock | sherlock/transient_classifier.py | transient_classifier._update_transient_database | def _update_transient_database(
self,
crossmatches,
classifications,
transientsMetadataList,
colMaps):
""" update transient database with classifications and crossmatch results
**Key Arguments:**
- ``crossmatches`` -- the crossmatches and associations resulting from the catalogue crossmatches
- ``classifications`` -- the classifications assigned to the transients post-crossmatches (dictionary of rank ordered list of classifications)
- ``transientsMetadataList`` -- the list of transient metadata lifted from the database.
- ``colMaps`` -- maps of the important column names for each table/view in the crossmatch-catalogues database
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``_update_transient_database`` method')
import time
start_time = time.time()
print "UPDATING TRANSIENTS DATABASE WITH RESULTS"
print "DELETING OLD RESULTS"
now = datetime.now()
now = now.strftime("%Y-%m-%d_%H-%M-%S-%f")
transientTable = self.settings["database settings"][
"transients"]["transient table"]
transientTableClassCol = self.settings["database settings"][
"transients"]["transient classification column"]
transientTableIdCol = self.settings["database settings"][
"transients"]["transient primary id column"]
# COMBINE ALL CROSSMATCHES INTO A LIST OF DICTIONARIES TO DUMP INTO
# DATABASE TABLE
transientIDs = [str(c)
for c in classifications.keys()]
transientIDs = ",".join(transientIDs)
# REMOVE PREVIOUS MATCHES
sqlQuery = """delete from sherlock_crossmatches where transient_object_id in (%(transientIDs)s);""" % locals(
)
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.transientsDbConn,
)
sqlQuery = """delete from sherlock_classifications where transient_object_id in (%(transientIDs)s);""" % locals(
)
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.transientsDbConn,
)
print "FINISHED DELETING OLD RESULTS/ADDING TO CROSSMATCHES: %d" % (time.time() - start_time,)
start_time = time.time()
if len(crossmatches):
insert_list_of_dictionaries_into_database_tables(
dbConn=self.transientsDbConn,
log=self.log,
dictList=crossmatches,
dbTableName="sherlock_crossmatches",
dateModified=True,
batchSize=10000,
replace=True,
dbSettings=self.settings["database settings"][
"transients"]
)
print "FINISHED ADDING TO CROSSMATCHES/UPDATING CLASSIFICATIONS IN TRANSIENT TABLE: %d" % (time.time() - start_time,)
start_time = time.time()
sqlQuery = ""
inserts = []
for k, v in classifications.iteritems():
thisInsert = {
"transient_object_id": k,
"classification": v[0]
}
inserts.append(thisInsert)
print "FINISHED UPDATING CLASSIFICATIONS IN TRANSIENT TABLE/UPDATING sherlock_classifications TABLE: %d" % (time.time() - start_time,)
start_time = time.time()
insert_list_of_dictionaries_into_database_tables(
dbConn=self.transientsDbConn,
log=self.log,
dictList=inserts,
dbTableName="sherlock_classifications",
dateModified=True,
batchSize=10000,
replace=True,
dbSettings=self.settings["database settings"][
"transients"]
)
print "FINISHED UPDATING sherlock_classifications TABLE: %d" % (time.time() - start_time,)
start_time = time.time()
self.log.debug('completed the ``_update_transient_database`` method')
return None | python | def _update_transient_database(
self,
crossmatches,
classifications,
transientsMetadataList,
colMaps):
""" update transient database with classifications and crossmatch results
**Key Arguments:**
- ``crossmatches`` -- the crossmatches and associations resulting from the catlaogue crossmatches
- ``classifications`` -- the classifications assigned to the transients post-crossmatches (dictionary of rank ordered list of classifications)
- ``transientsMetadataList`` -- the list of transient metadata lifted from the database.
- ``colMaps`` -- maps of the important column names for each table/view in the crossmatch-catalogues database
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check redendering of this docstring
"""
self.log.debug('starting the ``_update_transient_database`` method')
import time
start_time = time.time()
print "UPDATING TRANSIENTS DATABASE WITH RESULTS"
print "DELETING OLD RESULTS"
now = datetime.now()
now = now.strftime("%Y-%m-%d_%H-%M-%S-%f")
transientTable = self.settings["database settings"][
"transients"]["transient table"]
transientTableClassCol = self.settings["database settings"][
"transients"]["transient classification column"]
transientTableIdCol = self.settings["database settings"][
"transients"]["transient primary id column"]
# COMBINE ALL CROSSMATCHES INTO A LIST OF DICTIONARIES TO DUMP INTO
# DATABASE TABLE
transientIDs = [str(c)
for c in classifications.keys()]
transientIDs = ",".join(transientIDs)
# REMOVE PREVIOUS MATCHES
sqlQuery = """delete from sherlock_crossmatches where transient_object_id in (%(transientIDs)s);""" % locals(
)
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.transientsDbConn,
)
sqlQuery = """delete from sherlock_classifications where transient_object_id in (%(transientIDs)s);""" % locals(
)
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.transientsDbConn,
)
print "FINISHED DELETING OLD RESULTS/ADDING TO CROSSMATCHES: %d" % (time.time() - start_time,)
start_time = time.time()
if len(crossmatches):
insert_list_of_dictionaries_into_database_tables(
dbConn=self.transientsDbConn,
log=self.log,
dictList=crossmatches,
dbTableName="sherlock_crossmatches",
dateModified=True,
batchSize=10000,
replace=True,
dbSettings=self.settings["database settings"][
"transients"]
)
print "FINISHED ADDING TO CROSSMATCHES/UPDATING CLASSIFICATIONS IN TRANSIENT TABLE: %d" % (time.time() - start_time,)
start_time = time.time()
sqlQuery = ""
inserts = []
for k, v in classifications.iteritems():
thisInsert = {
"transient_object_id": k,
"classification": v[0]
}
inserts.append(thisInsert)
print "FINISHED UPDATING CLASSIFICATIONS IN TRANSIENT TABLE/UPDATING sherlock_classifications TABLE: %d" % (time.time() - start_time,)
start_time = time.time()
insert_list_of_dictionaries_into_database_tables(
dbConn=self.transientsDbConn,
log=self.log,
dictList=inserts,
dbTableName="sherlock_classifications",
dateModified=True,
batchSize=10000,
replace=True,
dbSettings=self.settings["database settings"][
"transients"]
)
print "FINISHED UPDATING sherlock_classifications TABLE: %d" % (time.time() - start_time,)
start_time = time.time()
self.log.debug('completed the ``_update_transient_database`` method')
return None | [
"def",
"_update_transient_database",
"(",
"self",
",",
"crossmatches",
",",
"classifications",
",",
"transientsMetadataList",
",",
"colMaps",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_update_transient_database`` method'",
")",
"import",
"time",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"print",
"\"UPDATING TRANSIENTS DATABASE WITH RESULTS\"",
"print",
"\"DELETING OLD RESULTS\"",
"now",
"=",
"datetime",
".",
"now",
"(",
")",
"now",
"=",
"now",
".",
"strftime",
"(",
"\"%Y-%m-%d_%H-%M-%S-%f\"",
")",
"transientTable",
"=",
"self",
".",
"settings",
"[",
"\"database settings\"",
"]",
"[",
"\"transients\"",
"]",
"[",
"\"transient table\"",
"]",
"transientTableClassCol",
"=",
"self",
".",
"settings",
"[",
"\"database settings\"",
"]",
"[",
"\"transients\"",
"]",
"[",
"\"transient classification column\"",
"]",
"transientTableIdCol",
"=",
"self",
".",
"settings",
"[",
"\"database settings\"",
"]",
"[",
"\"transients\"",
"]",
"[",
"\"transient primary id column\"",
"]",
"# COMBINE ALL CROSSMATCHES INTO A LIST OF DICTIONARIES TO DUMP INTO",
"# DATABASE TABLE",
"transientIDs",
"=",
"[",
"str",
"(",
"c",
")",
"for",
"c",
"in",
"classifications",
".",
"keys",
"(",
")",
"]",
"transientIDs",
"=",
"\",\"",
".",
"join",
"(",
"transientIDs",
")",
"# REMOVE PREVIOUS MATCHES",
"sqlQuery",
"=",
"\"\"\"delete from sherlock_crossmatches where transient_object_id in (%(transientIDs)s);\"\"\"",
"%",
"locals",
"(",
")",
"writequery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"transientsDbConn",
",",
")",
"sqlQuery",
"=",
"\"\"\"delete from sherlock_classifications where transient_object_id in (%(transientIDs)s);\"\"\"",
"%",
"locals",
"(",
")",
"writequery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"transientsDbConn",
",",
")",
"print",
"\"FINISHED DELETING OLD RESULTS/ADDING TO CROSSMATCHES: %d\"",
"%",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
",",
")",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"if",
"len",
"(",
"crossmatches",
")",
":",
"insert_list_of_dictionaries_into_database_tables",
"(",
"dbConn",
"=",
"self",
".",
"transientsDbConn",
",",
"log",
"=",
"self",
".",
"log",
",",
"dictList",
"=",
"crossmatches",
",",
"dbTableName",
"=",
"\"sherlock_crossmatches\"",
",",
"dateModified",
"=",
"True",
",",
"batchSize",
"=",
"10000",
",",
"replace",
"=",
"True",
",",
"dbSettings",
"=",
"self",
".",
"settings",
"[",
"\"database settings\"",
"]",
"[",
"\"transients\"",
"]",
")",
"print",
"\"FINISHED ADDING TO CROSSMATCHES/UPDATING CLASSIFICATIONS IN TRANSIENT TABLE: %d\"",
"%",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
",",
")",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"sqlQuery",
"=",
"\"\"",
"inserts",
"=",
"[",
"]",
"for",
"k",
",",
"v",
"in",
"classifications",
".",
"iteritems",
"(",
")",
":",
"thisInsert",
"=",
"{",
"\"transient_object_id\"",
":",
"k",
",",
"\"classification\"",
":",
"v",
"[",
"0",
"]",
"}",
"inserts",
".",
"append",
"(",
"thisInsert",
")",
"print",
"\"FINISHED UPDATING CLASSIFICATIONS IN TRANSIENT TABLE/UPDATING sherlock_classifications TABLE: %d\"",
"%",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
",",
")",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"insert_list_of_dictionaries_into_database_tables",
"(",
"dbConn",
"=",
"self",
".",
"transientsDbConn",
",",
"log",
"=",
"self",
".",
"log",
",",
"dictList",
"=",
"inserts",
",",
"dbTableName",
"=",
"\"sherlock_classifications\"",
",",
"dateModified",
"=",
"True",
",",
"batchSize",
"=",
"10000",
",",
"replace",
"=",
"True",
",",
"dbSettings",
"=",
"self",
".",
"settings",
"[",
"\"database settings\"",
"]",
"[",
"\"transients\"",
"]",
")",
"print",
"\"FINISHED UPDATING sherlock_classifications TABLE: %d\"",
"%",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
",",
")",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_update_transient_database`` method'",
")",
"return",
"None"
] | update transient database with classifications and crossmatch results
**Key Arguments:**
- ``crossmatches`` -- the crossmatches and associations resulting from the catalogue crossmatches
- ``classifications`` -- the classifications assigned to the transients post-crossmatches (dictionary of rank ordered list of classifications)
- ``transientsMetadataList`` -- the list of transient metadata lifted from the database.
- ``colMaps`` -- maps of the important column names for each table/view in the crossmatch-catalogues database
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring | [
"update",
"transient",
"database",
"with",
"classifications",
"and",
"crossmatch",
"results"
] | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/transient_classifier.py#L678-L789 |
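`_update_transient_database` stays idempotent by deleting all prior rows for the batch's transient IDs from both result tables before bulk re-inserting in 10,000-row batches; only the top-ranked classification per transient (`v[0]`) is persisted. A minimal sketch of how the classification inserts are shaped (`dict.iteritems()` in the original is Python 2; the Python 3 spelling below is `dict.items()`):

def classification_inserts(classifications):
    # One row per transient, keeping only the top-ranked classification.
    return [
        {"transient_object_id": objectId, "classification": ranked[0]}
        for objectId, ranked in classifications.items()
    ]

# classification_inserts({1101: ["SN", "VS"]})
# -> [{'transient_object_id': 1101, 'classification': 'SN'}]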
thespacedoctor/sherlock | sherlock/transient_classifier.py | transient_classifier._rank_classifications | def _rank_classifications(
self,
crossmatchArray,
colMaps):
"""*rank the classifications returned from the catalogue crossmatcher, annotate the results with a classification rank-number (most likely = 1) and a rank-score (weight of classification)*
**Key Arguments:**
- ``crossmatchArray`` -- the list of unranked crossmatch classifications
- ``colMaps`` -- dictionary of dictionaries with the name of the database-view (e.g. `tcs_view_agn_milliquas_v4_5`) as the key and the column-name dictionary map as value (`{view_name: {columnMap}}`).
**Return:**
- ``classifications`` -- the classifications assigned to the transients post-crossmatches
- ``crossmatches`` -- the crossmatches annotated with rankings and rank-scores
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``_rank_classifications`` method')
crossmatches = crossmatchArray
# GROUP CROSSMATCHES INTO DISTINCT SOURCES (DUPLICATE ENTRIES OF THE
# SAME ASTROPHYSICAL SOURCE ACROSS MULTIPLE CATALOGUES)
ra, dec = zip(*[(r["raDeg"], r["decDeg"]) for r in crossmatches])
from HMpTy.htm import sets
xmatcher = sets(
log=self.log,
ra=ra,
dec=dec,
radius=3 / (60. * 60.), # in degrees
sourceList=crossmatches
)
groupedMatches = xmatcher.match
associatationTypeOrder = ["AGN", "CV", "NT", "SN", "VS", "BS"]
# ADD DISTINCT-SOURCE KEY
dupKey = 0
distinctMatches = []
for x in groupedMatches:
dupKey += 1
mergedMatch = copy.deepcopy(x[0])
mergedMatch["merged_rank"] = int(dupKey)
# ADD OTHER ESSENTIAL KEYS
for e in ['z', 'photoZ', 'photoZErr']:
if e not in mergedMatch:
mergedMatch[e] = None
bestDirectDistance = {
"direct_distance": mergedMatch["direct_distance"],
"direct_distance_modulus": mergedMatch["direct_distance_modulus"],
"direct_distance_scale": mergedMatch["direct_distance"],
"qual": colMaps[mergedMatch["catalogue_view_name"]]["object_type_accuracy"]
}
bestSpecz = {
"z": mergedMatch["z"],
"distance": mergedMatch["distance"],
"distance_modulus": mergedMatch["distance_modulus"],
"scale": mergedMatch["scale"],
"qual": colMaps[mergedMatch["catalogue_view_name"]]["object_type_accuracy"]
}
bestPhotoz = {
"photoZ": mergedMatch["photoZ"],
"photoZErr": mergedMatch["photoZErr"],
"distance": mergedMatch["distance"],
"distance_modulus": mergedMatch["distance_modulus"],
"scale": mergedMatch["scale"],
"qual": colMaps[mergedMatch["catalogue_view_name"]]["object_type_accuracy"]
}
for i, m in enumerate(x):
m["merged_rank"] = int(dupKey)
if i > 0:
# MERGE ALL BEST MAGNITUDE MEASUREMENTS
for f in self.filterPreference:
if f in m and m[f] and (f not in mergedMatch or (f + "Err" in mergedMatch and (f + "Err" not in m or (mergedMatch[f + "Err"] > m[f + "Err"])))):
mergedMatch[f] = m[f]
try:
mergedMatch[f + "Err"] = m[f + "Err"]
except:
pass
mergedMatch["original_search_radius_arcsec"] = "multiple"
mergedMatch["catalogue_object_subtype"] = "multiple"
mergedMatch["catalogue_view_name"] = "multiple"
# MERGE SEARCH NAMES
snippet = m["search_name"].split(" ")[0].upper()
if "/" not in mergedMatch["search_name"] and snippet not in mergedMatch["search_name"].upper():
mergedMatch["search_name"] = mergedMatch["search_name"].split(
" ")[0].upper() + "/" + m["search_name"].split(" ")[0].upper()
elif snippet not in mergedMatch["search_name"].upper():
mergedMatch[
"search_name"] += "/" + m["search_name"].split(" ")[0].upper()
elif "/" not in mergedMatch["search_name"]:
mergedMatch["search_name"] = mergedMatch["search_name"].split(
" ")[0].upper()
mergedMatch["catalogue_table_name"] = mergedMatch[
"search_name"]
# MERGE CATALOGUE SOURCE NAMES
mergedMatch["catalogue_object_id"] = str(
mergedMatch["catalogue_object_id"])
m["catalogue_object_id"] = str(m["catalogue_object_id"])
if m["catalogue_object_id"].replace(" ", "").lower() not in mergedMatch["catalogue_object_id"].replace(" ", "").lower():
mergedMatch["catalogue_object_id"] += "/" + \
m["catalogue_object_id"]
# DETERMINE BEST CLASSIFICATION
if mergedMatch["classificationReliability"] == 3 and m["classificationReliability"] < 3:
mergedMatch["association_type"] = m["association_type"]
mergedMatch["classificationReliability"] = m[
"classificationReliability"]
if m["classificationReliability"] != 3 and m["association_type"] in associatationTypeOrder and (mergedMatch["association_type"] not in associatationTypeOrder or associatationTypeOrder.index(m["association_type"]) < associatationTypeOrder.index(mergedMatch["association_type"])):
mergedMatch["association_type"] = m["association_type"]
mergedMatch["classificationReliability"] = m[
"classificationReliability"]
# FIND BEST DISTANCES
if "direct_distance" in m and m["direct_distance"] and colMaps[m["catalogue_view_name"]]["object_type_accuracy"] > bestDirectDistance["qual"]:
bestDirectDistance = {
"direct_distance": m["direct_distance"],
"direct_distance_modulus": m["direct_distance_modulus"],
"direct_distance_scale": m["direct_distance_scale"],
"catalogue_object_type": m["catalogue_object_type"],
"qual": colMaps[m["catalogue_view_name"]]["object_type_accuracy"]
}
# FIND BEST SPEC-Z
if "z" in m and m["z"] and colMaps[m["catalogue_view_name"]]["object_type_accuracy"] > bestSpecz["qual"]:
bestSpecz = {
"z": m["z"],
"distance": m["distance"],
"distance_modulus": m["distance_modulus"],
"scale": m["scale"],
"catalogue_object_type": m["catalogue_object_type"],
"qual": colMaps[m["catalogue_view_name"]]["object_type_accuracy"]
}
# FIND BEST PHOT-Z
if "photoZ" in m and m["photoZ"] and colMaps[m["catalogue_view_name"]]["object_type_accuracy"] > bestPhotoz["qual"]:
bestPhotoz = {
"photoZ": m["photoZ"],
"photoZErr": m["photoZErr"],
"distance": m["distance"],
"distance_modulus": m["distance_modulus"],
"scale": m["scale"],
"catalogue_object_type": m["catalogue_object_type"],
"qual": colMaps[m["catalogue_view_name"]]["object_type_accuracy"]
}
# CLOSEST ANGULAR SEP & COORDINATES
if m["separationArcsec"] < mergedMatch["separationArcsec"]:
mergedMatch["separationArcsec"] = m["separationArcsec"]
mergedMatch["raDeg"] = m["raDeg"]
mergedMatch["decDeg"] = m["decDeg"]
# MERGE THE BEST RESULTS
for l in [bestPhotoz, bestSpecz, bestDirectDistance]:
for k, v in l.iteritems():
if k != "qual":
mergedMatch[k] = v
mergedMatch["catalogue_object_id"] = str(mergedMatch[
"catalogue_object_id"]).replace(" ", "")
# RECALCULATE PHYSICAL DISTANCE SEPARATION
if mergedMatch["direct_distance_scale"]:
mergedMatch["physical_separation_kpc"] = mergedMatch[
"direct_distance_scale"] * mergedMatch["separationArcsec"]
elif mergedMatch["scale"]:
mergedMatch["physical_separation_kpc"] = mergedMatch[
"scale"] * mergedMatch["separationArcsec"]
if "/" in mergedMatch["search_name"]:
mergedMatch["search_name"] = "multiple"
distinctMatches.append(mergedMatch)
crossmatches = []
for xm, gm in zip(distinctMatches, groupedMatches):
# SPEC-Z GALAXIES
if (xm["physical_separation_kpc"] is not None and xm["physical_separation_kpc"] != "null" and xm["physical_separation_kpc"] < 20. and xm["association_type"] == "SN" and (("z" in xm and xm["z"] is not None) or "photoZ" not in xm or xm["photoZ"] is None or xm["photoZ"] < 0.)):
rankScore = xm["classificationReliability"] * 1000 + 2. - \
xm["physical_separation_kpc"] / 10
# PHOTO-Z GALAXIES
elif (xm["physical_separation_kpc"] is not None and xm["physical_separation_kpc"] != "null" and xm["physical_separation_kpc"] < 20. and xm["association_type"] == "SN"):
rankScore = xm["classificationReliability"] * 1000 + 2.2 - \
xm["physical_separation_kpc"] / 10
# NOT SPEC-Z, NON PHOTO-Z GALAXIES
elif (xm["association_type"] == "SN"):
rankScore = xm["classificationReliability"] * 1000 + 5.
# VS
elif (xm["association_type"] == "VS"):
rankScore = xm["classificationReliability"] * \
1000 + xm["separationArcsec"] + 2.
# BS
elif (xm["association_type"] == "BS"):
rankScore = xm["classificationReliability"] * \
1000 + xm["separationArcsec"]
else:
rankScore = xm["classificationReliability"] * \
1000 + xm["separationArcsec"] + 10.
xm["rankScore"] = rankScore
crossmatches.append(xm)
if len(gm) > 1:
for g in gm:
g["rankScore"] = rankScore
crossmatches = sorted(
crossmatches, key=itemgetter('rankScore'), reverse=False)
crossmatches = sorted(
crossmatches, key=itemgetter('transient_object_id'))
transient_object_id = None
uniqueIndexCheck = []
classifications = {}
crossmatchesKeep = []
rank = 0
transClass = []
for xm in crossmatches:
rank += 1
if rank == 1:
transClass.append(xm["association_type"])
classifications[xm["transient_object_id"]] = transClass
xm["rank"] = rank
crossmatchesKeep.append(xm)
crossmatches = crossmatchesKeep
crossmatchesKeep = []
for xm in crossmatches:
group = groupedMatches[xm["merged_rank"] - 1]
xm["merged_rank"] = None
crossmatchesKeep.append(xm)
if len(group) > 1:
groupKeep = []
uniqueIndexCheck = []
for g in group:
g["merged_rank"] = xm["rank"]
g["rankScore"] = xm["rankScore"]
index = "%(catalogue_table_name)s%(catalogue_object_id)s" % g
# IF WE HAVE HIT A NEW SOURCE
if index not in uniqueIndexCheck:
uniqueIndexCheck.append(index)
crossmatchesKeep.append(g)
crossmatches = crossmatchesKeep
self.log.debug('completed the ``_rank_classifications`` method')
return classifications, crossmatches | python | def _rank_classifications(
self,
crossmatchArray,
colMaps):
"""*rank the classifications returned from the catalogue crossmatcher, annotate the results with a classification rank-number (most likely = 1) and a rank-score (weight of classification)*
**Key Arguments:**
- ``crossmatchArray`` -- the list of unranked crossmatch classifications
- ``colMaps`` -- dictionary of dictionaries with the name of the database-view (e.g. `tcs_view_agn_milliquas_v4_5`) as the key and the column-name dictionary map as value (`{view_name: {columnMap}}`).
**Return:**
- ``classifications`` -- the classifications assigned to the transients post-crossmatches
- ``crossmatches`` -- the crossmatches annotated with rankings and rank-scores
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``_rank_classifications`` method')
crossmatches = crossmatchArray
# GROUP CROSSMATCHES INTO DISTINCT SOURCES (DUPLICATE ENTRIES OF THE
# SAME ASTROPHYSICAL SOURCE ACROSS MULTIPLE CATALOGUES)
ra, dec = zip(*[(r["raDeg"], r["decDeg"]) for r in crossmatches])
from HMpTy.htm import sets
xmatcher = sets(
log=self.log,
ra=ra,
dec=dec,
radius=3 / (60. * 60.), # in degrees
sourceList=crossmatches
)
groupedMatches = xmatcher.match
associatationTypeOrder = ["AGN", "CV", "NT", "SN", "VS", "BS"]
# ADD DISTINCT-SOURCE KEY
dupKey = 0
distinctMatches = []
for x in groupedMatches:
dupKey += 1
mergedMatch = copy.deepcopy(x[0])
mergedMatch["merged_rank"] = int(dupKey)
# ADD OTHER ESSENTIAL KEYS
for e in ['z', 'photoZ', 'photoZErr']:
if e not in mergedMatch:
mergedMatch[e] = None
bestDirectDistance = {
"direct_distance": mergedMatch["direct_distance"],
"direct_distance_modulus": mergedMatch["direct_distance_modulus"],
"direct_distance_scale": mergedMatch["direct_distance"],
"qual": colMaps[mergedMatch["catalogue_view_name"]]["object_type_accuracy"]
}
bestSpecz = {
"z": mergedMatch["z"],
"distance": mergedMatch["distance"],
"distance_modulus": mergedMatch["distance_modulus"],
"scale": mergedMatch["scale"],
"qual": colMaps[mergedMatch["catalogue_view_name"]]["object_type_accuracy"]
}
bestPhotoz = {
"photoZ": mergedMatch["photoZ"],
"photoZErr": mergedMatch["photoZErr"],
"distance": mergedMatch["distance"],
"distance_modulus": mergedMatch["distance_modulus"],
"scale": mergedMatch["scale"],
"qual": colMaps[mergedMatch["catalogue_view_name"]]["object_type_accuracy"]
}
for i, m in enumerate(x):
m["merged_rank"] = int(dupKey)
if i > 0:
# MERGE ALL BEST MAGNITUDE MEASUREMENTS
for f in self.filterPreference:
if f in m and m[f] and (f not in mergedMatch or (f + "Err" in mergedMatch and (f + "Err" not in m or (mergedMatch[f + "Err"] > m[f + "Err"])))):
mergedMatch[f] = m[f]
try:
mergedMatch[f + "Err"] = m[f + "Err"]
except:
pass
mergedMatch["original_search_radius_arcsec"] = "multiple"
mergedMatch["catalogue_object_subtype"] = "multiple"
mergedMatch["catalogue_view_name"] = "multiple"
# MERGE SEARCH NAMES
snippet = m["search_name"].split(" ")[0].upper()
if "/" not in mergedMatch["search_name"] and snippet not in mergedMatch["search_name"].upper():
mergedMatch["search_name"] = mergedMatch["search_name"].split(
" ")[0].upper() + "/" + m["search_name"].split(" ")[0].upper()
elif snippet not in mergedMatch["search_name"].upper():
mergedMatch[
"search_name"] += "/" + m["search_name"].split(" ")[0].upper()
elif "/" not in mergedMatch["search_name"]:
mergedMatch["search_name"] = mergedMatch["search_name"].split(
" ")[0].upper()
mergedMatch["catalogue_table_name"] = mergedMatch[
"search_name"]
# MERGE CATALOGUE SOURCE NAMES
mergedMatch["catalogue_object_id"] = str(
mergedMatch["catalogue_object_id"])
m["catalogue_object_id"] = str(m["catalogue_object_id"])
if m["catalogue_object_id"].replace(" ", "").lower() not in mergedMatch["catalogue_object_id"].replace(" ", "").lower():
mergedMatch["catalogue_object_id"] += "/" + \
m["catalogue_object_id"]
# DETERMINE BEST CLASSIFICATION
if mergedMatch["classificationReliability"] == 3 and m["classificationReliability"] < 3:
mergedMatch["association_type"] = m["association_type"]
mergedMatch["classificationReliability"] = m[
"classificationReliability"]
if m["classificationReliability"] != 3 and m["association_type"] in associatationTypeOrder and (mergedMatch["association_type"] not in associatationTypeOrder or associatationTypeOrder.index(m["association_type"]) < associatationTypeOrder.index(mergedMatch["association_type"])):
mergedMatch["association_type"] = m["association_type"]
mergedMatch["classificationReliability"] = m[
"classificationReliability"]
# FIND BEST DISTANCES
if "direct_distance" in m and m["direct_distance"] and colMaps[m["catalogue_view_name"]]["object_type_accuracy"] > bestDirectDistance["qual"]:
bestDirectDistance = {
"direct_distance": m["direct_distance"],
"direct_distance_modulus": m["direct_distance_modulus"],
"direct_distance_scale": m["direct_distance_scale"],
"catalogue_object_type": m["catalogue_object_type"],
"qual": colMaps[m["catalogue_view_name"]]["object_type_accuracy"]
}
# FIND BEST SPEC-Z
if "z" in m and m["z"] and colMaps[m["catalogue_view_name"]]["object_type_accuracy"] > bestSpecz["qual"]:
bestSpecz = {
"z": m["z"],
"distance": m["distance"],
"distance_modulus": m["distance_modulus"],
"scale": m["scale"],
"catalogue_object_type": m["catalogue_object_type"],
"qual": colMaps[m["catalogue_view_name"]]["object_type_accuracy"]
}
# FIND BEST PHOT-Z
if "photoZ" in m and m["photoZ"] and colMaps[m["catalogue_view_name"]]["object_type_accuracy"] > bestPhotoz["qual"]:
bestPhotoz = {
"photoZ": m["photoZ"],
"photoZErr": m["photoZErr"],
"distance": m["distance"],
"distance_modulus": m["distance_modulus"],
"scale": m["scale"],
"catalogue_object_type": m["catalogue_object_type"],
"qual": colMaps[m["catalogue_view_name"]]["object_type_accuracy"]
}
# CLOSEST ANGULAR SEP & COORDINATES
if m["separationArcsec"] < mergedMatch["separationArcsec"]:
mergedMatch["separationArcsec"] = m["separationArcsec"]
mergedMatch["raDeg"] = m["raDeg"]
mergedMatch["decDeg"] = m["decDeg"]
# MERGE THE BEST RESULTS
for l in [bestPhotoz, bestSpecz, bestDirectDistance]:
for k, v in l.iteritems():
if k != "qual":
mergedMatch[k] = v
mergedMatch["catalogue_object_id"] = str(mergedMatch[
"catalogue_object_id"]).replace(" ", "")
# RECALCULATE PHYSICAL DISTANCE SEPARATION
if mergedMatch["direct_distance_scale"]:
mergedMatch["physical_separation_kpc"] = mergedMatch[
"direct_distance_scale"] * mergedMatch["separationArcsec"]
elif mergedMatch["scale"]:
mergedMatch["physical_separation_kpc"] = mergedMatch[
"scale"] * mergedMatch["separationArcsec"]
if "/" in mergedMatch["search_name"]:
mergedMatch["search_name"] = "multiple"
distinctMatches.append(mergedMatch)
crossmatches = []
for xm, gm in zip(distinctMatches, groupedMatches):
# SPEC-Z GALAXIES
if (xm["physical_separation_kpc"] is not None and xm["physical_separation_kpc"] != "null" and xm["physical_separation_kpc"] < 20. and xm["association_type"] == "SN" and (("z" in xm and xm["z"] is not None) or "photoZ" not in xm or xm["photoZ"] is None or xm["photoZ"] < 0.)):
rankScore = xm["classificationReliability"] * 1000 + 2. - \
xm["physical_separation_kpc"] / 10
# PHOTO-Z GALAXIES
elif (xm["physical_separation_kpc"] is not None and xm["physical_separation_kpc"] != "null" and xm["physical_separation_kpc"] < 20. and xm["association_type"] == "SN"):
rankScore = xm["classificationReliability"] * 1000 + 2.2 - \
xm["physical_separation_kpc"] / 10
# NOT SPEC-Z, NON PHOTO-Z GALAXIES
elif (xm["association_type"] == "SN"):
rankScore = xm["classificationReliability"] * 1000 + 5.
# VS
elif (xm["association_type"] == "VS"):
rankScore = xm["classificationReliability"] * \
1000 + xm["separationArcsec"] + 2.
# BS
elif (xm["association_type"] == "BS"):
rankScore = xm["classificationReliability"] * \
1000 + xm["separationArcsec"]
else:
rankScore = xm["classificationReliability"] * \
1000 + xm["separationArcsec"] + 10.
xm["rankScore"] = rankScore
crossmatches.append(xm)
if len(gm) > 1:
for g in gm:
g["rankScore"] = rankScore
crossmatches = sorted(
crossmatches, key=itemgetter('rankScore'), reverse=False)
crossmatches = sorted(
crossmatches, key=itemgetter('transient_object_id'))
transient_object_id = None
uniqueIndexCheck = []
classifications = {}
crossmatchesKeep = []
rank = 0
transClass = []
for xm in crossmatches:
rank += 1
if rank == 1:
transClass.append(xm["association_type"])
classifications[xm["transient_object_id"]] = transClass
xm["rank"] = rank
crossmatchesKeep.append(xm)
crossmatches = crossmatchesKeep
crossmatchesKeep = []
for xm in crossmatches:
group = groupedMatches[xm["merged_rank"] - 1]
xm["merged_rank"] = None
crossmatchesKeep.append(xm)
if len(group) > 1:
groupKeep = []
uniqueIndexCheck = []
for g in group:
g["merged_rank"] = xm["rank"]
g["rankScore"] = xm["rankScore"]
index = "%(catalogue_table_name)s%(catalogue_object_id)s" % g
# IF WE HAVE HIT A NEW SOURCE
if index not in uniqueIndexCheck:
uniqueIndexCheck.append(index)
crossmatchesKeep.append(g)
crossmatches = crossmatchesKeep
self.log.debug('completed the ``_rank_classifications`` method')
return classifications, crossmatches | [
"def",
"_rank_classifications",
"(",
"self",
",",
"crossmatchArray",
",",
"colMaps",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_rank_classifications`` method'",
")",
"crossmatches",
"=",
"crossmatchArray",
"# GROUP CROSSMATCHES INTO DISTINCT SOURCES (DUPLICATE ENTRIES OF THE",
"# SAME ASTROPHYSICAL SOURCE ACROSS MULTIPLE CATALOGUES)",
"ra",
",",
"dec",
"=",
"zip",
"(",
"*",
"[",
"(",
"r",
"[",
"\"raDeg\"",
"]",
",",
"r",
"[",
"\"decDeg\"",
"]",
")",
"for",
"r",
"in",
"crossmatches",
"]",
")",
"from",
"HMpTy",
".",
"htm",
"import",
"sets",
"xmatcher",
"=",
"sets",
"(",
"log",
"=",
"self",
".",
"log",
",",
"ra",
"=",
"ra",
",",
"dec",
"=",
"dec",
",",
"radius",
"=",
"3",
"/",
"(",
"60.",
"*",
"60.",
")",
",",
"# in degrees",
"sourceList",
"=",
"crossmatches",
")",
"groupedMatches",
"=",
"xmatcher",
".",
"match",
"associatationTypeOrder",
"=",
"[",
"\"AGN\"",
",",
"\"CV\"",
",",
"\"NT\"",
",",
"\"SN\"",
",",
"\"VS\"",
",",
"\"BS\"",
"]",
"# ADD DISTINCT-SOURCE KEY",
"dupKey",
"=",
"0",
"distinctMatches",
"=",
"[",
"]",
"for",
"x",
"in",
"groupedMatches",
":",
"dupKey",
"+=",
"1",
"mergedMatch",
"=",
"copy",
".",
"deepcopy",
"(",
"x",
"[",
"0",
"]",
")",
"mergedMatch",
"[",
"\"merged_rank\"",
"]",
"=",
"int",
"(",
"dupKey",
")",
"# ADD OTHER ESSENTIAL KEYS",
"for",
"e",
"in",
"[",
"'z'",
",",
"'photoZ'",
",",
"'photoZErr'",
"]",
":",
"if",
"e",
"not",
"in",
"mergedMatch",
":",
"mergedMatch",
"[",
"e",
"]",
"=",
"None",
"bestDirectDistance",
"=",
"{",
"\"direct_distance\"",
":",
"mergedMatch",
"[",
"\"direct_distance\"",
"]",
",",
"\"direct_distance_modulus\"",
":",
"mergedMatch",
"[",
"\"direct_distance_modulus\"",
"]",
",",
"\"direct_distance_scale\"",
":",
"mergedMatch",
"[",
"\"direct_distance\"",
"]",
",",
"\"qual\"",
":",
"colMaps",
"[",
"mergedMatch",
"[",
"\"catalogue_view_name\"",
"]",
"]",
"[",
"\"object_type_accuracy\"",
"]",
"}",
"bestSpecz",
"=",
"{",
"\"z\"",
":",
"mergedMatch",
"[",
"\"z\"",
"]",
",",
"\"distance\"",
":",
"mergedMatch",
"[",
"\"distance\"",
"]",
",",
"\"distance_modulus\"",
":",
"mergedMatch",
"[",
"\"distance_modulus\"",
"]",
",",
"\"scale\"",
":",
"mergedMatch",
"[",
"\"scale\"",
"]",
",",
"\"qual\"",
":",
"colMaps",
"[",
"mergedMatch",
"[",
"\"catalogue_view_name\"",
"]",
"]",
"[",
"\"object_type_accuracy\"",
"]",
"}",
"bestPhotoz",
"=",
"{",
"\"photoZ\"",
":",
"mergedMatch",
"[",
"\"photoZ\"",
"]",
",",
"\"photoZErr\"",
":",
"mergedMatch",
"[",
"\"photoZErr\"",
"]",
",",
"\"distance\"",
":",
"mergedMatch",
"[",
"\"distance\"",
"]",
",",
"\"distance_modulus\"",
":",
"mergedMatch",
"[",
"\"distance_modulus\"",
"]",
",",
"\"scale\"",
":",
"mergedMatch",
"[",
"\"scale\"",
"]",
",",
"\"qual\"",
":",
"colMaps",
"[",
"mergedMatch",
"[",
"\"catalogue_view_name\"",
"]",
"]",
"[",
"\"object_type_accuracy\"",
"]",
"}",
"for",
"i",
",",
"m",
"in",
"enumerate",
"(",
"x",
")",
":",
"m",
"[",
"\"merged_rank\"",
"]",
"=",
"int",
"(",
"dupKey",
")",
"if",
"i",
">",
"0",
":",
"# MERGE ALL BEST MAGNITUDE MEASUREMENTS",
"for",
"f",
"in",
"self",
".",
"filterPreference",
":",
"if",
"f",
"in",
"m",
"and",
"m",
"[",
"f",
"]",
"and",
"(",
"f",
"not",
"in",
"mergedMatch",
"or",
"(",
"f",
"+",
"\"Err\"",
"in",
"mergedMatch",
"and",
"(",
"f",
"+",
"\"Err\"",
"not",
"in",
"m",
"or",
"(",
"mergedMatch",
"[",
"f",
"+",
"\"Err\"",
"]",
">",
"m",
"[",
"f",
"+",
"\"Err\"",
"]",
")",
")",
")",
")",
":",
"mergedMatch",
"[",
"f",
"]",
"=",
"m",
"[",
"f",
"]",
"try",
":",
"mergedMatch",
"[",
"f",
"+",
"\"Err\"",
"]",
"=",
"m",
"[",
"f",
"+",
"\"Err\"",
"]",
"except",
":",
"pass",
"mergedMatch",
"[",
"\"original_search_radius_arcsec\"",
"]",
"=",
"\"multiple\"",
"mergedMatch",
"[",
"\"catalogue_object_subtype\"",
"]",
"=",
"\"multiple\"",
"mergedMatch",
"[",
"\"catalogue_view_name\"",
"]",
"=",
"\"multiple\"",
"# MERGE SEARCH NAMES",
"snippet",
"=",
"m",
"[",
"\"search_name\"",
"]",
".",
"split",
"(",
"\" \"",
")",
"[",
"0",
"]",
".",
"upper",
"(",
")",
"if",
"\"/\"",
"not",
"in",
"mergedMatch",
"[",
"\"search_name\"",
"]",
"and",
"snippet",
"not",
"in",
"mergedMatch",
"[",
"\"search_name\"",
"]",
".",
"upper",
"(",
")",
":",
"mergedMatch",
"[",
"\"search_name\"",
"]",
"=",
"mergedMatch",
"[",
"\"search_name\"",
"]",
".",
"split",
"(",
"\" \"",
")",
"[",
"0",
"]",
".",
"upper",
"(",
")",
"+",
"\"/\"",
"+",
"m",
"[",
"\"search_name\"",
"]",
".",
"split",
"(",
"\" \"",
")",
"[",
"0",
"]",
".",
"upper",
"(",
")",
"elif",
"snippet",
"not",
"in",
"mergedMatch",
"[",
"\"search_name\"",
"]",
".",
"upper",
"(",
")",
":",
"mergedMatch",
"[",
"\"search_name\"",
"]",
"+=",
"\"/\"",
"+",
"m",
"[",
"\"search_name\"",
"]",
".",
"split",
"(",
"\" \"",
")",
"[",
"0",
"]",
".",
"upper",
"(",
")",
"elif",
"\"/\"",
"not",
"in",
"mergedMatch",
"[",
"\"search_name\"",
"]",
":",
"mergedMatch",
"[",
"\"search_name\"",
"]",
"=",
"mergedMatch",
"[",
"\"search_name\"",
"]",
".",
"split",
"(",
"\" \"",
")",
"[",
"0",
"]",
".",
"upper",
"(",
")",
"mergedMatch",
"[",
"\"catalogue_table_name\"",
"]",
"=",
"mergedMatch",
"[",
"\"search_name\"",
"]",
"# MERGE CATALOGUE SOURCE NAMES",
"mergedMatch",
"[",
"\"catalogue_object_id\"",
"]",
"=",
"str",
"(",
"mergedMatch",
"[",
"\"catalogue_object_id\"",
"]",
")",
"m",
"[",
"\"catalogue_object_id\"",
"]",
"=",
"str",
"(",
"m",
"[",
"\"catalogue_object_id\"",
"]",
")",
"if",
"m",
"[",
"\"catalogue_object_id\"",
"]",
".",
"replace",
"(",
"\" \"",
",",
"\"\"",
")",
".",
"lower",
"(",
")",
"not",
"in",
"mergedMatch",
"[",
"\"catalogue_object_id\"",
"]",
".",
"replace",
"(",
"\" \"",
",",
"\"\"",
")",
".",
"lower",
"(",
")",
":",
"mergedMatch",
"[",
"\"catalogue_object_id\"",
"]",
"+=",
"\"/\"",
"+",
"m",
"[",
"\"catalogue_object_id\"",
"]",
"# DETERMINE BEST CLASSIFICATION",
"if",
"mergedMatch",
"[",
"\"classificationReliability\"",
"]",
"==",
"3",
"and",
"m",
"[",
"\"classificationReliability\"",
"]",
"<",
"3",
":",
"mergedMatch",
"[",
"\"association_type\"",
"]",
"=",
"m",
"[",
"\"association_type\"",
"]",
"mergedMatch",
"[",
"\"classificationReliability\"",
"]",
"=",
"m",
"[",
"\"classificationReliability\"",
"]",
"if",
"m",
"[",
"\"classificationReliability\"",
"]",
"!=",
"3",
"and",
"m",
"[",
"\"association_type\"",
"]",
"in",
"associatationTypeOrder",
"and",
"(",
"mergedMatch",
"[",
"\"association_type\"",
"]",
"not",
"in",
"associatationTypeOrder",
"or",
"associatationTypeOrder",
".",
"index",
"(",
"m",
"[",
"\"association_type\"",
"]",
")",
"<",
"associatationTypeOrder",
".",
"index",
"(",
"mergedMatch",
"[",
"\"association_type\"",
"]",
")",
")",
":",
"mergedMatch",
"[",
"\"association_type\"",
"]",
"=",
"m",
"[",
"\"association_type\"",
"]",
"mergedMatch",
"[",
"\"classificationReliability\"",
"]",
"=",
"m",
"[",
"\"classificationReliability\"",
"]",
"# FIND BEST DISTANCES",
"if",
"\"direct_distance\"",
"in",
"m",
"and",
"m",
"[",
"\"direct_distance\"",
"]",
"and",
"colMaps",
"[",
"m",
"[",
"\"catalogue_view_name\"",
"]",
"]",
"[",
"\"object_type_accuracy\"",
"]",
">",
"bestDirectDistance",
"[",
"\"qual\"",
"]",
":",
"bestDirectDistance",
"=",
"{",
"\"direct_distance\"",
":",
"m",
"[",
"\"direct_distance\"",
"]",
",",
"\"direct_distance_modulus\"",
":",
"m",
"[",
"\"direct_distance_modulus\"",
"]",
",",
"\"direct_distance_scale\"",
":",
"m",
"[",
"\"direct_distance_scale\"",
"]",
",",
"\"catalogue_object_type\"",
":",
"m",
"[",
"\"catalogue_object_type\"",
"]",
",",
"\"qual\"",
":",
"colMaps",
"[",
"m",
"[",
"\"catalogue_view_name\"",
"]",
"]",
"[",
"\"object_type_accuracy\"",
"]",
"}",
"# FIND BEST SPEC-Z",
"if",
"\"z\"",
"in",
"m",
"and",
"m",
"[",
"\"z\"",
"]",
"and",
"colMaps",
"[",
"m",
"[",
"\"catalogue_view_name\"",
"]",
"]",
"[",
"\"object_type_accuracy\"",
"]",
">",
"bestSpecz",
"[",
"\"qual\"",
"]",
":",
"bestSpecz",
"=",
"{",
"\"z\"",
":",
"m",
"[",
"\"z\"",
"]",
",",
"\"distance\"",
":",
"m",
"[",
"\"distance\"",
"]",
",",
"\"distance_modulus\"",
":",
"m",
"[",
"\"distance_modulus\"",
"]",
",",
"\"scale\"",
":",
"m",
"[",
"\"scale\"",
"]",
",",
"\"catalogue_object_type\"",
":",
"m",
"[",
"\"catalogue_object_type\"",
"]",
",",
"\"qual\"",
":",
"colMaps",
"[",
"m",
"[",
"\"catalogue_view_name\"",
"]",
"]",
"[",
"\"object_type_accuracy\"",
"]",
"}",
"# FIND BEST PHOT-Z",
"if",
"\"photoZ\"",
"in",
"m",
"and",
"m",
"[",
"\"photoZ\"",
"]",
"and",
"colMaps",
"[",
"m",
"[",
"\"catalogue_view_name\"",
"]",
"]",
"[",
"\"object_type_accuracy\"",
"]",
">",
"bestPhotoz",
"[",
"\"qual\"",
"]",
":",
"bestPhotoz",
"=",
"{",
"\"photoZ\"",
":",
"m",
"[",
"\"photoZ\"",
"]",
",",
"\"photoZErr\"",
":",
"m",
"[",
"\"photoZErr\"",
"]",
",",
"\"distance\"",
":",
"m",
"[",
"\"distance\"",
"]",
",",
"\"distance_modulus\"",
":",
"m",
"[",
"\"distance_modulus\"",
"]",
",",
"\"scale\"",
":",
"m",
"[",
"\"scale\"",
"]",
",",
"\"catalogue_object_type\"",
":",
"m",
"[",
"\"catalogue_object_type\"",
"]",
",",
"\"qual\"",
":",
"colMaps",
"[",
"m",
"[",
"\"catalogue_view_name\"",
"]",
"]",
"[",
"\"object_type_accuracy\"",
"]",
"}",
"# CLOSEST ANGULAR SEP & COORDINATES",
"if",
"m",
"[",
"\"separationArcsec\"",
"]",
"<",
"mergedMatch",
"[",
"\"separationArcsec\"",
"]",
":",
"mergedMatch",
"[",
"\"separationArcsec\"",
"]",
"=",
"m",
"[",
"\"separationArcsec\"",
"]",
"mergedMatch",
"[",
"\"raDeg\"",
"]",
"=",
"m",
"[",
"\"raDeg\"",
"]",
"mergedMatch",
"[",
"\"decDeg\"",
"]",
"=",
"m",
"[",
"\"decDeg\"",
"]",
"# MERGE THE BEST RESULTS",
"for",
"l",
"in",
"[",
"bestPhotoz",
",",
"bestSpecz",
",",
"bestDirectDistance",
"]",
":",
"for",
"k",
",",
"v",
"in",
"l",
".",
"iteritems",
"(",
")",
":",
"if",
"k",
"!=",
"\"qual\"",
":",
"mergedMatch",
"[",
"k",
"]",
"=",
"v",
"mergedMatch",
"[",
"\"catalogue_object_id\"",
"]",
"=",
"str",
"(",
"mergedMatch",
"[",
"\"catalogue_object_id\"",
"]",
")",
".",
"replace",
"(",
"\" \"",
",",
"\"\"",
")",
"# RECALULATE PHYSICAL DISTANCE SEPARATION",
"if",
"mergedMatch",
"[",
"\"direct_distance_scale\"",
"]",
":",
"mergedMatch",
"[",
"\"physical_separation_kpc\"",
"]",
"=",
"mergedMatch",
"[",
"\"direct_distance_scale\"",
"]",
"*",
"mergedMatch",
"[",
"\"separationArcsec\"",
"]",
"elif",
"mergedMatch",
"[",
"\"scale\"",
"]",
":",
"mergedMatch",
"[",
"\"physical_separation_kpc\"",
"]",
"=",
"mergedMatch",
"[",
"\"scale\"",
"]",
"*",
"mergedMatch",
"[",
"\"separationArcsec\"",
"]",
"if",
"\"/\"",
"in",
"mergedMatch",
"[",
"\"search_name\"",
"]",
":",
"mergedMatch",
"[",
"\"search_name\"",
"]",
"=",
"\"multiple\"",
"distinctMatches",
".",
"append",
"(",
"mergedMatch",
")",
"crossmatches",
"=",
"[",
"]",
"for",
"xm",
",",
"gm",
"in",
"zip",
"(",
"distinctMatches",
",",
"groupedMatches",
")",
":",
"# SPEC-Z GALAXIES",
"if",
"(",
"xm",
"[",
"\"physical_separation_kpc\"",
"]",
"is",
"not",
"None",
"and",
"xm",
"[",
"\"physical_separation_kpc\"",
"]",
"!=",
"\"null\"",
"and",
"xm",
"[",
"\"physical_separation_kpc\"",
"]",
"<",
"20.",
"and",
"xm",
"[",
"\"association_type\"",
"]",
"==",
"\"SN\"",
"and",
"(",
"(",
"\"z\"",
"in",
"xm",
"and",
"xm",
"[",
"\"z\"",
"]",
"is",
"not",
"None",
")",
"or",
"\"photoZ\"",
"not",
"in",
"xm",
"or",
"xm",
"[",
"\"photoZ\"",
"]",
"is",
"None",
"or",
"xm",
"[",
"\"photoZ\"",
"]",
"<",
"0.",
")",
")",
":",
"rankScore",
"=",
"xm",
"[",
"\"classificationReliability\"",
"]",
"*",
"1000",
"+",
"2.",
"-",
"xm",
"[",
"\"physical_separation_kpc\"",
"]",
"/",
"10",
"# PHOTO-Z GALAXIES",
"elif",
"(",
"xm",
"[",
"\"physical_separation_kpc\"",
"]",
"is",
"not",
"None",
"and",
"xm",
"[",
"\"physical_separation_kpc\"",
"]",
"!=",
"\"null\"",
"and",
"xm",
"[",
"\"physical_separation_kpc\"",
"]",
"<",
"20.",
"and",
"xm",
"[",
"\"association_type\"",
"]",
"==",
"\"SN\"",
")",
":",
"rankScore",
"=",
"xm",
"[",
"\"classificationReliability\"",
"]",
"*",
"1000",
"+",
"2.2",
"-",
"xm",
"[",
"\"physical_separation_kpc\"",
"]",
"/",
"10",
"# NOT SPEC-Z, NON PHOTO-Z GALAXIES",
"elif",
"(",
"xm",
"[",
"\"association_type\"",
"]",
"==",
"\"SN\"",
")",
":",
"rankScore",
"=",
"xm",
"[",
"\"classificationReliability\"",
"]",
"*",
"1000",
"+",
"5.",
"# VS",
"elif",
"(",
"xm",
"[",
"\"association_type\"",
"]",
"==",
"\"VS\"",
")",
":",
"rankScore",
"=",
"xm",
"[",
"\"classificationReliability\"",
"]",
"*",
"1000",
"+",
"xm",
"[",
"\"separationArcsec\"",
"]",
"+",
"2.",
"# BS",
"elif",
"(",
"xm",
"[",
"\"association_type\"",
"]",
"==",
"\"BS\"",
")",
":",
"rankScore",
"=",
"xm",
"[",
"\"classificationReliability\"",
"]",
"*",
"1000",
"+",
"xm",
"[",
"\"separationArcsec\"",
"]",
"else",
":",
"rankScore",
"=",
"xm",
"[",
"\"classificationReliability\"",
"]",
"*",
"1000",
"+",
"xm",
"[",
"\"separationArcsec\"",
"]",
"+",
"10.",
"xm",
"[",
"\"rankScore\"",
"]",
"=",
"rankScore",
"crossmatches",
".",
"append",
"(",
"xm",
")",
"if",
"len",
"(",
"gm",
")",
">",
"1",
":",
"for",
"g",
"in",
"gm",
":",
"g",
"[",
"\"rankScore\"",
"]",
"=",
"rankScore",
"crossmatches",
"=",
"sorted",
"(",
"crossmatches",
",",
"key",
"=",
"itemgetter",
"(",
"'rankScore'",
")",
",",
"reverse",
"=",
"False",
")",
"crossmatches",
"=",
"sorted",
"(",
"crossmatches",
",",
"key",
"=",
"itemgetter",
"(",
"'transient_object_id'",
")",
")",
"transient_object_id",
"=",
"None",
"uniqueIndexCheck",
"=",
"[",
"]",
"classifications",
"=",
"{",
"}",
"crossmatchesKeep",
"=",
"[",
"]",
"rank",
"=",
"0",
"transClass",
"=",
"[",
"]",
"for",
"xm",
"in",
"crossmatches",
":",
"rank",
"+=",
"1",
"if",
"rank",
"==",
"1",
":",
"transClass",
".",
"append",
"(",
"xm",
"[",
"\"association_type\"",
"]",
")",
"classifications",
"[",
"xm",
"[",
"\"transient_object_id\"",
"]",
"]",
"=",
"transClass",
"xm",
"[",
"\"rank\"",
"]",
"=",
"rank",
"crossmatchesKeep",
".",
"append",
"(",
"xm",
")",
"crossmatches",
"=",
"crossmatchesKeep",
"crossmatchesKeep",
"=",
"[",
"]",
"for",
"xm",
"in",
"crossmatches",
":",
"group",
"=",
"groupedMatches",
"[",
"xm",
"[",
"\"merged_rank\"",
"]",
"-",
"1",
"]",
"xm",
"[",
"\"merged_rank\"",
"]",
"=",
"None",
"crossmatchesKeep",
".",
"append",
"(",
"xm",
")",
"if",
"len",
"(",
"group",
")",
">",
"1",
":",
"groupKeep",
"=",
"[",
"]",
"uniqueIndexCheck",
"=",
"[",
"]",
"for",
"g",
"in",
"group",
":",
"g",
"[",
"\"merged_rank\"",
"]",
"=",
"xm",
"[",
"\"rank\"",
"]",
"g",
"[",
"\"rankScore\"",
"]",
"=",
"xm",
"[",
"\"rankScore\"",
"]",
"index",
"=",
"\"%(catalogue_table_name)s%(catalogue_object_id)s\"",
"%",
"g",
"# IF WE HAVE HIT A NEW SOURCE",
"if",
"index",
"not",
"in",
"uniqueIndexCheck",
":",
"uniqueIndexCheck",
".",
"append",
"(",
"index",
")",
"crossmatchesKeep",
".",
"append",
"(",
"g",
")",
"crossmatches",
"=",
"crossmatchesKeep",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_rank_classifications`` method'",
")",
"return",
"classifications",
",",
"crossmatches"
] | *rank the classifications returned from the catalogue crossmatcher, annotate the results with a classification rank-number (most likely = 1) and a rank-score (weight of classification)*
**Key Arguments:**
- ``crossmatchArrayIndex`` -- the index of the list of unranked crossmatch classifications
- ``colMaps`` -- dictionary of dictionaries with the name of the database-view (e.g. `tcs_view_agn_milliquas_v4_5`) as the key and the column-name dictionary map as value (`{view_name: {columnMap}}`).
**Return:**
- ``classifications`` -- the classifications assigned to the transients post-crossmatches
- ``crossmatches`` -- the crossmatches annotated with rankings and rank-scores
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring | [
"*",
"rank",
"the",
"classifications",
"returned",
"from",
"the",
"catalogue",
"crossmatcher",
"annotate",
"the",
"results",
"with",
"a",
"classification",
"rank",
"-",
"number",
"(",
"most",
"likely",
"=",
"1",
")",
"and",
"a",
"rank",
"-",
"score",
"(",
"weight",
"of",
"classification",
")",
"*"
] | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/transient_classifier.py#L791-L1051 |
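A note on the rank-scoring used by `_rank_classifications` above: `classificationReliability` (1 = synonym, 2 = association, 3 = annotation) is multiplied by 1000 so it always dominates, and a separation-based term breaks ties within a tier; sorting ascending then makes rank 1 the most likely classification. Below is a minimal standalone sketch of that idea with hypothetical, heavily simplified rows (the real method branches further on spec-z/photo-z availability and source type):

from operator import itemgetter

def rank_score(xm):
    # reliability dominates via the x1000 weight; separation breaks ties
    base = xm["classificationReliability"] * 1000
    if xm["association_type"] == "SN" and xm.get("physical_separation_kpc") is not None:
        return base + 2. - xm["physical_separation_kpc"] / 10.
    return base + xm["separationArcsec"] + 10.

crossmatches = [
    {"classificationReliability": 2, "association_type": "SN",
     "physical_separation_kpc": 4.0, "separationArcsec": 1.1},
    {"classificationReliability": 1, "association_type": "VS",
     "physical_separation_kpc": None, "separationArcsec": 0.3},
]
for xm in crossmatches:
    xm["rankScore"] = rank_score(xm)

# lowest score first, so rank 1 is the most likely classification
for rank, xm in enumerate(sorted(crossmatches, key=itemgetter("rankScore")), 1):
    print("%s %s %0.2f" % (rank, xm["association_type"], xm["rankScore"]))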
thespacedoctor/sherlock | sherlock/transient_classifier.py | transient_classifier._print_results_to_stdout | def _print_results_to_stdout(
self,
classifications,
crossmatches):
"""*print the classification and crossmatch results for a single transient object to stdout*
**Key Arguments:**
- ``crossmatches`` -- the unranked crossmatch classifications
- ``classifications`` -- the classifications assigned to the transients post-crossmatches (dictionary of rank ordered list of classifications)
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``_print_results_to_stdout`` method')
if self.verbose == 0:
return
if self.name in classifications:
headline = self.name + "'s Predicted Classification: " + \
classifications[self.name][0]
else:
headline = self.name + "'s Predicted Classification: ORPHAN"
print headline
print
print "Suggested Associations:"
# REPORT ONLY THE MOST PREFERRED MAGNITUDE VALUE
basic = ["association_type", "rank", "rankScore", "catalogue_table_name", "catalogue_object_id", "catalogue_object_type", "catalogue_object_subtype",
"raDeg", "decDeg", "separationArcsec", "physical_separation_kpc", "direct_distance", "distance", "z", "photoZ", "photoZErr", "Mag", "MagFilter", "MagErr", "classificationReliability", "merged_rank"]
verbose = ["search_name", "catalogue_view_name", "original_search_radius_arcsec", "direct_distance_modulus", "distance_modulus", "direct_distance_scale", "major_axis_arcsec", "scale", "U", "UErr",
"B", "BErr", "V", "VErr", "R", "RErr", "I", "IErr", "J", "JErr", "H", "HErr", "K", "KErr", "_u", "_uErr", "_g", "_gErr", "_r", "_rErr", "_i", "_iErr", "_z", "_zErr", "_y", "G", "GErr", "_yErr", "unkMag"]
dontFormat = ["decDeg", "raDeg", "rank",
"catalogue_object_id", "catalogue_object_subtype", "merged_rank"]
if self.verbose == 2:
basic = basic + verbose
for c in crossmatches:
for f in self.filterPreference:
if f in c and c[f]:
c["Mag"] = c[f]
c["MagFilter"] = f.replace("_", "").replace("Mag", "")
if f + "Err" in c:
c["MagErr"] = c[f + "Err"]
else:
c["MagErr"] = None
break
allKeys = []
for c in crossmatches:
for k, v in c.iteritems():
if k not in allKeys:
allKeys.append(k)
for c in crossmatches:
for k in allKeys:
if k not in c:
c[k] = None
printCrossmatches = []
for c in crossmatches:
ordDict = collections.OrderedDict(sorted({}.items()))
for k in basic:
if k in c:
if k == "catalogue_table_name":
c[k] = c[k].replace("tcs_cat_", "").replace("_", " ")
if k == "classificationReliability":
if c[k] == 1:
c["classification reliability"] = "synonym"
elif c[k] == 2:
c["classification reliability"] = "association"
elif c[k] == 3:
c["classification reliability"] = "annotation"
k = "classification reliability"
if k == "catalogue_object_subtype" and "sdss" in c["catalogue_table_name"]:
if c[k] == 6:
c[k] = "galaxy"
elif c[k] == 3:
c[k] = "star"
columnName = k.replace("tcs_cat_", "").replace("_", " ")
value = c[k]
if k not in dontFormat:
try:
ordDict[columnName] = "%(value)0.2f" % locals()
except:
ordDict[columnName] = value
else:
ordDict[columnName] = value
printCrossmatches.append(ordDict)
from fundamentals.renderer import list_of_dictionaries
dataSet = list_of_dictionaries(
log=self.log,
listOfDictionaries=printCrossmatches
)
tableData = dataSet.table(filepath=None)
print tableData
self.log.debug('completed the ``_print_results_to_stdout`` method')
return None | python | def _print_results_to_stdout(
self,
classifications,
crossmatches):
"""*print the classification and crossmatch results for a single transient object to stdout*
**Key Arguments:**
- ``crossmatches`` -- the unranked crossmatch classifications
- ``classifications`` -- the classifications assigned to the transients post-crossmatches (dictionary of rank ordered list of classifications)
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``_print_results_to_stdout`` method')
if self.verbose == 0:
return
if self.name in classifications:
headline = self.name + "'s Predicted Classification: " + \
classifications[self.name][0]
else:
headline = self.name + "'s Predicted Classification: ORPHAN"
print headline
print
print "Suggested Associations:"
# REPORT ONLY THE MOST PREFERRED MAGNITUDE VALUE
basic = ["association_type", "rank", "rankScore", "catalogue_table_name", "catalogue_object_id", "catalogue_object_type", "catalogue_object_subtype",
"raDeg", "decDeg", "separationArcsec", "physical_separation_kpc", "direct_distance", "distance", "z", "photoZ", "photoZErr", "Mag", "MagFilter", "MagErr", "classificationReliability", "merged_rank"]
verbose = ["search_name", "catalogue_view_name", "original_search_radius_arcsec", "direct_distance_modulus", "distance_modulus", "direct_distance_scale", "major_axis_arcsec", "scale", "U", "UErr",
"B", "BErr", "V", "VErr", "R", "RErr", "I", "IErr", "J", "JErr", "H", "HErr", "K", "KErr", "_u", "_uErr", "_g", "_gErr", "_r", "_rErr", "_i", "_iErr", "_z", "_zErr", "_y", "G", "GErr", "_yErr", "unkMag"]
dontFormat = ["decDeg", "raDeg", "rank",
"catalogue_object_id", "catalogue_object_subtype", "merged_rank"]
if self.verbose == 2:
basic = basic + verbose
for c in crossmatches:
for f in self.filterPreference:
if f in c and c[f]:
c["Mag"] = c[f]
c["MagFilter"] = f.replace("_", "").replace("Mag", "")
if f + "Err" in c:
c["MagErr"] = c[f + "Err"]
else:
c["MagErr"] = None
break
allKeys = []
for c in crossmatches:
for k, v in c.iteritems():
if k not in allKeys:
allKeys.append(k)
for c in crossmatches:
for k in allKeys:
if k not in c:
c[k] = None
printCrossmatches = []
for c in crossmatches:
ordDict = collections.OrderedDict(sorted({}.items()))
for k in basic:
if k in c:
if k == "catalogue_table_name":
c[k] = c[k].replace("tcs_cat_", "").replace("_", " ")
if k == "classificationReliability":
if c[k] == 1:
c["classification reliability"] = "synonym"
elif c[k] == 2:
c["classification reliability"] = "association"
elif c[k] == 3:
c["classification reliability"] = "annotation"
k = "classification reliability"
if k == "catalogue_object_subtype" and "sdss" in c["catalogue_table_name"]:
if c[k] == 6:
c[k] = "galaxy"
elif c[k] == 3:
c[k] = "star"
columnName = k.replace("tcs_cat_", "").replace("_", " ")
value = c[k]
if k not in dontFormat:
try:
ordDict[columnName] = "%(value)0.2f" % locals()
except:
ordDict[columnName] = value
else:
ordDict[columnName] = value
printCrossmatches.append(ordDict)
from fundamentals.renderer import list_of_dictionaries
dataSet = list_of_dictionaries(
log=self.log,
listOfDictionaries=printCrossmatches
)
tableData = dataSet.table(filepath=None)
print tableData
self.log.debug('completed the ``_print_results_to_stdout`` method')
return None | [
"def",
"_print_results_to_stdout",
"(",
"self",
",",
"classifications",
",",
"crossmatches",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_print_results_to_stdout`` method'",
")",
"if",
"self",
".",
"verbose",
"==",
"0",
":",
"return",
"if",
"self",
".",
"name",
"in",
"classifications",
":",
"headline",
"=",
"self",
".",
"name",
"+",
"\"'s Predicted Classification: \"",
"+",
"classifications",
"[",
"self",
".",
"name",
"]",
"[",
"0",
"]",
"else",
":",
"headline",
"=",
"self",
".",
"name",
"+",
"\"'s Predicted Classification: ORPHAN\"",
"print",
"headline",
"print",
"print",
"\"Suggested Associations:\"",
"# REPORT ONLY THE MOST PREFERED MAGNITUDE VALUE",
"basic",
"=",
"[",
"\"association_type\"",
",",
"\"rank\"",
",",
"\"rankScore\"",
",",
"\"catalogue_table_name\"",
",",
"\"catalogue_object_id\"",
",",
"\"catalogue_object_type\"",
",",
"\"catalogue_object_subtype\"",
",",
"\"raDeg\"",
",",
"\"decDeg\"",
",",
"\"separationArcsec\"",
",",
"\"physical_separation_kpc\"",
",",
"\"direct_distance\"",
",",
"\"distance\"",
",",
"\"z\"",
",",
"\"photoZ\"",
",",
"\"photoZErr\"",
",",
"\"Mag\"",
",",
"\"MagFilter\"",
",",
"\"MagErr\"",
",",
"\"classificationReliability\"",
",",
"\"merged_rank\"",
"]",
"verbose",
"=",
"[",
"\"search_name\"",
",",
"\"catalogue_view_name\"",
",",
"\"original_search_radius_arcsec\"",
",",
"\"direct_distance_modulus\"",
",",
"\"distance_modulus\"",
",",
"\"direct_distance_scale\"",
",",
"\"major_axis_arcsec\"",
",",
"\"scale\"",
",",
"\"U\"",
",",
"\"UErr\"",
",",
"\"B\"",
",",
"\"BErr\"",
",",
"\"V\"",
",",
"\"VErr\"",
",",
"\"R\"",
",",
"\"RErr\"",
",",
"\"I\"",
",",
"\"IErr\"",
",",
"\"J\"",
",",
"\"JErr\"",
",",
"\"H\"",
",",
"\"HErr\"",
",",
"\"K\"",
",",
"\"KErr\"",
",",
"\"_u\"",
",",
"\"_uErr\"",
",",
"\"_g\"",
",",
"\"_gErr\"",
",",
"\"_r\"",
",",
"\"_rErr\"",
",",
"\"_i\"",
",",
"\"_iErr\"",
",",
"\"_z\"",
",",
"\"_zErr\"",
",",
"\"_y\"",
",",
"\"G\"",
",",
"\"GErr\"",
",",
"\"_yErr\"",
",",
"\"unkMag\"",
"]",
"dontFormat",
"=",
"[",
"\"decDeg\"",
",",
"\"raDeg\"",
",",
"\"rank\"",
",",
"\"catalogue_object_id\"",
",",
"\"catalogue_object_subtype\"",
",",
"\"merged_rank\"",
"]",
"if",
"self",
".",
"verbose",
"==",
"2",
":",
"basic",
"=",
"basic",
"+",
"verbose",
"for",
"c",
"in",
"crossmatches",
":",
"for",
"f",
"in",
"self",
".",
"filterPreference",
":",
"if",
"f",
"in",
"c",
"and",
"c",
"[",
"f",
"]",
":",
"c",
"[",
"\"Mag\"",
"]",
"=",
"c",
"[",
"f",
"]",
"c",
"[",
"\"MagFilter\"",
"]",
"=",
"f",
".",
"replace",
"(",
"\"_\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\"Mag\"",
",",
"\"\"",
")",
"if",
"f",
"+",
"\"Err\"",
"in",
"c",
":",
"c",
"[",
"\"MagErr\"",
"]",
"=",
"c",
"[",
"f",
"+",
"\"Err\"",
"]",
"else",
":",
"c",
"[",
"\"MagErr\"",
"]",
"=",
"None",
"break",
"allKeys",
"=",
"[",
"]",
"for",
"c",
"in",
"crossmatches",
":",
"for",
"k",
",",
"v",
"in",
"c",
".",
"iteritems",
"(",
")",
":",
"if",
"k",
"not",
"in",
"allKeys",
":",
"allKeys",
".",
"append",
"(",
"k",
")",
"for",
"c",
"in",
"crossmatches",
":",
"for",
"k",
"in",
"allKeys",
":",
"if",
"k",
"not",
"in",
"c",
":",
"c",
"[",
"k",
"]",
"=",
"None",
"printCrossmatches",
"=",
"[",
"]",
"for",
"c",
"in",
"crossmatches",
":",
"ordDict",
"=",
"collections",
".",
"OrderedDict",
"(",
"sorted",
"(",
"{",
"}",
".",
"items",
"(",
")",
")",
")",
"for",
"k",
"in",
"basic",
":",
"if",
"k",
"in",
"c",
":",
"if",
"k",
"==",
"\"catalogue_table_name\"",
":",
"c",
"[",
"k",
"]",
"=",
"c",
"[",
"k",
"]",
".",
"replace",
"(",
"\"tcs_cat_\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\"_\"",
",",
"\" \"",
")",
"if",
"k",
"==",
"\"classificationReliability\"",
":",
"if",
"c",
"[",
"k",
"]",
"==",
"1",
":",
"c",
"[",
"\"classification reliability\"",
"]",
"=",
"\"synonym\"",
"elif",
"c",
"[",
"k",
"]",
"==",
"2",
":",
"c",
"[",
"\"classification reliability\"",
"]",
"=",
"\"association\"",
"elif",
"c",
"[",
"k",
"]",
"==",
"3",
":",
"c",
"[",
"\"classification reliability\"",
"]",
"=",
"\"annotation\"",
"k",
"=",
"\"classification reliability\"",
"if",
"k",
"==",
"\"catalogue_object_subtype\"",
"and",
"\"sdss\"",
"in",
"c",
"[",
"\"catalogue_table_name\"",
"]",
":",
"if",
"c",
"[",
"k",
"]",
"==",
"6",
":",
"c",
"[",
"k",
"]",
"=",
"\"galaxy\"",
"elif",
"c",
"[",
"k",
"]",
"==",
"3",
":",
"c",
"[",
"k",
"]",
"=",
"\"star\"",
"columnName",
"=",
"k",
".",
"replace",
"(",
"\"tcs_cat_\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\"_\"",
",",
"\" \"",
")",
"value",
"=",
"c",
"[",
"k",
"]",
"if",
"k",
"not",
"in",
"dontFormat",
":",
"try",
":",
"ordDict",
"[",
"columnName",
"]",
"=",
"\"%(value)0.2f\"",
"%",
"locals",
"(",
")",
"except",
":",
"ordDict",
"[",
"columnName",
"]",
"=",
"value",
"else",
":",
"ordDict",
"[",
"columnName",
"]",
"=",
"value",
"printCrossmatches",
".",
"append",
"(",
"ordDict",
")",
"from",
"fundamentals",
".",
"renderer",
"import",
"list_of_dictionaries",
"dataSet",
"=",
"list_of_dictionaries",
"(",
"log",
"=",
"self",
".",
"log",
",",
"listOfDictionaries",
"=",
"printCrossmatches",
")",
"tableData",
"=",
"dataSet",
".",
"table",
"(",
"filepath",
"=",
"None",
")",
"print",
"tableData",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_print_results_to_stdout`` method'",
")",
"return",
"None"
] | *print the classification and crossmatch results for a single transient object to stdout*
**Key Arguments:**
- ``crossmatches`` -- the unranked crossmatch classifications
- ``classifications`` -- the classifications assigned to the transients post-crossmatches (dictionary of rank ordered list of classifications)
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring | [
"*",
"print",
"the",
"classification",
"and",
"crossmatch",
"results",
"for",
"a",
"single",
"transient",
"object",
"to",
"stdout",
"*"
] | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/transient_classifier.py#L1053-L1163 |
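`_print_results_to_stdout` above first pads every crossmatch dict to the union of all keys so the columns line up, then hands the rows to `fundamentals.renderer.list_of_dictionaries` for tabulation. A dependency-free sketch of that pad-then-tabulate step (the column names here are illustrative, not the method's full `basic` list):

import collections

def pad_rows(rows):
    # collect the union of keys, preserving first-seen order
    all_keys = []
    for r in rows:
        for k in r:
            if k not in all_keys:
                all_keys.append(k)
    # every row gets every key; missing values become None
    return [collections.OrderedDict((k, r.get(k)) for k in all_keys) for r in rows]

def print_table(rows):
    headers = list(rows[0].keys())
    widths = [max(len(h), *(len(str(r[h])) for r in rows)) for h in headers]
    line = "  ".join("{:<%d}" % w for w in widths)
    print(line.format(*headers))
    for r in rows:
        print(line.format(*[str(r[h]) for h in headers]))

rows = pad_rows([
    {"rank": 1, "association_type": "SN", "separationArcsec": 0.21},
    {"rank": 2, "association_type": "AGN"},  # missing key padded to None
])
print_table(rows)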
thespacedoctor/sherlock | sherlock/transient_classifier.py | transient_classifier._consolidate_coordinateList | def _consolidate_coordinateList(
self,
coordinateList):
"""*match the coordinate list against itself with the parameters of the NED search queries to minimise duplicated NED queries*
**Key Arguments:**
- ``coordinateList`` -- the original coordinateList.
**Return:**
- ``updatedCoordinateList`` -- the coordinate list with duplicated search areas removed
**Usage:**
.. todo::
- add usage info
- create a sublime snippet for usage
- update package tutorial if needed
.. code-block:: python
usage code
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``_consolidate_coordinateList`` method')
raList = []
raList[:] = np.array([c[0] for c in coordinateList])
decList = []
decList[:] = np.array([c[1] for c in coordinateList])
nedStreamRadius = self.settings[
"ned stream search radius arcec"] / (60. * 60.)
firstPassNedSearchRadius = self.settings[
"first pass ned search radius arcec"] / (60. * 60.)
radius = nedStreamRadius - firstPassNedSearchRadius
# LET'S BE CONSERVATIVE
# radius = radius * 0.9
xmatcher = sets(
log=self.log,
ra=raList,
dec=decList,
radius=radius, # in degrees
sourceList=coordinateList,
convertToArray=False
)
allMatches = xmatcher.match
updatedCoordianteList = []
for aSet in allMatches:
updatedCoordianteList.append(aSet[0])
self.log.debug('completed the ``_consolidate_coordinateList`` method')
return updatedCoordianteList | python | def _consolidate_coordinateList(
self,
coordinateList):
"""*match the coordinate list against itself with the parameters of the NED search queries to minimise duplicated NED queries*
**Key Arguments:**
- ``coordinateList`` -- the original coordinateList.
**Return:**
- ``updatedCoordinateList`` -- the coordinate list with duplicated search areas removed
**Usage:**
.. todo::
- add usage info
- create a sublime snippet for usage
- update package tutorial if needed
.. code-block:: python
usage code
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``_consolidate_coordinateList`` method')
raList = []
raList[:] = np.array([c[0] for c in coordinateList])
decList = []
decList[:] = np.array([c[1] for c in coordinateList])
nedStreamRadius = self.settings[
"ned stream search radius arcec"] / (60. * 60.)
firstPassNedSearchRadius = self.settings[
"first pass ned search radius arcec"] / (60. * 60.)
radius = nedStreamRadius - firstPassNedSearchRadius
# LET'S BE CONSERVATIVE
# radius = radius * 0.9
xmatcher = sets(
log=self.log,
ra=raList,
dec=decList,
radius=radius, # in degrees
sourceList=coordinateList,
convertToArray=False
)
allMatches = xmatcher.match
updatedCoordianteList = []
for aSet in allMatches:
updatedCoordianteList.append(aSet[0])
self.log.debug('completed the ``_consolidate_coordinateList`` method')
return updatedCoordianteList | [
"def",
"_consolidate_coordinateList",
"(",
"self",
",",
"coordinateList",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_consolidate_coordinateList`` method'",
")",
"raList",
"=",
"[",
"]",
"raList",
"[",
":",
"]",
"=",
"np",
".",
"array",
"(",
"[",
"c",
"[",
"0",
"]",
"for",
"c",
"in",
"coordinateList",
"]",
")",
"decList",
"=",
"[",
"]",
"decList",
"[",
":",
"]",
"=",
"np",
".",
"array",
"(",
"[",
"c",
"[",
"1",
"]",
"for",
"c",
"in",
"coordinateList",
"]",
")",
"nedStreamRadius",
"=",
"self",
".",
"settings",
"[",
"\"ned stream search radius arcec\"",
"]",
"/",
"(",
"60.",
"*",
"60.",
")",
"firstPassNedSearchRadius",
"=",
"self",
".",
"settings",
"[",
"\"first pass ned search radius arcec\"",
"]",
"/",
"(",
"60.",
"*",
"60.",
")",
"radius",
"=",
"nedStreamRadius",
"-",
"firstPassNedSearchRadius",
"# LET'S BE CONSERVATIVE",
"# radius = radius * 0.9",
"xmatcher",
"=",
"sets",
"(",
"log",
"=",
"self",
".",
"log",
",",
"ra",
"=",
"raList",
",",
"dec",
"=",
"decList",
",",
"radius",
"=",
"radius",
",",
"# in degrees",
"sourceList",
"=",
"coordinateList",
",",
"convertToArray",
"=",
"False",
")",
"allMatches",
"=",
"xmatcher",
".",
"match",
"updatedCoordianteList",
"=",
"[",
"]",
"for",
"aSet",
"in",
"allMatches",
":",
"updatedCoordianteList",
".",
"append",
"(",
"aSet",
"[",
"0",
"]",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_consolidate_coordinateList`` method'",
")",
"return",
"updatedCoordianteList"
] | *match the coordinate list against itself with the parameters of the NED search queries to minimise duplicated NED queries*
**Key Arguments:**
- ``coordinateList`` -- the original coordinateList.
**Return:**
- ``updatedCoordinateList`` -- the coordinate list with duplicated search areas removed
**Usage:**
.. todo::
- add usage info
- create a sublime snippet for usage
- update package tutorial if needed
.. code-block:: python
usage code
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring | [
"*",
"match",
"the",
"coordinate",
"list",
"against",
"itself",
"with",
"the",
"parameters",
"of",
"the",
"NED",
"search",
"queries",
"to",
"minimise",
"duplicated",
"NED",
"queries",
"*"
] | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/transient_classifier.py#L1165-L1229 |
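`_consolidate_coordinateList` above delegates the self-match to the HTM-based `sets` matcher and keeps the first member of each matched group. A naive O(n²) stand-in for that deduplication, assuming a flat-sky approximation and hypothetical radius settings (the real radius comes from the "ned stream" and "first pass" settings keys):

import math

def consolidate(coords, radius_deg):
    # keep a coordinate only if nothing already kept lies within radius_deg
    kept = []
    for ra, dec in coords:
        for kra, kdec in kept:
            dra = (ra - kra) * math.cos(math.radians(dec))
            if math.hypot(dra, dec - kdec) < radius_deg:
                break
        else:
            kept.append((ra, dec))
    return kept

# stream radius minus first-pass search radius, mirroring the method above
radius_deg = (120. - 15.) / 3600.  # hypothetical arcsec values, in degrees
print(consolidate([(10.0, 20.0), (10.0001, 20.0001), (11.0, 20.0)], radius_deg))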
thespacedoctor/sherlock | sherlock/transient_classifier.py | transient_classifier.classification_annotations | def classification_annotations(
self):
"""*add a detialed classification annotation to each classification in the sherlock_classifications table*
**Key Arguments:**
# -
**Return:**
- None
**Usage:**
.. todo::
- add usage info
- create a sublime snippet for usage
- write a command-line tool for this method
- update package tutorial with command-line tool info if needed
.. code-block:: python
usage code
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``classification_annotations`` method')
from fundamentals.mysql import readquery
sqlQuery = u"""
select * from sherlock_classifications cl, sherlock_crossmatches xm where cl.transient_object_id=xm.transient_object_id and cl.annotation is null
""" % locals()
topXMs = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.transientsDbConn
)
for xm in topXMs:
annotation = []
classType = xm["classificationReliability"]
if classType == 1:
annotation.append("is synonymous with")
elif classType in [2, 3]:
annotation.append("is possibly associated with")
print xm["catalogue_object_id"]
self.log.debug('completed the ``classification_annotations`` method')
return None | python | def classification_annotations(
self):
"""*add a detialed classification annotation to each classification in the sherlock_classifications table*
**Key Arguments:**
# -
**Return:**
- None
**Usage:**
.. todo::
- add usage info
- create a sublime snippet for usage
- write a command-line tool for this method
- update package tutorial with command-line tool info if needed
.. code-block:: python
usage code
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``classification_annotations`` method')
from fundamentals.mysql import readquery
sqlQuery = u"""
select * from sherlock_classifications cl, sherlock_crossmatches xm where cl.transient_object_id=xm.transient_object_id and cl.annotation is null
""" % locals()
topXMs = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.transientsDbConn
)
for xm in topXMs:
annotation = []
classType = xm["classificationReliability"]
if classType == 1:
annotation.append("is synonymous with")
elif classType in [2, 3]:
annotation.append("is possibly associated with")
print xm["catalogue_object_id"]
self.log.debug('completed the ``classification_annotations`` method')
return None | [
"def",
"classification_annotations",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``classification_annotations`` method'",
")",
"from",
"fundamentals",
".",
"mysql",
"import",
"readquery",
"sqlQuery",
"=",
"u\"\"\"\n select * from sherlock_classifications cl, sherlock_crossmatches xm where cl.transient_object_id=xm.transient_object_id and cl.annotation is null\n \"\"\"",
"%",
"locals",
"(",
")",
"topXMs",
"=",
"readquery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"transientsDbConn",
")",
"for",
"xm",
"in",
"topXMs",
":",
"annotation",
"=",
"[",
"]",
"classType",
"=",
"xm",
"[",
"\"classificationReliability\"",
"]",
"if",
"classType",
"==",
"1",
":",
"annotation",
".",
"append",
"(",
"\"is synonymous with\"",
")",
"elif",
"classType",
"in",
"[",
"2",
",",
"3",
"]",
":",
"annotation",
".",
"append",
"(",
"\"is possibly associated with\"",
")",
"print",
"xm",
"[",
"\"catalogue_object_id\"",
"]",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``classification_annotations`` method'",
")",
"return",
"None"
] | *add a detailed classification annotation to each classification in the sherlock_classifications table*
**Key Arguments:**
# -
**Return:**
- None
**Usage:**
.. todo::
- add usage info
- create a sublime snippet for usage
- write a command-line tool for this method
- update package tutorial with command-line tool info if needed
.. code-block:: python
usage code
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring | [
"*",
"add",
"a",
"detialed",
"classification",
"annotation",
"to",
"each",
"classification",
"in",
"the",
"sherlock_classifications",
"table",
"*"
] | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/transient_classifier.py#L1231-L1287 |
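`classification_annotations` above opens each annotation with a phrase keyed off the `classificationReliability` tier: 1 yields "is synonymous with", 2 and 3 yield "is possibly associated with". A sketch of that phrase selection in isolation (the fallback wording is an assumption, not taken from the method, which is still marked todo upstream):

RELIABILITY_PHRASE = {
    1: "is synonymous with",
    2: "is possibly associated with",
    3: "is possibly associated with",
}

def annotation_opening(xm):
    # reliability tier picks the opening phrase; the default is hypothetical
    phrase = RELIABILITY_PHRASE.get(
        xm["classificationReliability"], "is unmatched against")
    return "The transient %s %s" % (phrase, xm["catalogue_object_id"])

print(annotation_opening(
    {"classificationReliability": 1, "catalogue_object_id": "NGC 1365"}))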
thespacedoctor/sherlock | sherlock/transient_classifier.py | transient_classifier.update_classification_annotations_and_summaries | def update_classification_annotations_and_summaries(
self,
updatePeakMagnitudes=True):
"""*update classification annotations and summaries*
**Key Arguments:**
- ``updatePeakMagnitudes`` -- update the peak magnitudes in the annotations to give absolute magnitudes. Default *True*
**Return:**
- None
**Usage:**
.. todo::
- add usage info
- create a sublime snippet for usage
- write a command-line tool for this method
- update package tutorial with command-line tool info if needed
.. code-block:: python
usage code
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug(
'starting the ``update_classification_annotations_and_summaries`` method')
# import time
# start_time = time.time()
# print "COLLECTING TRANSIENTS WITH NO ANNOTATIONS"
if updatePeakMagnitudes:
sqlQuery = u"""
SELECT * from sherlock_crossmatches cm, sherlock_classifications cl where rank =1 and cl.transient_object_id=cm.transient_object_id and (cl.annotation is null or cl.dateLastModified is null or cl.dateLastModified > DATE_SUB(NOW(), INTERVAL 30 DAY)) order by cl.dateLastModified asc limit 100000
""" % locals()
else:
sqlQuery = u"""
SELECT * from sherlock_crossmatches cm, sherlock_classifications cl where rank =1 and cl.transient_object_id=cm.transient_object_id and cl.summary is null
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.transientsDbConn,
quiet=False
)
# print "FINISHED COLLECTING TRANSIENTS WITH NO ANNOTATIONS/GENERATING ANNOTATIONS: %d" % (time.time() - start_time,)
# start_time = time.time()
from astrocalc.coords import unit_conversion
# ASTROCALC UNIT CONVERTER OBJECT
converter = unit_conversion(
log=self.log
)
updates = []
for row in rows:
catalogue = row["catalogue_table_name"]
objectId = row["catalogue_object_id"]
objectType = row["catalogue_object_type"]
objectSubtype = row["catalogue_object_subtype"]
catalogueString = catalogue
if "catalogue" not in catalogueString.lower():
catalogueString = catalogue + " catalogue"
if "/" in catalogueString:
catalogueString += "s"
if "ned" in catalogue.lower() and "/" not in catalogue:
objectId = '''<a href="https://ned.ipac.caltech.edu/cgi-bin/objsearch?objname=%(objectId)s&extend=no&hconst=73&omegam=0.27&omegav=0.73&corr_z=1&out_csys=Equatorial&out_equinox=J2000.0&obj_sort=RA+or+Longitude&of=pre_text&zv_breaker=30000.0&list_limit=5&img_stamp=YES">%(objectId)s</a>''' % locals()
elif "sdss" in catalogue.lower() and "/" not in catalogue:
objectId = "http://skyserver.sdss.org/dr12/en/tools/explore/Summary.aspx?id=%(objectId)s" % locals(
)
ra = converter.ra_decimal_to_sexegesimal(
ra=row["raDeg"],
delimiter=""
)
dec = converter.dec_decimal_to_sexegesimal(
dec=row["decDeg"],
delimiter=""
)
betterName = "SDSS J" + ra[0:9] + dec[0:9]
objectId = '''<a href="%(objectId)s">%(betterName)s</a>''' % locals()
elif "milliquas" in catalogue.lower() and "/" not in catalogue:
thisName = objectId
objectId = objectId.replace(" ", "+")
objectId = '''<a href="https://heasarc.gsfc.nasa.gov/db-perl/W3Browse/w3table.pl?popupFrom=Query+Results&tablehead=name%%3Dheasarc_milliquas%%26description%%3DMillion+Quasars+Catalog+%%28MILLIQUAS%%29%%2C+Version+4.8+%%2822+June+2016%%29%%26url%%3Dhttp%%3A%%2F%%2Fheasarc.gsfc.nasa.gov%%2FW3Browse%%2Fgalaxy-catalog%%2Fmilliquas.html%%26archive%%3DN%%26radius%%3D1%%26mission%%3DGALAXY+CATALOG%%26priority%%3D5%%26tabletype%%3DObject&dummy=Examples+of+query+constraints%%3A&varon=name&bparam_name=%%3D%%22%(objectId)s%%22&bparam_name%%3A%%3Aunit=+&bparam_name%%3A%%3Aformat=char25&varon=ra&bparam_ra=&bparam_ra%%3A%%3Aunit=degree&bparam_ra%%3A%%3Aformat=float8%%3A.5f&varon=dec&bparam_dec=&bparam_dec%%3A%%3Aunit=degree&bparam_dec%%3A%%3Aformat=float8%%3A.5f&varon=bmag&bparam_bmag=&bparam_bmag%%3A%%3Aunit=mag&bparam_bmag%%3A%%3Aformat=float8%%3A4.1f&varon=rmag&bparam_rmag=&bparam_rmag%%3A%%3Aunit=mag&bparam_rmag%%3A%%3Aformat=float8%%3A4.1f&varon=redshift&bparam_redshift=&bparam_redshift%%3A%%3Aunit=+&bparam_redshift%%3A%%3Aformat=float8%%3A6.3f&varon=radio_name&bparam_radio_name=&bparam_radio_name%%3A%%3Aunit=+&bparam_radio_name%%3A%%3Aformat=char22&varon=xray_name&bparam_xray_name=&bparam_xray_name%%3A%%3Aunit=+&bparam_xray_name%%3A%%3Aformat=char22&bparam_lii=&bparam_lii%%3A%%3Aunit=degree&bparam_lii%%3A%%3Aformat=float8%%3A.5f&bparam_bii=&bparam_bii%%3A%%3Aunit=degree&bparam_bii%%3A%%3Aformat=float8%%3A.5f&bparam_broad_type=&bparam_broad_type%%3A%%3Aunit=+&bparam_broad_type%%3A%%3Aformat=char4&bparam_optical_flag=&bparam_optical_flag%%3A%%3Aunit=+&bparam_optical_flag%%3A%%3Aformat=char3&bparam_red_psf_flag=&bparam_red_psf_flag%%3A%%3Aunit=+&bparam_red_psf_flag%%3A%%3Aformat=char1&bparam_blue_psf_flag=&bparam_blue_psf_flag%%3A%%3Aunit=+&bparam_blue_psf_flag%%3A%%3Aformat=char1&bparam_ref_name=&bparam_ref_name%%3A%%3Aunit=+&bparam_ref_name%%3A%%3Aformat=char6&bparam_ref_redshift=&bparam_ref_redshift%%3A%%3Aunit=+&bparam_ref_redshift%%3A%%3Aformat=char6&bparam_qso_prob=&bparam_qso_prob%%3A%%3Aunit=percent&bparam_qso_prob%%3A%%3Aformat=int2%%3A3d&bparam_alt_name_1=&bparam_alt_name_1%%3A%%3Aunit=+&bparam_alt_name_1%%3A%%3Aformat=char22&bparam_alt_name_2=&bparam_alt_name_2%%3A%%3Aunit=+&bparam_alt_name_2%%3A%%3Aformat=char22&Entry=&Coordinates=J2000&Radius=Default&Radius_unit=arcsec&NR=CheckCaches%%2FGRB%%2FSIMBAD%%2BSesame%%2FNED&Time=&ResultMax=1000&displaymode=Display&Action=Start+Search&table=heasarc_milliquas">%(thisName)s</a>''' % locals()
if objectSubtype and objectSubtype.lower() in ["uvs", "radios", "xray", "qso", "irs", 'uves', 'viss', 'hii', 'gclstr', 'ggroup', 'gpair', 'gtrpl']:
objectType = objectSubtype
if objectType == "star":
objectType = "stellar source"
elif objectType == "agn":
objectType = "AGN"
elif objectType == "cb":
objectType = "CV"
elif objectType == "unknown":
objectType = "unclassified source"
sep = row["separationArcsec"]
if row["classificationReliability"] == 1:
classificationReliability = "synonymous"
psep = row["physical_separation_kpc"]
if psep:
location = '%(sep)0.1f" (%(psep)0.1f Kpc) from the %(objectType)s core' % locals(
)
else:
location = '%(sep)0.1f" from the %(objectType)s core' % locals(
)
elif row["classificationReliability"] in (2, 3):
classificationReliability = "possibly associated"
n = row["northSeparationArcsec"]
if n > 0:
nd = "S"
else:
nd = "N"
e = row["eastSeparationArcsec"]
if e > 0:
ed = "W"
else:
ed = "E"
n = math.fabs(n)
e = math.fabs(e)
psep = row["physical_separation_kpc"]
if psep:
location = '%(n)0.2f" %(nd)s, %(e)0.2f" %(ed)s (%(psep)0.1f Kpc) from the %(objectType)s centre' % locals(
)
else:
location = '%(n)0.2f" %(nd)s, %(e)0.2f" %(ed)s from the %(objectType)s centre' % locals(
)
location = location.replace("unclassified", "object's")
best_mag = None
best_mag_error = None
best_mag_filter = None
filters = ["R", "V", "B", "I", "J", "G", "H", "K", "U",
"_r", "_g", "_i", "_g", "_z", "_y", "_u", "unkMag"]
for f in filters:
if row[f] and not best_mag:
best_mag = row[f]
best_mag_error = row[f + "Err"]
subfilter = f.replace(
"_", "").replace("Mag", "")
best_mag_filter = f.replace(
"_", "").replace("Mag", "") + "="
if "unk" in best_mag_filter:
best_mag_filter = ""
subfilter = ''
if not best_mag_filter:
if str(best_mag).lower() in ("8", "11", "18"):
best_mag_filter = "an "
else:
best_mag_filter = "a "
else:
if str(best_mag_filter)[0].lower() in ("r", "i", "h"):
best_mag_filter = "an " + best_mag_filter
else:
best_mag_filter = "a " + best_mag_filter
if not best_mag:
best_mag = "an unknown-"
best_mag_filter = ""
else:
best_mag = "%(best_mag)0.2f " % locals()
distance = None
if row["direct_distance"]:
d = row["direct_distance"]
distance = "distance of %(d)0.1f Mpc" % locals()
if row["z"]:
z = row["z"]
distance += "(z=%(z)0.3f)" % locals()
elif row["z"]:
z = row["z"]
distance = "z=%(z)0.3f" % locals()
elif row["photoZ"]:
z = row["photoZ"]
zErr = row["photoZErr"]
distance = "photoZ=%(z)0.3f (±%(zErr)0.3f)" % locals()
if distance:
distance = "%(distance)s" % locals()
distance_modulus = None
if row["direct_distance_modulus"]:
distance_modulus = row["direct_distance_modulus"]
elif row["distance_modulus"]:
distance_modulus = row["distance_modulus"]
if updatePeakMagnitudes:
if distance:
absMag = row["transientAbsMag"]
absMag = """ A host %(distance)s implies a transient <em>M =</em> %(absMag)s.""" % locals(
)
else:
absMag = ""
else:
if distance and distance_modulus:
absMag = "%(distance_modulus)0.2f" % locals()
absMag = """ A host %(distance)s implies a <em>m - M =</em> %(absMag)s.""" % locals(
)
else:
absMag = ""
annotation = "The transient is %(classificationReliability)s with <em>%(objectId)s</em>; %(best_mag_filter)s%(best_mag)smag %(objectType)s found in the %(catalogueString)s. It's located %(location)s.%(absMag)s" % locals()
summary = '%(sep)0.1f" from %(objectType)s in %(catalogue)s' % locals(
)
update = {
"transient_object_id": row["transient_object_id"],
"annotation": annotation,
"summary": summary,
"separationArcsec": sep
}
updates.append(update)
# print "FINISHED GENERATING ANNOTATIONS/ADDING ANNOTATIONS TO TRANSIENT DATABASE: %d" % (time.time() - start_time,)
# start_time = time.time()
insert_list_of_dictionaries_into_database_tables(
dbConn=self.transientsDbConn,
log=self.log,
dictList=updates,
dbTableName="sherlock_classifications",
dateModified=True,
batchSize=10000,
replace=True,
dbSettings=self.settings["database settings"]["transients"]
)
# print "FINISHED ADDING ANNOTATIONS TO TRANSIENT DATABASE/UPDATING ORPHAN ANNOTATIONS: %d" % (time.time() - start_time,)
# start_time = time.time()
sqlQuery = """update sherlock_classifications set annotation = "The transient location is not matched against any known catalogued source", summary = "No catalogued match" where classification = 'ORPHAN' and summary is null """ % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.transientsDbConn,
)
# print "FINISHED UPDATING ORPHAN ANNOTATIONS: %d" % (time.time() - start_time,)
# start_time = time.time()
self.log.debug(
'completed the ``update_classification_annotations_and_summaries`` method')
return None | python | def update_classification_annotations_and_summaries(
self,
updatePeakMagnitudes=True):
"""*update classification annotations and summaries*
**Key Arguments:**
- ``updatePeakMagnitudes`` -- update the peak magnitudes in the annotations to give absolute magnitudes. Default *True*
**Return:**
- None
**Usage:**
.. todo::
- add usage info
- create a sublime snippet for usage
- write a command-line tool for this method
- update package tutorial with command-line tool info if needed
.. code-block:: python
usage code
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug(
'starting the ``update_classification_annotations_and_summaries`` method')
# import time
# start_time = time.time()
# print "COLLECTING TRANSIENTS WITH NO ANNOTATIONS"
if updatePeakMagnitudes:
sqlQuery = u"""
SELECT * from sherlock_crossmatches cm, sherlock_classifications cl where rank =1 and cl.transient_object_id=cm.transient_object_id and (cl.annotation is null or cl.dateLastModified is null or cl.dateLastModified > DATE_SUB(NOW(), INTERVAL 30 DAY)) order by cl.dateLastModified asc limit 100000
""" % locals()
else:
sqlQuery = u"""
SELECT * from sherlock_crossmatches cm, sherlock_classifications cl where rank =1 and cl.transient_object_id=cm.transient_object_id and cl.summary is null
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.transientsDbConn,
quiet=False
)
# print "FINISHED COLLECTING TRANSIENTS WITH NO ANNOTATIONS/GENERATING ANNOTATIONS: %d" % (time.time() - start_time,)
# start_time = time.time()
from astrocalc.coords import unit_conversion
# ASTROCALC UNIT CONVERTER OBJECT
converter = unit_conversion(
log=self.log
)
updates = []
for row in rows:
catalogue = row["catalogue_table_name"]
objectId = row["catalogue_object_id"]
objectType = row["catalogue_object_type"]
objectSubtype = row["catalogue_object_subtype"]
catalogueString = catalogue
if "catalogue" not in catalogueString.lower():
catalogueString = catalogue + " catalogue"
if "/" in catalogueString:
catalogueString += "s"
if "ned" in catalogue.lower() and "/" not in catalogue:
objectId = '''<a href="https://ned.ipac.caltech.edu/cgi-bin/objsearch?objname=%(objectId)s&extend=no&hconst=73&omegam=0.27&omegav=0.73&corr_z=1&out_csys=Equatorial&out_equinox=J2000.0&obj_sort=RA+or+Longitude&of=pre_text&zv_breaker=30000.0&list_limit=5&img_stamp=YES">%(objectId)s</a>''' % locals()
elif "sdss" in catalogue.lower() and "/" not in catalogue:
objectId = "http://skyserver.sdss.org/dr12/en/tools/explore/Summary.aspx?id=%(objectId)s" % locals(
)
ra = converter.ra_decimal_to_sexegesimal(
ra=row["raDeg"],
delimiter=""
)
dec = converter.dec_decimal_to_sexegesimal(
dec=row["decDeg"],
delimiter=""
)
betterName = "SDSS J" + ra[0:9] + dec[0:9]
objectId = '''<a href="%(objectId)s">%(betterName)s</a>''' % locals()
elif "milliquas" in catalogue.lower() and "/" not in catalogue:
thisName = objectId
objectId = objectId.replace(" ", "+")
objectId = '''<a href="https://heasarc.gsfc.nasa.gov/db-perl/W3Browse/w3table.pl?popupFrom=Query+Results&tablehead=name%%3Dheasarc_milliquas%%26description%%3DMillion+Quasars+Catalog+%%28MILLIQUAS%%29%%2C+Version+4.8+%%2822+June+2016%%29%%26url%%3Dhttp%%3A%%2F%%2Fheasarc.gsfc.nasa.gov%%2FW3Browse%%2Fgalaxy-catalog%%2Fmilliquas.html%%26archive%%3DN%%26radius%%3D1%%26mission%%3DGALAXY+CATALOG%%26priority%%3D5%%26tabletype%%3DObject&dummy=Examples+of+query+constraints%%3A&varon=name&bparam_name=%%3D%%22%(objectId)s%%22&bparam_name%%3A%%3Aunit=+&bparam_name%%3A%%3Aformat=char25&varon=ra&bparam_ra=&bparam_ra%%3A%%3Aunit=degree&bparam_ra%%3A%%3Aformat=float8%%3A.5f&varon=dec&bparam_dec=&bparam_dec%%3A%%3Aunit=degree&bparam_dec%%3A%%3Aformat=float8%%3A.5f&varon=bmag&bparam_bmag=&bparam_bmag%%3A%%3Aunit=mag&bparam_bmag%%3A%%3Aformat=float8%%3A4.1f&varon=rmag&bparam_rmag=&bparam_rmag%%3A%%3Aunit=mag&bparam_rmag%%3A%%3Aformat=float8%%3A4.1f&varon=redshift&bparam_redshift=&bparam_redshift%%3A%%3Aunit=+&bparam_redshift%%3A%%3Aformat=float8%%3A6.3f&varon=radio_name&bparam_radio_name=&bparam_radio_name%%3A%%3Aunit=+&bparam_radio_name%%3A%%3Aformat=char22&varon=xray_name&bparam_xray_name=&bparam_xray_name%%3A%%3Aunit=+&bparam_xray_name%%3A%%3Aformat=char22&bparam_lii=&bparam_lii%%3A%%3Aunit=degree&bparam_lii%%3A%%3Aformat=float8%%3A.5f&bparam_bii=&bparam_bii%%3A%%3Aunit=degree&bparam_bii%%3A%%3Aformat=float8%%3A.5f&bparam_broad_type=&bparam_broad_type%%3A%%3Aunit=+&bparam_broad_type%%3A%%3Aformat=char4&bparam_optical_flag=&bparam_optical_flag%%3A%%3Aunit=+&bparam_optical_flag%%3A%%3Aformat=char3&bparam_red_psf_flag=&bparam_red_psf_flag%%3A%%3Aunit=+&bparam_red_psf_flag%%3A%%3Aformat=char1&bparam_blue_psf_flag=&bparam_blue_psf_flag%%3A%%3Aunit=+&bparam_blue_psf_flag%%3A%%3Aformat=char1&bparam_ref_name=&bparam_ref_name%%3A%%3Aunit=+&bparam_ref_name%%3A%%3Aformat=char6&bparam_ref_redshift=&bparam_ref_redshift%%3A%%3Aunit=+&bparam_ref_redshift%%3A%%3Aformat=char6&bparam_qso_prob=&bparam_qso_prob%%3A%%3Aunit=percent&bparam_qso_prob%%3A%%3Aformat=int2%%3A3d&bparam_alt_name_1=&bparam_alt_name_1%%3A%%3Aunit=+&bparam_alt_name_1%%3A%%3Aformat=char22&bparam_alt_name_2=&bparam_alt_name_2%%3A%%3Aunit=+&bparam_alt_name_2%%3A%%3Aformat=char22&Entry=&Coordinates=J2000&Radius=Default&Radius_unit=arcsec&NR=CheckCaches%%2FGRB%%2FSIMBAD%%2BSesame%%2FNED&Time=&ResultMax=1000&displaymode=Display&Action=Start+Search&table=heasarc_milliquas">%(thisName)s</a>''' % locals()
if objectSubtype and objectSubtype.lower() in ["uvs", "radios", "xray", "qso", "irs", 'uves', 'viss', 'hii', 'gclstr', 'ggroup', 'gpair', 'gtrpl']:
objectType = objectSubtype
if objectType == "star":
objectType = "stellar source"
elif objectType == "agn":
objectType = "AGN"
elif objectType == "cb":
objectType = "CV"
elif objectType == "unknown":
objectType = "unclassified source"
sep = row["separationArcsec"]
if row["classificationReliability"] == 1:
classificationReliability = "synonymous"
psep = row["physical_separation_kpc"]
if psep:
location = '%(sep)0.1f" (%(psep)0.1f Kpc) from the %(objectType)s core' % locals(
)
else:
location = '%(sep)0.1f" from the %(objectType)s core' % locals(
)
elif row["classificationReliability"] in (2, 3):
classificationReliability = "possibly associated"
n = row["northSeparationArcsec"]
if n > 0:
nd = "S"
else:
nd = "N"
e = row["eastSeparationArcsec"]
if e > 0:
ed = "W"
else:
ed = "E"
n = math.fabs(n)
e = math.fabs(e)
psep = row["physical_separation_kpc"]
if psep:
location = '%(n)0.2f" %(nd)s, %(e)0.2f" %(ed)s (%(psep)0.1f Kpc) from the %(objectType)s centre' % locals(
)
else:
location = '%(n)0.2f" %(nd)s, %(e)0.2f" %(ed)s from the %(objectType)s centre' % locals(
)
location = location.replace("unclassified", "object's")
best_mag = None
best_mag_error = None
best_mag_filter = None
filters = ["R", "V", "B", "I", "J", "G", "H", "K", "U",
"_r", "_g", "_i", "_g", "_z", "_y", "_u", "unkMag"]
for f in filters:
if row[f] and not best_mag:
best_mag = row[f]
best_mag_error = row[f + "Err"]
subfilter = f.replace(
"_", "").replace("Mag", "")
best_mag_filter = f.replace(
"_", "").replace("Mag", "") + "="
if "unk" in best_mag_filter:
best_mag_filter = ""
subfilter = ''
if not best_mag_filter:
if str(best_mag).lower() in ("8", "11", "18"):
best_mag_filter = "an "
else:
best_mag_filter = "a "
else:
if str(best_mag_filter)[0].lower() in ("r", "i", "h"):
best_mag_filter = "an " + best_mag_filter
else:
best_mag_filter = "a " + best_mag_filter
if not best_mag:
best_mag = "an unknown-"
best_mag_filter = ""
else:
best_mag = "%(best_mag)0.2f " % locals()
distance = None
if row["direct_distance"]:
d = row["direct_distance"]
distance = "distance of %(d)0.1f Mpc" % locals()
if row["z"]:
z = row["z"]
distance += "(z=%(z)0.3f)" % locals()
elif row["z"]:
z = row["z"]
distance = "z=%(z)0.3f" % locals()
elif row["photoZ"]:
z = row["photoZ"]
zErr = row["photoZErr"]
distance = "photoZ=%(z)0.3f (±%(zErr)0.3f)" % locals()
if distance:
distance = "%(distance)s" % locals()
distance_modulus = None
if row["direct_distance_modulus"]:
distance_modulus = row["direct_distance_modulus"]
elif row["distance_modulus"]:
distance_modulus = row["distance_modulus"]
if updatePeakMagnitudes:
if distance:
absMag = row["transientAbsMag"]
absMag = """ A host %(distance)s implies a transient <em>M =</em> %(absMag)s.""" % locals(
)
else:
absMag = ""
else:
if distance and distance_modulus:
absMag = "%(distance_modulus)0.2f" % locals()
absMag = """ A host %(distance)s implies a <em>m - M =</em> %(absMag)s.""" % locals(
)
else:
absMag = ""
annotation = "The transient is %(classificationReliability)s with <em>%(objectId)s</em>; %(best_mag_filter)s%(best_mag)smag %(objectType)s found in the %(catalogueString)s. It's located %(location)s.%(absMag)s" % locals()
summary = '%(sep)0.1f" from %(objectType)s in %(catalogue)s' % locals(
)
update = {
"transient_object_id": row["transient_object_id"],
"annotation": annotation,
"summary": summary,
"separationArcsec": sep
}
updates.append(update)
# print "FINISHED GENERATING ANNOTATIONS/ADDING ANNOTATIONS TO TRANSIENT DATABASE: %d" % (time.time() - start_time,)
# start_time = time.time()
insert_list_of_dictionaries_into_database_tables(
dbConn=self.transientsDbConn,
log=self.log,
dictList=updates,
dbTableName="sherlock_classifications",
dateModified=True,
batchSize=10000,
replace=True,
dbSettings=self.settings["database settings"]["transients"]
)
# print "FINISHED ADDING ANNOTATIONS TO TRANSIENT DATABASE/UPDATING ORPHAN ANNOTATIONS: %d" % (time.time() - start_time,)
# start_time = time.time()
sqlQuery = """update sherlock_classifications set annotation = "The transient location is not matched against any known catalogued source", summary = "No catalogued match" where classification = 'ORPHAN' and summary is null """ % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.transientsDbConn,
)
# print "FINISHED UPDATING ORPHAN ANNOTATIONS: %d" % (time.time() - start_time,)
# start_time = time.time()
self.log.debug(
'completed the ``update_classification_annotations_and_summaries`` method')
return None | [
"def",
"update_classification_annotations_and_summaries",
"(",
"self",
",",
"updatePeakMagnitudes",
"=",
"True",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``update_classification_annotations_and_summaries`` method'",
")",
"# import time",
"# start_time = time.time()",
"# print \"COLLECTING TRANSIENTS WITH NO ANNOTATIONS\"",
"if",
"updatePeakMagnitudes",
":",
"sqlQuery",
"=",
"u\"\"\"\n SELECT * from sherlock_crossmatches cm, sherlock_classifications cl where rank =1 and cl.transient_object_id=cm.transient_object_id and (cl.annotation is null orcl.dateLastModified is null or cl.dateLastModified > DATE_SUB(NOW(), INTERVAL 30 DAY)) order by cl.dateLastModified asc limit 100000\n \"\"\"",
"%",
"locals",
"(",
")",
"else",
":",
"sqlQuery",
"=",
"u\"\"\"\n SELECT * from sherlock_crossmatches cm, sherlock_classifications cl where rank =1 and cl.transient_object_id=cm.transient_object_id and cl.summary is null\n \"\"\"",
"%",
"locals",
"(",
")",
"rows",
"=",
"readquery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"transientsDbConn",
",",
"quiet",
"=",
"False",
")",
"# print \"FINISHED COLLECTING TRANSIENTS WITH NO ANNOTATIONS/GENERATING ANNOTATIONS: %d\" % (time.time() - start_time,)",
"# start_time = time.time()",
"from",
"astrocalc",
".",
"coords",
"import",
"unit_conversion",
"# ASTROCALC UNIT CONVERTER OBJECT",
"converter",
"=",
"unit_conversion",
"(",
"log",
"=",
"self",
".",
"log",
")",
"updates",
"=",
"[",
"]",
"for",
"row",
"in",
"rows",
":",
"catalogue",
"=",
"row",
"[",
"\"catalogue_table_name\"",
"]",
"objectId",
"=",
"row",
"[",
"\"catalogue_object_id\"",
"]",
"objectType",
"=",
"row",
"[",
"\"catalogue_object_type\"",
"]",
"objectSubtype",
"=",
"row",
"[",
"\"catalogue_object_subtype\"",
"]",
"catalogueString",
"=",
"catalogue",
"if",
"\"catalogue\"",
"not",
"in",
"catalogueString",
".",
"lower",
"(",
")",
":",
"catalogueString",
"=",
"catalogue",
"+",
"\" catalogue\"",
"if",
"\"/\"",
"in",
"catalogueString",
":",
"catalogueString",
"+=",
"\"s\"",
"if",
"\"ned\"",
"in",
"catalogue",
".",
"lower",
"(",
")",
"and",
"\"/\"",
"not",
"in",
"catalogue",
":",
"objectId",
"=",
"'''<a href=\"https://ned.ipac.caltech.edu/cgi-bin/objsearch?objname=%(objectId)s&extend=no&hconst=73&omegam=0.27&omegav=0.73&corr_z=1&out_csys=Equatorial&out_equinox=J2000.0&obj_sort=RA+or+Longitude&of=pre_text&zv_breaker=30000.0&list_limit=5&img_stamp=YES\">%(objectId)s</a>'''",
"%",
"locals",
"(",
")",
"elif",
"\"sdss\"",
"in",
"catalogue",
".",
"lower",
"(",
")",
"and",
"\"/\"",
"not",
"in",
"catalogue",
":",
"objectId",
"=",
"\"http://skyserver.sdss.org/dr12/en/tools/explore/Summary.aspx?id=%(objectId)s\"",
"%",
"locals",
"(",
")",
"ra",
"=",
"converter",
".",
"ra_decimal_to_sexegesimal",
"(",
"ra",
"=",
"row",
"[",
"\"raDeg\"",
"]",
",",
"delimiter",
"=",
"\"\"",
")",
"dec",
"=",
"converter",
".",
"dec_decimal_to_sexegesimal",
"(",
"dec",
"=",
"row",
"[",
"\"decDeg\"",
"]",
",",
"delimiter",
"=",
"\"\"",
")",
"betterName",
"=",
"\"SDSS J\"",
"+",
"ra",
"[",
"0",
":",
"9",
"]",
"+",
"dec",
"[",
"0",
":",
"9",
"]",
"objectId",
"=",
"'''<a href=\"%(objectId)s\">%(betterName)s</a>'''",
"%",
"locals",
"(",
")",
"elif",
"\"milliquas\"",
"in",
"catalogue",
".",
"lower",
"(",
")",
"and",
"\"/\"",
"not",
"in",
"catalogue",
":",
"thisName",
"=",
"objectId",
"objectId",
"=",
"objectId",
".",
"replace",
"(",
"\" \"",
",",
"\"+\"",
")",
"objectId",
"=",
"'''<a href=\"https://heasarc.gsfc.nasa.gov/db-perl/W3Browse/w3table.pl?popupFrom=Query+Results&tablehead=name%%3Dheasarc_milliquas%%26description%%3DMillion+Quasars+Catalog+%%28MILLIQUAS%%29%%2C+Version+4.8+%%2822+June+2016%%29%%26url%%3Dhttp%%3A%%2F%%2Fheasarc.gsfc.nasa.gov%%2FW3Browse%%2Fgalaxy-catalog%%2Fmilliquas.html%%26archive%%3DN%%26radius%%3D1%%26mission%%3DGALAXY+CATALOG%%26priority%%3D5%%26tabletype%%3DObject&dummy=Examples+of+query+constraints%%3A&varon=name&bparam_name=%%3D%%22%(objectId)s%%22&bparam_name%%3A%%3Aunit=+&bparam_name%%3A%%3Aformat=char25&varon=ra&bparam_ra=&bparam_ra%%3A%%3Aunit=degree&bparam_ra%%3A%%3Aformat=float8%%3A.5f&varon=dec&bparam_dec=&bparam_dec%%3A%%3Aunit=degree&bparam_dec%%3A%%3Aformat=float8%%3A.5f&varon=bmag&bparam_bmag=&bparam_bmag%%3A%%3Aunit=mag&bparam_bmag%%3A%%3Aformat=float8%%3A4.1f&varon=rmag&bparam_rmag=&bparam_rmag%%3A%%3Aunit=mag&bparam_rmag%%3A%%3Aformat=float8%%3A4.1f&varon=redshift&bparam_redshift=&bparam_redshift%%3A%%3Aunit=+&bparam_redshift%%3A%%3Aformat=float8%%3A6.3f&varon=radio_name&bparam_radio_name=&bparam_radio_name%%3A%%3Aunit=+&bparam_radio_name%%3A%%3Aformat=char22&varon=xray_name&bparam_xray_name=&bparam_xray_name%%3A%%3Aunit=+&bparam_xray_name%%3A%%3Aformat=char22&bparam_lii=&bparam_lii%%3A%%3Aunit=degree&bparam_lii%%3A%%3Aformat=float8%%3A.5f&bparam_bii=&bparam_bii%%3A%%3Aunit=degree&bparam_bii%%3A%%3Aformat=float8%%3A.5f&bparam_broad_type=&bparam_broad_type%%3A%%3Aunit=+&bparam_broad_type%%3A%%3Aformat=char4&bparam_optical_flag=&bparam_optical_flag%%3A%%3Aunit=+&bparam_optical_flag%%3A%%3Aformat=char3&bparam_red_psf_flag=&bparam_red_psf_flag%%3A%%3Aunit=+&bparam_red_psf_flag%%3A%%3Aformat=char1&bparam_blue_psf_flag=&bparam_blue_psf_flag%%3A%%3Aunit=+&bparam_blue_psf_flag%%3A%%3Aformat=char1&bparam_ref_name=&bparam_ref_name%%3A%%3Aunit=+&bparam_ref_name%%3A%%3Aformat=char6&bparam_ref_redshift=&bparam_ref_redshift%%3A%%3Aunit=+&bparam_ref_redshift%%3A%%3Aformat=char6&bparam_qso_prob=&bparam_qso_prob%%3A%%3Aunit=percent&bparam_qso_prob%%3A%%3Aformat=int2%%3A3d&bparam_alt_name_1=&bparam_alt_name_1%%3A%%3Aunit=+&bparam_alt_name_1%%3A%%3Aformat=char22&bparam_alt_name_2=&bparam_alt_name_2%%3A%%3Aunit=+&bparam_alt_name_2%%3A%%3Aformat=char22&Entry=&Coordinates=J2000&Radius=Default&Radius_unit=arcsec&NR=CheckCaches%%2FGRB%%2FSIMBAD%%2BSesame%%2FNED&Time=&ResultMax=1000&displaymode=Display&Action=Start+Search&table=heasarc_milliquas\">%(thisName)s</a>'''",
"%",
"locals",
"(",
")",
"if",
"objectSubtype",
"and",
"objectSubtype",
".",
"lower",
"(",
")",
"in",
"[",
"\"uvs\"",
",",
"\"radios\"",
",",
"\"xray\"",
",",
"\"qso\"",
",",
"\"irs\"",
",",
"'uves'",
",",
"'viss'",
",",
"'hii'",
",",
"'gclstr'",
",",
"'ggroup'",
",",
"'gpair'",
",",
"'gtrpl'",
"]",
":",
"objectType",
"=",
"objectSubtype",
"if",
"objectType",
"==",
"\"star\"",
":",
"objectType",
"=",
"\"stellar source\"",
"elif",
"objectType",
"==",
"\"agn\"",
":",
"objectType",
"=",
"\"AGN\"",
"elif",
"objectType",
"==",
"\"cb\"",
":",
"objectType",
"=",
"\"CV\"",
"elif",
"objectType",
"==",
"\"unknown\"",
":",
"objectType",
"=",
"\"unclassified source\"",
"sep",
"=",
"row",
"[",
"\"separationArcsec\"",
"]",
"if",
"row",
"[",
"\"classificationReliability\"",
"]",
"==",
"1",
":",
"classificationReliability",
"=",
"\"synonymous\"",
"psep",
"=",
"row",
"[",
"\"physical_separation_kpc\"",
"]",
"if",
"psep",
":",
"location",
"=",
"'%(sep)0.1f\" (%(psep)0.1f Kpc) from the %(objectType)s core'",
"%",
"locals",
"(",
")",
"else",
":",
"location",
"=",
"'%(sep)0.1f\" from the %(objectType)s core'",
"%",
"locals",
"(",
")",
"elif",
"row",
"[",
"\"classificationReliability\"",
"]",
"in",
"(",
"2",
",",
"3",
")",
":",
"classificationReliability",
"=",
"\"possibly associated\"",
"n",
"=",
"row",
"[",
"\"northSeparationArcsec\"",
"]",
"if",
"n",
">",
"0",
":",
"nd",
"=",
"\"S\"",
"else",
":",
"nd",
"=",
"\"N\"",
"e",
"=",
"row",
"[",
"\"eastSeparationArcsec\"",
"]",
"if",
"e",
">",
"0",
":",
"ed",
"=",
"\"W\"",
"else",
":",
"ed",
"=",
"\"E\"",
"n",
"=",
"math",
".",
"fabs",
"(",
"n",
")",
"e",
"=",
"math",
".",
"fabs",
"(",
"e",
")",
"psep",
"=",
"row",
"[",
"\"physical_separation_kpc\"",
"]",
"if",
"psep",
":",
"location",
"=",
"'%(n)0.2f\" %(nd)s, %(e)0.2f\" %(ed)s (%(psep)0.1f Kpc) from the %(objectType)s centre'",
"%",
"locals",
"(",
")",
"else",
":",
"location",
"=",
"'%(n)0.2f\" %(nd)s, %(e)0.2f\" %(ed)s from the %(objectType)s centre'",
"%",
"locals",
"(",
")",
"location",
"=",
"location",
".",
"replace",
"(",
"\"unclassified\"",
",",
"\"object's\"",
")",
"best_mag",
"=",
"None",
"best_mag_error",
"=",
"None",
"best_mag_filter",
"=",
"None",
"filters",
"=",
"[",
"\"R\"",
",",
"\"V\"",
",",
"\"B\"",
",",
"\"I\"",
",",
"\"J\"",
",",
"\"G\"",
",",
"\"H\"",
",",
"\"K\"",
",",
"\"U\"",
",",
"\"_r\"",
",",
"\"_g\"",
",",
"\"_i\"",
",",
"\"_g\"",
",",
"\"_z\"",
",",
"\"_y\"",
",",
"\"_u\"",
",",
"\"unkMag\"",
"]",
"for",
"f",
"in",
"filters",
":",
"if",
"row",
"[",
"f",
"]",
"and",
"not",
"best_mag",
":",
"best_mag",
"=",
"row",
"[",
"f",
"]",
"best_mag_error",
"=",
"row",
"[",
"f",
"+",
"\"Err\"",
"]",
"subfilter",
"=",
"f",
".",
"replace",
"(",
"\"_\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\"Mag\"",
",",
"\"\"",
")",
"best_mag_filter",
"=",
"f",
".",
"replace",
"(",
"\"_\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\"Mag\"",
",",
"\"\"",
")",
"+",
"\"=\"",
"if",
"\"unk\"",
"in",
"best_mag_filter",
":",
"best_mag_filter",
"=",
"\"\"",
"subfilter",
"=",
"''",
"if",
"not",
"best_mag_filter",
":",
"if",
"str",
"(",
"best_mag",
")",
".",
"lower",
"(",
")",
"in",
"(",
"\"8\"",
",",
"\"11\"",
",",
"\"18\"",
")",
":",
"best_mag_filter",
"=",
"\"an \"",
"else",
":",
"best_mag_filter",
"=",
"\"a \"",
"else",
":",
"if",
"str",
"(",
"best_mag_filter",
")",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"in",
"(",
"\"r\"",
",",
"\"i\"",
",",
"\"h\"",
")",
":",
"best_mag_filter",
"=",
"\"an \"",
"+",
"best_mag_filter",
"else",
":",
"best_mag_filter",
"=",
"\"a \"",
"+",
"best_mag_filter",
"if",
"not",
"best_mag",
":",
"best_mag",
"=",
"\"an unknown-\"",
"best_mag_filter",
"=",
"\"\"",
"else",
":",
"best_mag",
"=",
"\"%(best_mag)0.2f \"",
"%",
"locals",
"(",
")",
"distance",
"=",
"None",
"if",
"row",
"[",
"\"direct_distance\"",
"]",
":",
"d",
"=",
"row",
"[",
"\"direct_distance\"",
"]",
"distance",
"=",
"\"distance of %(d)0.1f Mpc\"",
"%",
"locals",
"(",
")",
"if",
"row",
"[",
"\"z\"",
"]",
":",
"z",
"=",
"row",
"[",
"\"z\"",
"]",
"distance",
"+=",
"\"(z=%(z)0.3f)\"",
"%",
"locals",
"(",
")",
"elif",
"row",
"[",
"\"z\"",
"]",
":",
"z",
"=",
"row",
"[",
"\"z\"",
"]",
"distance",
"=",
"\"z=%(z)0.3f\"",
"%",
"locals",
"(",
")",
"elif",
"row",
"[",
"\"photoZ\"",
"]",
":",
"z",
"=",
"row",
"[",
"\"photoZ\"",
"]",
"zErr",
"=",
"row",
"[",
"\"photoZErr\"",
"]",
"distance",
"=",
"\"photoZ=%(z)0.3f (±%(zErr)0.3f)\"",
"%",
"locals",
"(",
")",
"if",
"distance",
":",
"distance",
"=",
"\"%(distance)s\"",
"%",
"locals",
"(",
")",
"distance_modulus",
"=",
"None",
"if",
"row",
"[",
"\"direct_distance_modulus\"",
"]",
":",
"distance_modulus",
"=",
"row",
"[",
"\"direct_distance_modulus\"",
"]",
"elif",
"row",
"[",
"\"distance_modulus\"",
"]",
":",
"distance_modulus",
"=",
"row",
"[",
"\"distance_modulus\"",
"]",
"if",
"updatePeakMagnitudes",
":",
"if",
"distance",
":",
"absMag",
"=",
"row",
"[",
"\"transientAbsMag\"",
"]",
"absMag",
"=",
"\"\"\" A host %(distance)s implies a transient <em>M =</em> %(absMag)s.\"\"\"",
"%",
"locals",
"(",
")",
"else",
":",
"absMag",
"=",
"\"\"",
"else",
":",
"if",
"distance",
"and",
"distance_modulus",
":",
"absMag",
"=",
"\"%(distance_modulus)0.2f\"",
"%",
"locals",
"(",
")",
"absMag",
"=",
"\"\"\" A host %(distance)s implies a <em>m - M =</em> %(absMag)s.\"\"\"",
"%",
"locals",
"(",
")",
"else",
":",
"absMag",
"=",
"\"\"",
"annotation",
"=",
"\"The transient is %(classificationReliability)s with <em>%(objectId)s</em>; %(best_mag_filter)s%(best_mag)smag %(objectType)s found in the %(catalogueString)s. It's located %(location)s.%(absMag)s\"",
"%",
"locals",
"(",
")",
"summary",
"=",
"'%(sep)0.1f\" from %(objectType)s in %(catalogue)s'",
"%",
"locals",
"(",
")",
"update",
"=",
"{",
"\"transient_object_id\"",
":",
"row",
"[",
"\"transient_object_id\"",
"]",
",",
"\"annotation\"",
":",
"annotation",
",",
"\"summary\"",
":",
"summary",
",",
"\"separationArcsec\"",
":",
"sep",
"}",
"updates",
".",
"append",
"(",
"update",
")",
"# print \"FINISHED GENERATING ANNOTATIONS/ADDING ANNOTATIONS TO TRANSIENT DATABASE: %d\" % (time.time() - start_time,)",
"# start_time = time.time()",
"insert_list_of_dictionaries_into_database_tables",
"(",
"dbConn",
"=",
"self",
".",
"transientsDbConn",
",",
"log",
"=",
"self",
".",
"log",
",",
"dictList",
"=",
"updates",
",",
"dbTableName",
"=",
"\"sherlock_classifications\"",
",",
"dateModified",
"=",
"True",
",",
"batchSize",
"=",
"10000",
",",
"replace",
"=",
"True",
",",
"dbSettings",
"=",
"self",
".",
"settings",
"[",
"\"database settings\"",
"]",
"[",
"\"transients\"",
"]",
")",
"# print \"FINISHED ADDING ANNOTATIONS TO TRANSIENT DATABASE/UPDATING ORPHAN ANNOTATIONS: %d\" % (time.time() - start_time,)",
"# start_time = time.time()",
"sqlQuery",
"=",
"\"\"\"update sherlock_classifications set annotation = \"The transient location is not matched against any known catalogued source\", summary = \"No catalogued match\" where classification = 'ORPHAN' and summary is null \"\"\"",
"%",
"locals",
"(",
")",
"writequery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"transientsDbConn",
",",
")",
"# print \"FINISHED UPDATING ORPHAN ANNOTATIONS: %d\" % (time.time() - start_time,)",
"# start_time = time.time()",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``update_classification_annotations_and_summaries`` method'",
")",
"return",
"None"
] | *update classification annotations and summaries*
**Key Arguments:**
- ``updatePeakMagnitudes`` -- update the peak magnitudes in the annotations to give absolute magnitudes. Default *True*
**Return:**
- None
**Usage:**
.. todo::
- add usage info
- create a sublime snippet for usage
- write a command-line tool for this method
- update package tutorial with command-line tool info if needed
.. code-block:: python
usage code
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring | [
"*",
"update",
"classification",
"annotations",
"and",
"summaries",
"*"
] | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/transient_classifier.py#L1290-L1551 |
thespacedoctor/sherlock | sherlock/transient_classifier.py | transient_classifier.update_peak_magnitudes | def update_peak_magnitudes(
self):
"""*update peak magnitudes*
**Key Arguments:**
# -
**Return:**
- None
**Usage:**
.. todo::
- add usage info
- create a sublime snippet for usage
- write a command-line tool for this method
- update package tutorial with command-line tool info if needed
.. code-block:: python
usage code
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``update_peak_magnitudes`` method')
sqlQuery = self.settings["database settings"][
"transients"]["transient peak magnitude query"]
sqlQuery = """UPDATE sherlock_crossmatches s,
(%(sqlQuery)s) t
SET
s.transientAbsMag = ROUND(t.mag - IFNULL(direct_distance_modulus,
distance_modulus),
2)
WHERE
IFNULL(direct_distance_modulus,
distance_modulus) IS NOT NULL
AND t.id = s.transient_object_id;""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.transientsDbConn,
)
self.log.debug('completed the ``update_peak_magnitudes`` method')
return None | python | def update_peak_magnitudes(
self):
"""*update peak magnitudes*
**Key Arguments:**
# -
**Return:**
- None
**Usage:**
.. todo::
- add usage info
- create a sublime snippet for usage
- write a command-line tool for this method
- update package tutorial with command-line tool info if needed
.. code-block:: python
usage code
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``update_peak_magnitudes`` method')
sqlQuery = self.settings["database settings"][
"transients"]["transient peak magnitude query"]
sqlQuery = """UPDATE sherlock_crossmatches s,
(%(sqlQuery)s) t
SET
s.transientAbsMag = ROUND(t.mag - IFNULL(direct_distance_modulus,
distance_modulus),
2)
WHERE
IFNULL(direct_distance_modulus,
distance_modulus) IS NOT NULL
AND t.id = s.transient_object_id;""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.transientsDbConn,
)
self.log.debug('completed the ``update_peak_magnitudes`` method')
return None | [
"def",
"update_peak_magnitudes",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``update_peak_magnitudes`` method'",
")",
"sqlQuery",
"=",
"self",
".",
"settings",
"[",
"\"database settings\"",
"]",
"[",
"\"transients\"",
"]",
"[",
"\"transient peak magnitude query\"",
"]",
"sqlQuery",
"=",
"\"\"\"UPDATE sherlock_crossmatches s,\n (%(sqlQuery)s) t\n SET\n s.transientAbsMag = ROUND(t.mag - IFNULL(direct_distance_modulus,\n distance_modulus),\n 2)\n WHERE\n IFNULL(direct_distance_modulus,\n distance_modulus) IS NOT NULL\n AND t.id = s.transient_object_id;\"\"\"",
"%",
"locals",
"(",
")",
"writequery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"transientsDbConn",
",",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``update_peak_magnitudes`` method'",
")",
"return",
"None"
] | *update peak magnitudes*
**Key Arguments:**
# -
**Return:**
- None
**Usage:**
.. todo::
- add usage info
- create a sublime snippet for usage
- write a command-line tool for this method
- update package tutorial with command-line tool info if needed
.. code-block:: python
usage code
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring | [
"*",
"update",
"peak",
"magnitudes",
"*"
] | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/transient_classifier.py#L1554-L1611 |
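The only moving part in `update_peak_magnitudes` is string templating: a query read from the settings file is spliced into an UPDATE statement as the derived table `t` via `% locals()`. A self-contained illustration of that pattern (the inner query and table name here are invented for the example):

```python
# Stand-in for settings["database settings"]["transients"]
#                      ["transient peak magnitude query"]
sqlQuery = "SELECT id, mag FROM example_transient_peaks"

wrapped = """UPDATE sherlock_crossmatches s,
    (%(sqlQuery)s) t
SET
    s.transientAbsMag = ROUND(t.mag - IFNULL(direct_distance_modulus,
        distance_modulus), 2)
WHERE
    IFNULL(direct_distance_modulus, distance_modulus) IS NOT NULL
    AND t.id = s.transient_object_id;""" % locals()

print(wrapped)  # the inner SELECT now appears as derived table `t`
```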
thespacedoctor/sherlock | sherlock/transient_classifier.py | transient_classifier._create_tables_if_not_exist | def _create_tables_if_not_exist(
self):
"""*create the sherlock helper tables if they don't yet exist*
**Key Arguments:**
# -
**Return:**
- None
**Usage:**
.. todo::
- add usage info
- create a sublime snippet for usage
- write a command-line tool for this method
- update package tutorial with command-line tool info if needed
.. code-block:: python
usage code
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``_create_tables_if_not_exist`` method')
transientTable = self.settings["database settings"][
"transients"]["transient table"]
transientTableClassCol = self.settings["database settings"][
"transients"]["transient classification column"]
transientTableIdCol = self.settings["database settings"][
"transients"]["transient primary id column"]
crossmatchTable = "sherlock_crossmatches"
createStatement = """
CREATE TABLE IF NOT EXISTS `%(crossmatchTable)s` (
`transient_object_id` bigint(20) unsigned DEFAULT NULL,
`catalogue_object_id` varchar(30) DEFAULT NULL,
`catalogue_table_id` smallint(5) unsigned DEFAULT NULL,
`separationArcsec` double DEFAULT NULL,
`northSeparationArcsec` DOUBLE DEFAULT NULL,
`eastSeparationArcsec` DOUBLE DEFAULT NULL,
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`z` double DEFAULT NULL,
`scale` double DEFAULT NULL,
`distance` double DEFAULT NULL,
`distance_modulus` double DEFAULT NULL,
`photoZ` double DEFAULT NULL,
`photoZErr` double DEFAULT NULL,
`association_type` varchar(45) DEFAULT NULL,
`dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
`physical_separation_kpc` double DEFAULT NULL,
`catalogue_object_type` varchar(45) DEFAULT NULL,
`catalogue_object_subtype` varchar(45) DEFAULT NULL,
`association_rank` int(11) DEFAULT NULL,
`catalogue_table_name` varchar(100) DEFAULT NULL,
`catalogue_view_name` varchar(100) DEFAULT NULL,
`rank` int(11) DEFAULT NULL,
`rankScore` double DEFAULT NULL,
`search_name` varchar(100) DEFAULT NULL,
`major_axis_arcsec` double DEFAULT NULL,
`direct_distance` double DEFAULT NULL,
`direct_distance_scale` double DEFAULT NULL,
`direct_distance_modulus` double DEFAULT NULL,
`raDeg` double DEFAULT NULL,
`decDeg` double DEFAULT NULL,
`original_search_radius_arcsec` double DEFAULT NULL,
`catalogue_view_id` int(11) DEFAULT NULL,
`U` double DEFAULT NULL,
`UErr` double DEFAULT NULL,
`B` double DEFAULT NULL,
`BErr` double DEFAULT NULL,
`V` double DEFAULT NULL,
`VErr` double DEFAULT NULL,
`R` double DEFAULT NULL,
`RErr` double DEFAULT NULL,
`I` double DEFAULT NULL,
`IErr` double DEFAULT NULL,
`J` double DEFAULT NULL,
`JErr` double DEFAULT NULL,
`H` double DEFAULT NULL,
`HErr` double DEFAULT NULL,
`K` double DEFAULT NULL,
`KErr` double DEFAULT NULL,
`_u` double DEFAULT NULL,
`_uErr` double DEFAULT NULL,
`_g` double DEFAULT NULL,
`_gErr` double DEFAULT NULL,
`_r` double DEFAULT NULL,
`_rErr` double DEFAULT NULL,
`_i` double DEFAULT NULL,
`_iErr` double DEFAULT NULL,
`_z` double DEFAULT NULL,
`_zErr` double DEFAULT NULL,
`_y` double DEFAULT NULL,
`_yErr` double DEFAULT NULL,
`G` double DEFAULT NULL,
`GErr` double DEFAULT NULL,
`unkMag` double DEFAULT NULL,
`unkMagErr` double DEFAULT NULL,
`dateLastModified` datetime DEFAULT CURRENT_TIMESTAMP,
`updated` TINYINT NULL DEFAULT 0,
`classificationReliability` TINYINT NULL DEFAULT NULL,
`transientAbsMag` DOUBLE NULL DEFAULT NULL,
PRIMARY KEY (`id`),
KEY `key_transient_object_id` (`transient_object_id`),
KEY `key_catalogue_object_id` (`catalogue_object_id`),
KEY `idx_separationArcsec` (`separationArcsec`),
KEY `idx_rank` (`rank`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8;
CREATE TABLE IF NOT EXISTS `sherlock_classifications` (
`transient_object_id` bigint(20) NOT NULL,
`classification` varchar(45) DEFAULT NULL,
`annotation` TEXT COLLATE utf8_unicode_ci DEFAULT NULL,
`summary` VARCHAR(50) COLLATE utf8_unicode_ci DEFAULT NULL,
`separationArcsec` DOUBLE DEFAULT NULL,
`matchVerified` TINYINT NULL DEFAULT NULL,
`developmentComment` VARCHAR(100) NULL,
`dateLastModified` datetime DEFAULT CURRENT_TIMESTAMP,
`dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
`updated` varchar(45) DEFAULT '0',
PRIMARY KEY (`transient_object_id`),
KEY `key_transient_object_id` (`transient_object_id`),
KEY `idx_summary` (`summary`),
KEY `idx_classification` (`classification`)
) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;
""" % locals()
# A FIX FOR MYSQL VERSIONS < 5.6
triggers = []
if float(self.dbVersions["transients"][:3]) < 5.6:
createStatement = createStatement.replace(
"`dateLastModified` datetime DEFAULT CURRENT_TIMESTAMP,", "`dateLastModified` datetime DEFAULT NULL,")
createStatement = createStatement.replace(
"`dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,", "`dateCreated` datetime DEFAULT NULL,")
triggers.append("""
CREATE TRIGGER dateCreated
BEFORE INSERT ON `%(crossmatchTable)s`
FOR EACH ROW
BEGIN
IF NEW.dateCreated IS NULL THEN
SET NEW.dateCreated = NOW();
SET NEW.dateLastModified = NOW();
END IF;
END""" % locals())
try:
writequery(
log=self.log,
sqlQuery=createStatement,
dbConn=self.transientsDbConn,
Force=True
)
except:
self.log.info(
"Could not create table (`%(crossmatchTable)s`). Probably already exist." % locals())
sqlQuery = u"""
SHOW TRIGGERS;
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.transientsDbConn,
)
# DON'T ADD TRIGGERS IF THEY ALREADY EXIST
for r in rows:
if r["Trigger"] in ("sherlock_classifications_BEFORE_INSERT", "sherlock_classifications_AFTER_INSERT"):
return None
triggers.append("""CREATE TRIGGER `sherlock_classifications_BEFORE_INSERT` BEFORE INSERT ON `sherlock_classifications` FOR EACH ROW
BEGIN
IF new.classification = "ORPHAN" THEN
SET new.annotation = "The transient location is not matched against any known catalogued source", new.summary = "No catalogued match";
END IF;
END""" % locals())
triggers.append("""CREATE TRIGGER `sherlock_classifications_AFTER_INSERT` AFTER INSERT ON `sherlock_classifications` FOR EACH ROW
BEGIN
update `%(transientTable)s` set `%(transientTableClassCol)s` = new.classification
where `%(transientTableIdCol)s` = new.transient_object_id;
END""" % locals())
for t in triggers:
try:
writequery(
log=self.log,
sqlQuery=t,
dbConn=self.transientsDbConn,
Force=True
)
except:
self.log.info(
"Could not create trigger (`%(crossmatchTable)s`). Probably already exist." % locals())
self.log.debug('completed the ``_create_tables_if_not_exist`` method')
return None | python | def _create_tables_if_not_exist(
self):
"""*create the sherlock helper tables if they don't yet exist*
**Key Arguments:**
# -
**Return:**
- None
**Usage:**
.. todo::
- add usage info
- create a sublime snippet for usage
- write a command-line tool for this method
- update package tutorial with command-line tool info if needed
.. code-block:: python
usage code
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``_create_tables_if_not_exist`` method')
transientTable = self.settings["database settings"][
"transients"]["transient table"]
transientTableClassCol = self.settings["database settings"][
"transients"]["transient classification column"]
transientTableIdCol = self.settings["database settings"][
"transients"]["transient primary id column"]
crossmatchTable = "sherlock_crossmatches"
createStatement = """
CREATE TABLE IF NOT EXISTS `%(crossmatchTable)s` (
`transient_object_id` bigint(20) unsigned DEFAULT NULL,
`catalogue_object_id` varchar(30) DEFAULT NULL,
`catalogue_table_id` smallint(5) unsigned DEFAULT NULL,
`separationArcsec` double DEFAULT NULL,
`northSeparationArcsec` DOUBLE DEFAULT NULL,
`eastSeparationArcsec` DOUBLE DEFAULT NULL,
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`z` double DEFAULT NULL,
`scale` double DEFAULT NULL,
`distance` double DEFAULT NULL,
`distance_modulus` double DEFAULT NULL,
`photoZ` double DEFAULT NULL,
`photoZErr` double DEFAULT NULL,
`association_type` varchar(45) DEFAULT NULL,
`dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
`physical_separation_kpc` double DEFAULT NULL,
`catalogue_object_type` varchar(45) DEFAULT NULL,
`catalogue_object_subtype` varchar(45) DEFAULT NULL,
`association_rank` int(11) DEFAULT NULL,
`catalogue_table_name` varchar(100) DEFAULT NULL,
`catalogue_view_name` varchar(100) DEFAULT NULL,
`rank` int(11) DEFAULT NULL,
`rankScore` double DEFAULT NULL,
`search_name` varchar(100) DEFAULT NULL,
`major_axis_arcsec` double DEFAULT NULL,
`direct_distance` double DEFAULT NULL,
`direct_distance_scale` double DEFAULT NULL,
`direct_distance_modulus` double DEFAULT NULL,
`raDeg` double DEFAULT NULL,
`decDeg` double DEFAULT NULL,
`original_search_radius_arcsec` double DEFAULT NULL,
`catalogue_view_id` int(11) DEFAULT NULL,
`U` double DEFAULT NULL,
`UErr` double DEFAULT NULL,
`B` double DEFAULT NULL,
`BErr` double DEFAULT NULL,
`V` double DEFAULT NULL,
`VErr` double DEFAULT NULL,
`R` double DEFAULT NULL,
`RErr` double DEFAULT NULL,
`I` double DEFAULT NULL,
`IErr` double DEFAULT NULL,
`J` double DEFAULT NULL,
`JErr` double DEFAULT NULL,
`H` double DEFAULT NULL,
`HErr` double DEFAULT NULL,
`K` double DEFAULT NULL,
`KErr` double DEFAULT NULL,
`_u` double DEFAULT NULL,
`_uErr` double DEFAULT NULL,
`_g` double DEFAULT NULL,
`_gErr` double DEFAULT NULL,
`_r` double DEFAULT NULL,
`_rErr` double DEFAULT NULL,
`_i` double DEFAULT NULL,
`_iErr` double DEFAULT NULL,
`_z` double DEFAULT NULL,
`_zErr` double DEFAULT NULL,
`_y` double DEFAULT NULL,
`_yErr` double DEFAULT NULL,
`G` double DEFAULT NULL,
`GErr` double DEFAULT NULL,
`unkMag` double DEFAULT NULL,
`unkMagErr` double DEFAULT NULL,
`dateLastModified` datetime DEFAULT CURRENT_TIMESTAMP,
`updated` TINYINT NULL DEFAULT 0,
`classificationReliability` TINYINT NULL DEFAULT NULL,
`transientAbsMag` DOUBLE NULL DEFAULT NULL,
PRIMARY KEY (`id`),
KEY `key_transient_object_id` (`transient_object_id`),
KEY `key_catalogue_object_id` (`catalogue_object_id`),
KEY `idx_separationArcsec` (`separationArcsec`),
KEY `idx_rank` (`rank`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8;
CREATE TABLE IF NOT EXISTS `sherlock_classifications` (
`transient_object_id` bigint(20) NOT NULL,
`classification` varchar(45) DEFAULT NULL,
`annotation` TEXT COLLATE utf8_unicode_ci DEFAULT NULL,
`summary` VARCHAR(50) COLLATE utf8_unicode_ci DEFAULT NULL,
`separationArcsec` DOUBLE DEFAULT NULL,
`matchVerified` TINYINT NULL DEFAULT NULL,
`developmentComment` VARCHAR(100) NULL,
`dateLastModified` datetime DEFAULT CURRENT_TIMESTAMP,
`dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
`updated` varchar(45) DEFAULT '0',
PRIMARY KEY (`transient_object_id`),
KEY `key_transient_object_id` (`transient_object_id`),
KEY `idx_summary` (`summary`),
KEY `idx_classification` (`classification`)
) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;
""" % locals()
# A FIX FOR MYSQL VERSIONS < 5.6
triggers = []
if float(self.dbVersions["transients"][:3]) < 5.6:
createStatement = createStatement.replace(
"`dateLastModified` datetime DEFAULT CURRENT_TIMESTAMP,", "`dateLastModified` datetime DEFAULT NULL,")
createStatement = createStatement.replace(
"`dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,", "`dateCreated` datetime DEFAULT NULL,")
triggers.append("""
CREATE TRIGGER dateCreated
BEFORE INSERT ON `%(crossmatchTable)s`
FOR EACH ROW
BEGIN
IF NEW.dateCreated IS NULL THEN
SET NEW.dateCreated = NOW();
SET NEW.dateLastModified = NOW();
END IF;
END""" % locals())
try:
writequery(
log=self.log,
sqlQuery=createStatement,
dbConn=self.transientsDbConn,
Force=True
)
except:
self.log.info(
"Could not create table (`%(crossmatchTable)s`). Probably already exist." % locals())
sqlQuery = u"""
SHOW TRIGGERS;
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.transientsDbConn,
)
# DON'T ADD TRIGGERS IF THEY ALREADY EXIST
for r in rows:
if r["Trigger"] in ("sherlock_classifications_BEFORE_INSERT", "sherlock_classifications_AFTER_INSERT"):
return None
triggers.append("""CREATE TRIGGER `sherlock_classifications_BEFORE_INSERT` BEFORE INSERT ON `sherlock_classifications` FOR EACH ROW
BEGIN
IF new.classification = "ORPHAN" THEN
SET new.annotation = "The transient location is not matched against any known catalogued source", new.summary = "No catalogued match";
END IF;
END""" % locals())
triggers.append("""CREATE TRIGGER `sherlock_classifications_AFTER_INSERT` AFTER INSERT ON `sherlock_classifications` FOR EACH ROW
BEGIN
update `%(transientTable)s` set `%(transientTableClassCol)s` = new.classification
where `%(transientTableIdCol)s` = new.transient_object_id;
END""" % locals())
for t in triggers:
try:
writequery(
log=self.log,
sqlQuery=t,
dbConn=self.transientsDbConn,
Force=True
)
except:
self.log.info(
"Could not create trigger (`%(crossmatchTable)s`). Probably already exist." % locals())
self.log.debug('completed the ``_create_tables_if_not_exist`` method')
return None | [
"def",
"_create_tables_if_not_exist",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_create_tables_if_not_exist`` method'",
")",
"transientTable",
"=",
"self",
".",
"settings",
"[",
"\"database settings\"",
"]",
"[",
"\"transients\"",
"]",
"[",
"\"transient table\"",
"]",
"transientTableClassCol",
"=",
"self",
".",
"settings",
"[",
"\"database settings\"",
"]",
"[",
"\"transients\"",
"]",
"[",
"\"transient classification column\"",
"]",
"transientTableIdCol",
"=",
"self",
".",
"settings",
"[",
"\"database settings\"",
"]",
"[",
"\"transients\"",
"]",
"[",
"\"transient primary id column\"",
"]",
"crossmatchTable",
"=",
"\"sherlock_crossmatches\"",
"createStatement",
"=",
"\"\"\"\nCREATE TABLE IF NOT EXISTS `%(crossmatchTable)s` (\n `transient_object_id` bigint(20) unsigned DEFAULT NULL,\n `catalogue_object_id` varchar(30) DEFAULT NULL,\n `catalogue_table_id` smallint(5) unsigned DEFAULT NULL,\n `separationArcsec` double DEFAULT NULL,\n `northSeparationArcsec` DOUBLE DEFAULT NULL,\n `eastSeparationArcsec` DOUBLE DEFAULT NULL,\n `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,\n `z` double DEFAULT NULL,\n `scale` double DEFAULT NULL,\n `distance` double DEFAULT NULL,\n `distance_modulus` double DEFAULT NULL,\n `photoZ` double DEFAULT NULL,\n `photoZErr` double DEFAULT NULL,\n `association_type` varchar(45) DEFAULT NULL,\n `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,\n `physical_separation_kpc` double DEFAULT NULL,\n `catalogue_object_type` varchar(45) DEFAULT NULL,\n `catalogue_object_subtype` varchar(45) DEFAULT NULL,\n `association_rank` int(11) DEFAULT NULL,\n `catalogue_table_name` varchar(100) DEFAULT NULL,\n `catalogue_view_name` varchar(100) DEFAULT NULL,\n `rank` int(11) DEFAULT NULL,\n `rankScore` double DEFAULT NULL,\n `search_name` varchar(100) DEFAULT NULL,\n `major_axis_arcsec` double DEFAULT NULL,\n `direct_distance` double DEFAULT NULL,\n `direct_distance_scale` double DEFAULT NULL,\n `direct_distance_modulus` double DEFAULT NULL,\n `raDeg` double DEFAULT NULL,\n `decDeg` double DEFAULT NULL,\n `original_search_radius_arcsec` double DEFAULT NULL,\n `catalogue_view_id` int(11) DEFAULT NULL,\n `U` double DEFAULT NULL,\n `UErr` double DEFAULT NULL,\n `B` double DEFAULT NULL,\n `BErr` double DEFAULT NULL,\n `V` double DEFAULT NULL,\n `VErr` double DEFAULT NULL,\n `R` double DEFAULT NULL,\n `RErr` double DEFAULT NULL,\n `I` double DEFAULT NULL,\n `IErr` double DEFAULT NULL,\n `J` double DEFAULT NULL,\n `JErr` double DEFAULT NULL,\n `H` double DEFAULT NULL,\n `HErr` double DEFAULT NULL,\n `K` double DEFAULT NULL,\n `KErr` double DEFAULT NULL,\n `_u` double DEFAULT NULL,\n `_uErr` double DEFAULT NULL,\n `_g` double DEFAULT NULL,\n `_gErr` double DEFAULT NULL,\n `_r` double DEFAULT NULL,\n `_rErr` double DEFAULT NULL,\n `_i` double DEFAULT NULL,\n `_iErr` double DEFAULT NULL,\n `_z` double DEFAULT NULL,\n `_zErr` double DEFAULT NULL,\n `_y` double DEFAULT NULL,\n `_yErr` double DEFAULT NULL,\n `G` double DEFAULT NULL,\n `GErr` double DEFAULT NULL,\n `unkMag` double DEFAULT NULL,\n `unkMagErr` double DEFAULT NULL,\n `dateLastModified` datetime DEFAULT CURRENT_TIMESTAMP,\n `updated` TINYINT NULL DEFAULT 0,\n `classificationReliability` TINYINT NULL DEFAULT NULL,\n `transientAbsMag` DOUBLE NULL DEFAULT NULL,\n PRIMARY KEY (`id`),\n KEY `key_transient_object_id` (`transient_object_id`),\n KEY `key_catalogue_object_id` (`catalogue_object_id`),\n KEY `idx_separationArcsec` (`separationArcsec`),\n KEY `idx_rank` (`rank`)\n) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8;\n\n\nCREATE TABLE IF NOT EXISTS `sherlock_classifications` (\n `transient_object_id` bigint(20) NOT NULL,\n `classification` varchar(45) DEFAULT NULL,\n `annotation` TEXT COLLATE utf8_unicode_ci DEFAULT NULL,\n `summary` VARCHAR(50) COLLATE utf8_unicode_ci DEFAULT NULL,\n `separationArcsec` DOUBLE DEFAULT NULL,\n `matchVerified` TINYINT NULL DEFAULT NULL,\n `developmentComment` VARCHAR(100) NULL,\n `dateLastModified` datetime DEFAULT CURRENT_TIMESTAMP,\n `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,\n `updated` varchar(45) DEFAULT '0',\n PRIMARY KEY (`transient_object_id`),\n KEY `key_transient_object_id` (`transient_object_id`),\n KEY 
`idx_summary` (`summary`),\n KEY `idx_classification` (`classification`)\n) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;\n\n\"\"\"",
"%",
"locals",
"(",
")",
"# A FIX FOR MYSQL VERSIONS < 5.6",
"triggers",
"=",
"[",
"]",
"if",
"float",
"(",
"self",
".",
"dbVersions",
"[",
"\"transients\"",
"]",
"[",
":",
"3",
"]",
")",
"<",
"5.6",
":",
"createStatement",
"=",
"createStatement",
".",
"replace",
"(",
"\"`dateLastModified` datetime DEFAULT CURRENT_TIMESTAMP,\"",
",",
"\"`dateLastModified` datetime DEFAULT NULL,\"",
")",
"createStatement",
"=",
"createStatement",
".",
"replace",
"(",
"\"`dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,\"",
",",
"\"`dateCreated` datetime DEFAULT NULL,\"",
")",
"triggers",
".",
"append",
"(",
"\"\"\"\nCREATE TRIGGER dateCreated\nBEFORE INSERT ON `%(crossmatchTable)s`\nFOR EACH ROW\nBEGIN\n IF NEW.dateCreated IS NULL THEN\n SET NEW.dateCreated = NOW();\n SET NEW.dateLastModified = NOW();\n END IF;\nEND\"\"\"",
"%",
"locals",
"(",
")",
")",
"try",
":",
"writequery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"createStatement",
",",
"dbConn",
"=",
"self",
".",
"transientsDbConn",
",",
"Force",
"=",
"True",
")",
"except",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"Could not create table (`%(crossmatchTable)s`). Probably already exist.\"",
"%",
"locals",
"(",
")",
")",
"sqlQuery",
"=",
"u\"\"\"\n SHOW TRIGGERS;\n \"\"\"",
"%",
"locals",
"(",
")",
"rows",
"=",
"readquery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"transientsDbConn",
",",
")",
"# DON'T ADD TRIGGERS IF THEY ALREADY EXIST",
"for",
"r",
"in",
"rows",
":",
"if",
"r",
"[",
"\"Trigger\"",
"]",
"in",
"(",
"\"sherlock_classifications_BEFORE_INSERT\"",
",",
"\"sherlock_classifications_AFTER_INSERT\"",
")",
":",
"return",
"None",
"triggers",
".",
"append",
"(",
"\"\"\"CREATE TRIGGER `sherlock_classifications_BEFORE_INSERT` BEFORE INSERT ON `sherlock_classifications` FOR EACH ROW\nBEGIN\n IF new.classification = \"ORPHAN\" THEN\n SET new.annotation = \"The transient location is not matched against any known catalogued source\", new.summary = \"No catalogued match\";\n END IF;\nEND\"\"\"",
"%",
"locals",
"(",
")",
")",
"triggers",
".",
"append",
"(",
"\"\"\"CREATE TRIGGER `sherlock_classifications_AFTER_INSERT` AFTER INSERT ON `sherlock_classifications` FOR EACH ROW\nBEGIN\n update `%(transientTable)s` set `%(transientTableClassCol)s` = new.classification\n where `%(transientTableIdCol)s` = new.transient_object_id;\nEND\"\"\"",
"%",
"locals",
"(",
")",
")",
"for",
"t",
"in",
"triggers",
":",
"try",
":",
"writequery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"t",
",",
"dbConn",
"=",
"self",
".",
"transientsDbConn",
",",
"Force",
"=",
"True",
")",
"except",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"Could not create trigger (`%(crossmatchTable)s`). Probably already exist.\"",
"%",
"locals",
"(",
")",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_create_tables_if_not_exist`` method'",
")",
"return",
"None"
] | *create the sherlock helper tables if they don't yet exist*
**Key Arguments:**
# -
**Return:**
- None
**Usage:**
.. todo::
- add usage info
- create a sublime snippet for usage
- write a command-line tool for this method
- update package tutorial with command-line tool info if needed
.. code-block:: python
usage code
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring | [
"*",
"create",
"the",
"sherlock",
"helper",
"tables",
"if",
"they",
"don",
"t",
"yet",
"exist",
"*"
] | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/transient_classifier.py#L1613-L1823 |
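The MySQL < 5.6 workaround in `_create_tables_if_not_exist` hinges on a crude version parse: the first three characters of the reported server version string are cast to float. A sketch of that guard in isolation (the version strings are examples):

```python
# The [:3] slice assumes single-digit major.minor versions such as
# "5.5.62" -> "5.5"; a hypothetical "5.10.x" would mis-parse as 5.1,
# though no such MySQL release exists in practice.
for version in ("5.5.62", "5.6.41", "5.7.29"):
    needs_trigger_fix = float(version[:3]) < 5.6
    print(version, "->",
          "emulate CURRENT_TIMESTAMP with a trigger" if needs_trigger_fix
          else "use native CURRENT_TIMESTAMP defaults")
```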
Clinical-Genomics/trailblazer | trailblazer/cli/check.py | check | def check(context: click.Context, family: str):
"""Delete an analysis log from the database."""
analysis_obj = context.obj['store'].analyses(family=family).first()
if analysis_obj is None:
LOG.error('no analysis found')
context.abort()
config_path = Path(analysis_obj.config_path)
if not config_path.exists():
LOG.error(f"analysis config not found: {config_path}")
context.abort()
config_raw = ruamel.yaml.safe_load(config_path.open())
config_data = files.parse_config(config_raw)
sampleinfo_raw = ruamel.yaml.safe_load(Path(config_data['sampleinfo_path']).open())
sampleinfo_data = files.parse_sampleinfo(sampleinfo_raw)
qcmetrics_path = Path(sampleinfo_data['qcmetrics_path'])
if not qcmetrics_path.exists():
LOG.error(f"qc metrics not found: {str(qcmetrics_path)}")
context.abort()
qcmetrics_raw = ruamel.yaml.safe_load(qcmetrics_path.open())
qcmetrics_data = files.parse_qcmetrics(qcmetrics_raw)
samples = {
'sample': [],
'type': [],
'ped': [],
'chanjo': [],
'peddy': [],
'plink': [],
'duplicates': [],
}
for sample_data in config_data['samples']:
LOG.debug(f"{sample_data['id']}: parse analysis config")
samples['sample'].append(sample_data['id'])
samples['type'].append(sample_data['type'])
for sample_data in sampleinfo_data['samples']:
LOG.debug(f"{sample_data['id']}: parse sample info")
samples['ped'].append(sample_data['sex'])
with Path(sample_data['chanjo_sexcheck']).open() as chanjo_handle:
sexcheck_data = files.parse_chanjo_sexcheck(chanjo_handle)
predicted_sex = sexcheck_data['predicted_sex']
xy_ratio = sexcheck_data['y_coverage'] / sexcheck_data['x_coverage']
samples['chanjo'].append(f"{predicted_sex} ({xy_ratio:.3f})")
for sample_data in qcmetrics_data['samples']:
LOG.debug(f"{sample_data['id']}: parse qc metrics")
samples['plink'].append(sample_data['plink_sex'])
duplicates_percent = sample_data['duplicates'] * 100
samples['duplicates'].append(f"{duplicates_percent:.3f}%")
peddy_path = Path(sampleinfo_data['peddy']['sex_check'])
if peddy_path.exists():
with peddy_path.open() as sexcheck_handle:
peddy_data = files.parse_peddy_sexcheck(sexcheck_handle)
for sample_id in samples['sample']:
LOG.debug(f"{sample_id}: parse peddy")
predicted_sex = peddy_data[sample_id]['predicted_sex']
het_ratio = peddy_data[sample_id]['het_ratio']
samples['peddy'].append(f"{predicted_sex} ({het_ratio})")
else:
LOG.warning(f"missing peddy output: {peddy_path}")
print(tabulate(samples, headers='keys', tablefmt='psql')) | python | def check(context: click.Context, family: str):
"""Delete an analysis log from the database."""
analysis_obj = context.obj['store'].analyses(family=family).first()
if analysis_obj is None:
LOG.error('no analysis found')
context.abort()
config_path = Path(analysis_obj.config_path)
if not config_path.exists():
LOG.error(f"analysis config not found: {config_path}")
context.abort()
config_raw = ruamel.yaml.safe_load(config_path.open())
config_data = files.parse_config(config_raw)
sampleinfo_raw = ruamel.yaml.safe_load(Path(config_data['sampleinfo_path']).open())
sampleinfo_data = files.parse_sampleinfo(sampleinfo_raw)
qcmetrics_path = Path(sampleinfo_data['qcmetrics_path'])
if not qcmetrics_path.exists():
LOG.error(f"qc metrics not found: {str(qcmetrics_path)}")
context.abort()
qcmetrics_raw = ruamel.yaml.safe_load(qcmetrics_path.open())
qcmetrics_data = files.parse_qcmetrics(qcmetrics_raw)
samples = {
'sample': [],
'type': [],
'ped': [],
'chanjo': [],
'peddy': [],
'plink': [],
'duplicates': [],
}
for sample_data in config_data['samples']:
LOG.debug(f"{sample_data['id']}: parse analysis config")
samples['sample'].append(sample_data['id'])
samples['type'].append(sample_data['type'])
for sample_data in sampleinfo_data['samples']:
LOG.debug(f"{sample_data['id']}: parse sample info")
samples['ped'].append(sample_data['sex'])
with Path(sample_data['chanjo_sexcheck']).open() as chanjo_handle:
sexcheck_data = files.parse_chanjo_sexcheck(chanjo_handle)
predicted_sex = sexcheck_data['predicted_sex']
xy_ratio = sexcheck_data['y_coverage'] / sexcheck_data['x_coverage']
samples['chanjo'].append(f"{predicted_sex} ({xy_ratio:.3f})")
for sample_data in qcmetrics_data['samples']:
LOG.debug(f"{sample_data['id']}: parse qc metrics")
samples['plink'].append(sample_data['plink_sex'])
duplicates_percent = sample_data['duplicates'] * 100
samples['duplicates'].append(f"{duplicates_percent:.3f}%")
peddy_path = Path(sampleinfo_data['peddy']['sex_check'])
if peddy_path.exists():
with peddy_path.open() as sexcheck_handle:
peddy_data = files.parse_peddy_sexcheck(sexcheck_handle)
for sample_id in samples['sample']:
LOG.debug(f"{sample_id}: parse peddy")
predicted_sex = peddy_data[sample_id]['predicted_sex']
het_ratio = peddy_data[sample_id]['het_ratio']
samples['peddy'].append(f"{predicted_sex} ({het_ratio})")
else:
LOG.warning(f"missing peddy output: {peddy_path}")
print(tabulate(samples, headers='keys', tablefmt='psql')) | [
"def",
"check",
"(",
"context",
":",
"click",
".",
"Context",
",",
"family",
":",
"str",
")",
":",
"analysis_obj",
"=",
"context",
".",
"obj",
"[",
"'store'",
"]",
".",
"analyses",
"(",
"family",
"=",
"family",
")",
".",
"first",
"(",
")",
"if",
"analysis_obj",
"is",
"None",
":",
"LOG",
".",
"error",
"(",
"'no analysis found'",
")",
"context",
".",
"abort",
"(",
")",
"config_path",
"=",
"Path",
"(",
"analysis_obj",
".",
"config_path",
")",
"if",
"not",
"config_path",
".",
"exists",
"(",
")",
":",
"LOG",
".",
"error",
"(",
"f\"analysis config not found: {config_path}\"",
")",
"context",
".",
"abort",
"(",
")",
"config_raw",
"=",
"ruamel",
".",
"yaml",
".",
"safe_load",
"(",
"config_path",
".",
"open",
"(",
")",
")",
"config_data",
"=",
"files",
".",
"parse_config",
"(",
"config_raw",
")",
"sampleinfo_raw",
"=",
"ruamel",
".",
"yaml",
".",
"safe_load",
"(",
"Path",
"(",
"config_data",
"[",
"'sampleinfo_path'",
"]",
")",
".",
"open",
"(",
")",
")",
"sampleinfo_data",
"=",
"files",
".",
"parse_sampleinfo",
"(",
"sampleinfo_raw",
")",
"qcmetrics_path",
"=",
"Path",
"(",
"sampleinfo_data",
"[",
"'qcmetrics_path'",
"]",
")",
"if",
"not",
"qcmetrics_path",
".",
"exists",
"(",
")",
":",
"LOG",
".",
"error",
"(",
"f\"qc metrics not found: {str(qcmetrics_path)}\"",
")",
"context",
".",
"abort",
"(",
")",
"qcmetrics_raw",
"=",
"ruamel",
".",
"yaml",
".",
"safe_load",
"(",
"qcmetrics_path",
".",
"open",
"(",
")",
")",
"qcmetrics_data",
"=",
"files",
".",
"parse_qcmetrics",
"(",
"qcmetrics_raw",
")",
"samples",
"=",
"{",
"'sample'",
":",
"[",
"]",
",",
"'type'",
":",
"[",
"]",
",",
"'ped'",
":",
"[",
"]",
",",
"'chanjo'",
":",
"[",
"]",
",",
"'peddy'",
":",
"[",
"]",
",",
"'plink'",
":",
"[",
"]",
",",
"'duplicates'",
":",
"[",
"]",
",",
"}",
"for",
"sample_data",
"in",
"config_data",
"[",
"'samples'",
"]",
":",
"LOG",
".",
"debug",
"(",
"f\"{sample_data['id']}: parse analysis config\"",
")",
"samples",
"[",
"'sample'",
"]",
".",
"append",
"(",
"sample_data",
"[",
"'id'",
"]",
")",
"samples",
"[",
"'type'",
"]",
".",
"append",
"(",
"sample_data",
"[",
"'type'",
"]",
")",
"for",
"sample_data",
"in",
"sampleinfo_data",
"[",
"'samples'",
"]",
":",
"LOG",
".",
"debug",
"(",
"f\"{sample_data['id']}: parse sample info\"",
")",
"samples",
"[",
"'ped'",
"]",
".",
"append",
"(",
"sample_data",
"[",
"'sex'",
"]",
")",
"with",
"Path",
"(",
"sample_data",
"[",
"'chanjo_sexcheck'",
"]",
")",
".",
"open",
"(",
")",
"as",
"chanjo_handle",
":",
"sexcheck_data",
"=",
"files",
".",
"parse_chanjo_sexcheck",
"(",
"chanjo_handle",
")",
"predicted_sex",
"=",
"sexcheck_data",
"[",
"'predicted_sex'",
"]",
"xy_ratio",
"=",
"sexcheck_data",
"[",
"'y_coverage'",
"]",
"/",
"sexcheck_data",
"[",
"'x_coverage'",
"]",
"samples",
"[",
"'chanjo'",
"]",
".",
"append",
"(",
"f\"{predicted_sex} ({xy_ratio:.3f})\"",
")",
"for",
"sample_data",
"in",
"qcmetrics_data",
"[",
"'samples'",
"]",
":",
"LOG",
".",
"debug",
"(",
"f\"{sample_data['id']}: parse qc metrics\"",
")",
"samples",
"[",
"'plink'",
"]",
".",
"append",
"(",
"sample_data",
"[",
"'plink_sex'",
"]",
")",
"duplicates_percent",
"=",
"sample_data",
"[",
"'duplicates'",
"]",
"*",
"100",
"samples",
"[",
"'duplicates'",
"]",
".",
"append",
"(",
"f\"{duplicates_percent:.3f}%\"",
")",
"peddy_path",
"=",
"Path",
"(",
"sampleinfo_data",
"[",
"'peddy'",
"]",
"[",
"'sex_check'",
"]",
")",
"if",
"peddy_path",
".",
"exists",
"(",
")",
":",
"with",
"peddy_path",
".",
"open",
"(",
")",
"as",
"sexcheck_handle",
":",
"peddy_data",
"=",
"files",
".",
"parse_peddy_sexcheck",
"(",
"sexcheck_handle",
")",
"for",
"sample_id",
"in",
"samples",
"[",
"'sample'",
"]",
":",
"LOG",
".",
"debug",
"(",
"f\"{sample_id}: parse peddy\"",
")",
"predicted_sex",
"=",
"peddy_data",
"[",
"sample_id",
"]",
"[",
"'predicted_sex'",
"]",
"het_ratio",
"=",
"peddy_data",
"[",
"sample_id",
"]",
"[",
"'het_ratio'",
"]",
"samples",
"[",
"'peddy'",
"]",
".",
"append",
"(",
"f\"{predicted_sex} ({het_ratio})\"",
")",
"else",
":",
"LOG",
".",
"warning",
"(",
"f\"missing peddy output: {peddy_path}\"",
")",
"print",
"(",
"tabulate",
"(",
"samples",
",",
"headers",
"=",
"'keys'",
",",
"tablefmt",
"=",
"'psql'",
")",
")"
] | Check an analysis for consistent sex predictions. | [
"Delete",
"an",
"analysis",
"log",
"from",
"the",
"database",
"."
] | train | https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/cli/check.py#L16-L84 |
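A hypothetical invocation of the `check` command through click's test runner. It assumes `family` is declared as a CLI argument in the command decorator (not shown in this record) and that `store` is an already-constructed trailblazer store, as `context.obj['store']` requires.

```python
from click.testing import CliRunner

from trailblazer.cli.check import check

runner = CliRunner()
# `store` is assumed: the object the command reads from context.obj
result = runner.invoke(check, ["F0001"], obj={"store": store})
print(result.output)  # psql-style table: ped/chanjo/peddy/plink sex calls
```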
Clinical-Genomics/trailblazer | trailblazer/mip/fastq.py | FastqHandler.name_file | def name_file(lane: int, flowcell: str, sample: str, read: int,
undetermined: bool=False, date: dt.datetime=None, index: str=None) -> str:
"""Name a FASTQ file following MIP conventions."""
flowcell = f"{flowcell}-undetermined" if undetermined else flowcell
date_str = date.strftime('%y%m%d') if date else '171015'
index = index if index else 'XXXXXX'
return f"{lane}_{date_str}_{flowcell}_{sample}_{index}_{read}.fastq.gz" | python | def name_file(lane: int, flowcell: str, sample: str, read: int,
undetermined: bool=False, date: dt.datetime=None, index: str=None) -> str:
"""Name a FASTQ file following MIP conventions."""
flowcell = f"{flowcell}-undetermined" if undetermined else flowcell
date_str = date.strftime('%y%m%d') if date else '171015'
index = index if index else 'XXXXXX'
return f"{lane}_{date_str}_{flowcell}_{sample}_{index}_{read}.fastq.gz" | [
"def",
"name_file",
"(",
"lane",
":",
"int",
",",
"flowcell",
":",
"str",
",",
"sample",
":",
"str",
",",
"read",
":",
"int",
",",
"undetermined",
":",
"bool",
"=",
"False",
",",
"date",
":",
"dt",
".",
"datetime",
"=",
"None",
",",
"index",
":",
"str",
"=",
"None",
")",
"->",
"str",
":",
"flowcell",
"=",
"f\"{flowcell}-undetermined\"",
"if",
"undetermined",
"else",
"flowcell",
"date_str",
"=",
"date",
".",
"strftime",
"(",
"'%y%m%d'",
")",
"if",
"date",
"else",
"'171015'",
"index",
"=",
"index",
"if",
"index",
"else",
"'XXXXXX'",
"return",
"f\"{lane}_{date_str}_{flowcell}_{sample}_{index}_{read}.fastq.gz\""
] | Name a FASTQ file following MIP conventions. | [
"Name",
"a",
"FASTQ",
"file",
"following",
"MIP",
"conventions",
"."
] | train | https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/mip/fastq.py#L13-L19 |
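Because `name_file` takes no `self`, it can be exercised directly from the class; the values below are invented, but the resulting name follows deterministically from the f-string above.

```python
import datetime as dt

from trailblazer.mip.fastq import FastqHandler

name = FastqHandler.name_file(
    lane=1,
    flowcell="HGTFJBCXY",
    sample="ADM1136A1",
    read=1,
    date=dt.datetime(2017, 10, 15),
    index="TGACCA",
)
print(name)  # -> 1_171015_HGTFJBCXY_ADM1136A1_TGACCA_1.fastq.gz
```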
Clinical-Genomics/trailblazer | trailblazer/mip/fastq.py | FastqHandler.link | def link(self, family: str, sample: str, analysis_type: str, files: List[str]):
"""Link FASTQ files for a sample."""
root_dir = Path(self.families_dir) / family / analysis_type / sample / 'fastq'
root_dir.mkdir(parents=True, exist_ok=True)
for fastq_data in files:
fastq_path = Path(fastq_data['path'])
fastq_name = self.name_file(
lane=fastq_data['lane'],
flowcell=fastq_data['flowcell'],
sample=sample,
read=fastq_data['read'],
undetermined=fastq_data['undetermined'],
)
dest_path = root_dir / fastq_name
if not dest_path.exists():
log.info(f"linking: {fastq_path} -> {dest_path}")
dest_path.symlink_to(fastq_path)
else:
log.debug(f"destination path already exists: {dest_path}") | python | def link(self, family: str, sample: str, analysis_type: str, files: List[str]):
"""Link FASTQ files for a sample."""
root_dir = Path(self.families_dir) / family / analysis_type / sample / 'fastq'
root_dir.mkdir(parents=True, exist_ok=True)
for fastq_data in files:
fastq_path = Path(fastq_data['path'])
fastq_name = self.name_file(
lane=fastq_data['lane'],
flowcell=fastq_data['flowcell'],
sample=sample,
read=fastq_data['read'],
undetermined=fastq_data['undetermined'],
)
dest_path = root_dir / fastq_name
if not dest_path.exists():
log.info(f"linking: {fastq_path} -> {dest_path}")
dest_path.symlink_to(fastq_path)
else:
log.debug(f"destination path already exists: {dest_path}") | [
"def",
"link",
"(",
"self",
",",
"family",
":",
"str",
",",
"sample",
":",
"str",
",",
"analysis_type",
":",
"str",
",",
"files",
":",
"List",
"[",
"str",
"]",
")",
":",
"root_dir",
"=",
"Path",
"(",
"self",
".",
"families_dir",
")",
"/",
"family",
"/",
"analysis_type",
"/",
"sample",
"/",
"'fastq'",
"root_dir",
".",
"mkdir",
"(",
"parents",
"=",
"True",
",",
"exist_ok",
"=",
"True",
")",
"for",
"fastq_data",
"in",
"files",
":",
"fastq_path",
"=",
"Path",
"(",
"fastq_data",
"[",
"'path'",
"]",
")",
"fastq_name",
"=",
"self",
".",
"name_file",
"(",
"lane",
"=",
"fastq_data",
"[",
"'lane'",
"]",
",",
"flowcell",
"=",
"fastq_data",
"[",
"'flowcell'",
"]",
",",
"sample",
"=",
"sample",
",",
"read",
"=",
"fastq_data",
"[",
"'read'",
"]",
",",
"undetermined",
"=",
"fastq_data",
"[",
"'undetermined'",
"]",
",",
")",
"dest_path",
"=",
"root_dir",
"/",
"fastq_name",
"if",
"not",
"dest_path",
".",
"exists",
"(",
")",
":",
"log",
".",
"info",
"(",
"f\"linking: {fastq_path} -> {dest_path}\"",
")",
"dest_path",
".",
"symlink_to",
"(",
"fastq_path",
")",
"else",
":",
"log",
".",
"debug",
"(",
"f\"destination path already exists: {dest_path}\"",
")"
] | Link FASTQ files for a sample. | [
"Link",
"FASTQ",
"files",
"for",
"a",
"sample",
"."
] | train | https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/mip/fastq.py#L21-L39 |
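A hedged sketch of calling `link`; the dictionary keys mirror exactly what the loop reads (`path`, `lane`, `flowcell`, `read`, `undetermined`), while the handler construction is an assumption — only the `families_dir` attribute is actually consulted here.

```python
handler = FastqHandler()                     # hypothetical construction
handler.families_dir = "/analyses/families"  # the one attribute link() reads

handler.link(
    family="F0001",
    sample="ADM1136A1",
    analysis_type="wgs",
    files=[{
        "path": "/archive/HGTFJBCXY/ADM1136A1_L001_R1.fastq.gz",
        "lane": 1,
        "flowcell": "HGTFJBCXY",
        "read": 1,
        "undetermined": False,
    }],
)
# symlink created (default date/index placeholders from name_file):
# /analyses/families/F0001/wgs/ADM1136A1/fastq/1_171015_HGTFJBCXY_ADM1136A1_XXXXXX_1.fastq.gz
```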
thespacedoctor/sherlock | sherlock/commonutils/getpackagepath.py | getpackagepath | def getpackagepath():
"""
*Get the root path for this python package - used in unit testing code*
"""
moduleDirectory = os.path.dirname(__file__)
packagePath = os.path.dirname(__file__) + "/../"
return packagePath | python | def getpackagepath():
"""
*Get the root path for this python package - used in unit testing code*
"""
moduleDirectory = os.path.dirname(__file__)
packagePath = os.path.dirname(__file__) + "/../"
return packagePath | [
"def",
"getpackagepath",
"(",
")",
":",
"moduleDirectory",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
"packagePath",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
"+",
"\"/../\"",
"return",
"packagePath"
] | *Get the root path for this python package - used in unit testing code* | [
"*",
"Get",
"the",
"root",
"path",
"for",
"this",
"python",
"package",
"-",
"used",
"in",
"unit",
"testing",
"code",
"*"
] | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/commonutils/getpackagepath.py#L15-L22 |
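Typical use in a unit test, per the docstring; the import path follows the file location given in the record.

```python
from sherlock.commonutils.getpackagepath import getpackagepath

packagePath = getpackagepath()
# e.g. ".../site-packages/sherlock/commonutils/../" -- note the trailing
# relative component, usually passed straight on to os.path operations
```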
pytroll/posttroll | posttroll/bbmcast.py | mcast_sender | def mcast_sender(mcgroup=MC_GROUP):
"""Non-object interface for sending multicast messages.
"""
sock = socket(AF_INET, SOCK_DGRAM)
sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
if _is_broadcast_group(mcgroup):
group = '<broadcast>'
sock.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
elif((int(mcgroup.split(".")[0]) > 239) or
(int(mcgroup.split(".")[0]) < 224)):
raise IOError("Invalid multicast address.")
else:
group = mcgroup
ttl = struct.pack('b', TTL_LOCALNET) # Time-to-live
sock.setsockopt(IPPROTO_IP, IP_MULTICAST_TTL, ttl)
return sock, group | python | def mcast_sender(mcgroup=MC_GROUP):
"""Non-object interface for sending multicast messages.
"""
sock = socket(AF_INET, SOCK_DGRAM)
sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
if _is_broadcast_group(mcgroup):
group = '<broadcast>'
sock.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
elif((int(mcgroup.split(".")[0]) > 239) or
(int(mcgroup.split(".")[0]) < 224)):
raise IOError("Invalid multicast address.")
else:
group = mcgroup
ttl = struct.pack('b', TTL_LOCALNET) # Time-to-live
sock.setsockopt(IPPROTO_IP, IP_MULTICAST_TTL, ttl)
return sock, group | [
"def",
"mcast_sender",
"(",
"mcgroup",
"=",
"MC_GROUP",
")",
":",
"sock",
"=",
"socket",
"(",
"AF_INET",
",",
"SOCK_DGRAM",
")",
"sock",
".",
"setsockopt",
"(",
"SOL_SOCKET",
",",
"SO_REUSEADDR",
",",
"1",
")",
"if",
"_is_broadcast_group",
"(",
"mcgroup",
")",
":",
"group",
"=",
"'<broadcast>'",
"sock",
".",
"setsockopt",
"(",
"SOL_SOCKET",
",",
"SO_BROADCAST",
",",
"1",
")",
"elif",
"(",
"(",
"int",
"(",
"mcgroup",
".",
"split",
"(",
"\".\"",
")",
"[",
"0",
"]",
")",
">",
"239",
")",
"or",
"(",
"int",
"(",
"mcgroup",
".",
"split",
"(",
"\".\"",
")",
"[",
"0",
"]",
")",
"<",
"224",
")",
")",
":",
"raise",
"IOError",
"(",
"\"Invalid multicast address.\"",
")",
"else",
":",
"group",
"=",
"mcgroup",
"ttl",
"=",
"struct",
".",
"pack",
"(",
"'b'",
",",
"TTL_LOCALNET",
")",
"# Time-to-live",
"sock",
".",
"setsockopt",
"(",
"IPPROTO_IP",
",",
"IP_MULTICAST_TTL",
",",
"ttl",
")",
"return",
"sock",
",",
"group"
] | Non-object interface for sending multicast messages. | [
"Non",
"-",
"object",
"interface",
"for",
"sending",
"multicast",
"messages",
"."
] | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/bbmcast.py#L80-L95 |
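A minimal send sketch for the function above; the port number is an arbitrary example, not a posttroll default.

```python
from posttroll.bbmcast import mcast_sender

sock, group = mcast_sender()  # default multicast group, or '<broadcast>'
sock.sendto(b"hello multicast", (group, 21200))  # 21200: example port
sock.close()
```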
pytroll/posttroll | posttroll/bbmcast.py | mcast_receiver | def mcast_receiver(port, mcgroup=MC_GROUP):
"""Open a UDP socket, bind it to a port and select a multicast group.
"""
if _is_broadcast_group(mcgroup):
group = None
else:
group = mcgroup
# Create a socket
sock = socket(AF_INET, SOCK_DGRAM)
# Allow multiple copies of this program on one machine
# (not strictly needed)
sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
if group:
sock.setsockopt(SOL_IP, IP_MULTICAST_TTL, TTL_LOCALNET) # default
sock.setsockopt(SOL_IP, IP_MULTICAST_LOOP, 1) # default
# Bind it to the port
sock.bind(('', port))
# Look up multicast group address in name server
# (doesn't hurt if it is already in ddd.ddd.ddd.ddd format)
if group:
group = gethostbyname(group)
# Construct binary group address
bytes_ = [int(b) for b in group.split(".")]
grpaddr = 0
for byte in bytes_:
grpaddr = (grpaddr << 8) | byte
# Construct struct mreq from grpaddr and ifaddr
ifaddr = INADDR_ANY
mreq = struct.pack('!LL', grpaddr, ifaddr)
# Add group membership
sock.setsockopt(IPPROTO_IP, IP_ADD_MEMBERSHIP, mreq)
return sock, group or '<broadcast>' | python | def mcast_receiver(port, mcgroup=MC_GROUP):
"""Open a UDP socket, bind it to a port and select a multicast group.
"""
if _is_broadcast_group(mcgroup):
group = None
else:
group = mcgroup
# Create a socket
sock = socket(AF_INET, SOCK_DGRAM)
# Allow multiple copies of this program on one machine
# (not strictly needed)
sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
if group:
sock.setsockopt(SOL_IP, IP_MULTICAST_TTL, TTL_LOCALNET) # default
sock.setsockopt(SOL_IP, IP_MULTICAST_LOOP, 1) # default
# Bind it to the port
sock.bind(('', port))
# Look up multicast group address in name server
# (doesn't hurt if it is already in ddd.ddd.ddd.ddd format)
if group:
group = gethostbyname(group)
# Construct binary group address
bytes_ = [int(b) for b in group.split(".")]
grpaddr = 0
for byte in bytes_:
grpaddr = (grpaddr << 8) | byte
# Construct struct mreq from grpaddr and ifaddr
ifaddr = INADDR_ANY
mreq = struct.pack('!LL', grpaddr, ifaddr)
# Add group membership
sock.setsockopt(IPPROTO_IP, IP_ADD_MEMBERSHIP, mreq)
return sock, group or '<broadcast>' | [
"def",
"mcast_receiver",
"(",
"port",
",",
"mcgroup",
"=",
"MC_GROUP",
")",
":",
"if",
"_is_broadcast_group",
"(",
"mcgroup",
")",
":",
"group",
"=",
"None",
"else",
":",
"group",
"=",
"mcgroup",
"# Create a socket",
"sock",
"=",
"socket",
"(",
"AF_INET",
",",
"SOCK_DGRAM",
")",
"# Allow multiple copies of this program on one machine",
"# (not strictly needed)",
"sock",
".",
"setsockopt",
"(",
"SOL_SOCKET",
",",
"SO_REUSEADDR",
",",
"1",
")",
"if",
"group",
":",
"sock",
".",
"setsockopt",
"(",
"SOL_IP",
",",
"IP_MULTICAST_TTL",
",",
"TTL_LOCALNET",
")",
"# default",
"sock",
".",
"setsockopt",
"(",
"SOL_IP",
",",
"IP_MULTICAST_LOOP",
",",
"1",
")",
"# default",
"# Bind it to the port",
"sock",
".",
"bind",
"(",
"(",
"''",
",",
"port",
")",
")",
"# Look up multicast group address in name server",
"# (doesn't hurt if it is already in ddd.ddd.ddd.ddd format)",
"if",
"group",
":",
"group",
"=",
"gethostbyname",
"(",
"group",
")",
"# Construct binary group address",
"bytes_",
"=",
"[",
"int",
"(",
"b",
")",
"for",
"b",
"in",
"group",
".",
"split",
"(",
"\".\"",
")",
"]",
"grpaddr",
"=",
"0",
"for",
"byte",
"in",
"bytes_",
":",
"grpaddr",
"=",
"(",
"grpaddr",
"<<",
"8",
")",
"|",
"byte",
"# Construct struct mreq from grpaddr and ifaddr",
"ifaddr",
"=",
"INADDR_ANY",
"mreq",
"=",
"struct",
".",
"pack",
"(",
"'!LL'",
",",
"grpaddr",
",",
"ifaddr",
")",
"# Add group membership",
"sock",
".",
"setsockopt",
"(",
"IPPROTO_IP",
",",
"IP_ADD_MEMBERSHIP",
",",
"mreq",
")",
"return",
"sock",
",",
"group",
"or",
"'<broadcast>'"
] | Open a UDP socket, bind it to a port and select a multicast group. | [
"Open",
"a",
"UDP",
"socket",
"bind",
"it",
"to",
"a",
"port",
"and",
"select",
"a",
"multicast",
"group",
"."
] | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/bbmcast.py#L135-L175 |
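A matching receive sketch for the sender example above; the port is again an assumption and must agree with the one the sender targets.

```python
# Hypothetical usage: blocks until one datagram arrives on the group/port.
from posttroll.bbmcast import mcast_receiver

sock, group = mcast_receiver(21200)
try:
    data, (host, port) = sock.recvfrom(1024)
    print("got %r from %s:%s" % (data, host, port))
finally:
    sock.close()
```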
pytroll/posttroll | posttroll/bbmcast.py | MulticastReceiver.close | def close(self):
"""Close the receiver.
"""
self.socket.setsockopt(SOL_SOCKET, SO_LINGER,
struct.pack('ii', 1, 1))
self.socket.close() | python | def close(self):
"""Close the receiver.
"""
self.socket.setsockopt(SOL_SOCKET, SO_LINGER,
struct.pack('ii', 1, 1))
self.socket.close() | [
"def",
"close",
"(",
"self",
")",
":",
"self",
".",
"socket",
".",
"setsockopt",
"(",
"SOL_SOCKET",
",",
"SO_LINGER",
",",
"struct",
".",
"pack",
"(",
"'ii'",
",",
"1",
",",
"1",
")",
")",
"self",
".",
"socket",
".",
"close",
"(",
")"
] | Close the receiver. | [
"Close",
"the",
"receiver",
"."
] | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/bbmcast.py#L125-L130 |
contentful-labs/contentful.py | contentful/cda/errors.py | api_exception | def api_exception(http_code):
"""Convenience decorator to associate HTTP status codes with :class:`.ApiError` subclasses.
:param http_code: (int) HTTP status code.
:return: wrapper function.
"""
def wrapper(*args):
code = args[0]
ErrorMapping.mapping[http_code] = code
return code
return wrapper | python | def api_exception(http_code):
"""Convenience decorator to associate HTTP status codes with :class:`.ApiError` subclasses.
:param http_code: (int) HTTP status code.
:return: wrapper function.
"""
def wrapper(*args):
code = args[0]
ErrorMapping.mapping[http_code] = code
return code
return wrapper | [
"def",
"api_exception",
"(",
"http_code",
")",
":",
"def",
"wrapper",
"(",
"*",
"args",
")",
":",
"code",
"=",
"args",
"[",
"0",
"]",
"ErrorMapping",
".",
"mapping",
"[",
"http_code",
"]",
"=",
"code",
"return",
"code",
"return",
"wrapper"
] | Convenience decorator to associate HTTP status codes with :class:`.ApiError` subclasses.
:param http_code: (int) HTTP status code.
:return: wrapper function. | [
"Convenience",
"decorator",
"to",
"associate",
"HTTP",
"status",
"codes",
"with",
":",
"class",
":",
".",
"ApiError",
"subclasses",
"."
] | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/errors.py#L23-L33 |
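Because the decorator stores the class in `ErrorMapping.mapping` and returns it unchanged, registering a new error type is one line. The subclass below is invented for illustration; `ApiError` and `ErrorMapping` are the module-local names used in the source above.

```python
# Hypothetical subclass: @api_exception records it under HTTP 404 and
# returns the class unchanged, so normal subclassing still works.
@api_exception(404)
class NotFoundError(ApiError):
    """Resource does not exist."""

assert ErrorMapping.mapping[404] is NotFoundError
```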
Clinical-Genomics/trailblazer | trailblazer/log.py | LogAnalysis._delete_temp_logs | def _delete_temp_logs(self, family_name: str):
"""Delete temporary logs for the current family."""
for temp_log in self.store.analyses(family=family_name, temp=True):
log.debug(f"delete temporary log: {temp_log.id} - {temp_log.status}")
temp_log.delete() | python | def _delete_temp_logs(self, family_name: str):
"""Delete temporary logs for the current family."""
for temp_log in self.store.analyses(family=family_name, temp=True):
log.debug(f"delete temporary log: {temp_log.id} - {temp_log.status}")
temp_log.delete() | [
"def",
"_delete_temp_logs",
"(",
"self",
",",
"family_name",
":",
"str",
")",
":",
"for",
"temp_log",
"in",
"self",
".",
"store",
".",
"analyses",
"(",
"family",
"=",
"family_name",
",",
"temp",
"=",
"True",
")",
":",
"log",
".",
"debug",
"(",
"f\"delete temporary log: {temp_log.id} - {temp_log.status}\"",
")",
"temp_log",
".",
"delete",
"(",
")"
] | Delete temporary logs for the current family. | [
"Delete",
"temporary",
"logs",
"for",
"the",
"current",
"family",
"."
] | train | https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/log.py#L44-L48 |
Clinical-Genomics/trailblazer | trailblazer/log.py | LogAnalysis.parse | def parse(cls, config_data: dict, sampleinfo_data: dict, sacct_jobs: List[dict],
jobs: int) -> dict:
"""Parse information about a run."""
analysis_types = [sample['type'] for sample in config_data['samples']]
run_data = {
'user': config_data['email'],
'family': config_data['family'],
'priority': config_data['priority'],
'started_at': sampleinfo_data['date'],
'version': sampleinfo_data['version'],
'out_dir': config_data['out_dir'],
'config_path': config_data['config_path'],
'type': cls._get_analysis_type(analysis_types),
}
sacct_data, last_job_end = cls._parse_sacct(sacct_jobs, jobs_count=jobs)
run_data.update(sacct_data)
run_data['status'] = cls.get_status(sampleinfo_data['is_finished'],
len(run_data['failed_jobs']))
if run_data['status'] == 'completed':
run_data['completed_at'] = last_job_end
return run_data | python | def parse(cls, config_data: dict, sampleinfo_data: dict, sacct_jobs: List[dict],
jobs: int) -> dict:
"""Parse information about a run."""
analysis_types = [sample['type'] for sample in config_data['samples']]
run_data = {
'user': config_data['email'],
'family': config_data['family'],
'priority': config_data['priority'],
'started_at': sampleinfo_data['date'],
'version': sampleinfo_data['version'],
'out_dir': config_data['out_dir'],
'config_path': config_data['config_path'],
'type': cls._get_analysis_type(analysis_types),
}
sacct_data, last_job_end = cls._parse_sacct(sacct_jobs, jobs_count=jobs)
run_data.update(sacct_data)
run_data['status'] = cls.get_status(sampleinfo_data['is_finished'],
len(run_data['failed_jobs']))
if run_data['status'] == 'completed':
run_data['completed_at'] = last_job_end
return run_data | [
"def",
"parse",
"(",
"cls",
",",
"config_data",
":",
"dict",
",",
"sampleinfo_data",
":",
"dict",
",",
"sacct_jobs",
":",
"List",
"[",
"dict",
"]",
",",
"jobs",
":",
"int",
")",
"->",
"dict",
":",
"analysis_types",
"=",
"[",
"sample",
"[",
"'type'",
"]",
"for",
"sample",
"in",
"config_data",
"[",
"'samples'",
"]",
"]",
"run_data",
"=",
"{",
"'user'",
":",
"config_data",
"[",
"'email'",
"]",
",",
"'family'",
":",
"config_data",
"[",
"'family'",
"]",
",",
"'priority'",
":",
"config_data",
"[",
"'priority'",
"]",
",",
"'started_at'",
":",
"sampleinfo_data",
"[",
"'date'",
"]",
",",
"'version'",
":",
"sampleinfo_data",
"[",
"'version'",
"]",
",",
"'out_dir'",
":",
"config_data",
"[",
"'out_dir'",
"]",
",",
"'config_path'",
":",
"config_data",
"[",
"'config_path'",
"]",
",",
"'type'",
":",
"cls",
".",
"_get_analysis_type",
"(",
"analysis_types",
")",
",",
"}",
"sacct_data",
",",
"last_job_end",
"=",
"cls",
".",
"_parse_sacct",
"(",
"sacct_jobs",
",",
"jobs_count",
"=",
"jobs",
")",
"run_data",
".",
"update",
"(",
"sacct_data",
")",
"run_data",
"[",
"'status'",
"]",
"=",
"cls",
".",
"get_status",
"(",
"sampleinfo_data",
"[",
"'is_finished'",
"]",
",",
"len",
"(",
"run_data",
"[",
"'failed_jobs'",
"]",
")",
")",
"if",
"run_data",
"[",
"'status'",
"]",
"==",
"'completed'",
":",
"run_data",
"[",
"'completed_at'",
"]",
"=",
"last_job_end",
"return",
"run_data"
] | Parse information about a run. | [
"Parse",
"information",
"about",
"a",
"run",
"."
] | train | https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/log.py#L51-L74 |
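A sketch of the inputs `parse()` expects, inferred only from the keys it reads above; all concrete values are invented, and the call assumes `parse` is exposed as a classmethod on `LogAnalysis`.

```python
# Invented input shapes for illustration only.
config_data = {
    'email': 'clinician@example.com',
    'family': 'family17',
    'priority': 'normal',
    'out_dir': '/analyses/family17',
    'config_path': '/analyses/family17/config.yaml',
    'samples': [{'type': 'wgs'}, {'type': 'wgs'}],
}
sampleinfo_data = {'date': '2018-06-01', 'version': 'v7.1.0', 'is_finished': True}

run_data = LogAnalysis.parse(config_data, sampleinfo_data, sacct_jobs=[], jobs=12)
# run_data['type'] == 'wgs'; status and completed_at depend on the sacct rows
```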
Clinical-Genomics/trailblazer | trailblazer/log.py | LogAnalysis._parse_sacct | def _parse_sacct(sacct_jobs: List[dict], jobs_count: int=None):
"""Parse out info from Sacct log."""
failed_jobs = sacct_api.filter_jobs(sacct_jobs, failed=True)
completed_jobs = [job for job in sacct_jobs if job['is_completed']]
last_job_end = completed_jobs[-1]['end'] if len(completed_jobs) > 0 else None
data = {
'jobs': jobs_count,
'completed_jobs': len(completed_jobs),
'progress': (len(completed_jobs) / jobs_count) if jobs_count else None,
'failed_jobs': [{
'slurm_id': job['id'],
'started_at': job['start'],
'elapsed': job['elapsed'],
'status': job['state'].lower(),
'name': job['step'],
'context': job['context'],
} for job in failed_jobs]
}
return data, last_job_end | python | def _parse_sacct(sacct_jobs: List[dict], jobs_count: int=None):
"""Parse out info from Sacct log."""
failed_jobs = sacct_api.filter_jobs(sacct_jobs, failed=True)
completed_jobs = [job for job in sacct_jobs if job['is_completed']]
last_job_end = completed_jobs[-1]['end'] if len(completed_jobs) > 0 else None
data = {
'jobs': jobs_count,
'completed_jobs': len(completed_jobs),
'progress': (len(completed_jobs) / jobs_count) if jobs_count else None,
'failed_jobs': [{
'slurm_id': job['id'],
'started_at': job['start'],
'elapsed': job['elapsed'],
'status': job['state'].lower(),
'name': job['step'],
'context': job['context'],
} for job in failed_jobs]
}
return data, last_job_end | [
"def",
"_parse_sacct",
"(",
"sacct_jobs",
":",
"List",
"[",
"dict",
"]",
",",
"jobs_count",
":",
"int",
"=",
"None",
")",
":",
"failed_jobs",
"=",
"sacct_api",
".",
"filter_jobs",
"(",
"sacct_jobs",
",",
"failed",
"=",
"True",
")",
"completed_jobs",
"=",
"[",
"job",
"for",
"job",
"in",
"sacct_jobs",
"if",
"job",
"[",
"'is_completed'",
"]",
"]",
"last_job_end",
"=",
"completed_jobs",
"[",
"-",
"1",
"]",
"[",
"'end'",
"]",
"if",
"len",
"(",
"completed_jobs",
")",
">",
"0",
"else",
"None",
"data",
"=",
"{",
"'jobs'",
":",
"jobs_count",
",",
"'completed_jobs'",
":",
"len",
"(",
"completed_jobs",
")",
",",
"'progress'",
":",
"(",
"len",
"(",
"completed_jobs",
")",
"/",
"jobs_count",
")",
"if",
"jobs_count",
"else",
"None",
",",
"'failed_jobs'",
":",
"[",
"{",
"'slurm_id'",
":",
"job",
"[",
"'id'",
"]",
",",
"'started_at'",
":",
"job",
"[",
"'start'",
"]",
",",
"'elapsed'",
":",
"job",
"[",
"'elapsed'",
"]",
",",
"'status'",
":",
"job",
"[",
"'state'",
"]",
".",
"lower",
"(",
")",
",",
"'name'",
":",
"job",
"[",
"'step'",
"]",
",",
"'context'",
":",
"job",
"[",
"'context'",
"]",
",",
"}",
"for",
"job",
"in",
"failed_jobs",
"]",
"}",
"return",
"data",
",",
"last_job_end"
] | Parse out info from Sacct log. | [
"Parse",
"out",
"info",
"from",
"Sacct",
"log",
"."
] | train | https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/log.py#L77-L95 |
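A sketch with invented sacct rows, using only the field names the function reads above. Which rows count as failed is delegated to `sacct_api.filter_jobs`, so that part of the expected output is an assumption.

```python
# Invented rows: one completed, one failed, out of a planned four jobs.
sacct_jobs = [
    {'id': 101, 'step': 'bwa_mem', 'start': '10:00', 'end': '10:30',
     'elapsed': 1800, 'state': 'COMPLETED', 'is_completed': True, 'context': ''},
    {'id': 102, 'step': 'markdup', 'start': '10:30', 'end': None,
     'elapsed': 60, 'state': 'FAILED', 'is_completed': False, 'context': 'oom'},
]
data, last_end = LogAnalysis._parse_sacct(sacct_jobs, jobs_count=4)
# data['completed_jobs'] == 1, data['progress'] == 0.25, last_end == '10:30'
```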
Clinical-Genomics/trailblazer | trailblazer/log.py | LogAnalysis._get_analysis_type | def _get_analysis_type(analysis_types: List[str]) -> str:
"""Determine the overall analysis type."""
types_set = set(analysis_types)
return types_set.pop() if len(types_set) == 1 else 'wgs' | python | def _get_analysis_type(analysis_types: List[str]) -> str:
"""Determine the overall analysis type."""
types_set = set(analysis_types)
return types_set.pop() if len(types_set) == 1 else 'wgs' | [
"def",
"_get_analysis_type",
"(",
"analysis_types",
":",
"List",
"[",
"str",
"]",
")",
"->",
"str",
":",
"types_set",
"=",
"set",
"(",
"analysis_types",
")",
"return",
"types_set",
".",
"pop",
"(",
")",
"if",
"len",
"(",
"types_set",
")",
"==",
"1",
"else",
"'wgs'"
] | Determine the overall analysis type. | [
"Determine",
"the",
"overall",
"analysis",
"type",
"."
] | train | https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/log.py#L108-L111 |
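The set-based logic is easy to see in isolation: one shared type wins, and any mixture collapses to `'wgs'`.

```python
# Behaviour follows directly from the set logic above.
assert LogAnalysis._get_analysis_type(['wes', 'wes']) == 'wes'
assert LogAnalysis._get_analysis_type(['wes', 'wgs', 'wts']) == 'wgs'
```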
Clinical-Genomics/trailblazer | trailblazer/log.py | LogAnalysis.build | def build(self, run_data: dict) -> models.Analysis:
"""Build a new Analysis object."""
existing_run = self.store.find_analysis(family=run_data['family'],
started_at=run_data['started_at'],
status=run_data['status'])
if existing_run:
return None
run_data['user'] = self.store.user(run_data['user'])
new_failed_jobs = [self.store.Job(**job) for job in run_data['failed_jobs']]
del run_data['failed_jobs']
new_run = self.store.Analysis(**run_data)
new_run.failed_jobs = new_failed_jobs
return new_run | python | def build(self, run_data: dict) -> models.Analysis:
"""Build a new Analysis object."""
existing_run = self.store.find_analysis(family=run_data['family'],
started_at=run_data['started_at'],
status=run_data['status'])
if existing_run:
return None
run_data['user'] = self.store.user(run_data['user'])
new_failed_jobs = [self.store.Job(**job) for job in run_data['failed_jobs']]
del run_data['failed_jobs']
new_run = self.store.Analysis(**run_data)
new_run.failed_jobs = new_failed_jobs
return new_run | [
"def",
"build",
"(",
"self",
",",
"run_data",
":",
"dict",
")",
"->",
"models",
".",
"Analysis",
":",
"existing_run",
"=",
"self",
".",
"store",
".",
"find_analysis",
"(",
"family",
"=",
"run_data",
"[",
"'family'",
"]",
",",
"started_at",
"=",
"run_data",
"[",
"'started_at'",
"]",
",",
"status",
"=",
"run_data",
"[",
"'status'",
"]",
")",
"if",
"existing_run",
":",
"return",
"None",
"run_data",
"[",
"'user'",
"]",
"=",
"self",
".",
"store",
".",
"user",
"(",
"run_data",
"[",
"'user'",
"]",
")",
"new_failed_jobs",
"=",
"[",
"self",
".",
"store",
".",
"Job",
"(",
"*",
"*",
"job",
")",
"for",
"job",
"in",
"run_data",
"[",
"'failed_jobs'",
"]",
"]",
"del",
"run_data",
"[",
"'failed_jobs'",
"]",
"new_run",
"=",
"self",
".",
"store",
".",
"Analysis",
"(",
"*",
"*",
"run_data",
")",
"new_run",
".",
"failed_jobs",
"=",
"new_failed_jobs",
"return",
"new_run"
] | Build a new Analysis object. | [
"Build",
"a",
"new",
"Analysis",
"object",
"."
] | train | https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/log.py#L113-L126 |
capless/valley | valley/mixins.py | VariableMixin.get_default_value | def get_default_value(self):
""" return default value """
default = self.default_value
if isinstance(default, collections.Callable):
default = default()
return default | python | def get_default_value(self):
""" return default value """
default = self.default_value
if isinstance(default, collections.Callable):
default = default()
return default | [
"def",
"get_default_value",
"(",
"self",
")",
":",
"default",
"=",
"self",
".",
"default_value",
"if",
"isinstance",
"(",
"default",
",",
"collections",
".",
"Callable",
")",
":",
"default",
"=",
"default",
"(",
")",
"return",
"default"
] | return default value | [
"return",
"default",
"value"
] | train | https://github.com/capless/valley/blob/491e4203e428a9e92264e204d44a1df96a570bbc/valley/mixins.py#L32-L37 |
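A sketch of the lazy-default behaviour. `Prop` is an invented host class and the mixin may have other requirements in practice. Note also that `collections.Callable` is a Python-2-era alias; on Python 3 it lives in `collections.abc` and the old alias is removed in 3.10, so modern code would test `callable(default)` instead.

```python
# Sketch, assuming VariableMixin only needs a default_value attribute here.
import datetime

class Prop(VariableMixin):
    def __init__(self, default_value=None):
        self.default_value = default_value

assert Prop(42).get_default_value() == 42          # plain value passed through
Prop(datetime.datetime.now).get_default_value()    # callable invoked per access
```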
OpenEnergyPlatform/oedialect | oedialect/dialect.py | OEExecutionContext._init_compiled | def _init_compiled(cls, dialect, connection, dbapi_connection,
compiled, parameters):
"""Initialize execution context for a Compiled construct."""
self = cls.__new__(cls)
self.root_connection = connection
self._dbapi_connection = dbapi_connection
self.dialect = connection.dialect
self.compiled = compiled
# this should be caught in the engine before
# we get here
assert compiled.can_execute
self.execution_options = compiled.execution_options.union(
connection._execution_options)
self.result_column_struct = (
compiled._result_columns, compiled._ordered_columns,
compiled._textual_ordered_columns)
self.unicode_statement = util.text_type(compiled)
if not dialect.supports_unicode_statements:
self.statement = self.unicode_statement.encode(
self.dialect.encoding)
else:
self.statement = self.unicode_statement
self.isinsert = compiled.isinsert
self.isupdate = compiled.isupdate
self.isdelete = compiled.isdelete
self.is_text = compiled.isplaintext
if not parameters:
self.compiled_parameters = [compiled.construct_params()]
else:
self.compiled_parameters = \
[compiled.construct_params(m, _group_number=grp) for
grp, m in enumerate(parameters)]
self.executemany = len(parameters) > 1
self.cursor = self.create_cursor()
if self.isinsert or self.isupdate or self.isdelete:
self.is_crud = True
self._is_explicit_returning = bool(compiled.statement._returning)
self._is_implicit_returning = bool(
compiled.returning and not compiled.statement._returning)
if self.compiled.insert_prefetch or self.compiled.update_prefetch:
if self.executemany:
self._process_executemany_defaults()
else:
self._process_executesingle_defaults()
processors = compiled._bind_processors
# Convert the dictionary of bind parameter values
# into a dict or list to be sent to the DBAPI's
# execute() or executemany() method.
parameters = []
if dialect.positional:
for compiled_params in self.compiled_parameters:
param = []
for key in self.compiled.positiontup:
if key in processors:
param.append(processors[key](compiled_params[key]))
else:
param.append(compiled_params[key])
parameters.append(dialect.execute_sequence_format(param))
else:
encode = not dialect.supports_unicode_statements
for compiled_params in self.compiled_parameters:
if encode:
param = dict(
(
dialect._encoder(key)[0],
processors[key](compiled_params[key])
if key in processors
else compiled_params[key]
)
for key in compiled_params
)
else:
param = dict(
(
key,
processors[key](compiled_params[key])
if key in processors
else compiled_params[key]
)
for key in compiled_params
)
parameters.append(param)
self.parameters = dialect.execute_sequence_format(parameters)
self.statement = compiled
return self | python | def _init_compiled(cls, dialect, connection, dbapi_connection,
compiled, parameters):
"""Initialize execution context for a Compiled construct."""
self = cls.__new__(cls)
self.root_connection = connection
self._dbapi_connection = dbapi_connection
self.dialect = connection.dialect
self.compiled = compiled
# this should be caught in the engine before
# we get here
assert compiled.can_execute
self.execution_options = compiled.execution_options.union(
connection._execution_options)
self.result_column_struct = (
compiled._result_columns, compiled._ordered_columns,
compiled._textual_ordered_columns)
self.unicode_statement = util.text_type(compiled)
if not dialect.supports_unicode_statements:
self.statement = self.unicode_statement.encode(
self.dialect.encoding)
else:
self.statement = self.unicode_statement
self.isinsert = compiled.isinsert
self.isupdate = compiled.isupdate
self.isdelete = compiled.isdelete
self.is_text = compiled.isplaintext
if not parameters:
self.compiled_parameters = [compiled.construct_params()]
else:
self.compiled_parameters = \
[compiled.construct_params(m, _group_number=grp) for
grp, m in enumerate(parameters)]
self.executemany = len(parameters) > 1
self.cursor = self.create_cursor()
if self.isinsert or self.isupdate or self.isdelete:
self.is_crud = True
self._is_explicit_returning = bool(compiled.statement._returning)
self._is_implicit_returning = bool(
compiled.returning and not compiled.statement._returning)
if self.compiled.insert_prefetch or self.compiled.update_prefetch:
if self.executemany:
self._process_executemany_defaults()
else:
self._process_executesingle_defaults()
processors = compiled._bind_processors
# Convert the dictionary of bind parameter values
# into a dict or list to be sent to the DBAPI's
# execute() or executemany() method.
parameters = []
if dialect.positional:
for compiled_params in self.compiled_parameters:
param = []
for key in self.compiled.positiontup:
if key in processors:
param.append(processors[key](compiled_params[key]))
else:
param.append(compiled_params[key])
parameters.append(dialect.execute_sequence_format(param))
else:
encode = not dialect.supports_unicode_statements
for compiled_params in self.compiled_parameters:
if encode:
param = dict(
(
dialect._encoder(key)[0],
processors[key](compiled_params[key])
if key in processors
else compiled_params[key]
)
for key in compiled_params
)
else:
param = dict(
(
key,
processors[key](compiled_params[key])
if key in processors
else compiled_params[key]
)
for key in compiled_params
)
parameters.append(param)
self.parameters = dialect.execute_sequence_format(parameters)
self.statement = compiled
return self | [
"def",
"_init_compiled",
"(",
"cls",
",",
"dialect",
",",
"connection",
",",
"dbapi_connection",
",",
"compiled",
",",
"parameters",
")",
":",
"self",
"=",
"cls",
".",
"__new__",
"(",
"cls",
")",
"self",
".",
"root_connection",
"=",
"connection",
"self",
".",
"_dbapi_connection",
"=",
"dbapi_connection",
"self",
".",
"dialect",
"=",
"connection",
".",
"dialect",
"self",
".",
"compiled",
"=",
"compiled",
"# this should be caught in the engine before",
"# we get here",
"assert",
"compiled",
".",
"can_execute",
"self",
".",
"execution_options",
"=",
"compiled",
".",
"execution_options",
".",
"union",
"(",
"connection",
".",
"_execution_options",
")",
"self",
".",
"result_column_struct",
"=",
"(",
"compiled",
".",
"_result_columns",
",",
"compiled",
".",
"_ordered_columns",
",",
"compiled",
".",
"_textual_ordered_columns",
")",
"self",
".",
"unicode_statement",
"=",
"util",
".",
"text_type",
"(",
"compiled",
")",
"if",
"not",
"dialect",
".",
"supports_unicode_statements",
":",
"self",
".",
"statement",
"=",
"self",
".",
"unicode_statement",
".",
"encode",
"(",
"self",
".",
"dialect",
".",
"encoding",
")",
"else",
":",
"self",
".",
"statement",
"=",
"self",
".",
"unicode_statement",
"self",
".",
"isinsert",
"=",
"compiled",
".",
"isinsert",
"self",
".",
"isupdate",
"=",
"compiled",
".",
"isupdate",
"self",
".",
"isdelete",
"=",
"compiled",
".",
"isdelete",
"self",
".",
"is_text",
"=",
"compiled",
".",
"isplaintext",
"if",
"not",
"parameters",
":",
"self",
".",
"compiled_parameters",
"=",
"[",
"compiled",
".",
"construct_params",
"(",
")",
"]",
"else",
":",
"self",
".",
"compiled_parameters",
"=",
"[",
"compiled",
".",
"construct_params",
"(",
"m",
",",
"_group_number",
"=",
"grp",
")",
"for",
"grp",
",",
"m",
"in",
"enumerate",
"(",
"parameters",
")",
"]",
"self",
".",
"executemany",
"=",
"len",
"(",
"parameters",
")",
">",
"1",
"self",
".",
"cursor",
"=",
"self",
".",
"create_cursor",
"(",
")",
"if",
"self",
".",
"isinsert",
"or",
"self",
".",
"isupdate",
"or",
"self",
".",
"isdelete",
":",
"self",
".",
"is_crud",
"=",
"True",
"self",
".",
"_is_explicit_returning",
"=",
"bool",
"(",
"compiled",
".",
"statement",
".",
"_returning",
")",
"self",
".",
"_is_implicit_returning",
"=",
"bool",
"(",
"compiled",
".",
"returning",
"and",
"not",
"compiled",
".",
"statement",
".",
"_returning",
")",
"if",
"self",
".",
"compiled",
".",
"insert_prefetch",
"or",
"self",
".",
"compiled",
".",
"update_prefetch",
":",
"if",
"self",
".",
"executemany",
":",
"self",
".",
"_process_executemany_defaults",
"(",
")",
"else",
":",
"self",
".",
"_process_executesingle_defaults",
"(",
")",
"processors",
"=",
"compiled",
".",
"_bind_processors",
"# Convert the dictionary of bind parameter values",
"# into a dict or list to be sent to the DBAPI's",
"# execute() or executemany() method.",
"parameters",
"=",
"[",
"]",
"if",
"dialect",
".",
"positional",
":",
"for",
"compiled_params",
"in",
"self",
".",
"compiled_parameters",
":",
"param",
"=",
"[",
"]",
"for",
"key",
"in",
"self",
".",
"compiled",
".",
"positiontup",
":",
"if",
"key",
"in",
"processors",
":",
"param",
".",
"append",
"(",
"processors",
"[",
"key",
"]",
"(",
"compiled_params",
"[",
"key",
"]",
")",
")",
"else",
":",
"param",
".",
"append",
"(",
"compiled_params",
"[",
"key",
"]",
")",
"parameters",
".",
"append",
"(",
"dialect",
".",
"execute_sequence_format",
"(",
"param",
")",
")",
"else",
":",
"encode",
"=",
"not",
"dialect",
".",
"supports_unicode_statements",
"for",
"compiled_params",
"in",
"self",
".",
"compiled_parameters",
":",
"if",
"encode",
":",
"param",
"=",
"dict",
"(",
"(",
"dialect",
".",
"_encoder",
"(",
"key",
")",
"[",
"0",
"]",
",",
"processors",
"[",
"key",
"]",
"(",
"compiled_params",
"[",
"key",
"]",
")",
"if",
"key",
"in",
"processors",
"else",
"compiled_params",
"[",
"key",
"]",
")",
"for",
"key",
"in",
"compiled_params",
")",
"else",
":",
"param",
"=",
"dict",
"(",
"(",
"key",
",",
"processors",
"[",
"key",
"]",
"(",
"compiled_params",
"[",
"key",
"]",
")",
"if",
"key",
"in",
"processors",
"else",
"compiled_params",
"[",
"key",
"]",
")",
"for",
"key",
"in",
"compiled_params",
")",
"parameters",
".",
"append",
"(",
"param",
")",
"self",
".",
"parameters",
"=",
"dialect",
".",
"execute_sequence_format",
"(",
"parameters",
")",
"self",
".",
"statement",
"=",
"compiled",
"return",
"self"
] | Initialize execution context for a Compiled construct. | [
"Initialize",
"execution",
"context",
"for",
"a",
"Compiled",
"construct",
"."
] | train | https://github.com/OpenEnergyPlatform/oedialect/blob/40a8d9e9b272ea4674d2c40dd6b3e6cc15f91c1e/oedialect/dialect.py#L38-L139 |
fabianvf/python-rake | RAKE/RAKE.py | split_sentences | def split_sentences(text):
"""
Utility function to return a list of sentences.
@param text The text that must be split in to sentences.
"""
sentence_delimiters = re.compile(u'[.!?,;:\t\\\\"\\(\\)\\\'\u2019\u2013]|\\s\\-\\s')
sentences = sentence_delimiters.split(text)
return sentences | python | def split_sentences(text):
"""
Utility function to return a list of sentences.
@param text The text that must be split into sentences.
"""
sentence_delimiters = re.compile(u'[.!?,;:\t\\\\"\\(\\)\\\'\u2019\u2013]|\\s\\-\\s')
sentences = sentence_delimiters.split(text)
return sentences | [
"def",
"split_sentences",
"(",
"text",
")",
":",
"sentence_delimiters",
"=",
"re",
".",
"compile",
"(",
"u'[.!?,;:\\t\\\\\\\\\"\\\\(\\\\)\\\\\\'\\u2019\\u2013]|\\\\s\\\\-\\\\s'",
")",
"sentences",
"=",
"sentence_delimiters",
".",
"split",
"(",
"text",
")",
"return",
"sentences"
] | Utility function to return a list of sentences.
@param text The text that must be split into sentences. | [
"Utility",
"function",
"to",
"return",
"a",
"list",
"of",
"sentences",
"."
] | train | https://github.com/fabianvf/python-rake/blob/54357322c40c7dd975144a189bbc9da0f9c29060/RAKE/RAKE.py#L88-L95 |
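A worked example of what the delimiter regex does: it splits on common punctuation, tabs, quotes, and parentheses, plus a dash surrounded by whitespace, and keeps empty trailing pieces.

```python
# Split behaviour of the regex above, traced by hand.
split_sentences("RAKE works well. Does it? Yes - mostly, it does.")
# -> ['RAKE works well', ' Does it', ' Yes', 'mostly', ' it does', '']
```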
crccheck/django-object-actions | django_object_actions/utils.py | takes_instance_or_queryset | def takes_instance_or_queryset(func):
"""Decorator that makes standard Django admin actions compatible."""
@wraps(func)
def decorated_function(self, request, queryset):
# func follows the prototype documented at:
# https://docs.djangoproject.com/en/dev/ref/contrib/admin/actions/#writing-action-functions
if not isinstance(queryset, QuerySet):
try:
# Django >=1.8
queryset = self.get_queryset(request).filter(pk=queryset.pk)
except AttributeError:
try:
# Django >=1.6,<1.8
model = queryset._meta.model
except AttributeError: # pragma: no cover
# Django <1.6
model = queryset._meta.concrete_model
queryset = model.objects.filter(pk=queryset.pk)
return func(self, request, queryset)
return decorated_function | python | def takes_instance_or_queryset(func):
"""Decorator that makes standard Django admin actions compatible."""
@wraps(func)
def decorated_function(self, request, queryset):
# func follows the prototype documented at:
# https://docs.djangoproject.com/en/dev/ref/contrib/admin/actions/#writing-action-functions
if not isinstance(queryset, QuerySet):
try:
# Django >=1.8
queryset = self.get_queryset(request).filter(pk=queryset.pk)
except AttributeError:
try:
# Django >=1.6,<1.8
model = queryset._meta.model
except AttributeError: # pragma: no cover
# Django <1.6
model = queryset._meta.concrete_model
queryset = model.objects.filter(pk=queryset.pk)
return func(self, request, queryset)
return decorated_function | [
"def",
"takes_instance_or_queryset",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"decorated_function",
"(",
"self",
",",
"request",
",",
"queryset",
")",
":",
"# func follows the prototype documented at:",
"# https://docs.djangoproject.com/en/dev/ref/contrib/admin/actions/#writing-action-functions",
"if",
"not",
"isinstance",
"(",
"queryset",
",",
"QuerySet",
")",
":",
"try",
":",
"# Django >=1.8",
"queryset",
"=",
"self",
".",
"get_queryset",
"(",
"request",
")",
".",
"filter",
"(",
"pk",
"=",
"queryset",
".",
"pk",
")",
"except",
"AttributeError",
":",
"try",
":",
"# Django >=1.6,<1.8",
"model",
"=",
"queryset",
".",
"_meta",
".",
"model",
"except",
"AttributeError",
":",
"# pragma: no cover",
"# Django <1.6",
"model",
"=",
"queryset",
".",
"_meta",
".",
"concrete_model",
"queryset",
"=",
"model",
".",
"objects",
".",
"filter",
"(",
"pk",
"=",
"queryset",
".",
"pk",
")",
"return",
"func",
"(",
"self",
",",
"request",
",",
"queryset",
")",
"return",
"decorated_function"
] | Decorator that makes standard Django admin actions compatible. | [
"Decorator",
"that",
"makes",
"standard",
"Django",
"admin",
"actions",
"compatible",
"."
] | train | https://github.com/crccheck/django-object-actions/blob/fb908697a609f46889af15b543d444e5e19d6be2/django_object_actions/utils.py#L280-L299 |
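A sketch of the intended use together with django-object-actions: the same action body serves both the changelist (which passes a queryset) and an object action (which passes one instance). The `Poll` model and its `is_published` field are invented.

```python
# Sketch, assuming a Poll model with an is_published field.
from django.contrib import admin
from django_object_actions import DjangoObjectActions, takes_instance_or_queryset

class PollAdmin(DjangoObjectActions, admin.ModelAdmin):
    actions = ['publish']           # changelist action: receives a queryset
    change_actions = ['publish']    # object action: receives a single instance

    @takes_instance_or_queryset
    def publish(self, request, queryset):
        # queryset is guaranteed here, even when invoked with one instance
        queryset.update(is_published=True)
```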
crccheck/django-object-actions | django_object_actions/utils.py | BaseDjangoObjectActions.get_urls | def get_urls(self):
"""Prepend `get_urls` with our own patterns."""
urls = super(BaseDjangoObjectActions, self).get_urls()
return self._get_action_urls() + urls | python | def get_urls(self):
"""Prepend `get_urls` with our own patterns."""
urls = super(BaseDjangoObjectActions, self).get_urls()
return self._get_action_urls() + urls | [
"def",
"get_urls",
"(",
"self",
")",
":",
"urls",
"=",
"super",
"(",
"BaseDjangoObjectActions",
",",
"self",
")",
".",
"get_urls",
"(",
")",
"return",
"self",
".",
"_get_action_urls",
"(",
")",
"+",
"urls"
] | Prepend `get_urls` with our own patterns. | [
"Prepend",
"get_urls",
"with",
"our",
"own",
"patterns",
"."
] | train | https://github.com/crccheck/django-object-actions/blob/fb908697a609f46889af15b543d444e5e19d6be2/django_object_actions/utils.py#L46-L49 |
crccheck/django-object-actions | django_object_actions/utils.py | BaseDjangoObjectActions._get_action_urls | def _get_action_urls(self):
"""Get the url patterns that route each action to a view."""
actions = {}
model_name = self.model._meta.model_name
# e.g.: polls_poll
base_url_name = '%s_%s' % (self.model._meta.app_label, model_name)
# e.g.: polls_poll_actions
model_actions_url_name = '%s_actions' % base_url_name
self.tools_view_name = 'admin:' + model_actions_url_name
# WISHLIST use get_change_actions and get_changelist_actions
# TODO separate change and changelist actions
for action in chain(self.change_actions, self.changelist_actions):
actions[action] = getattr(self, action)
return [
# change, supports the same pks the admin does
# https://github.com/django/django/blob/stable/1.10.x/django/contrib/admin/options.py#L555
url(r'^(?P<pk>.+)/actions/(?P<tool>\w+)/$',
self.admin_site.admin_view( # checks permissions
ChangeActionView.as_view(
model=self.model,
actions=actions,
back='admin:%s_change' % base_url_name,
current_app=self.admin_site.name,
)
),
name=model_actions_url_name),
# changelist
url(r'^actions/(?P<tool>\w+)/$',
self.admin_site.admin_view( # checks permissions
ChangeListActionView.as_view(
model=self.model,
actions=actions,
back='admin:%s_changelist' % base_url_name,
current_app=self.admin_site.name,
)
),
# Dupe name is fine. https://code.djangoproject.com/ticket/14259
name=model_actions_url_name),
] | python | def _get_action_urls(self):
"""Get the url patterns that route each action to a view."""
actions = {}
model_name = self.model._meta.model_name
# e.g.: polls_poll
base_url_name = '%s_%s' % (self.model._meta.app_label, model_name)
# e.g.: polls_poll_actions
model_actions_url_name = '%s_actions' % base_url_name
self.tools_view_name = 'admin:' + model_actions_url_name
# WISHLIST use get_change_actions and get_changelist_actions
# TODO separate change and changelist actions
for action in chain(self.change_actions, self.changelist_actions):
actions[action] = getattr(self, action)
return [
# change, supports the same pks the admin does
# https://github.com/django/django/blob/stable/1.10.x/django/contrib/admin/options.py#L555
url(r'^(?P<pk>.+)/actions/(?P<tool>\w+)/$',
self.admin_site.admin_view( # checks permissions
ChangeActionView.as_view(
model=self.model,
actions=actions,
back='admin:%s_change' % base_url_name,
current_app=self.admin_site.name,
)
),
name=model_actions_url_name),
# changelist
url(r'^actions/(?P<tool>\w+)/$',
self.admin_site.admin_view( # checks permissions
ChangeListActionView.as_view(
model=self.model,
actions=actions,
back='admin:%s_changelist' % base_url_name,
current_app=self.admin_site.name,
)
),
# Dupe name is fine. https://code.djangoproject.com/ticket/14259
name=model_actions_url_name),
] | [
"def",
"_get_action_urls",
"(",
"self",
")",
":",
"actions",
"=",
"{",
"}",
"model_name",
"=",
"self",
".",
"model",
".",
"_meta",
".",
"model_name",
"# e.g.: polls_poll",
"base_url_name",
"=",
"'%s_%s'",
"%",
"(",
"self",
".",
"model",
".",
"_meta",
".",
"app_label",
",",
"model_name",
")",
"# e.g.: polls_poll_actions",
"model_actions_url_name",
"=",
"'%s_actions'",
"%",
"base_url_name",
"self",
".",
"tools_view_name",
"=",
"'admin:'",
"+",
"model_actions_url_name",
"# WISHLIST use get_change_actions and get_changelist_actions",
"# TODO separate change and changelist actions",
"for",
"action",
"in",
"chain",
"(",
"self",
".",
"change_actions",
",",
"self",
".",
"changelist_actions",
")",
":",
"actions",
"[",
"action",
"]",
"=",
"getattr",
"(",
"self",
",",
"action",
")",
"return",
"[",
"# change, supports the same pks the admin does",
"# https://github.com/django/django/blob/stable/1.10.x/django/contrib/admin/options.py#L555",
"url",
"(",
"r'^(?P<pk>.+)/actions/(?P<tool>\\w+)/$'",
",",
"self",
".",
"admin_site",
".",
"admin_view",
"(",
"# checks permissions",
"ChangeActionView",
".",
"as_view",
"(",
"model",
"=",
"self",
".",
"model",
",",
"actions",
"=",
"actions",
",",
"back",
"=",
"'admin:%s_change'",
"%",
"base_url_name",
",",
"current_app",
"=",
"self",
".",
"admin_site",
".",
"name",
",",
")",
")",
",",
"name",
"=",
"model_actions_url_name",
")",
",",
"# changelist",
"url",
"(",
"r'^actions/(?P<tool>\\w+)/$'",
",",
"self",
".",
"admin_site",
".",
"admin_view",
"(",
"# checks permissions",
"ChangeListActionView",
".",
"as_view",
"(",
"model",
"=",
"self",
".",
"model",
",",
"actions",
"=",
"actions",
",",
"back",
"=",
"'admin:%s_changelist'",
"%",
"base_url_name",
",",
"current_app",
"=",
"self",
".",
"admin_site",
".",
"name",
",",
")",
")",
",",
"# Dupe name is fine. https://code.djangoproject.com/ticket/14259",
"name",
"=",
"model_actions_url_name",
")",
",",
"]"
] | Get the url patterns that route each action to a view. | [
"Get",
"the",
"url",
"patterns",
"that",
"route",
"each",
"action",
"to",
"a",
"view",
"."
] | train | https://github.com/crccheck/django-object-actions/blob/fb908697a609f46889af15b543d444e5e19d6be2/django_object_actions/utils.py#L105-L146 |
crccheck/django-object-actions | django_object_actions/utils.py | BaseDjangoObjectActions._get_tool_dict | def _get_tool_dict(self, tool_name):
"""Represents the tool as a dict with extra meta."""
tool = getattr(self, tool_name)
standard_attrs, custom_attrs = self._get_button_attrs(tool)
return dict(
name=tool_name,
label=getattr(tool, 'label', tool_name),
standard_attrs=standard_attrs,
custom_attrs=custom_attrs,
) | python | def _get_tool_dict(self, tool_name):
"""Represents the tool as a dict with extra meta."""
tool = getattr(self, tool_name)
standard_attrs, custom_attrs = self._get_button_attrs(tool)
return dict(
name=tool_name,
label=getattr(tool, 'label', tool_name),
standard_attrs=standard_attrs,
custom_attrs=custom_attrs,
) | [
"def",
"_get_tool_dict",
"(",
"self",
",",
"tool_name",
")",
":",
"tool",
"=",
"getattr",
"(",
"self",
",",
"tool_name",
")",
"standard_attrs",
",",
"custom_attrs",
"=",
"self",
".",
"_get_button_attrs",
"(",
"tool",
")",
"return",
"dict",
"(",
"name",
"=",
"tool_name",
",",
"label",
"=",
"getattr",
"(",
"tool",
",",
"'label'",
",",
"tool_name",
")",
",",
"standard_attrs",
"=",
"standard_attrs",
",",
"custom_attrs",
"=",
"custom_attrs",
",",
")"
] | Represents the tool as a dict with extra meta. | [
"Represents",
"the",
"tool",
"as",
"a",
"dict",
"with",
"extra",
"meta",
"."
] | train | https://github.com/crccheck/django-object-actions/blob/fb908697a609f46889af15b543d444e5e19d6be2/django_object_actions/utils.py#L148-L157 |
crccheck/django-object-actions | django_object_actions/utils.py | BaseDjangoObjectActions._get_button_attrs | def _get_button_attrs(self, tool):
"""
Get the HTML attributes associated with a tool.
There are some standard attributes (class and title) that the template
will always want. Any number of additional attributes can be specified
and passed on. This is kinda awkward and due for a refactor for
readability.
"""
attrs = getattr(tool, 'attrs', {})
# href is not allowed to be set. should an exception be raised instead?
if 'href' in attrs:
attrs.pop('href')
# title is not allowed to be set. should an exception be raised instead?
# `short_description` should be set instead to parallel django admin
# actions
if 'title' in attrs:
attrs.pop('title')
default_attrs = {
'class': attrs.get('class', ''),
'title': getattr(tool, 'short_description', ''),
}
standard_attrs = {}
custom_attrs = {}
for k, v in dict(default_attrs, **attrs).items():
if k in default_attrs:
standard_attrs[k] = v
else:
custom_attrs[k] = v
return standard_attrs, custom_attrs | python | def _get_button_attrs(self, tool):
"""
Get the HTML attributes associated with a tool.
There are some standard attributes (class and title) that the template
will always want. Any number of additional attributes can be specified
and passed on. This is kinda awkward and due for a refactor for
readability.
"""
attrs = getattr(tool, 'attrs', {})
# href is not allowed to be set. should an exception be raised instead?
if 'href' in attrs:
attrs.pop('href')
# title is not allowed to be set. should an exception be raised instead?
# `short_description` should be set instead to parallel django admin
# actions
if 'title' in attrs:
attrs.pop('title')
default_attrs = {
'class': attrs.get('class', ''),
'title': getattr(tool, 'short_description', ''),
}
standard_attrs = {}
custom_attrs = {}
for k, v in dict(default_attrs, **attrs).items():
if k in default_attrs:
standard_attrs[k] = v
else:
custom_attrs[k] = v
return standard_attrs, custom_attrs | [
"def",
"_get_button_attrs",
"(",
"self",
",",
"tool",
")",
":",
"attrs",
"=",
"getattr",
"(",
"tool",
",",
"'attrs'",
",",
"{",
"}",
")",
"# href is not allowed to be set. should an exception be raised instead?",
"if",
"'href'",
"in",
"attrs",
":",
"attrs",
".",
"pop",
"(",
"'href'",
")",
"# title is not allowed to be set. should an exception be raised instead?",
"# `short_description` should be set instead to parallel django admin",
"# actions",
"if",
"'title'",
"in",
"attrs",
":",
"attrs",
".",
"pop",
"(",
"'title'",
")",
"default_attrs",
"=",
"{",
"'class'",
":",
"attrs",
".",
"get",
"(",
"'class'",
",",
"''",
")",
",",
"'title'",
":",
"getattr",
"(",
"tool",
",",
"'short_description'",
",",
"''",
")",
",",
"}",
"standard_attrs",
"=",
"{",
"}",
"custom_attrs",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"dict",
"(",
"default_attrs",
",",
"*",
"*",
"attrs",
")",
".",
"items",
"(",
")",
":",
"if",
"k",
"in",
"default_attrs",
":",
"standard_attrs",
"[",
"k",
"]",
"=",
"v",
"else",
":",
"custom_attrs",
"[",
"k",
"]",
"=",
"v",
"return",
"standard_attrs",
",",
"custom_attrs"
] | Get the HTML attributes associated with a tool.
There are some standard attributes (class and title) that the template
will always want. Any number of additional attributes can be specified
and passed on. This is kinda awkward and due for a refactor for
readability. | [
"Get",
"the",
"HTML",
"attributes",
"associated",
"with",
"a",
"tool",
"."
] | train | https://github.com/crccheck/django-object-actions/blob/fb908697a609f46889af15b543d444e5e19d6be2/django_object_actions/utils.py#L159-L188 |
crccheck/django-object-actions | example_project/polls/admin.py | PollAdmin.question_mark | def question_mark(self, request, obj):
"""Add a question mark."""
obj.question = obj.question + '?'
obj.save() | python | def question_mark(self, request, obj):
"""Add a question mark."""
obj.question = obj.question + '?'
obj.save() | [
"def",
"question_mark",
"(",
"self",
",",
"request",
",",
"obj",
")",
":",
"obj",
".",
"question",
"=",
"obj",
".",
"question",
"+",
"'?'",
"obj",
".",
"save",
"(",
")"
] | Add a question mark. | [
"Add",
"a",
"question",
"mark",
"."
] | train | https://github.com/crccheck/django-object-actions/blob/fb908697a609f46889af15b543d444e5e19d6be2/example_project/polls/admin.py#L115-L118 |
InfoAgeTech/django-core | django_core/html/builders.py | build_link | def build_link(href, text, cls=None, icon_class=None, **attrs):
"""Builds an html link.
:param href: link for the anchor element
:param text: text for the anchor element
:param attrs: other attribute kwargs
>>> build_link('xyz.com', 'hello', 'big')
u'<a href="xyz.com" class="big">hello</a>'
>>> build_link('xyz.com', 'hello', 'big', 'fa fa-times')
u'<a href="xyz.com" class="big"><i class="fa fa-times"></i> hello</a>'
"""
return build_html_element(tag='a',
text=text,
href=href,
cls=cls,
icon_class=icon_class,
**attrs) | python | def build_link(href, text, cls=None, icon_class=None, **attrs):
"""Builds an html link.
:param href: link for the anchor element
:param text: text for the anchor element
:param attrs: other attribute kwargs
>>> build_link('xyz.com', 'hello', 'big')
u'<a href="xyz.com" class="big">hello</a>'
>>> build_link('xyz.com', 'hello', 'big', 'fa fa-times')
u'<a href="xyz.com" class="big"><i class="fa fa-times"></i> hello</a>'
"""
return build_html_element(tag='a',
text=text,
href=href,
cls=cls,
icon_class=icon_class,
**attrs) | [
"def",
"build_link",
"(",
"href",
",",
"text",
",",
"cls",
"=",
"None",
",",
"icon_class",
"=",
"None",
",",
"*",
"*",
"attrs",
")",
":",
"return",
"build_html_element",
"(",
"tag",
"=",
"'a'",
",",
"text",
"=",
"text",
",",
"href",
"=",
"href",
",",
"cls",
"=",
"cls",
",",
"icon_class",
"=",
"icon_class",
",",
"*",
"*",
"attrs",
")"
] | Builds an html link.
:param href: link for the anchor element
:param text: text for the anchor element
:param attrs: other attribute kwargs
>>> build_link('xyz.com', 'hello', 'big')
u'<a href="xyz.com" class="big">hello</a>'
>>> build_link('xyz.com', 'hello', 'big', 'fa fa-times')
u'<a href="xyz.com" class="big"><i class="fa fa-times"></i> hello</a>' | [
"Builds",
"an",
"html",
"link",
"."
] | train | https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/html/builders.py#L8-L25 |
InfoAgeTech/django-core | django_core/html/builders.py | build_html_element | def build_html_element(tag, text=None, icon_class=None, cls=None, **kwargs):
"""Builds an html element.
:param tag: the html tag to build ('a', 'div', 'img', etc)
:param icon_class: the class to apply to an icon element. This only
applies to elements that allow a closing tag.
:param cls: the css class to apply to the tag. This can also be passed
in as a kwarg as "class".
>>> build_html_element(tag='a', href='someurl.com', text='hello')
'<a href='someurl.com'>hello</a>'
"""
if cls is not None:
kwargs['class'] = cls
tag_attrs = ' '.join(['{0}="{1}"'.format(k, v) for k, v in kwargs.items()])
tag_content = '{tag} {tag_attrs}'.format(tag=tag, tag_attrs=tag_attrs)
if tag in ('img', 'input', 'hr', 'br'):
return mark_safe('<{tag_content} />'.format(tag_content=tag_content))
icon = '<i class="{0}"></i> '.format(icon_class) if icon_class else ''
if not text:
text = ''
elif not isinstance(text, SafeText):
text = escape(text)
return mark_safe('<{tag_content}>{icon}{text}</{tag}>'.format(
tag_content=tag_content,
icon=icon,
tag=tag,
text=text)
) | python | def build_html_element(tag, text=None, icon_class=None, cls=None, **kwargs):
"""Builds an html element.
:param tag: the html tag to build ('a', 'div', 'img', etc)
:param icon_class: the class to apply to an icon element. This only
applies to elements that allow a closing tag.
:param cls: the css class to apply to the tag. This can also be passed
in as a kwarg as "class".
>>> build_html_element(tag='a', href='someurl.com', text='hello')
'<a href='someurl.com'>hello</a>'
"""
if cls is not None:
kwargs['class'] = cls
tag_attrs = ' '.join(['{0}="{1}"'.format(k, v) for k, v in kwargs.items()])
tag_content = '{tag} {tag_attrs}'.format(tag=tag, tag_attrs=tag_attrs)
if tag in ('img', 'input', 'hr', 'br'):
return mark_safe('<{tag_content} />'.format(tag_content=tag_content))
icon = '<i class="{0}"></i> '.format(icon_class) if icon_class else ''
if not text:
text = ''
elif not isinstance(text, SafeText):
text = escape(text)
return mark_safe('<{tag_content}>{icon}{text}</{tag}>'.format(
tag_content=tag_content,
icon=icon,
tag=tag,
text=text)
) | [
"def",
"build_html_element",
"(",
"tag",
",",
"text",
"=",
"None",
",",
"icon_class",
"=",
"None",
",",
"cls",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"cls",
"is",
"not",
"None",
":",
"kwargs",
"[",
"'class'",
"]",
"=",
"cls",
"tag_attrs",
"=",
"' '",
".",
"join",
"(",
"[",
"'{0}=\"{1}\"'",
".",
"format",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
"]",
")",
"tag_content",
"=",
"'{tag} {tag_attrs}'",
".",
"format",
"(",
"tag",
"=",
"tag",
",",
"tag_attrs",
"=",
"tag_attrs",
")",
"if",
"tag",
"in",
"(",
"'img'",
",",
"'input'",
",",
"'hr'",
",",
"'br'",
")",
":",
"return",
"mark_safe",
"(",
"'<{tag_content} />'",
".",
"format",
"(",
"tag_content",
"=",
"tag_content",
")",
")",
"icon",
"=",
"'<i class=\"{0}\"></i> '",
".",
"format",
"(",
"icon_class",
")",
"if",
"icon_class",
"else",
"''",
"if",
"not",
"text",
":",
"text",
"=",
"''",
"elif",
"not",
"isinstance",
"(",
"text",
",",
"SafeText",
")",
":",
"text",
"=",
"escape",
"(",
"text",
")",
"return",
"mark_safe",
"(",
"'<{tag_content}>{icon}{text}</{tag}>'",
".",
"format",
"(",
"tag_content",
"=",
"tag_content",
",",
"icon",
"=",
"icon",
",",
"tag",
"=",
"tag",
",",
"text",
"=",
"text",
")",
")"
] | Builds an html element.
:param tag: the html tag to build ('a', 'div', 'img', etc)
:param icon_class: the class to apply to an icon element. This only
applies to elements that allow a closing tag.
:param cls: the css class to apply to the tag. This can also be passed
in as a kwarg as "class".
>>> build_html_element(tag='a', href='someurl.com', text='hello')
'<a href='someurl.com'>hello</a>' | [
"Builds",
"an",
"html",
"element",
"."
] | train | https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/html/builders.py#L28-L62 |
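Two illustrative calls. Void tags self-close and skip text/icon; other tags escape unsafe text. The attribute order shown assumes dict insertion order (Python 3.7+), with `class` appended when passed via `cls=`.

```python
# Expected outputs traced from the function body above.
build_html_element('img', src='logo.png', cls='avatar')
# -> '<img src="logo.png" class="avatar" />'

build_html_element('div', text='<b>hi</b>', cls='note')
# -> '<div class="note">&lt;b&gt;hi&lt;/b&gt;</div>'  (text is escaped)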
edx/edx-django-release-util | release_util/management/commands/__init__.py | dump_migration_session_state | def dump_migration_session_state(raw):
"""
Serialize a migration session state to yaml using nicer formatting
Args:
raw: object to serialize
Returns: string (of yaml)
Specifically, this forces the "output" member of state step dicts (e.g.
state[0]['output']) to use block formatting. For example, rather than this:
- migration: [app, migration_name]
output: "line 1\nline2\nline3"
You get this:
- migration: [app, migration_name]
output: |
line 1
line 2
line 3
"""
class BlockStyle(str): pass
class SessionDumper(yaml.SafeDumper): pass
def str_block_formatter(dumper, data):
return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='|')
SessionDumper.add_representer(BlockStyle, str_block_formatter)
raw = deepcopy(raw)
for step in raw:
step['output'] = BlockStyle(step['output'])
step['traceback'] = BlockStyle(step['traceback'])
return yaml.dump(raw, Dumper=SessionDumper) | python | def dump_migration_session_state(raw):
"""
Serialize a migration session state to yaml using nicer formatting
Args:
raw: object to serialize
Returns: string (of yaml)
Specifically, this forces the "output" member of state step dicts (e.g.
state[0]['output']) to use block formatting. For example, rather than this:
- migration: [app, migration_name]
output: "line 1\nline2\nline3"
You get this:
- migration: [app, migration_name]
output: |
line 1
line 2
line 3
"""
class BlockStyle(str): pass
class SessionDumper(yaml.SafeDumper): pass
def str_block_formatter(dumper, data):
return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='|')
SessionDumper.add_representer(BlockStyle, str_block_formatter)
raw = deepcopy(raw)
for step in raw:
step['output'] = BlockStyle(step['output'])
step['traceback'] = BlockStyle(step['traceback'])
return yaml.dump(raw, Dumper=SessionDumper) | [
"def",
"dump_migration_session_state",
"(",
"raw",
")",
":",
"class",
"BlockStyle",
"(",
"str",
")",
":",
"pass",
"class",
"SessionDumper",
"(",
"yaml",
".",
"SafeDumper",
")",
":",
"pass",
"def",
"str_block_formatter",
"(",
"dumper",
",",
"data",
")",
":",
"return",
"dumper",
".",
"represent_scalar",
"(",
"u'tag:yaml.org,2002:str'",
",",
"data",
",",
"style",
"=",
"'|'",
")",
"SessionDumper",
".",
"add_representer",
"(",
"BlockStyle",
",",
"str_block_formatter",
")",
"raw",
"=",
"deepcopy",
"(",
"raw",
")",
"for",
"step",
"in",
"raw",
":",
"step",
"[",
"'output'",
"]",
"=",
"BlockStyle",
"(",
"step",
"[",
"'output'",
"]",
")",
"step",
"[",
"'traceback'",
"]",
"=",
"BlockStyle",
"(",
"step",
"[",
"'traceback'",
"]",
")",
"return",
"yaml",
".",
"dump",
"(",
"raw",
",",
"Dumper",
"=",
"SessionDumper",
")"
] | Serialize a migration session state to yaml using nicer formatting
Args:
raw: object to serialize
Returns: string (of yaml)
Specifically, this forces the "output" member of state step dicts (e.g.
state[0]['output']) to use block formatting. For example, rather than this:
- migration: [app, migration_name]
output: "line 1\nline2\nline3"
You get this:
- migration: [app, migration_name]
output: |
line 1
line 2
line 3 | [
"Serialize",
"a",
"migration",
"session",
"state",
"to",
"yaml",
"using",
"nicer",
"formatting"
] | train | https://github.com/edx/edx-django-release-util/blob/de0fde41d6a19885ab7dc309472b94fd0fccbc1d/release_util/management/commands/__init__.py#L16-L48 |
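A sketch of the block-style result. The exact YAML layout depends on the PyYAML version, but the point is that `output` and `traceback` render as literal blocks rather than quoted `"line 1\nline 2"` strings.

```python
# State shape follows the docstring above; values are invented.
state = [{
    'migration': ['app', '0002_add_field'],
    'output': 'line 1\nline 2',
    'traceback': 'Traceback (most recent call last):\n  ...',
}]
print(dump_migration_session_state(state))
```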
edx/edx-django-release-util | release_util/management/commands/__init__.py | MigrationSession.add_migrations | def add_migrations(self, migrations):
"""
Add migrations to be applied.
Args:
migrations: a list of migrations to add of the form [(app, migration_name), ...]
Raises:
MigrationSessionError if called on a closed MigrationSession
"""
if self.__closed:
raise MigrationSessionError("Can't change applied session")
self._to_apply.extend(migrations) | python | def add_migrations(self, migrations):
"""
Add migrations to be applied.
Args:
migrations: a list of migrations to add of the form [(app, migration_name), ...]
Raises:
MigrationSessionError if called on a closed MigrationSession
"""
if self.__closed:
raise MigrationSessionError("Can't change applied session")
self._to_apply.extend(migrations) | [
"def",
"add_migrations",
"(",
"self",
",",
"migrations",
")",
":",
"if",
"self",
".",
"__closed",
":",
"raise",
"MigrationSessionError",
"(",
"\"Can't change applied session\"",
")",
"self",
".",
"_to_apply",
".",
"extend",
"(",
"migrations",
")"
] | Add migrations to be applied.
Args:
migrations: a list of migrations to add of the form [(app, migration_name), ...]
Raises:
MigrationSessionError if called on a closed MigrationSession | [
"Add",
"migrations",
"to",
"be",
"applied",
"."
] | train | https://github.com/edx/edx-django-release-util/blob/de0fde41d6a19885ab7dc309472b94fd0fccbc1d/release_util/management/commands/__init__.py#L146-L157 |
edx/edx-django-release-util | release_util/management/commands/__init__.py | MigrationSession._get_unapplied_migrations | def _get_unapplied_migrations(self, loader):
"""
Output a list of unapplied migrations in the form [['migration1', 'migration2'], ...].
This implementation is mostly copied from the Django 'showmigrations' mgmt command.
https://github.com/django/django/blob/stable/1.8.x/django/core/management/commands/showmigrations.py
This should only be called from _get_current_migration_state().
"""
unapplied = []
graph = loader.graph
plan = []
seen = set()
# Generate the plan, in the order that migrations have been/should be applied.
for target in graph.leaf_nodes():
for migration in graph.forwards_plan(target):
if migration not in seen:
plan.append(graph.nodes[migration])
seen.add(migration)
# Remove the migrations that have already been applied.
for migration in plan:
if not (migration.app_label, migration.name) in loader.applied_migrations:
# NOTE: Unicode Django application names are unsupported.
unapplied.append([migration.app_label, str(migration.name)])
return unapplied | python | def _get_unapplied_migrations(self, loader):
"""
Output a list of unapplied migrations in the form [['migration1', 'migration2'], ...].
This implementation is mostly copied from the Django 'showmigrations' mgmt command.
https://github.com/django/django/blob/stable/1.8.x/django/core/management/commands/showmigrations.py
This should only be called from _get_current_migration_state().
"""
unapplied = []
graph = loader.graph
plan = []
seen = set()
# Generate the plan, in the order that migrations have been/should be applied.
for target in graph.leaf_nodes():
for migration in graph.forwards_plan(target):
if migration not in seen:
plan.append(graph.nodes[migration])
seen.add(migration)
# Remove the migrations that have already been applied.
for migration in plan:
if not (migration.app_label, migration.name) in loader.applied_migrations:
# NOTE: Unicode Django application names are unsupported.
unapplied.append([migration.app_label, str(migration.name)])
return unapplied | [
"def",
"_get_unapplied_migrations",
"(",
"self",
",",
"loader",
")",
":",
"unapplied",
"=",
"[",
"]",
"graph",
"=",
"loader",
".",
"graph",
"plan",
"=",
"[",
"]",
"seen",
"=",
"set",
"(",
")",
"# Generate the plan, in the order that migrations have been/should be applied.",
"for",
"target",
"in",
"graph",
".",
"leaf_nodes",
"(",
")",
":",
"for",
"migration",
"in",
"graph",
".",
"forwards_plan",
"(",
"target",
")",
":",
"if",
"migration",
"not",
"in",
"seen",
":",
"plan",
".",
"append",
"(",
"graph",
".",
"nodes",
"[",
"migration",
"]",
")",
"seen",
".",
"add",
"(",
"migration",
")",
"# Remove the migrations that have already been applied.",
"for",
"migration",
"in",
"plan",
":",
"if",
"not",
"(",
"migration",
".",
"app_label",
",",
"migration",
".",
"name",
")",
"in",
"loader",
".",
"applied_migrations",
":",
"# NOTE: Unicode Django application names are unsupported.",
"unapplied",
".",
"append",
"(",
"[",
"migration",
".",
"app_label",
",",
"str",
"(",
"migration",
".",
"name",
")",
"]",
")",
"return",
"unapplied"
] | Output a list of unapplied migrations in the form [['migration1', 'migration2'], ...].
This implementation is mostly copied from the Django 'showmigrations' mgmt command.
https://github.com/django/django/blob/stable/1.8.x/django/core/management/commands/showmigrations.py
This should only be called from _get_current_migration_state(). | [
"Output",
"a",
"list",
"of",
"unapplied",
"migrations",
"in",
"the",
"form",
"[[",
"migration1",
"migration2",
"]",
"...",
"]",
".",
"This",
"implementation",
"is",
"mostly",
"copied",
"from",
"the",
"Django",
"showmigrations",
"mgmt",
"command",
".",
"https",
":",
"//",
"github",
".",
"com",
"/",
"django",
"/",
"django",
"/",
"blob",
"/",
"stable",
"/",
"1",
".",
"8",
".",
"x",
"/",
"django",
"/",
"core",
"/",
"management",
"/",
"commands",
"/",
"showmigrations",
".",
"py"
] | train | https://github.com/edx/edx-django-release-util/blob/de0fde41d6a19885ab7dc309472b94fd0fccbc1d/release_util/management/commands/__init__.py#L159-L184 |
edx/edx-django-release-util | release_util/management/commands/__init__.py | MigrationSession._get_current_migration_state | def _get_current_migration_state(self, loader, apps):
"""
Extract the most recent migrations from the relevant apps.
If no migrations have been performed, return 'zero' as the most recent migration for the app.
This should only be called from list_migrations().
"""
# Only care about applied migrations for the passed-in apps.
apps = set(apps)
relevant_applied = [migration for migration in loader.applied_migrations if migration[0] in apps]
# Sort them by the most recent migration and convert to a dictionary,
# leaving apps as keys and most recent migration as values.
# NB: this is a dirty trick
most_recents = dict(sorted(relevant_applied, key=lambda m: m[1]))
# Fill in the apps with no migrations with 'zero'.
# NOTE: Unicode Django application names are unsupported.
most_recents = [[app, 'zero' if app not in most_recents else str(most_recents[app])] for app in apps]
return most_recents | python | Extract the most recent migrations from the relevant apps.
If no migrations have been performed, return 'zero' as the most recent migration for the app.
This should only be called from list_migrations(). | train | https://github.com/edx/edx-django-release-util/blob/de0fde41d6a19885ab7dc309472b94fd0fccbc1d/release_util/management/commands/__init__.py#L186-L203 |
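The sort-then-dict step above works because dict() keeps only the last value seen for a repeated key, so after sorting the (app, migration) pairs by migration name every app ends up mapped to its highest-sorting migration. A minimal sketch of the same trick, with placeholder data:

pairs = [('myapp', '0001_initial'), ('myapp', '0002_add_field'), ('other', '0001_initial')]
latest = dict(sorted(pairs, key=lambda m: m[1]))
# dict() keeps the last value per key, so each app maps to its
# lexicographically greatest migration name:
print(latest)  # {'myapp': '0002_add_field', 'other': '0001_initial'}

Note this relies on Django's zero-padded numeric prefixes making lexicographic order agree with application order.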
edx/edx-django-release-util | release_util/management/commands/__init__.py | MigrationSession.list_migrations | def list_migrations(self):
"""
Returns a tuple of unapplied, current
"Unapplied" is a list of unapplied migrations. "Current" is a list of the current migration
states for apps with unapplied migrations.
Both are tuples of the form (app, migration_name).
"""
connection = connections[self._database_name]
loader = MigrationLoader(connection, ignore_no_migrations=True)
unapplied = self._get_unapplied_migrations(loader)
currents = self._get_current_migration_state(loader, [u[0] for u in unapplied])
return unapplied, currents | python | Returns a tuple of unapplied, current
"Unapplied" is a list of unapplied migrations. "Current" is a list of the current migration
states for apps with unapplied migrations.
Both are tuples of the form (app, migration_name). | train | https://github.com/edx/edx-django-release-util/blob/de0fde41d6a19885ab7dc309472b94fd0fccbc1d/release_util/management/commands/__init__.py#L205-L218 |
edx/edx-django-release-util | release_util/management/commands/__init__.py | MigrationSession._parse_migrate_output | def _parse_migrate_output(self, output):
"""
Args:
output: str, output of "manage.py migrate"
Returns (succeeded: list(tuple), failed: tuple or None)
Both tuples are of the form (app, migration)
"""
failed = None
succeeded = []
# Mark migrations:
# - before exception migration as success
# - exception migration as failed
for line in output.split('\n'):
line = _remove_escape_characters(line).strip()
line_match = self.migration_regex.match(line)
if line_match:
migration = (line_match.group('app_name'), line_match.group('migration_name'))
if line_match.group('success') == 'OK':
# The migration succeeded
succeeded.append(migration)
else:
# The migration failed
failed = migration
break
return succeeded, failed | python | Args:
output: str, output of "manage.py migrate"
Returns (succeeded: list(tuple), failed: tuple or None)
Both tuples are of the form (app, migration) | train | https://github.com/edx/edx-django-release-util/blob/de0fde41d6a19885ab7dc309472b94fd0fccbc1d/release_util/management/commands/__init__.py#L220-L245 |
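The migration_regex consulted here is not shown in these records; a pattern that would match "migrate" output lines such as "Applying myapp.0002_add_field... OK" could look like the sketch below (the actual expression used by the class is an assumption):

import re

# Hypothetical pattern; the real self.migration_regex may differ.
migration_regex = re.compile(
    r"Applying (?P<app_name>[^.]+)\.(?P<migration_name>\S+?)\.\.\.\s*(?P<success>OK)?$"
)

m = migration_regex.match("Applying myapp.0002_add_field... OK")
print(m.group('app_name'), m.group('migration_name'), m.group('success'))
# myapp 0002_add_field OK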
edx/edx-django-release-util | release_util/management/commands/__init__.py | MigrationSession.__apply | def __apply(self, migration=None, run_all=False):
"""
If a migration is supplied, runs that migration and appends to state.
If run_all==True, runs all migrations.
Raises a ValueError if neither "migration" nor "run_all" are provided.
"""
out = StringIO()
trace = None
migrate_kwargs = {
'interactive': False,
'stdout': out,
'database': self._database_name,
}
if migration is not None:
migrate_kwargs.update({
'app_label': migration[0],
'migration_name': migration[1],
})
elif not run_all:
raise ValueError('Either a migration must be provided or "run_all" must be True')
start = self._timer()
try:
call_command("migrate", **migrate_kwargs)
except Exception:
trace = ''.join(traceback.format_exception(*sys.exc_info()))
finally:
end = self._timer()
successes, failure = self._parse_migrate_output(out.getvalue())
self._migration_state.append({
'database': self._database_name,
'migration': 'all' if run_all else (migration[0], migration[1]),
'duration': end - start,
'output': _remove_escape_characters(out.getvalue()),
'succeeded_migrations': successes, # [(app, migration), ...]
'failed_migration': failure, # (app, migration)
'traceback': trace,
'succeeded': failure is None and trace is None,
})
if failure is not None:
raise CommandError("Migration failed for app '{}' - migration '{}'.\n".format(*failure))
elif trace is not None:
raise CommandError("Migrations failed unexpectedly. See self.state['traceback'] for details.") | python | def __apply(self, migration=None, run_all=False):
"""
If a migration is supplied, runs that migration and appends to state.
If run_all==True, runs all migrations.
Raises a ValueError if neither "migration" nor "run_all" are provided.
"""
out = StringIO()
trace = None
migrate_kwargs = {
'interactive': False,
'stdout': out,
'database': self._database_name,
}
if migration is not None:
migrate_kwargs.update({
'app_label': migration[0],
'migration_name': migration[1],
})
elif not run_all:
raise ValueError('Either a migration must be provided or "run_all" must be True')
start = self._timer()
try:
call_command("migrate", **migrate_kwargs)
except Exception:
trace = ''.join(traceback.format_exception(*sys.exc_info()))
finally:
end = self._timer()
successes, failure = self._parse_migrate_output(out.getvalue())
self._migration_state.append({
'database': self._database_name,
'migration': 'all' if run_all else (migration[0], migration[1]),
'duration': end - start,
'output': _remove_escape_characters(out.getvalue()),
'succeeded_migrations': successes, # [(app, migration), ...]
'failed_migration': failure, # (app, migration)
'traceback': trace,
'succeeded': failure is None and trace is None,
})
if failure is not None:
raise CommandError("Migration failed for app '{}' - migration '{}'.\n".format(*failure))
elif trace is not None:
raise CommandError("Migrations failed unexpectedly. See self.state['traceback'] for details.") | [
"def",
"__apply",
"(",
"self",
",",
"migration",
"=",
"None",
",",
"run_all",
"=",
"False",
")",
":",
"out",
"=",
"StringIO",
"(",
")",
"trace",
"=",
"None",
"migrate_kwargs",
"=",
"{",
"'interactive'",
":",
"False",
",",
"'stdout'",
":",
"out",
",",
"'database'",
":",
"self",
".",
"_database_name",
",",
"}",
"if",
"migration",
"is",
"not",
"None",
":",
"migrate_kwargs",
".",
"update",
"(",
"{",
"'app_label'",
":",
"migration",
"[",
"0",
"]",
",",
"'migration_name'",
":",
"migration",
"[",
"1",
"]",
",",
"}",
")",
"elif",
"not",
"run_all",
":",
"raise",
"ValueError",
"(",
"'Either a migration must be provided or \"run_all\" must be True'",
")",
"start",
"=",
"self",
".",
"_timer",
"(",
")",
"try",
":",
"call_command",
"(",
"\"migrate\"",
",",
"*",
"*",
"migrate_kwargs",
")",
"except",
"Exception",
":",
"trace",
"=",
"''",
".",
"join",
"(",
"traceback",
".",
"format_exception",
"(",
"*",
"sys",
".",
"exc_info",
"(",
")",
")",
")",
"finally",
":",
"end",
"=",
"self",
".",
"_timer",
"(",
")",
"successes",
",",
"failure",
"=",
"self",
".",
"_parse_migrate_output",
"(",
"out",
".",
"getvalue",
"(",
")",
")",
"self",
".",
"_migration_state",
".",
"append",
"(",
"{",
"'database'",
":",
"self",
".",
"_database_name",
",",
"'migration'",
":",
"'all'",
"if",
"run_all",
"else",
"(",
"migration",
"[",
"0",
"]",
",",
"migration",
"[",
"1",
"]",
")",
",",
"'duration'",
":",
"end",
"-",
"start",
",",
"'output'",
":",
"_remove_escape_characters",
"(",
"out",
".",
"getvalue",
"(",
")",
")",
",",
"'succeeded_migrations'",
":",
"successes",
",",
"# [(app, migration), ...]",
"'failed_migration'",
":",
"failure",
",",
"# (app, migration)",
"'traceback'",
":",
"trace",
",",
"'succeeded'",
":",
"failure",
"is",
"None",
"and",
"trace",
"is",
"None",
",",
"}",
")",
"if",
"failure",
"is",
"not",
"None",
":",
"raise",
"CommandError",
"(",
"\"Migration failed for app '{}' - migration '{}'.\\n\"",
".",
"format",
"(",
"*",
"failure",
")",
")",
"elif",
"trace",
"is",
"not",
"None",
":",
"raise",
"CommandError",
"(",
"\"Migrations failed unexpectedly. See self.state['traceback'] for details.\"",
")"
] | If a migration is supplied, runs that migration and appends to state.
If run_all==True, runs all migrations.
Raises a ValueError if neither "migration" nor "run_all" are provided. | train | https://github.com/edx/edx-django-release-util/blob/de0fde41d6a19885ab7dc309472b94fd0fccbc1d/release_util/management/commands/__init__.py#L247-L292 |
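Capturing a management command's console output in memory, as __apply does, is plain Django usage and works standalone; the app and migration names below are placeholders:

from io import StringIO
from django.core.management import call_command

out = StringIO()
# Run a single target migration (plus dependencies) and collect the
# output for parsing instead of printing it to the terminal.
call_command('migrate', app_label='myapp', migration_name='0002',
             interactive=False, stdout=out)
print(out.getvalue())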
edx/edx-django-release-util | release_util/management/commands/__init__.py | MigrationSession.apply | def apply(self):
"""
Applies all migrations that have been added.
Note that some migrations depend on others, so you might end up
running more than one.
"""
if self.__closed:
raise MigrationSessionError("Can't apply applied session")
try:
while self._to_apply:
self.__apply(migration=self._to_apply.pop(0))
except:
raise
finally:
self.__closed = True | python | Applies all migrations that have been added.
Note that some migrations depend on others, so you might end up
running more than one. | train | https://github.com/edx/edx-django-release-util/blob/de0fde41d6a19885ab7dc309472b94fd0fccbc1d/release_util/management/commands/__init__.py#L294-L308 |
edx/edx-django-release-util | release_util/management/commands/__init__.py | MigrationSession.apply_all | def apply_all(self):
"""
Applies all Django model migrations at once, recording the result.
"""
if self.__closed:
raise MigrationSessionError("Can't apply applied session")
if self._to_apply:
raise MigrationSessionError("Can't apply_all with migrations added to session")
try:
self.__apply(run_all=True)
except:
raise
finally:
self.__closed = True | python | Applies all Django model migrations at once, recording the result. | train | https://github.com/edx/edx-django-release-util/blob/de0fde41d6a19885ab7dc309472b94fd0fccbc1d/release_util/management/commands/__init__.py#L310-L324 |
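A plausible call sequence for the session class these methods belong to — the constructor arguments and the public state attribute are assumptions, since neither appears in these records:

# Hypothetical construction; the real MigrationSession signature is not shown above.
session = MigrationSession(database_name='default')
unapplied, current_state = session.list_migrations()
session.apply_all()      # a session may only be applied once
print(session.state)     # per-run duration, output, and success flags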
InfoAgeTech/django-core | django_core/mail/sending.py | send_email_from_template | def send_email_from_template(to_email, from_email, subject,
markdown_template=None,
text_template=None, html_template=None,
fail_silently=False, context=None,
**kwargs):
"""Send an email from a template.
:param to_email: the email address to send the email to
:param from_email: the email address the email will be from
:param subject: the subject of the email
:param markdown_template: the markdown syntax template to use for the
email. If provided, this will generate both the text and html versions
of the email. You must have the "markdown" library installed in order
to use this. pip install markdown.
:param text_template: the template for the text version of the email. This
can be omitted if the markdown_template is provided.
:param html_template: the template for the html version of the email. This
can be omitted if the markdown_template is provided.
:param context: the context for the email templates
"""
return send_emails_from_template(
to_emails=[to_email],
from_email=from_email,
subject=subject,
markdown_template=markdown_template,
text_template=text_template,
html_template=html_template,
fail_silently=fail_silently,
context=context,
**kwargs
) | python | Send an email from a template.
:param to_email: the email address to send the email to
:param from_email: the email address the email will be from
:param subject: the subject of the email
:param markdown_template: the markdown syntax template to use for the
email. If provided, this will generate both the text and html versions
of the email. You must have the "markdown" library installed in order
to use this. pip install markdown.
:param text_template: the template for the text version of the email. This
can be omitted if the markdown_template is provided.
:param html_template: the template for the html version of the email. This
can be omitted if the markdown_template is provided.
:param context: the context for the email templates | train | https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/mail/sending.py#L9-L39 |
InfoAgeTech/django-core | django_core/mail/sending.py | send_emails_from_template | def send_emails_from_template(to_emails, from_email, subject,
markdown_template=None, text_template=None,
html_template=None, fail_silently=False,
context=None, attachments=None, **kwargs):
"""Send many emails from single template. Each email address listed in the
``to_emails`` will receive an separate email.
:param to_emails: list of email address to send the email to
:param from_email: the email address the email will be from
:param subject: the subject of the email
:param markdown_template: the markdown syntax template to use for the
email. If provided, this will generate both the text and html versions
of the email. You must have the "markdown" library installed in order
to use this. pip install markdown.
:param text_template: the template for the text version of the email. This
can be omitted if the markdown_template is provided.
:param html_template: the template for the html version of the email. This
can be omitted if the markdown_template is provided.
:param context: the context for the email templates
:param attachments: list of additional attachments to add to the email
(example: email.mime.image.MIMEImage object). The attachments will be
added to each email sent.
"""
if not to_emails:
return
if context is None:
context = {}
if markdown_template:
try:
from markdown import markdown
except ImportError:
raise ImportError(
'The application is attempting to send an email by using the '
'"markdown" library, but markdown is not installed. Please '
'install it. See: '
'http://pythonhosted.org/Markdown/install.html'
)
base_html_template = getattr(settings,
'CORE_BASE_HTML_EMAIL_TEMPLATE',
'django_core/mail/base_email.html')
text_content = render_to_string(markdown_template, context)
context['email_content'] = markdown(text_content)
html_content = render_to_string(base_html_template, context)
else:
text_content = render_to_string(text_template, context)
html_content = render_to_string(html_template, context)
emails = []
for email_address in to_emails:
email = EmailMultiAlternatives(
subject=subject,
body=text_content,
from_email=from_email,
to=[email_address],
alternatives=[(html_content, 'text/html')]
)
if attachments:
email.mixed_subtype = 'related'
for attachment in attachments:
email.attach(attachment)
emails.append(email)
connection = mail.get_connection()
connection.open()
connection.send_messages(emails)
connection.close() | python | Send many emails from a single template. Each email address listed in the
``to_emails`` will receive a separate email.
:param to_emails: list of email address to send the email to
:param from_email: the email address the email will be from
:param subject: the subject of the email
:param markdown_template: the markdown syntax template to use for the
email. If provided, this will generate both the text and html versions
of the email. You must have the "markdown" library installed in order
to use this. pip install markdown.
:param text_template: the template for the text version of the email. This
can be omitted if the markdown_template is provided.
:param html_template: the template for the html version of the email. This
can be omitted if the markdown_template is provided.
:param context: the context for the email templates
:param attachments: list of additional attachments to add to the email
(example: email.mime.image.MIMEImage object). The attachments will be
added to each email sent. | train | https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/mail/sending.py#L42-L116 |
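A usage sketch for the function above; addresses and template paths are placeholders:

send_emails_from_template(
    to_emails=['a@example.com', 'b@example.com'],
    from_email='noreply@example.com',
    subject='Welcome!',
    markdown_template='emails/welcome.md',  # placeholder template path
    context={'first_name': 'Ada'},
)

Each address receives its own message, but all messages are flushed over a single connection obtained from mail.get_connection().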
MarcoFavorito/flloat | flloat/syntax/pl.py | PLFormula.all_models | def all_models(self, alphabet: _Alphabet) -> Set[PLInterpretation]:
"""Find all the possible interpretations given a set of symbols"""
all_possible_interpretations = alphabet.powerset().symbols
all_models = set()
for i in all_possible_interpretations:
# compute current Interpretation, considering False
# all propositional symbols not present in current interpretation
current_interpretation = PLInterpretation(i)
if self.truth(current_interpretation):
all_models.add(current_interpretation)
self._all_models = all_models
return all_models | python | Find all the possible interpretations given a set of symbols | train | https://github.com/MarcoFavorito/flloat/blob/5e6de1bea444b68d46d288834031860a8b2f8c2d/flloat/syntax/pl.py#L30-L43 |
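The powerset enumeration delegated to alphabet.powerset() can be reproduced with itertools; a generic sketch over plain strings, leaving out the flloat Symbol/PLInterpretation wrappers:

from itertools import chain, combinations

def powerset(symbols):
    s = list(symbols)
    return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))

# 2**n candidate interpretations for n symbols; symbols absent from a
# subset are implicitly False, exactly as in all_models above.
for subset in powerset(['a', 'b']):
    print(set(subset))  # set(), {'a'}, {'b'}, {'a', 'b'}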
MarcoFavorito/flloat | flloat/syntax/pl.py | PLFormula.minimal_models | def minimal_models(self, alphabet: _Alphabet) -> Set[PLInterpretation]:
"""Find models of min size (i.e. the less number of proposition to True).
Very trivial (and inefficient) algorithm: BRUTE FORCE on all the possible interpretations."""
models = self.all_models(alphabet)
minimal_models = set()
for m in models:
min_m = m
for m1 in models:
if min_m.true_propositions.issuperset(m1.true_propositions):
min_m = m1
minimal_models.add(min_m)
return minimal_models | python | Find models of min size (i.e. the least number of propositions set to True).
Very trivial (and inefficient) algorithm: BRUTE FORCE on all the possible interpretations. | train | https://github.com/MarcoFavorito/flloat/blob/5e6de1bea444b68d46d288834031860a8b2f8c2d/flloat/syntax/pl.py#L46-L59 |
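The nested loop is an O(n^2) scan that replaces each model by any model whose true-proposition set it contains; traced on set literals instead of PLInterpretation objects:

models = [{'a', 'b'}, {'a'}, {'b'}]
minimal = set()
for m in models:
    min_m = m
    for m1 in models:
        if min_m.issuperset(m1):  # m1 is no larger than min_m
            min_m = m1
    minimal.add(frozenset(min_m))
print(minimal)  # {frozenset({'a'}), frozenset({'b'})} -- {'a', 'b'} is pruned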
InfoAgeTech/django-core | django_core/db/models/mixins/tokens.py | AbstractTokenModel.save | def save(self, *args, **kwargs):
"""Make sure token is added."""
self.save_prep(instance_or_instances=self)
return super(AbstractTokenModel, self).save(*args, **kwargs) | python | Make sure token is added. | train | https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/db/models/mixins/tokens.py#L17-L20 |
InfoAgeTech/django-core | django_core/db/models/mixins/tokens.py | AbstractTokenModel.save_prep | def save_prep(cls, instance_or_instances):
"""Preprocess the object before the object is saved. This
automatically gets called when the save method gets called.
"""
instances = make_obj_list(instance_or_instances)
tokens = set(cls.objects.get_available_tokens(
count=len(instances),
token_length=cls.token_length
))
for instance in instances:
if not instance.token:
instance.token = tokens.pop()
super(AbstractTokenModel, cls).save_prep(
instance_or_instances=instances
) | python | Preprocess the object before the object is saved. This
automatically gets called when the save method gets called. | train | https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/db/models/mixins/tokens.py#L23-L40 |
MarcoFavorito/flloat | flloat/base/Formula.py | BinaryOperator._popup | def _popup(self):
"""recursively find commutative binary operator
among child formulas and pop up them at the same level"""
res = ()
for child in self.formulas:
if type(child) == type(self):
superchilds = child.formulas
res += superchilds
else:
res += (child, )
return tuple(res) | python | recursively find commutative binary operator
among child formulas and pop them up to the same level | train | https://github.com/MarcoFavorito/flloat/blob/5e6de1bea444b68d46d288834031860a8b2f8c2d/flloat/base/Formula.py#L80-L90 |
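The effect of _popup is one level of associative flattening: children of the same operator type are replaced by their own children. A plain-Python sketch of the same pass:

class And:
    def __init__(self, *formulas):
        self.formulas = formulas

def popup(node):
    res = ()
    for child in node.formulas:
        if type(child) == type(node):
            res += child.formulas  # lift same-typed grandchildren one level up
        else:
            res += (child,)
    return res

print(popup(And(And('a', 'b'), 'c')))  # ('a', 'b', 'c')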
InfoAgeTech/django-core | django_core/views/mixins/generic.py | GenericObjectViewMixin.get_content_object_url | def get_content_object_url(self):
"""Gets the absolute url for the content object."""
if (self.content_object and
hasattr(self.content_object, 'get_absolute_url')):
return self.content_object.get_absolute_url()
return None | python | Gets the absolute url for the content object. | train | https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/views/mixins/generic.py#L49-L55 |
MarcoFavorito/flloat | flloat/parser/ldlf.py | LDLfLexer.t_ATOM | def t_ATOM(self, t):
r'[a-zA-Z_][a-zA-Z_0-9]*'
t.type = LDLfLexer.reserved.get(t.value, 'ATOM') # Check for reserved words
return t | python | r'[a-zA-Z_][a-zA-Z_0-9]* | train | https://github.com/MarcoFavorito/flloat/blob/5e6de1bea444b68d46d288834031860a8b2f8c2d/flloat/parser/ldlf.py#L63-L66 |
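The lookup in t_ATOM is the standard PLY idiom for keywords: match the broad identifier pattern first, then reclassify the token if its text is reserved. The concrete keyword table of LDLfLexer is not shown here; a sketch of what such a mapping typically contains:

# Hypothetical contents; the real LDLfLexer.reserved dict may differ.
reserved = {
    'true': 'TRUE',
    'false': 'FALSE',
    'tt': 'TT',
    'ff': 'FF',
    'end': 'END',
    'last': 'LAST',
}
# reserved.get(t.value, 'ATOM') keeps ordinary identifiers as ATOM
# tokens while promoting keywords to their dedicated token types.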
MarcoFavorito/flloat | flloat/parser/ldlf.py | LDLfParser.p_temp_formula | def p_temp_formula(self, p):
"""temp_formula : temp_formula EQUIVALENCE temp_formula
| temp_formula IMPLIES temp_formula
| temp_formula OR temp_formula
| temp_formula AND temp_formula
| BOXLSEPARATOR path BOXRSEPARATOR temp_formula
| DIAMONDLSEPARATOR path DIAMONDRSEPARATOR temp_formula
| NOT temp_formula
| TT
| FF
| END
| LAST"""
if len(p) == 2:
if p[1] == Symbols.LOGICAL_TRUE.value:
p[0] = LDLfLogicalTrue()
elif p[1] == Symbols.LOGICAL_FALSE.value:
p[0] = LDLfLogicalFalse()
elif p[1] == Symbols.END.value:
p[0] = LDLfEnd()
elif p[1] == Symbols.LAST.value:
p[0] = LDLfLast()
else:
p[0] = LDLfDiamond(RegExpPropositional(p[1]), LDLfLogicalTrue())
elif len(p) == 3:
p[0] = LDLfNot(p[2])
elif len(p) == 4:
l, o, r = p[1:]
if o == Symbols.EQUIVALENCE.value:
p[0] = LDLfEquivalence([l, r])
elif o == Symbols.IMPLIES.value:
p[0] = LDLfImplies([l, r])
elif o == Symbols.OR.value:
p[0] = LDLfOr([l, r])
elif o == Symbols.AND.value:
p[0] = LDLfAnd([l, r])
else:
raise ValueError
elif len(p) == 5:
if p[1] == Symbols.ALWAYS_BRACKET_LEFT.value:
p[0] = LDLfBox(p[2], p[4])
elif p[1] == Symbols.EVENTUALLY_BRACKET_LEFT.value:
p[0] = LDLfDiamond(p[2], p[4])
else:
raise ValueError
else:
raise ValueError | python | temp_formula : temp_formula EQUIVALENCE temp_formula
| temp_formula IMPLIES temp_formula
| temp_formula OR temp_formula
| temp_formula AND temp_formula
| BOXLSEPARATOR path BOXRSEPARATOR temp_formula
| DIAMONDLSEPARATOR path DIAMONDRSEPARATOR temp_formula
| NOT temp_formula
| TT
| FF
| END
| LAST | train | https://github.com/MarcoFavorito/flloat/blob/5e6de1bea444b68d46d288834031860a8b2f8c2d/flloat/parser/ldlf.py#L92-L137 |
MarcoFavorito/flloat | flloat/parser/ldlf.py | LDLfParser.p_path | def p_path(self, p):
"""path : path UNION path
| path SEQ path
| path STAR
| temp_formula TEST
| propositional"""
if len(p)==2:
p[0] = RegExpPropositional(p[1])
elif len(p)==3:
if p[2]==Symbols.PATH_TEST.value:
p[0] = RegExpTest(p[1])
elif p[2] == Symbols.PATH_STAR.value:
p[0] = RegExpStar(p[1])
else:
raise ValueError
elif len(p)==4:
if p[2]==Symbols.PATH_UNION.value:
p[0] = RegExpUnion([p[1], p[3]])
elif p[2] == Symbols.PATH_SEQUENCE.value:
p[0] = RegExpSequence([p[1], p[3]])
else:
raise ValueError
else:
raise ValueError | python | path : path UNION path
| path SEQ path
| path STAR
| temp_formula TEST
| propositional | train | https://github.com/MarcoFavorito/flloat/blob/5e6de1bea444b68d46d288834031860a8b2f8c2d/flloat/parser/ldlf.py#L143-L167 |
MarcoFavorito/flloat | flloat/parser/ldlf.py | LDLfParser.p_propositional | def p_propositional(self, p):
"""propositional : propositional EQUIVALENCE propositional
| propositional IMPLIES propositional
| propositional OR propositional
| propositional AND propositional
| NOT propositional
| FALSE
| TRUE
| ATOM"""
if len(p)==4:
if p[2] == Symbols.EQUIVALENCE.value:
p[0] = PLEquivalence([p[1], p[3]])
elif p[2] == Symbols.IMPLIES.value:
p[0] = PLImplies([p[1], p[3]])
elif p[2] == Symbols.OR.value:
p[0] = PLOr([p[1], p[3]])
elif p[2] == Symbols.AND.value:
p[0] = PLAnd([p[1], p[3]])
else:
raise ValueError
# else:
# p[0] = p[2]
elif len(p)==3:
p[0] = PLNot(p[2])
elif len(p)==2:
if p[1]==Symbols.TRUE.value:
p[0] = PLTrue()
elif p[1]==Symbols.FALSE.value:
p[0] = PLFalse()
else:
p[0] = PLAtomic(Symbol(p[1]))
else:
raise ValueError | python | propositional : propositional EQUIVALENCE propositional
| propositional IMPLIES propositional
| propositional OR propositional
| propositional AND propositional
| NOT propositional
| FALSE
| TRUE
| ATOM | train | https://github.com/MarcoFavorito/flloat/blob/5e6de1bea444b68d46d288834031860a8b2f8c2d/flloat/parser/ldlf.py#L170-L202 |
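These grammar rules drive an ordinary PLY parser in which p[0] receives the synthesized value and p[1:] the matched right-hand-side symbols, so len(p) distinguishes the alternatives of one rule. Assuming the usual flloat entry point (the parser object is callable), usage looks roughly like:

from flloat.parser.ldlf import LDLfParser

parser = LDLfParser()
# "<true*>end": after any finite number of steps the trace may end.
formula = parser("<true*>end")
print(formula)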
divio/aldryn-sites | aldryn_sites/utils.py | get_redirect_url | def get_redirect_url(current_url, config, https=None):
"""
priorities are (primary domain and aliases are treated the same):
exact redirect match > exact alias match > pattern redirect match > pattern alias match
:param current_url: the url that is being called
:param config: redirect configuration for this url
:param https: whether redirects should go to https (None keeps the current scheme)
:return: None for no redirect or an url to redirect to
"""
primary_domain = config['domain']
domains = set(config.get('aliases', [])) | set((primary_domain,))
domain_patterns = compile_regexes(domains)
redirect_domains = set(config.get('redirects', []))
redirect_domain_patterns = compile_regexes(redirect_domains)
url = yurl.URL(current_url)
if https is None:
target_scheme = url.scheme
else:
target_scheme = 'https' if https else 'http'
redirect_url = None
if url.is_host_ip() or url.is_host_ipv4():
# don't redirect for ips
return
if url.host in domains and url.scheme == target_scheme:
# exact host and scheme match: Nothing to do
return
if url.host in domains and url.scheme != target_scheme:
# exact alias match, but scheme mismatch: redirect to changed scheme
redirect_url = url.replace(scheme=target_scheme)
elif url.host in redirect_domains:
# exact redirect match: redirect
redirect_url = url.replace(scheme=target_scheme, host=primary_domain)
elif url.host in domains:
# exact alias match: nothing to do
return
elif match_any(redirect_domain_patterns, url.host):
# pattern redirect match: redirect
redirect_url = url.replace(scheme=target_scheme, host=primary_domain)
elif match_any(domain_patterns, url.host):
# pattern alias match
if url.scheme != target_scheme:
# pattern alias match and scheme mismatch: redirect
redirect_url = url.replace(scheme=target_scheme)
else:
return
if redirect_url:
return '{}'.format(redirect_url) | python | priorities are (primary domain and aliases are treated the same):
exact redirect match > exact alias match > pattern redirect match > pattern alias match
:param current_url: the url that is being called
:param config: redirect configuration for this url
:param https: whether redirects should go to https (None keeps the current scheme)
:return: None for no redirect or a url to redirect to | [
"priorities",
"are",
"(",
"primary",
"domain",
"and",
"aliases",
"are",
"treated",
"the",
"same",
")",
":",
"exact",
"redirect",
"match",
">",
"exact",
"alias",
"match",
">",
"pattern",
"redirect",
"match",
">",
"pattern",
"alias",
"match",
":",
"param",
"current_url",
":",
"the",
"url",
"that",
"is",
"being",
"called",
":",
"param",
"config",
":",
"redirect",
"configuration",
"for",
"this",
"url",
":",
"param",
"want_https",
":",
"whether",
"redirects",
"should",
"go",
"to",
"https",
"(",
"None",
"keeps",
"the",
"current",
"scheme",
")",
":",
"return",
":",
"None",
"for",
"no",
"redirect",
"or",
"an",
"url",
"to",
"redirect",
"to"
] | train | https://github.com/divio/aldryn-sites/blob/338ed016f5ad046201256f56dde0c19ef144faf0/aldryn_sites/utils.py#L39-L86 |
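A usage sketch for the function above; the domain names and config layout are illustrative, inferred from the code (aliases/redirects entries may be plain hosts or regex patterns):

    config = {
        'domain': 'example.com',                            # primary host
        'aliases': ['www.example.com'],                     # served as-is
        'redirects': ['example.org', r'.*\.example\.org'],  # rewritten to primary
    }
    print(get_redirect_url('http://www.example.com/p', config))  # None: alias, scheme kept
    print(get_redirect_url('http://example.org/p', config))      # 'http://example.com/p'
    print(get_redirect_url('http://example.org/p', config, https=True))  # https redirect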
txomon/abot | abot/cli.py | AsyncCommandMixin.invoke | def invoke(self, ctx):
"""Given a context, this invokes the attached callback (if it exists)
in the right way.
"""
if self.callback is not None:
loop = asyncio.get_event_loop()
return loop.run_until_complete(self.async_invoke(ctx)) | python | def invoke(self, ctx):
"""Given a context, this invokes the attached callback (if it exists)
in the right way.
"""
if self.callback is not None:
loop = asyncio.get_event_loop()
return loop.run_until_complete(self.async_invoke(ctx)) | [
"def",
"invoke",
"(",
"self",
",",
"ctx",
")",
":",
"if",
"self",
".",
"callback",
"is",
"not",
"None",
":",
"loop",
"=",
"asyncio",
".",
"get_event_loop",
"(",
")",
"return",
"loop",
".",
"run_until_complete",
"(",
"self",
".",
"async_invoke",
"(",
"ctx",
")",
")"
] | Given a context, this invokes the attached callback (if it exists)
in the right way. | [
"Given",
"a",
"context",
"this",
"invokes",
"the",
"attached",
"callback",
"(",
"if",
"it",
"exists",
")",
"in",
"the",
"right",
"way",
"."
] | train | https://github.com/txomon/abot/blob/3ac23c6d14965d4608ed13c284ae1a886b462252/abot/cli.py#L43-L49 |
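A rough wiring sketch for the mixin above; the AsyncCommand class name is hypothetical, and async_invoke is assumed to await the (possibly coroutine) callback, as defined elsewhere in abot/cli.py:

    import click

    class AsyncCommand(AsyncCommandMixin, click.Command):
        pass

    @click.command(cls=AsyncCommand)
    async def fetch():
        # invoke() will run this coroutine to completion on the event loop.
        ...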
txomon/abot | abot/cli.py | AsyncCommandMixin.get_help_option | def get_help_option(self, ctx):
"""Returns the help option object."""
help_options = self.get_help_option_names(ctx)
if not help_options or not self.add_help_option:
return
def show_help(ctx, param, value):
import abot.bot
if value and not ctx.resilient_parsing:
event: abot.bot.MessageEvent = abot.bot.current_event.get()
tbd_tasks.append(event.reply(ctx.get_help()))
ctx.exit()
return click.core.Option(help_options, is_flag=True,
is_eager=True, expose_value=False,
callback=show_help,
help='Show this message and exit.') | python | def get_help_option(self, ctx):
"""Returns the help option object."""
help_options = self.get_help_option_names(ctx)
if not help_options or not self.add_help_option:
return
def show_help(ctx, param, value):
import abot.bot
if value and not ctx.resilient_parsing:
event: abot.bot.MessageEvent = abot.bot.current_event.get()
tbd_tasks.append(event.reply(ctx.get_help()))
ctx.exit()
return click.core.Option(help_options, is_flag=True,
is_eager=True, expose_value=False,
callback=show_help,
help='Show this message and exit.') | [
"def",
"get_help_option",
"(",
"self",
",",
"ctx",
")",
":",
"help_options",
"=",
"self",
".",
"get_help_option_names",
"(",
"ctx",
")",
"if",
"not",
"help_options",
"or",
"not",
"self",
".",
"add_help_option",
":",
"return",
"def",
"show_help",
"(",
"ctx",
",",
"param",
",",
"value",
")",
":",
"import",
"abot",
".",
"bot",
"if",
"value",
"and",
"not",
"ctx",
".",
"resilient_parsing",
":",
"event",
":",
"abot",
".",
"bot",
".",
"MessageEvent",
"=",
"abot",
".",
"bot",
".",
"current_event",
".",
"get",
"(",
")",
"tbd_tasks",
".",
"append",
"(",
"event",
".",
"reply",
"(",
"ctx",
".",
"get_help",
"(",
")",
")",
")",
"ctx",
".",
"exit",
"(",
")",
"return",
"click",
".",
"core",
".",
"Option",
"(",
"help_options",
",",
"is_flag",
"=",
"True",
",",
"is_eager",
"=",
"True",
",",
"expose_value",
"=",
"False",
",",
"callback",
"=",
"show_help",
",",
"help",
"=",
"'Show this message and exit.'",
")"
] | Returns the help option object. | [
"Returns",
"the",
"help",
"option",
"object",
"."
] | train | https://github.com/txomon/abot/blob/3ac23c6d14965d4608ed13c284ae1a886b462252/abot/cli.py#L64-L80 |
InfoAgeTech/django-core | django_core/forms/widgets.py | MultipleDecimalInputWidget.get_widget_css_class | def get_widget_css_class(self, attrs):
"""Gets the class for the widget."""
size_class = 'size-{0}'.format(self.num_inputs)
if 'class' in attrs:
attrs['class'] += ' {0}'.format(size_class)
else:
attrs['class'] = size_class | python | def get_widget_css_class(self, attrs):
"""Gets the class for the widget."""
size_class = 'size-{0}'.format(self.num_inputs)
if 'class' in attrs:
attrs['class'] += ' {0}'.format(size_class)
else:
attrs['class'] = size_class | [
"def",
"get_widget_css_class",
"(",
"self",
",",
"attrs",
")",
":",
"size_class",
"=",
"'size-{0}'",
".",
"format",
"(",
"self",
".",
"num_inputs",
")",
"if",
"'class'",
"in",
"attrs",
":",
"attrs",
"[",
"'class'",
"]",
"+=",
"' {0}'",
".",
"format",
"(",
"size_class",
")",
"else",
":",
"attrs",
"[",
"'class'",
"]",
"=",
"size_class"
] | Gets the class for the widget. | [
"Gets",
"the",
"class",
"for",
"the",
"widget",
"."
] | train | https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/forms/widgets.py#L78-L85 |
MarcoFavorito/flloat | flloat/parser/pl.py | PLLexer.t_ATOM | def t_ATOM(self, t):
r'[a-zA-Z_][a-zA-Z_0-9]*'
t.type = PLLexer.reserved.get(t.value, 'ATOM') # Check for reserved words
return t | python | def t_ATOM(self, t):
r'[a-zA-Z_][a-zA-Z_0-9]*'
t.type = PLLexer.reserved.get(t.value, 'ATOM') # Check for reserved words
return t | [
"def",
"t_ATOM",
"(",
"self",
",",
"t",
")",
":",
"t",
".",
"type",
"=",
"PLLexer",
".",
"reserved",
".",
"get",
"(",
"t",
".",
"value",
",",
"'ATOM'",
")",
"# Check for reserved words",
"return",
"t"
] | r'[a-zA-Z_][a-zA-Z_0-9]* | [
"r",
"[",
"a",
"-",
"zA",
"-",
"Z_",
"]",
"[",
"a",
"-",
"zA",
"-",
"Z_0",
"-",
"9",
"]",
"*"
] | train | https://github.com/MarcoFavorito/flloat/blob/5e6de1bea444b68d46d288834031860a8b2f8c2d/flloat/parser/pl.py#L41-L44 |
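The rule above is the standard PLY idiom of reclassifying identifiers against a reserved-word table; a self-contained sketch of the same pattern, with made-up reserved words:

    import ply.lex as lex

    reserved = {'true': 'TRUE', 'false': 'FALSE'}
    tokens = ['ATOM'] + list(reserved.values())

    def t_ATOM(t):
        r'[a-zA-Z_][a-zA-Z_0-9]*'
        t.type = reserved.get(t.value, 'ATOM')  # reclassify reserved words
        return t

    t_ignore = ' \t'

    def t_error(t):
        t.lexer.skip(1)

    lexer = lex.lex()
    lexer.input('true foo')
    print([tok.type for tok in lexer])  # ['TRUE', 'ATOM']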
MarcoFavorito/flloat | flloat/parser/pl.py | PLParser.p_formula_atom | def p_formula_atom(self, p):
"""formula : ATOM
| TRUE
| FALSE"""
if p[1]==Symbols.TRUE.value:
p[0] = PLTrue()
elif p[1]==Symbols.FALSE.value:
p[0] = PLFalse()
else:
p[0] = PLAtomic(Symbol(p[1])) | python | def p_formula_atom(self, p):
"""formula : ATOM
| TRUE
| FALSE"""
if p[1]==Symbols.TRUE.value:
p[0] = PLTrue()
elif p[1]==Symbols.FALSE.value:
p[0] = PLFalse()
else:
p[0] = PLAtomic(Symbol(p[1])) | [
"def",
"p_formula_atom",
"(",
"self",
",",
"p",
")",
":",
"if",
"p",
"[",
"1",
"]",
"==",
"Symbols",
".",
"TRUE",
".",
"value",
":",
"p",
"[",
"0",
"]",
"=",
"PLTrue",
"(",
")",
"elif",
"p",
"[",
"1",
"]",
"==",
"Symbols",
".",
"FALSE",
".",
"value",
":",
"p",
"[",
"0",
"]",
"=",
"PLFalse",
"(",
")",
"else",
":",
"p",
"[",
"0",
"]",
"=",
"PLAtomic",
"(",
"Symbol",
"(",
"p",
"[",
"1",
"]",
")",
")"
] | formula : ATOM
| TRUE
| FALSE | [
"formula",
":",
"ATOM",
"|",
"TRUE",
"|",
"FALSE"
] | train | https://github.com/MarcoFavorito/flloat/blob/5e6de1bea444b68d46d288834031860a8b2f8c2d/flloat/parser/pl.py#L60-L69 |
InfoAgeTech/django-core | django_core/db/models/managers.py | BaseManager.get_or_none | def get_or_none(self, prefetch_related=None, select_related=False,
**kwargs):
"""Gets a single object based on kwargs or None if one is not found.
:param prefetch_related: list or tuple of fields to prefetch for an
object. This takes precedence over select_related.
Example:
>> get_or_none(prefetch_related=['some_field',
... 'some_field__some_fields_field'])
See:
https://docs.djangoproject.com/en/dev/ref/models/querysets/#prefetch-related
:param select_related: boolean when set to True will follow foreign-key
relationships to prevent many db queries when looping over foreign
keys. If this value is boolean True, the immediate foreign keys
will be selected, but not foreign keys of foreign keys. If this
value is set to a list or tuple, then those will be the fields to
to follow and select.
Example:
>> # Both of the following are valid
>> get_or_none(select_related=True)
>> get_or_none(select_related=['some_field',
... 'some_field__some_fields_field'])
See: https://docs.djangoproject.com/en/dev/ref/models/querysets/
:param kwargs: list of fields and their values to retrieve.
"""
try:
if prefetch_related:
query_set = self.prefetch_related(*prefetch_related)
elif select_related == True:
query_set = self.select_related()
elif isinstance(select_related, (list, tuple)):
query_set = self.select_related(*select_related)
else:
query_set = self
return query_set.get(**kwargs)
except self.model.DoesNotExist:
return None | python | def get_or_none(self, prefetch_related=None, select_related=False,
**kwargs):
"""Gets a single object based on kwargs or None if one is not found.
:param prefetch_related: list or tuple of fields to prefetch for an
object. This takes precedence over select_related.
Example:
>> get_or_none(prefetch_related=['some_field',
... 'some_field__some_fields_field'])
See:
https://docs.djangoproject.com/en/dev/ref/models/querysets/#prefetch-related
:param select_related: boolean when set to True will follow foreign-key
relationships to prevent many db queries when looping over foreign
keys. If this value is boolean True, the immediate foreign keys
will be selected, but not foreign keys of foreign keys. If this
value is set to a list or tuple, then those will be the fields to
to follow and select.
Example:
>> # Both of the following are valid
>> get_or_none(select_related=True)
>> get_or_none(select_related=['some_field',
... 'some_field__some_fields_field'])
See: https://docs.djangoproject.com/en/dev/ref/models/querysets/
:param kwargs: list of fields and their values to retrieve.
"""
try:
if prefetch_related:
query_set = self.prefetch_related(*prefetch_related)
elif select_related == True:
query_set = self.select_related()
elif isinstance(select_related, (list, tuple)):
query_set = self.select_related(*select_related)
else:
query_set = self
return query_set.get(**kwargs)
except self.model.DoesNotExist:
return None | [
"def",
"get_or_none",
"(",
"self",
",",
"prefetch_related",
"=",
"None",
",",
"select_related",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"if",
"prefetch_related",
":",
"query_set",
"=",
"self",
".",
"prefetch_related",
"(",
"*",
"prefetch_related",
")",
"elif",
"select_related",
"==",
"True",
":",
"query_set",
"=",
"self",
".",
"select_related",
"(",
")",
"elif",
"isinstance",
"(",
"select_related",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"query_set",
"=",
"self",
".",
"select_related",
"(",
"*",
"select_related",
")",
"else",
":",
"query_set",
"=",
"self",
"return",
"query_set",
".",
"get",
"(",
"*",
"*",
"kwargs",
")",
"except",
"self",
".",
"model",
".",
"DoesNotExist",
":",
"return",
"None"
] | Gets a single object based on kwargs or None if one is not found.
:param prefetch_related: list or tuple of fields to prefetch for an
object. This takes precedence over select_related.
Example:
>> get_or_none(prefetch_related=['some_field',
... 'some_field__some_fields_field'])
See:
https://docs.djangoproject.com/en/dev/ref/models/querysets/#prefetch-related
:param select_related: boolean when set to True will follow foreign-key
relationships to prevent many db queries when looping over foreign
keys. If this value is boolean True, the immediate foreign keys
will be selected, but not foreign keys of foreign keys. If this
value is set to a list or tuple, then those will be the fields to
to follow and select.
Example:
>> # Both of the following are valid
>> get_or_none(select_related=True)
>> get_or_none(select_related=['some_field',
... 'some_field__some_fields_field'])
See: https://docs.djangoproject.com/en/dev/ref/models/querysets/
:param kwargs: list of fields and their values to retrieve. | [
"Gets",
"a",
"single",
"object",
"based",
"on",
"kwargs",
"or",
"None",
"if",
"one",
"is",
"not",
"found",
"."
] | train | https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/db/models/managers.py#L14-L59 |
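A usage sketch for the manager method above (the Book model and its fields are hypothetical):

    # Returns None instead of raising Book.DoesNotExist.
    book = Book.objects.get_or_none(id=42)

    # Follow all immediate foreign keys in one query.
    book = Book.objects.get_or_none(select_related=True, id=42)

    # Or prefetch specific relations instead.
    book = Book.objects.get_or_none(prefetch_related=['authors'], id=42)
    if book is None:
        pass  # handle the missing row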
InfoAgeTech/django-core | django_core/db/models/managers.py | CommonManager.get_by_id_or_404 | def get_by_id_or_404(self, id, **kwargs):
"""Gets by a instance instance r raises a 404 is one isn't found."""
obj = self.get_by_id(id=id, **kwargs)
if obj:
return obj
raise Http404 | python | def get_by_id_or_404(self, id, **kwargs):
"""Gets by a instance instance r raises a 404 is one isn't found."""
obj = self.get_by_id(id=id, **kwargs)
if obj:
return obj
raise Http404 | [
"def",
"get_by_id_or_404",
"(",
"self",
",",
"id",
",",
"*",
"*",
"kwargs",
")",
":",
"obj",
"=",
"self",
".",
"get_by_id",
"(",
"id",
"=",
"id",
",",
"*",
"*",
"kwargs",
")",
"if",
"obj",
":",
"return",
"obj",
"raise",
"Http404"
] | Gets an instance by id or raises a 404 if one isn't found. | [
"Gets",
"by",
"a",
"instance",
"instance",
"r",
"raises",
"a",
"404",
"is",
"one",
"isn",
"t",
"found",
"."
] | train | https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/db/models/managers.py#L71-L78 |
InfoAgeTech/django-core | django_core/db/models/managers.py | CommonManager.bulk_create | def bulk_create(self, objs, *args, **kwargs):
"""Insert many object at once."""
if hasattr(self.model, 'save_prep'):
# Method from AbstractBaseModel. If the model class doesn't
# subclass AbstractBaseModel, then don't call this.
self.model.save_prep(instance_or_instances=objs)
return super(CommonManager, self).bulk_create(objs=objs,
*args,
**kwargs) | python | def bulk_create(self, objs, *args, **kwargs):
"""Insert many object at once."""
if hasattr(self.model, 'save_prep'):
# Method from AbstractBaseModel. If the model class doesn't
# subclass AbstractBaseModel, then don't call this.
self.model.save_prep(instance_or_instances=objs)
return super(CommonManager, self).bulk_create(objs=objs,
*args,
**kwargs) | [
"def",
"bulk_create",
"(",
"self",
",",
"objs",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"hasattr",
"(",
"self",
".",
"model",
",",
"'save_prep'",
")",
":",
"# Method from AbstractBaseModel. If the model class doesn't",
"# subclass AbstractBaseModel, then don't call this.",
"self",
".",
"model",
".",
"save_prep",
"(",
"instance_or_instances",
"=",
"objs",
")",
"return",
"super",
"(",
"CommonManager",
",",
"self",
")",
".",
"bulk_create",
"(",
"objs",
"=",
"objs",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Insert many objects at once. | [
"Insert",
"many",
"object",
"at",
"once",
"."
] | train | https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/db/models/managers.py#L89-L98 |
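A usage sketch for the override above; Article is hypothetical, and save_prep (from this package's AbstractBaseModel) is assumed to prepare each instance before the single INSERT:

    articles = [Article(title='a'), Article(title='b')]
    Article.objects.bulk_create(articles)  # save_prep() runs over the batch first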
InfoAgeTech/django-core | django_core/db/models/managers.py | CommonManager.delete_by_ids | def delete_by_ids(self, ids):
"""Delete objects by ids.
:param ids: list of objects ids to delete.
:return: True if objects were deleted. Otherwise, return False if no
objects were found or the delete was not successful.
"""
try:
self.filter(id__in=ids).delete()
return True
except self.model.DoesNotExist:
return False | python | def delete_by_ids(self, ids):
"""Delete objects by ids.
:param ids: list of objects ids to delete.
:return: True if objects were deleted. Otherwise, return False if no
objects were found or the delete was not successful.
"""
try:
self.filter(id__in=ids).delete()
return True
except self.model.DoesNotExist:
return False | [
"def",
"delete_by_ids",
"(",
"self",
",",
"ids",
")",
":",
"try",
":",
"self",
".",
"filter",
"(",
"id__in",
"=",
"ids",
")",
".",
"delete",
"(",
")",
"return",
"True",
"except",
"self",
".",
"model",
".",
"DoesNotExist",
":",
"return",
"False"
] | Delete objects by ids.
:param ids: list of objects ids to delete.
:return: True if objects were deleted. Otherwise, return False if no
objects were found or the delete was not successful. | [
"Delete",
"objects",
"by",
"ids",
"."
] | train | https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/db/models/managers.py#L104-L115 |
InfoAgeTech/django-core | django_core/db/models/managers.py | SlugManager.is_slug_available | def is_slug_available(self, slug, **kwargs):
"""Checks to see if a slug is available. If the slug is already being used
this method returns False. Otherwise, returns True.
"""
try:
self.get(slug=slug, **kwargs)
return False
except self.model.DoesNotExist:
return True | python | def is_slug_available(self, slug, **kwargs):
"""Checks to see if a slug is available. If the slug is already being used
this method returns False. Otherwise, returns True.
"""
try:
self.get(slug=slug, **kwargs)
return False
except self.model.DoesNotExist:
return True | [
"def",
"is_slug_available",
"(",
"self",
",",
"slug",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"self",
".",
"get",
"(",
"slug",
"=",
"slug",
",",
"*",
"*",
"kwargs",
")",
"return",
"False",
"except",
"self",
".",
"model",
".",
"DoesNotExist",
":",
"return",
"True"
] | Checks to see if a slug is available. If the slug is already being used
this method returns False. Otherwise, returns True. | [
"Checks",
"to",
"see",
"if",
"a",
"slug",
"is",
"available",
".",
"If",
"the",
"slug",
"is",
"already",
"being",
"used",
"this",
"method",
"returns",
"False",
".",
"Otherwise",
"return",
"True",
"."
] | train | https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/db/models/managers.py#L135-L143 |
InfoAgeTech/django-core | django_core/db/models/managers.py | SlugManager.get_next_slug | def get_next_slug(self, slug, **kwargs):
"""Gets the next available slug.
:param slug: the slug to slugify
:param kwargs: additional filter criteria to check for when looking for
a unique slug.
Example:
if the value "my-slug" is already taken, this method will append "-n"
to the end of the slug until the next available slug is found.
"""
original_slug = slug = slugify(slug)
count = 0
while not self.is_slug_available(slug=slug, **kwargs):
count += 1
slug = '{0}-{1}'.format(original_slug, count)
return slug | python | def get_next_slug(self, slug, **kwargs):
"""Gets the next available slug.
:param slug: the slug to slugify
:param kwargs: additional filter criteria to check for when looking for
a unique slug.
Example:
if the value "my-slug" is already taken, this method will append "-n"
to the end of the slug until the next available slug is found.
"""
original_slug = slug = slugify(slug)
count = 0
while not self.is_slug_available(slug=slug, **kwargs):
count += 1
slug = '{0}-{1}'.format(original_slug, count)
return slug | [
"def",
"get_next_slug",
"(",
"self",
",",
"slug",
",",
"*",
"*",
"kwargs",
")",
":",
"original_slug",
"=",
"slug",
"=",
"slugify",
"(",
"slug",
")",
"count",
"=",
"0",
"while",
"not",
"self",
".",
"is_slug_available",
"(",
"slug",
"=",
"slug",
",",
"*",
"*",
"kwargs",
")",
":",
"count",
"+=",
"1",
"slug",
"=",
"'{0}-{1}'",
".",
"format",
"(",
"original_slug",
",",
"count",
")",
"return",
"slug"
] | Gets the next available slug.
:param slug: the slug to slugify
:param kwargs: additional filter criteria to check for when looking for
a unique slug.
Example:
if the value "my-slug" is already taken, this method will append "-n"
to the end of the slug until the next available slug is found. | [
"Gets",
"the",
"next",
"available",
"slug",
"."
] | train | https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/db/models/managers.py#L145-L165 |
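A collision sketch for the helper above (Page is hypothetical; suppose 'my-slug' is taken and 'my-slug-1' is free):

    slug = Page.objects.get_next_slug('My Slug')  # slugified to 'my-slug'
    print(slug)  # 'my-slug-1': suffixed until is_slug_available() passes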
InfoAgeTech/django-core | django_core/db/models/managers.py | TokenManager.get_next_token | def get_next_token(self, length=15, **kwargs):
"""Gets the next available token.
:param length: length of the token
:param kwargs: additional filter criteria to check for when looking for
a unique token.
"""
return self.get_available_tokens(count=1,
token_length=length,
**kwargs)[0] | python | def get_next_token(self, length=15, **kwargs):
"""Gets the next available token.
:param length: length of the token
:param kwargs: additional filter criteria to check for when looking for
a unique token.
"""
return self.get_available_tokens(count=1,
token_length=length,
**kwargs)[0] | [
"def",
"get_next_token",
"(",
"self",
",",
"length",
"=",
"15",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"get_available_tokens",
"(",
"count",
"=",
"1",
",",
"token_length",
"=",
"length",
",",
"*",
"*",
"kwargs",
")",
"[",
"0",
"]"
] | Gets the next available token.
:param length: length of the token
:param kwargs: additional filter criteria to check for when looking for
a unique token. | [
"Gets",
"the",
"next",
"available",
"token",
"."
] | train | https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/db/models/managers.py#L183-L193 |
InfoAgeTech/django-core | django_core/db/models/managers.py | TokenManager.get_available_tokens | def get_available_tokens(self, count=10, token_length=15, **kwargs):
"""Gets a list of available tokens.
:param count: the number of tokens to return.
:param token_length: the length of the tokens. The higher the number
the easier it will be to return a list. If token_length == 1
there's a strong probability that the enough tokens will exist in
the db.
"""
# This is the number of extra tokens to try and retrieve so calls to
# the db can be limited
token_buffer = int(math.ceil(count * .05))
if token_buffer < 5:
token_buffer = 5
available = set([])
while True:
tokens = [random_alphanum(length=token_length)
for t in range(count + token_buffer)]
db_tokens = self.filter(token__in=tokens).values_list('token',
flat=True)
available.update(set(tokens).difference(db_tokens))
if len(available) >= count:
return list(available)[:count] | python | def get_available_tokens(self, count=10, token_length=15, **kwargs):
"""Gets a list of available tokens.
:param count: the number of tokens to return.
:param token_length: the length of the tokens. The higher the number
the easier it will be to return a list. If token_length == 1
there's a strong probability that the enough tokens will exist in
the db.
"""
# This is the number of extra tokens to try and retrieve so calls to
# the db can be limited
token_buffer = int(math.ceil(count * .05))
if token_buffer < 5:
token_buffer = 5
available = set([])
while True:
tokens = [random_alphanum(length=token_length)
for t in range(count + token_buffer)]
db_tokens = self.filter(token__in=tokens).values_list('token',
flat=True)
available.update(set(tokens).difference(db_tokens))
if len(available) >= count:
return list(available)[:count] | [
"def",
"get_available_tokens",
"(",
"self",
",",
"count",
"=",
"10",
",",
"token_length",
"=",
"15",
",",
"*",
"*",
"kwargs",
")",
":",
"# This is the number of extra tokens to try and retrieve so calls to",
"# the db can be limited",
"token_buffer",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"count",
"*",
".05",
")",
")",
"if",
"token_buffer",
"<",
"5",
":",
"token_buffer",
"=",
"5",
"available",
"=",
"set",
"(",
"[",
"]",
")",
"while",
"True",
":",
"tokens",
"=",
"[",
"random_alphanum",
"(",
"length",
"=",
"token_length",
")",
"for",
"t",
"in",
"range",
"(",
"count",
"+",
"token_buffer",
")",
"]",
"db_tokens",
"=",
"self",
".",
"filter",
"(",
"token__in",
"=",
"tokens",
")",
".",
"values_list",
"(",
"'token'",
",",
"flat",
"=",
"True",
")",
"available",
".",
"update",
"(",
"set",
"(",
"tokens",
")",
".",
"difference",
"(",
"db_tokens",
")",
")",
"if",
"len",
"(",
"available",
")",
">=",
"count",
":",
"return",
"list",
"(",
"available",
")",
"[",
":",
"count",
"]"
] | Gets a list of available tokens.
:param count: the number of tokens to return.
:param token_length: the length of the tokens. The higher the number
the easier it will be to return a list. If token_length == 1
there's a strong probability that the enough tokens will exist in
the db. | [
"Gets",
"a",
"list",
"of",
"available",
"tokens",
"."
] | train | https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/db/models/managers.py#L195-L222 |
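A usage sketch for the method above (Invite is a hypothetical model using this TokenManager):

    tokens = Invite.objects.get_available_tokens(count=100, token_length=20)
    assert len(tokens) == 100         # exactly the requested number
    assert len(set(tokens)) == 100    # all distinct and unused in the db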
InfoAgeTech/django-core | django_core/db/models/managers.py | GenericManager.create_generic | def create_generic(self, content_object=None, **kwargs):
"""Create a generic object.
:param content_object: the content object to create a new object for.
"""
if content_object:
kwargs['content_type'] = ContentType.objects.get_for_model(
content_object
)
kwargs['object_id'] = content_object.id
return self.create(**kwargs) | python | def create_generic(self, content_object=None, **kwargs):
"""Create a generic object.
:param content_object: the content object to create a new object for.
"""
if content_object:
kwargs['content_type'] = ContentType.objects.get_for_model(
content_object
)
kwargs['object_id'] = content_object.id
return self.create(**kwargs) | [
"def",
"create_generic",
"(",
"self",
",",
"content_object",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"content_object",
":",
"kwargs",
"[",
"'content_type'",
"]",
"=",
"ContentType",
".",
"objects",
".",
"get_for_model",
"(",
"content_object",
")",
"kwargs",
"[",
"'object_id'",
"]",
"=",
"content_object",
".",
"id",
"return",
"self",
".",
"create",
"(",
"*",
"*",
"kwargs",
")"
] | Create a generic object.
:param content_object: the content object to create a new object for. | [
"Create",
"a",
"generic",
"object",
"."
] | train | https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/db/models/managers.py#L241-L252 |
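A usage sketch for the helper above; Comment is a hypothetical model managed by this GenericManager and post is any saved model instance:

    comment = Comment.objects.create_generic(content_object=post, text='hi')
    # Shorthand for filling the generic FK fields by hand:
    #   content_type=ContentType.objects.get_for_model(post), object_id=post.id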
InfoAgeTech/django-core | django_core/db/models/managers.py | GenericManager.filter_generic | def filter_generic(self, content_object=None, **kwargs):
"""Filter by a generic object.
:param content_object: the content object to filter on.
"""
if content_object:
kwargs['content_type'] = ContentType.objects.get_for_model(
content_object
)
kwargs['object_id'] = content_object.id
return self.filter(**kwargs) | python | def filter_generic(self, content_object=None, **kwargs):
"""Filter by a generic object.
:param content_object: the content object to filter on.
"""
if content_object:
kwargs['content_type'] = ContentType.objects.get_for_model(
content_object
)
kwargs['object_id'] = content_object.id
return self.filter(**kwargs) | [
"def",
"filter_generic",
"(",
"self",
",",
"content_object",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"content_object",
":",
"kwargs",
"[",
"'content_type'",
"]",
"=",
"ContentType",
".",
"objects",
".",
"get_for_model",
"(",
"content_object",
")",
"kwargs",
"[",
"'object_id'",
"]",
"=",
"content_object",
".",
"id",
"return",
"self",
".",
"filter",
"(",
"*",
"*",
"kwargs",
")"
] | Filter by a generic object.
:param content_object: the content object to filter on. | [
"Filter",
"by",
"a",
"generic",
"object",
"."
] | train | https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/db/models/managers.py#L254-L265 |
InfoAgeTech/django-core | django_core/db/models/managers.py | GenericManager.get_or_create_generic | def get_or_create_generic(self, content_object=None, **kwargs):
"""Gets or creates a generic object. This is a wrapper for
get_or_create(...) when you need to get or create a generic object.
:param content_object: the object to get or create
:param kwargs: any other kwargs that the model accepts.
"""
if content_object:
kwargs['content_type'] = ContentType.objects.get_for_model(
content_object
)
kwargs['object_id'] = content_object.id
return self.get_or_create(**kwargs) | python | def get_or_create_generic(self, content_object=None, **kwargs):
"""Gets or creates a generic object. This is a wrapper for
get_or_create(...) when you need to get or create a generic object.
:param content_object: the object to get or create
:param kwargs: any other kwargs that the model accepts.
"""
if content_object:
kwargs['content_type'] = ContentType.objects.get_for_model(
content_object
)
kwargs['object_id'] = content_object.id
return self.get_or_create(**kwargs) | [
"def",
"get_or_create_generic",
"(",
"self",
",",
"content_object",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"content_object",
":",
"kwargs",
"[",
"'content_type'",
"]",
"=",
"ContentType",
".",
"objects",
".",
"get_for_model",
"(",
"content_object",
")",
"kwargs",
"[",
"'object_id'",
"]",
"=",
"content_object",
".",
"id",
"return",
"self",
".",
"get_or_create",
"(",
"*",
"*",
"kwargs",
")"
] | Gets or creates a generic object. This is a wrapper for
get_or_create(...) when you need to get or create a generic object.
:param content_object: the object to get or create
:param kwargs: any other kwargs that the model accepts. | [
"Gets",
"or",
"creates",
"a",
"generic",
"object",
".",
"This",
"is",
"a",
"wrapper",
"for",
"get_or_create",
"(",
"...",
")",
"when",
"you",
"need",
"to",
"get",
"or",
"create",
"a",
"generic",
"object",
"."
] | train | https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/db/models/managers.py#L267-L280 |
InfoAgeTech/django-core | django_core/db/models/managers.py | GenericManager.get_by_model | def get_by_model(self, model):
"""Gets all object by a specific model."""
content_type = ContentType.objects.get_for_model(model)
return self.filter(content_type=content_type) | python | def get_by_model(self, model):
"""Gets all object by a specific model."""
content_type = ContentType.objects.get_for_model(model)
return self.filter(content_type=content_type) | [
"def",
"get_by_model",
"(",
"self",
",",
"model",
")",
":",
"content_type",
"=",
"ContentType",
".",
"objects",
".",
"get_for_model",
"(",
"model",
")",
"return",
"self",
".",
"filter",
"(",
"content_type",
"=",
"content_type",
")"
] | Gets all object by a specific model. | [
"Gets",
"all",
"object",
"by",
"a",
"specific",
"model",
"."
] | train | https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/db/models/managers.py#L286-L289 |
InfoAgeTech/django-core | django_core/templatetags/collection_tags.py | attr | def attr(obj, attr):
"""
Does the same thing as getattr.
getattr(obj, attr, '')
"""
if not obj or not hasattr(obj, attr):
return ''
return getattr(obj, attr, '') | python | def attr(obj, attr):
"""
Does the same thing as getattr.
getattr(obj, attr, '')
"""
if not obj or not hasattr(obj, attr):
return ''
return getattr(obj, attr, '') | [
"def",
"attr",
"(",
"obj",
",",
"attr",
")",
":",
"if",
"not",
"obj",
"or",
"not",
"hasattr",
"(",
"obj",
",",
"attr",
")",
":",
"return",
"''",
"return",
"getattr",
"(",
"obj",
",",
"attr",
",",
"''",
")"
] | Does the same thing as getattr.
getattr(obj, attr, '') | [
"Does",
"the",
"same",
"thing",
"as",
"getattr",
"."
] | train | https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/templatetags/collection_tags.py#L29-L38 |
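A behaviour sketch for the filter above; in a template it would be applied as {{ obj|attr:"email" }}:

    class User(object):
        email = 'a@example.com'

    print(attr(User(), 'email'))  # 'a@example.com'
    print(attr(User(), 'phone'))  # '' (attribute missing)
    print(attr(None, 'email'))    # '' (falsy object)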
InfoAgeTech/django-core | django_core/templatetags/collection_tags.py | make_iterable | def make_iterable(obj):
"""Make an object iterable.
>>> make_iterable(obj='hello')
('hello',)
>>> make_iterable(obj=None)
()
"""
if not obj:
return tuple()
if isinstance(obj, (list, tuple, set)):
return obj
return (obj,) | python | def make_iterable(obj):
"""Make an object iterable.
>>> make_iterable(obj='hello')
('hello',)
>>> make_iterable(obj=None)
()
"""
if not obj:
return tuple()
if isinstance(obj, (list, tuple, set)):
return obj
return (obj,) | [
"def",
"make_iterable",
"(",
"obj",
")",
":",
"if",
"not",
"obj",
":",
"return",
"tuple",
"(",
")",
"if",
"isinstance",
"(",
"obj",
",",
"(",
"list",
",",
"tuple",
",",
"set",
")",
")",
":",
"return",
"obj",
"return",
"(",
"obj",
",",
")"
] | Make an object iterable.
>>> make_iterable(obj='hello')
('hello',)
>>> make_iterable(obj=None)
() | [
"Make",
"an",
"object",
"iterable",
"."
] | train | https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/templatetags/collection_tags.py#L48-L62 |
InfoAgeTech/django-core | django_core/views/request.py | ApiFormView.get_form_kwargs | def get_form_kwargs(self):
"""Add the 'data' to the form args so you can validate the form
data on a get request.
"""
kwargs = super(ApiFormView, self).get_form_kwargs()
kwargs['data'] = kwargs.get('initial')
return kwargs | python | def get_form_kwargs(self):
"""Add the 'data' to the form args so you can validate the form
data on a get request.
"""
kwargs = super(ApiFormView, self).get_form_kwargs()
kwargs['data'] = kwargs.get('initial')
return kwargs | [
"def",
"get_form_kwargs",
"(",
"self",
")",
":",
"kwargs",
"=",
"super",
"(",
"ApiFormView",
",",
"self",
")",
".",
"get_form_kwargs",
"(",
")",
"kwargs",
"[",
"'data'",
"]",
"=",
"kwargs",
".",
"get",
"(",
"'initial'",
")",
"return",
"kwargs"
] | Add the 'data' to the form args so you can validate the form
data on a get request. | [
"Add",
"the",
"data",
"to",
"the",
"form",
"args",
"so",
"you",
"can",
"validate",
"the",
"form",
"data",
"on",
"a",
"get",
"request",
"."
] | train | https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/views/request.py#L22-L28 |
InfoAgeTech/django-core | django_core/views/request.py | ApiFormView.form_invalid | def form_invalid(self, form, context=None, **kwargs):
"""This will return the request with form errors as well as any
additional context.
"""
if not context:
context = {}
context['errors'] = form.errors
return super(ApiFormView, self).render_to_response(context=context,
status=400) | python | def form_invalid(self, form, context=None, **kwargs):
"""This will return the request with form errors as well as any
additional context.
"""
if not context:
context = {}
context['errors'] = form.errors
return super(ApiFormView, self).render_to_response(context=context,
status=400) | [
"def",
"form_invalid",
"(",
"self",
",",
"form",
",",
"context",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"context",
":",
"context",
"=",
"{",
"}",
"context",
"[",
"'errors'",
"]",
"=",
"form",
".",
"errors",
"return",
"super",
"(",
"ApiFormView",
",",
"self",
")",
".",
"render_to_response",
"(",
"context",
"=",
"context",
",",
"status",
"=",
"400",
")"
] | This will return the response with form errors as well as any
additional context. | [
"This",
"will",
"return",
"the",
"request",
"with",
"form",
"errors",
"as",
"well",
"as",
"any",
"additional",
"context",
"."
] | train | https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/views/request.py#L30-L39 |
InfoAgeTech/django-core | django_core/utils/validators.py | is_valid_hex | def is_valid_hex(value):
"""Boolean indicating of the value is a valid hex value."""
if not value:
return False
regex = re.compile(HEX_COLOR_REGEX)
return bool(regex.match(value)) | python | def is_valid_hex(value):
"""Boolean indicating of the value is a valid hex value."""
if not value:
return False
regex = re.compile(HEX_COLOR_REGEX)
return bool(regex.match(value)) | [
"def",
"is_valid_hex",
"(",
"value",
")",
":",
"if",
"not",
"value",
":",
"return",
"False",
"regex",
"=",
"re",
".",
"compile",
"(",
"HEX_COLOR_REGEX",
")",
"return",
"bool",
"(",
"regex",
".",
"match",
"(",
"value",
")",
")"
] | Boolean indicating of the value is a valid hex value. | [
"Boolean",
"indicating",
"of",
"the",
"value",
"is",
"a",
"valid",
"hex",
"value",
"."
] | train | https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/utils/validators.py#L34-L40 |
InfoAgeTech/django-core | django_core/utils/validators.py | is_valid_rgb_color | def is_valid_rgb_color(value):
"""Checks whether the value is a valid rgb or rgba color string.
Valid colors consist of:
- rgb(255, 255, 255)
- rgba(23, 34, 45, .5)
"""
if not value:
return False
regex = re.compile(RGB_COLOR_REGEX)
return bool(regex.match(value)) | python | def is_valid_rgb_color(value):
"""Checks whether the value is a valid rgb or rgba color string.
Valid colors consist of:
- rgb(255, 255, 255)
- rgba(23, 34, 45, .5)
"""
if not value:
return False
regex = re.compile(RGB_COLOR_REGEX)
return bool(regex.match(value)) | [
"def",
"is_valid_rgb_color",
"(",
"value",
")",
":",
"if",
"not",
"value",
":",
"return",
"False",
"regex",
"=",
"re",
".",
"compile",
"(",
"RGB_COLOR_REGEX",
")",
"return",
"bool",
"(",
"regex",
".",
"match",
"(",
"value",
")",
")"
] | Checks whether the value is a valid rgb or rgba color string.
Valid colors consist of:
- rgb(255, 255, 255)
- rgba(23, 34, 45, .5) | [
"Checks",
"whether",
"the",
"value",
"is",
"a",
"valid",
"rgb",
"or",
"rgba",
"color",
"string",
"."
] | train | https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/utils/validators.py#L51-L63 |
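A behaviour sketch for the two validators above. The regex constants (HEX_COLOR_REGEX, RGB_COLOR_REGEX) are defined elsewhere in the module, so the hex result assumes a conventional '#rrggbb' pattern:

    print(is_valid_rgb_color('rgb(255, 255, 255)'))    # True
    print(is_valid_rgb_color('rgba(23, 34, 45, .5)'))  # True
    print(is_valid_rgb_color('not-a-color'))           # False
    print(is_valid_hex('#1a2b3c'))                     # True under a '#rrggbb' pattern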
InfoAgeTech/django-core | django_core/utils/validators.py | validate_password_strength | def validate_password_strength(value):
"""Validates that a password is as least 7 characters long and has at least
1 digit and 1 letter.
"""
min_length = 7
if len(value) < min_length:
raise ValidationError(_('Password must be at least {0} characters '
'long.').format(min_length))
# check for digit
if not any(char.isdigit() for char in value):
raise ValidationError(_('Password must contain at least 1 digit.'))
# check for letter
if not any(char.isalpha() for char in value):
raise ValidationError(_('Password must contain at least 1 letter.')) | python | def validate_password_strength(value):
"""Validates that a password is as least 7 characters long and has at least
1 digit and 1 letter.
"""
min_length = 7
if len(value) < min_length:
raise ValidationError(_('Password must be at least {0} characters '
'long.').format(min_length))
# check for digit
if not any(char.isdigit() for char in value):
raise ValidationError(_('Password must contain at least 1 digit.'))
# check for letter
if not any(char.isalpha() for char in value):
raise ValidationError(_('Password must contain at least 1 letter.')) | [
"def",
"validate_password_strength",
"(",
"value",
")",
":",
"min_length",
"=",
"7",
"if",
"len",
"(",
"value",
")",
"<",
"min_length",
":",
"raise",
"ValidationError",
"(",
"_",
"(",
"'Password must be at least {0} characters '",
"'long.'",
")",
".",
"format",
"(",
"min_length",
")",
")",
"# check for digit",
"if",
"not",
"any",
"(",
"char",
".",
"isdigit",
"(",
")",
"for",
"char",
"in",
"value",
")",
":",
"raise",
"ValidationError",
"(",
"_",
"(",
"'Password must contain at least 1 digit.'",
")",
")",
"# check for letter",
"if",
"not",
"any",
"(",
"char",
".",
"isalpha",
"(",
")",
"for",
"char",
"in",
"value",
")",
":",
"raise",
"ValidationError",
"(",
"_",
"(",
"'Password must contain at least 1 letter.'",
")",
")"
] | Validates that a password is at least 7 characters long and has at least
1 digit and 1 letter. | [
"Validates",
"that",
"a",
"password",
"is",
"as",
"least",
"7",
"characters",
"long",
"and",
"has",
"at",
"least",
"1",
"digit",
"and",
"1",
"letter",
"."
] | train | https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/utils/validators.py#L66-L82 |
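A usage sketch for the validator above; it raises ValidationError rather than returning a boolean:

    from django.core.exceptions import ValidationError

    for pw in ('short1', 'longenough', 'longenough1'):
        try:
            validate_password_strength(pw)
            print(pw, 'ok')        # only 'longenough1' reaches here
        except ValidationError as e:
            print(pw, 'rejected:', e.messages)
    # 'short1' fails the length check; 'longenough' lacks a digit.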
dls-controls/annotypes | annotypes/_compat.py | add_metaclass | def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper | python | def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper | [
"def",
"add_metaclass",
"(",
"metaclass",
")",
":",
"def",
"wrapper",
"(",
"cls",
")",
":",
"orig_vars",
"=",
"cls",
".",
"__dict__",
".",
"copy",
"(",
")",
"orig_vars",
".",
"pop",
"(",
"'__dict__'",
",",
"None",
")",
"orig_vars",
".",
"pop",
"(",
"'__weakref__'",
",",
"None",
")",
"return",
"metaclass",
"(",
"cls",
".",
"__name__",
",",
"cls",
".",
"__bases__",
",",
"orig_vars",
")",
"return",
"wrapper"
] | Class decorator for creating a class with a metaclass. | [
"Class",
"decorator",
"for",
"creating",
"a",
"class",
"with",
"a",
"metaclass",
"."
] | train | https://github.com/dls-controls/annotypes/blob/31ab68a0367bb70ebd9898e8b9fa9405423465bd/annotypes/_compat.py#L6-L13 |
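A usage sketch for the six-style decorator above, which behaves the same under Python 2 and 3:

    class Meta(type):
        def __new__(mcs, name, bases, namespace):
            namespace.setdefault('tag', name.lower())
            return super(Meta, mcs).__new__(mcs, name, bases, namespace)

    @add_metaclass(Meta)
    class Widget(object):
        pass

    print(Widget.tag)  # 'widget'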
InfoAgeTech/django-core | django_core/forms/fields.py | MultipleDecimalField.clean | def clean(self, value):
"""Validates that the input can be converted to a list of decimals."""
if not value:
return None
# if any value exists, then add "0" as a placeholder to the remaining
# values.
if isinstance(value, list) and any(value):
for i, item in enumerate(value):
if not item:
value[i] = '0'
return super(MultipleDecimalField, self).clean(value) | python | def clean(self, value):
"""Validates that the input can be converted to a list of decimals."""
if not value:
return None
# if any value exists, then add "0" as a placeholder to the remaining
# values.
if isinstance(value, list) and any(value):
for i, item in enumerate(value):
if not item:
value[i] = '0'
return super(MultipleDecimalField, self).clean(value) | [
"def",
"clean",
"(",
"self",
",",
"value",
")",
":",
"if",
"not",
"value",
":",
"return",
"None",
"# if any value exists, then add \"0\" as a placeholder to the remaining",
"# values.",
"if",
"isinstance",
"(",
"value",
",",
"list",
")",
"and",
"any",
"(",
"value",
")",
":",
"for",
"i",
",",
"item",
"in",
"enumerate",
"(",
"value",
")",
":",
"if",
"not",
"item",
":",
"value",
"[",
"i",
"]",
"=",
"'0'",
"return",
"super",
"(",
"MultipleDecimalField",
",",
"self",
")",
".",
"clean",
"(",
"value",
")"
] | Validates that the input can be converted to a list of decimals. | [
"Validates",
"that",
"the",
"input",
"can",
"be",
"converted",
"to",
"a",
"list",
"of",
"decimals",
"."
] | train | https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/forms/fields.py#L130-L142 |
InfoAgeTech/django-core | django_core/forms/fields.py | MultipleDecimalField.to_python | def to_python(self, value):
"""Validates that the input can be converted to a list of decimals."""
if not value:
return None
if isinstance(value, list):
for index, position_val in enumerate(value):
val = super(MultipleDecimalField, self).to_python(position_val)
value[index] = val
return value | python | def to_python(self, value):
"""Validates that the input can be converted to a list of decimals."""
if not value:
return None
if isinstance(value, list):
for index, position_val in enumerate(value):
val = super(MultipleDecimalField, self).to_python(position_val)
value[index] = val
return value | [
"def",
"to_python",
"(",
"self",
",",
"value",
")",
":",
"if",
"not",
"value",
":",
"return",
"None",
"if",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"for",
"index",
",",
"position_val",
"in",
"enumerate",
"(",
"value",
")",
":",
"val",
"=",
"super",
"(",
"MultipleDecimalField",
",",
"self",
")",
".",
"to_python",
"(",
"position_val",
")",
"value",
"[",
"index",
"]",
"=",
"val",
"return",
"value"
] | Validates that the input can be converted to a list of decimals. | [
"Validates",
"that",
"the",
"input",
"can",
"be",
"converted",
"to",
"a",
"list",
"of",
"decimals",
"."
] | train | https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/forms/fields.py#L162-L172 |
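A behaviour sketch for the two methods above. The constructor arguments are assumptions; only the clean()/to_python() behaviour is taken from the code:

    field = MultipleDecimalField(required=False)
    print(field.clean(['1.5', '']))  # expected roughly [Decimal('1.5'), Decimal('0')]
    print(field.clean([]))           # None; empty input short-circuits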