repository_name (string, lengths 7-55) | func_path_in_repository (string, lengths 4-223) | func_name (string, lengths 1-134) | whole_func_string (string, lengths 75-104k) | language (string, 1 class) | func_code_string (string, lengths 75-104k) | func_code_tokens (list, lengths 19-28.4k) | func_documentation_string (string, lengths 1-46.9k) | func_documentation_tokens (list, lengths 1-1.97k) | split_name (string, 1 class) | func_code_url (string, lengths 87-315)
---|---|---|---|---|---|---|---|---|---|---
zeroSteiner/smoke-zephyr
|
smoke_zephyr/job.py
|
JobManager.job_delete
|
def job_delete(self, job_id, wait=True):
"""
Delete a job.
:param job_id: Job identifier to delete.
:type job_id: :py:class:`uuid.UUID`
:param bool wait: If the job is currently running, wait for it to complete before deleting it.
"""
job_id = normalize_job_id(job_id)
self.logger.info('deleting job with id: ' + str(job_id) + ' and callback function: ' + self._jobs[job_id]['callback'].__name__)
job_desc = self._jobs[job_id]
with self._job_lock:
job_desc['enabled'] = False
if wait and self.job_is_running(job_id):
job_desc['job'].join()
del self._jobs[job_id]
|
python
|
def job_delete(self, job_id, wait=True):
"""
Delete a job.
:param job_id: Job identifier to delete.
:type job_id: :py:class:`uuid.UUID`
:param bool wait: If the job is currently running, wait for it to complete before deleting it.
"""
job_id = normalize_job_id(job_id)
self.logger.info('deleting job with id: ' + str(job_id) + ' and callback function: ' + self._jobs[job_id]['callback'].__name__)
job_desc = self._jobs[job_id]
with self._job_lock:
job_desc['enabled'] = False
if wait and self.job_is_running(job_id):
job_desc['job'].join()
del self._jobs[job_id]
|
[
"def",
"job_delete",
"(",
"self",
",",
"job_id",
",",
"wait",
"=",
"True",
")",
":",
"job_id",
"=",
"normalize_job_id",
"(",
"job_id",
")",
"self",
".",
"logger",
".",
"info",
"(",
"'deleting job with id: '",
"+",
"str",
"(",
"job_id",
")",
"+",
"' and callback function: '",
"+",
"self",
".",
"_jobs",
"[",
"job_id",
"]",
"[",
"'callback'",
"]",
".",
"__name__",
")",
"job_desc",
"=",
"self",
".",
"_jobs",
"[",
"job_id",
"]",
"with",
"self",
".",
"_job_lock",
":",
"job_desc",
"[",
"'enabled'",
"]",
"=",
"False",
"if",
"wait",
"and",
"self",
".",
"job_is_running",
"(",
"job_id",
")",
":",
"job_desc",
"[",
"'job'",
"]",
".",
"join",
"(",
")",
"del",
"self",
".",
"_jobs",
"[",
"job_id",
"]"
] |
Delete a job.
:param job_id: Job identifier to delete.
:type job_id: :py:class:`uuid.UUID`
:param bool wait: If the job is currently running, wait for it to complete before deleting it.
|
[
"Delete",
"a",
"job",
"."
] |
train
|
https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/job.py#L368-L383
|
zeroSteiner/smoke-zephyr
|
smoke_zephyr/job.py
|
JobManager.job_is_enabled
|
def job_is_enabled(self, job_id):
"""
Check if a job is enabled.
:param job_id: Job identifier to check the status of.
:type job_id: :py:class:`uuid.UUID`
:rtype: bool
"""
job_id = normalize_job_id(job_id)
job_desc = self._jobs[job_id]
return job_desc['enabled']
|
python
|
def job_is_enabled(self, job_id):
"""
Check if a job is enabled.
:param job_id: Job identifier to check the status of.
:type job_id: :py:class:`uuid.UUID`
:rtype: bool
"""
job_id = normalize_job_id(job_id)
job_desc = self._jobs[job_id]
return job_desc['enabled']
|
[
"def",
"job_is_enabled",
"(",
"self",
",",
"job_id",
")",
":",
"job_id",
"=",
"normalize_job_id",
"(",
"job_id",
")",
"job_desc",
"=",
"self",
".",
"_jobs",
"[",
"job_id",
"]",
"return",
"job_desc",
"[",
"'enabled'",
"]"
] |
Check if a job is enabled.
:param job_id: Job identifier to check the status of.
:type job_id: :py:class:`uuid.UUID`
:rtype: bool
|
[
"Check",
"if",
"a",
"job",
"is",
"enabled",
"."
] |
train
|
https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/job.py#L396-L406
|
zeroSteiner/smoke-zephyr
|
smoke_zephyr/job.py
|
JobManager.job_is_running
|
def job_is_running(self, job_id):
"""
Check if a job is currently running. False is returned if the job does
not exist.
:param job_id: Job identifier to check the status of.
:type job_id: :py:class:`uuid.UUID`
:rtype: bool
"""
job_id = normalize_job_id(job_id)
if job_id not in self._jobs:
return False
job_desc = self._jobs[job_id]
if job_desc['job']:
return job_desc['job'].is_alive()
return False
|
python
|
def job_is_running(self, job_id):
"""
Check if a job is currently running. False is returned if the job does
not exist.
:param job_id: Job identifier to check the status of.
:type job_id: :py:class:`uuid.UUID`
:rtype: bool
"""
job_id = normalize_job_id(job_id)
if job_id not in self._jobs:
return False
job_desc = self._jobs[job_id]
if job_desc['job']:
return job_desc['job'].is_alive()
return False
|
[
"def",
"job_is_running",
"(",
"self",
",",
"job_id",
")",
":",
"job_id",
"=",
"normalize_job_id",
"(",
"job_id",
")",
"if",
"job_id",
"not",
"in",
"self",
".",
"_jobs",
":",
"return",
"False",
"job_desc",
"=",
"self",
".",
"_jobs",
"[",
"job_id",
"]",
"if",
"job_desc",
"[",
"'job'",
"]",
":",
"return",
"job_desc",
"[",
"'job'",
"]",
".",
"is_alive",
"(",
")",
"return",
"False"
] |
Check if a job is currently running. False is returned if the job does
not exist.
:param job_id: Job identifier to check the status of.
:type job_id: :py:class:`uuid.UUID`
:rtype: bool
|
[
"Check",
"if",
"a",
"job",
"is",
"currently",
"running",
".",
"False",
"is",
"returned",
"if",
"the",
"job",
"does",
"not",
"exist",
"."
] |
train
|
https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/job.py#L408-L423
|
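The three JobManager rows above together describe a small job lifecycle: `job_is_enabled` reports the scheduling flag, `job_is_running` reports whether the worker thread is alive, and `job_delete` disables the entry and, with `wait=True`, joins the running thread before removing it. A minimal usage sketch follows; only those three methods are taken from the rows above, while the constructor, `start()`/`stop()` and the `job_add(...)` call are illustrative assumptions about the rest of the JobManager API.

```python
# Hedged usage sketch for the JobManager methods shown in the rows above.
# Assumption: smoke_zephyr is installed; JobManager(), start(), stop() and
# job_add() are illustrative guesses at the surrounding API; only
# job_is_enabled, job_is_running and job_delete come from the rows above.
import time

from smoke_zephyr.job import JobManager

def callback():
    print('job fired')

manager = JobManager()
manager.start()                                # assumed worker start-up call

job_id = manager.job_add(callback, seconds=5)  # hypothetical scheduling call

print(manager.job_is_enabled(job_id))          # True while the job is scheduled
print(manager.job_is_running(job_id))          # True only while the callback executes

time.sleep(1)
manager.job_delete(job_id, wait=True)          # joins the thread if mid-run, then removes it
manager.stop()                                 # assumed shutdown call
```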
zeroSteiner/smoke-zephyr
|
smoke_zephyr/argparse_types.py
|
bin_b64_type
|
def bin_b64_type(arg):
"""An argparse type representing binary data encoded in base64."""
try:
arg = base64.standard_b64decode(arg)
except (binascii.Error, TypeError):
raise argparse.ArgumentTypeError("{0} is invalid base64 data".format(repr(arg)))
return arg
|
python
|
def bin_b64_type(arg):
"""An argparse type representing binary data encoded in base64."""
try:
arg = base64.standard_b64decode(arg)
except (binascii.Error, TypeError):
raise argparse.ArgumentTypeError("{0} is invalid base64 data".format(repr(arg)))
return arg
|
[
"def",
"bin_b64_type",
"(",
"arg",
")",
":",
"try",
":",
"arg",
"=",
"base64",
".",
"standard_b64decode",
"(",
"arg",
")",
"except",
"(",
"binascii",
".",
"Error",
",",
"TypeError",
")",
":",
"raise",
"argparse",
".",
"ArgumentTypeError",
"(",
"\"{0} is invalid base64 data\"",
".",
"format",
"(",
"repr",
"(",
"arg",
")",
")",
")",
"return",
"arg"
] |
An argparse type representing binary data encoded in base64.
|
[
"An",
"argparse",
"type",
"representing",
"binary",
"data",
"encoded",
"in",
"base64",
"."
] |
train
|
https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/argparse_types.py#L78-L84
|
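`bin_b64_type` is meant to be passed as the `type=` callable of an argparse option so the parsed attribute arrives as decoded bytes. A self-contained sketch (the function body is copied from the row above; the parser wiring and option name are illustrative):

```python
# Hedged sketch: bin_b64_type used as an argparse type= callable.
# The function body is copied from the row above; the option name is illustrative.
import argparse
import base64
import binascii

def bin_b64_type(arg):
    """An argparse type representing binary data encoded in base64."""
    try:
        arg = base64.standard_b64decode(arg)
    except (binascii.Error, TypeError):
        raise argparse.ArgumentTypeError("{0} is invalid base64 data".format(repr(arg)))
    return arg

parser = argparse.ArgumentParser()
parser.add_argument('--payload', type=bin_b64_type)

args = parser.parse_args(['--payload', 'aGVsbG8='])
print(args.payload)  # b'hello' on Python 3
```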
zeroSteiner/smoke-zephyr
|
smoke_zephyr/argparse_types.py
|
bin_hex_type
|
def bin_hex_type(arg):
"""An argparse type representing binary data encoded in hex."""
if re.match(r'^[a-f0-9]{2}(:[a-f0-9]{2})+$', arg, re.I):
arg = arg.replace(':', '')
elif re.match(r'^(\\x[a-f0-9]{2})+$', arg, re.I):
arg = arg.replace('\\x', '')
try:
arg = binascii.a2b_hex(arg)
except (binascii.Error, TypeError):
raise argparse.ArgumentTypeError("{0} is invalid hex data".format(repr(arg)))
return arg
|
python
|
def bin_hex_type(arg):
"""An argparse type representing binary data encoded in hex."""
if re.match(r'^[a-f0-9]{2}(:[a-f0-9]{2})+$', arg, re.I):
arg = arg.replace(':', '')
elif re.match(r'^(\\x[a-f0-9]{2})+$', arg, re.I):
arg = arg.replace('\\x', '')
try:
arg = binascii.a2b_hex(arg)
except (binascii.Error, TypeError):
raise argparse.ArgumentTypeError("{0} is invalid hex data".format(repr(arg)))
return arg
|
[
"def",
"bin_hex_type",
"(",
"arg",
")",
":",
"if",
"re",
".",
"match",
"(",
"r'^[a-f0-9]{2}(:[a-f0-9]{2})+$'",
",",
"arg",
",",
"re",
".",
"I",
")",
":",
"arg",
"=",
"arg",
".",
"replace",
"(",
"':'",
",",
"''",
")",
"elif",
"re",
".",
"match",
"(",
"r'^(\\\\x[a-f0-9]{2})+$'",
",",
"arg",
",",
"re",
".",
"I",
")",
":",
"arg",
"=",
"arg",
".",
"replace",
"(",
"'\\\\x'",
",",
"''",
")",
"try",
":",
"arg",
"=",
"binascii",
".",
"a2b_hex",
"(",
"arg",
")",
"except",
"(",
"binascii",
".",
"Error",
",",
"TypeError",
")",
":",
"raise",
"argparse",
".",
"ArgumentTypeError",
"(",
"\"{0} is invalid hex data\"",
".",
"format",
"(",
"repr",
"(",
"arg",
")",
")",
")",
"return",
"arg"
] |
An argparse type representing binary data encoded in hex.
|
[
"An",
"argparse",
"type",
"representing",
"binary",
"data",
"encoded",
"in",
"hex",
"."
] |
train
|
https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/argparse_types.py#L86-L96
|
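Because of the two regexes, `bin_hex_type` accepts the same bytes in colon-separated, backslash-x, or plain hex form. A self-contained sketch (function body copied from the row above; the example inputs are illustrative):

```python
# Hedged sketch: the three input forms accepted by bin_hex_type.
# The function body is copied from the row above; the example calls are illustrative.
import argparse
import binascii
import re

def bin_hex_type(arg):
    """An argparse type representing binary data encoded in hex."""
    if re.match(r'^[a-f0-9]{2}(:[a-f0-9]{2})+$', arg, re.I):
        arg = arg.replace(':', '')
    elif re.match(r'^(\\x[a-f0-9]{2})+$', arg, re.I):
        arg = arg.replace('\\x', '')
    try:
        arg = binascii.a2b_hex(arg)
    except (binascii.Error, TypeError):
        raise argparse.ArgumentTypeError("{0} is invalid hex data".format(repr(arg)))
    return arg

print(bin_hex_type('de:ad:be:ef'))           # b'\xde\xad\xbe\xef'
print(bin_hex_type('\\xde\\xad\\xbe\\xef'))  # literal backslash-x form, same bytes
print(bin_hex_type('deadbeef'))              # plain hex, same bytes
```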
zeroSteiner/smoke-zephyr
|
smoke_zephyr/argparse_types.py
|
dir_type
|
def dir_type(arg):
"""An argparse type representing a valid directory."""
if not os.path.isdir(arg):
raise argparse.ArgumentTypeError("{0} is not a valid directory".format(repr(arg)))
return arg
|
python
|
def dir_type(arg):
"""An argparse type representing a valid directory."""
if not os.path.isdir(arg):
raise argparse.ArgumentTypeError("{0} is not a valid directory".format(repr(arg)))
return arg
|
[
"def",
"dir_type",
"(",
"arg",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"arg",
")",
":",
"raise",
"argparse",
".",
"ArgumentTypeError",
"(",
"\"{0} is not a valid directory\"",
".",
"format",
"(",
"repr",
"(",
"arg",
")",
")",
")",
"return",
"arg"
] |
An argparse type representing a valid directory.
|
[
"An",
"argparse",
"type",
"representing",
"a",
"valid",
"directory",
"."
] |
train
|
https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/argparse_types.py#L98-L102
|
zeroSteiner/smoke-zephyr
|
smoke_zephyr/argparse_types.py
|
email_type
|
def email_type(arg):
"""An argparse type representing an email address."""
if not is_valid_email_address(arg):
raise argparse.ArgumentTypeError("{0} is not a valid email address".format(repr(arg)))
return arg
|
python
|
def email_type(arg):
"""An argparse type representing an email address."""
if not is_valid_email_address(arg):
raise argparse.ArgumentTypeError("{0} is not a valid email address".format(repr(arg)))
return arg
|
[
"def",
"email_type",
"(",
"arg",
")",
":",
"if",
"not",
"is_valid_email_address",
"(",
"arg",
")",
":",
"raise",
"argparse",
".",
"ArgumentTypeError",
"(",
"\"{0} is not a valid email address\"",
".",
"format",
"(",
"repr",
"(",
"arg",
")",
")",
")",
"return",
"arg"
] |
An argparse type representing an email address.
|
[
"An",
"argparse",
"type",
"representing",
"an",
"email",
"address",
"."
] |
train
|
https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/argparse_types.py#L104-L108
|
zeroSteiner/smoke-zephyr
|
smoke_zephyr/argparse_types.py
|
log_level_type
|
def log_level_type(arg):
"""An argparse type representing a logging level."""
if not arg.upper() in ('NOTSET', 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'):
raise argparse.ArgumentTypeError("{0} is not a valid log level".format(repr(arg)))
return getattr(logging, arg.upper())
|
python
|
def log_level_type(arg):
"""An argparse type representing a logging level."""
if not arg.upper() in ('NOTSET', 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'):
raise argparse.ArgumentTypeError("{0} is not a valid log level".format(repr(arg)))
return getattr(logging, arg.upper())
|
[
"def",
"log_level_type",
"(",
"arg",
")",
":",
"if",
"not",
"arg",
".",
"upper",
"(",
")",
"in",
"(",
"'NOTSET'",
",",
"'DEBUG'",
",",
"'INFO'",
",",
"'WARNING'",
",",
"'ERROR'",
",",
"'CRITICAL'",
")",
":",
"raise",
"argparse",
".",
"ArgumentTypeError",
"(",
"\"{0} is not a valid log level\"",
".",
"format",
"(",
"repr",
"(",
"arg",
")",
")",
")",
"return",
"getattr",
"(",
"logging",
",",
"arg",
".",
"upper",
"(",
")",
")"
] |
An argparse type representing a logging level.
|
[
"An",
"argparse",
"type",
"representing",
"a",
"logging",
"level",
"."
] |
train
|
https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/argparse_types.py#L110-L114
|
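`log_level_type` validates the level name case-insensitively and returns the numeric constant from the `logging` module rather than the original string. A self-contained sketch (function body copied from the row above):

```python
# Hedged sketch: log_level_type returns numeric logging constants.
# The function body is copied from the row above; the example calls are illustrative.
import argparse
import logging

def log_level_type(arg):
    """An argparse type representing a logging level."""
    if not arg.upper() in ('NOTSET', 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'):
        raise argparse.ArgumentTypeError("{0} is not a valid log level".format(repr(arg)))
    return getattr(logging, arg.upper())

print(log_level_type('debug'))    # 10, i.e. logging.DEBUG
print(log_level_type('WARNING'))  # 30, i.e. logging.WARNING
```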
zeroSteiner/smoke-zephyr
|
smoke_zephyr/argparse_types.py
|
port_type
|
def port_type(arg):
"""An argparse type representing a tcp or udp port number."""
error_msg = "{0} is not a valid port".format(repr(arg))
try:
arg = ast.literal_eval(arg)
except ValueError:
raise argparse.ArgumentTypeError(error_msg)
if arg < 0 or arg > 65535:
raise argparse.ArgumentTypeError(error_msg)
return arg
|
python
|
def port_type(arg):
"""An argparse type representing a tcp or udp port number."""
error_msg = "{0} is not a valid port".format(repr(arg))
try:
arg = ast.literal_eval(arg)
except ValueError:
raise argparse.ArgumentTypeError(error_msg)
if arg < 0 or arg > 65535:
raise argparse.ArgumentTypeError(error_msg)
return arg
|
[
"def",
"port_type",
"(",
"arg",
")",
":",
"error_msg",
"=",
"\"{0} is not a valid port\"",
".",
"format",
"(",
"repr",
"(",
"arg",
")",
")",
"try",
":",
"arg",
"=",
"ast",
".",
"literal_eval",
"(",
"arg",
")",
"except",
"ValueError",
":",
"raise",
"argparse",
".",
"ArgumentTypeError",
"(",
"error_msg",
")",
"if",
"arg",
"<",
"0",
"or",
"arg",
">",
"65535",
":",
"raise",
"argparse",
".",
"ArgumentTypeError",
"(",
"error_msg",
")",
"return",
"arg"
] |
An argparse type representing a tcp or udp port number.
|
[
"An",
"argparse",
"type",
"representing",
"a",
"tcp",
"or",
"udp",
"port",
"number",
"."
] |
train
|
https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/argparse_types.py#L116-L125
|
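`port_type` parses the string with `ast.literal_eval` before range-checking it, so any Python integer literal between 0 and 65535 is accepted. A self-contained sketch (function body copied from the row above):

```python
# Hedged sketch: port_type accepts Python integer literals in the range 0-65535.
# The function body is copied from the row above; the example calls are illustrative.
import argparse
import ast

def port_type(arg):
    """An argparse type representing a tcp or udp port number."""
    error_msg = "{0} is not a valid port".format(repr(arg))
    try:
        arg = ast.literal_eval(arg)
    except ValueError:
        raise argparse.ArgumentTypeError(error_msg)
    if arg < 0 or arg > 65535:
        raise argparse.ArgumentTypeError(error_msg)
    return arg

print(port_type('8080'))    # 8080
print(port_type('0x1F90'))  # 8080, hex literals also parse via ast.literal_eval
try:
    port_type('70000')
except argparse.ArgumentTypeError as error:
    print(error)            # "'70000' is not a valid port"
```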
zeroSteiner/smoke-zephyr
|
smoke_zephyr/argparse_types.py
|
timespan_type
|
def timespan_type(arg):
"""An argparse type representing a timespan such as 6h for 6 hours."""
try:
arg = parse_timespan(arg)
except ValueError:
raise argparse.ArgumentTypeError("{0} is not a valid time span".format(repr(arg)))
return arg
|
python
|
def timespan_type(arg):
"""An argparse type representing a timespan such as 6h for 6 hours."""
try:
arg = parse_timespan(arg)
except ValueError:
raise argparse.ArgumentTypeError("{0} is not a valid time span".format(repr(arg)))
return arg
|
[
"def",
"timespan_type",
"(",
"arg",
")",
":",
"try",
":",
"arg",
"=",
"parse_timespan",
"(",
"arg",
")",
"except",
"ValueError",
":",
"raise",
"argparse",
".",
"ArgumentTypeError",
"(",
"\"{0} is not a valid time span\"",
".",
"format",
"(",
"repr",
"(",
"arg",
")",
")",
")",
"return",
"arg"
] |
An argparse type representing a timespan such as 6h for 6 hours.
|
[
"An",
"argparse",
"type",
"representing",
"a",
"timespan",
"such",
"as",
"6h",
"for",
"6",
"hours",
"."
] |
train
|
https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/argparse_types.py#L127-L133
|
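All of the argparse_types rows above are intended to be handed to `argparse` as the `type=` argument of an option. A minimal wiring sketch; the import path follows the `func_path_in_repository` column, while the option names and default values are illustrative and the package is assumed to be installed:

```python
# Hedged sketch: wiring the smoke_zephyr argparse types into a parser.
# Assumption: smoke_zephyr is installed. Option names and defaults are illustrative;
# note that argparse also runs type= conversion on string defaults.
import argparse

from smoke_zephyr.argparse_types import (
    dir_type,
    email_type,
    log_level_type,
    port_type,
    timespan_type,
)

parser = argparse.ArgumentParser(description='argparse_types demo')
parser.add_argument('--workdir', type=dir_type, help='an existing directory')
parser.add_argument('--notify', type=email_type, help='an email address')
parser.add_argument('--log-level', type=log_level_type, default='INFO')
parser.add_argument('--port', type=port_type, default='8080')
parser.add_argument('--timeout', type=timespan_type, default='6h')

args = parser.parse_args(['--workdir', '.', '--notify', 'user@example.com'])
print(args.log_level)  # 20, i.e. logging.INFO, converted from the string default
print(args.timeout)    # the timespan in seconds (6h -> 21600)
```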
thespacedoctor/sherlock
|
sherlock/catalogue_conesearch.py
|
catalogue_conesearch.search
|
def search(self):
"""
*trigger the conesearch*
**Return:**
- ``matchIndies`` -- the indicies of the input transient sources (syncs with ``uniqueMatchDicts``)
- ``uniqueMatchDicts`` -- the crossmatch results
**Usage:**
See class docstring for usage examples
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check redendering of this docstring
"""
self.log.debug('starting the ``search`` method')
# ACCOUNT FOR TYPE OF SEARCH
sqlWhere = False
magnitudeLimitFilter = self.magnitudeLimitFilter
disCols = ["zColName",
"distanceColName", "semiMajorColName"]
sqlWhere = ""
if self.physicalSearch == True:
for d in disCols:
colName = self.colMaps[self.tableName][d]
if colName:
sqlWhere += " or `%(colName)s` is not null" % locals()
if len(sqlWhere):
sqlWhere = " and (" + sqlWhere[4:] + ")"
if self.upperMagnitudeLimit != False and self.upperMagnitudeLimit and not self.lowerMagnitudeLimit:
mag = self.upperMagnitudeLimit
sqlWhere += " and `%(magnitudeLimitFilter)s` > %(mag)s" % locals()
if self.lowerMagnitudeLimit != False and self.lowerMagnitudeLimit and not self.upperMagnitudeLimit:
mag = self.lowerMagnitudeLimit
sqlWhere += " and `%(magnitudeLimitFilter)s` < %(mag)s" % locals()
# THE GENERAL (INBETWEEN) CASE
if self.lowerMagnitudeLimit != False and self.lowerMagnitudeLimit and self.upperMagnitudeLimit != False and self.upperMagnitudeLimit:
upperMagnitudeLimit = self.upperMagnitudeLimit
lowerMagnitudeLimit = self.lowerMagnitudeLimit
sqlWhere += " and ((`%(magnitudeLimitFilter)s` > %(upperMagnitudeLimit)s and `%(magnitudeLimitFilter)s` < %(lowerMagnitudeLimit)s) or `%(magnitudeLimitFilter)s` is null)" % locals()
if sqlWhere and " and" == sqlWhere[0:4]:
sqlWhere = sqlWhere[5:]
# THE COLUMN MAP LIFTED FROM ``tcs_helper_catalogue_tables_info` TABLE
# IN CATALOGUE DATABASE (COLUMN NAMES ENDDING WITH 'ColName')
columns = {}
for k, v in self.colMaps[self.tableName].iteritems():
name = k.replace("ColName", "")
if "colname" in k.lower() and v:
columns[k] = "`%(v)s` as `%(name)s`" % locals()
columns = ", ".join(columns.values())
cs = hmptyConesearch(
log=self.log,
dbConn=self.dbConn,
tableName=self.tableName,
columns=columns,
ra=self.ra,
dec=self.dec,
radiusArcsec=self.radius,
separations=True,
distinct=False,
sqlWhere=sqlWhere,
closest=self.nearestOnly,
raCol="ra",
decCol="dec"
)
matchIndies, matches = cs.search()
# MATCH ARE NOT NECESSARILY UNIQUE IF MANY TRANSIENT MATCH ONE SOURCE
uniqueMatchDicts = []
uniqueMatchDicts[:] = [copy.copy(d) for d in matches.list]
self.log.debug('completed the ``search`` method')
return matchIndies, uniqueMatchDicts
|
python
|
def search(self):
"""
*trigger the conesearch*
**Return:**
- ``matchIndies`` -- the indicies of the input transient sources (syncs with ``uniqueMatchDicts``)
- ``uniqueMatchDicts`` -- the crossmatch results
**Usage:**
See class docstring for usage examples
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check redendering of this docstring
"""
self.log.debug('starting the ``search`` method')
# ACCOUNT FOR TYPE OF SEARCH
sqlWhere = False
magnitudeLimitFilter = self.magnitudeLimitFilter
disCols = ["zColName",
"distanceColName", "semiMajorColName"]
sqlWhere = ""
if self.physicalSearch == True:
for d in disCols:
colName = self.colMaps[self.tableName][d]
if colName:
sqlWhere += " or `%(colName)s` is not null" % locals()
if len(sqlWhere):
sqlWhere = " and (" + sqlWhere[4:] + ")"
if self.upperMagnitudeLimit != False and self.upperMagnitudeLimit and not self.lowerMagnitudeLimit:
mag = self.upperMagnitudeLimit
sqlWhere += " and `%(magnitudeLimitFilter)s` > %(mag)s" % locals()
if self.lowerMagnitudeLimit != False and self.lowerMagnitudeLimit and not self.upperMagnitudeLimit:
mag = self.lowerMagnitudeLimit
sqlWhere += " and `%(magnitudeLimitFilter)s` < %(mag)s" % locals()
# THE GENERAL (INBETWEEN) CASE
if self.lowerMagnitudeLimit != False and self.lowerMagnitudeLimit and self.upperMagnitudeLimit != False and self.upperMagnitudeLimit:
upperMagnitudeLimit = self.upperMagnitudeLimit
lowerMagnitudeLimit = self.lowerMagnitudeLimit
sqlWhere += " and ((`%(magnitudeLimitFilter)s` > %(upperMagnitudeLimit)s and `%(magnitudeLimitFilter)s` < %(lowerMagnitudeLimit)s) or `%(magnitudeLimitFilter)s` is null)" % locals()
if sqlWhere and " and" == sqlWhere[0:4]:
sqlWhere = sqlWhere[5:]
# THE COLUMN MAP LIFTED FROM ``tcs_helper_catalogue_tables_info` TABLE
# IN CATALOGUE DATABASE (COLUMN NAMES ENDDING WITH 'ColName')
columns = {}
for k, v in self.colMaps[self.tableName].iteritems():
name = k.replace("ColName", "")
if "colname" in k.lower() and v:
columns[k] = "`%(v)s` as `%(name)s`" % locals()
columns = ", ".join(columns.values())
cs = hmptyConesearch(
log=self.log,
dbConn=self.dbConn,
tableName=self.tableName,
columns=columns,
ra=self.ra,
dec=self.dec,
radiusArcsec=self.radius,
separations=True,
distinct=False,
sqlWhere=sqlWhere,
closest=self.nearestOnly,
raCol="ra",
decCol="dec"
)
matchIndies, matches = cs.search()
# MATCH ARE NOT NECESSARILY UNIQUE IF MANY TRANSIENT MATCH ONE SOURCE
uniqueMatchDicts = []
uniqueMatchDicts[:] = [copy.copy(d) for d in matches.list]
self.log.debug('completed the ``search`` method')
return matchIndies, uniqueMatchDicts
|
[
"def",
"search",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``search`` method'",
")",
"# ACCOUNT FOR TYPE OF SEARCH",
"sqlWhere",
"=",
"False",
"magnitudeLimitFilter",
"=",
"self",
".",
"magnitudeLimitFilter",
"disCols",
"=",
"[",
"\"zColName\"",
",",
"\"distanceColName\"",
",",
"\"semiMajorColName\"",
"]",
"sqlWhere",
"=",
"\"\"",
"if",
"self",
".",
"physicalSearch",
"==",
"True",
":",
"for",
"d",
"in",
"disCols",
":",
"colName",
"=",
"self",
".",
"colMaps",
"[",
"self",
".",
"tableName",
"]",
"[",
"d",
"]",
"if",
"colName",
":",
"sqlWhere",
"+=",
"\" or `%(colName)s` is not null\"",
"%",
"locals",
"(",
")",
"if",
"len",
"(",
"sqlWhere",
")",
":",
"sqlWhere",
"=",
"\" and (\"",
"+",
"sqlWhere",
"[",
"4",
":",
"]",
"+",
"\")\"",
"if",
"self",
".",
"upperMagnitudeLimit",
"!=",
"False",
"and",
"self",
".",
"upperMagnitudeLimit",
"and",
"not",
"self",
".",
"lowerMagnitudeLimit",
":",
"mag",
"=",
"self",
".",
"upperMagnitudeLimit",
"sqlWhere",
"+=",
"\" and `%(magnitudeLimitFilter)s` > %(mag)s\"",
"%",
"locals",
"(",
")",
"if",
"self",
".",
"lowerMagnitudeLimit",
"!=",
"False",
"and",
"self",
".",
"lowerMagnitudeLimit",
"and",
"not",
"self",
".",
"upperMagnitudeLimit",
":",
"mag",
"=",
"self",
".",
"lowerMagnitudeLimit",
"sqlWhere",
"+=",
"\" and `%(magnitudeLimitFilter)s` < %(mag)s\"",
"%",
"locals",
"(",
")",
"# THE GENERAL (INBETWEEN) CASE",
"if",
"self",
".",
"lowerMagnitudeLimit",
"!=",
"False",
"and",
"self",
".",
"lowerMagnitudeLimit",
"and",
"self",
".",
"upperMagnitudeLimit",
"!=",
"False",
"and",
"self",
".",
"upperMagnitudeLimit",
":",
"upperMagnitudeLimit",
"=",
"self",
".",
"upperMagnitudeLimit",
"lowerMagnitudeLimit",
"=",
"self",
".",
"lowerMagnitudeLimit",
"sqlWhere",
"+=",
"\" and ((`%(magnitudeLimitFilter)s` > %(upperMagnitudeLimit)s and `%(magnitudeLimitFilter)s` < %(lowerMagnitudeLimit)s) or `%(magnitudeLimitFilter)s` is null)\"",
"%",
"locals",
"(",
")",
"if",
"sqlWhere",
"and",
"\" and\"",
"==",
"sqlWhere",
"[",
"0",
":",
"4",
"]",
":",
"sqlWhere",
"=",
"sqlWhere",
"[",
"5",
":",
"]",
"# THE COLUMN MAP LIFTED FROM ``tcs_helper_catalogue_tables_info` TABLE",
"# IN CATALOGUE DATABASE (COLUMN NAMES ENDDING WITH 'ColName')",
"columns",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"self",
".",
"colMaps",
"[",
"self",
".",
"tableName",
"]",
".",
"iteritems",
"(",
")",
":",
"name",
"=",
"k",
".",
"replace",
"(",
"\"ColName\"",
",",
"\"\"",
")",
"if",
"\"colname\"",
"in",
"k",
".",
"lower",
"(",
")",
"and",
"v",
":",
"columns",
"[",
"k",
"]",
"=",
"\"`%(v)s` as `%(name)s`\"",
"%",
"locals",
"(",
")",
"columns",
"=",
"\", \"",
".",
"join",
"(",
"columns",
".",
"values",
"(",
")",
")",
"cs",
"=",
"hmptyConesearch",
"(",
"log",
"=",
"self",
".",
"log",
",",
"dbConn",
"=",
"self",
".",
"dbConn",
",",
"tableName",
"=",
"self",
".",
"tableName",
",",
"columns",
"=",
"columns",
",",
"ra",
"=",
"self",
".",
"ra",
",",
"dec",
"=",
"self",
".",
"dec",
",",
"radiusArcsec",
"=",
"self",
".",
"radius",
",",
"separations",
"=",
"True",
",",
"distinct",
"=",
"False",
",",
"sqlWhere",
"=",
"sqlWhere",
",",
"closest",
"=",
"self",
".",
"nearestOnly",
",",
"raCol",
"=",
"\"ra\"",
",",
"decCol",
"=",
"\"dec\"",
")",
"matchIndies",
",",
"matches",
"=",
"cs",
".",
"search",
"(",
")",
"# MATCH ARE NOT NECESSARILY UNIQUE IF MANY TRANSIENT MATCH ONE SOURCE",
"uniqueMatchDicts",
"=",
"[",
"]",
"uniqueMatchDicts",
"[",
":",
"]",
"=",
"[",
"copy",
".",
"copy",
"(",
"d",
")",
"for",
"d",
"in",
"matches",
".",
"list",
"]",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``search`` method'",
")",
"return",
"matchIndies",
",",
"uniqueMatchDicts"
] |
*trigger the conesearch*
**Return:**
- ``matchIndies`` -- the indicies of the input transient sources (syncs with ``uniqueMatchDicts``)
- ``uniqueMatchDicts`` -- the crossmatch results
**Usage:**
See class docstring for usage examples
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check redendering of this docstring
|
[
"*",
"trigger",
"the",
"conesearch",
"*"
] |
train
|
https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/catalogue_conesearch.py#L204-L289
|
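Most of `search` is spent assembling the `sqlWhere` fragment from the magnitude limits and physical-distance columns. The standalone sketch below condenses just the magnitude-limit branches (string-building taken from the row above, conditions simplified to plain truthiness, column name illustrative) so the resulting SQL fragment can be inspected:

```python
# Hedged sketch: the magnitude-limit portion of the sqlWhere assembly in search(),
# condensed into a standalone function. The `_r` column name is illustrative.
def build_mag_where(magnitudeLimitFilter, upperMagnitudeLimit=False, lowerMagnitudeLimit=False):
    sqlWhere = ""
    if upperMagnitudeLimit and not lowerMagnitudeLimit:
        mag = upperMagnitudeLimit
        sqlWhere += " and `%(magnitudeLimitFilter)s` > %(mag)s" % locals()
    if lowerMagnitudeLimit and not upperMagnitudeLimit:
        mag = lowerMagnitudeLimit
        sqlWhere += " and `%(magnitudeLimitFilter)s` < %(mag)s" % locals()
    # the general (in-between) case: keep sources inside the limits or with no magnitude
    if lowerMagnitudeLimit and upperMagnitudeLimit:
        sqlWhere += (" and ((`%(magnitudeLimitFilter)s` > %(upperMagnitudeLimit)s"
                     " and `%(magnitudeLimitFilter)s` < %(lowerMagnitudeLimit)s)"
                     " or `%(magnitudeLimitFilter)s` is null)") % locals()
    # strip the leading " and " exactly as search() does
    if sqlWhere and " and" == sqlWhere[0:4]:
        sqlWhere = sqlWhere[5:]
    return sqlWhere

# a "general" brightness search with a bright limit of 14.0 and a faint limit of 21.5
print(build_mag_where("_r", upperMagnitudeLimit=14.0, lowerMagnitudeLimit=21.5))
# ((`_r` > 14.0 and `_r` < 21.5) or `_r` is null)
```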
thespacedoctor/sherlock
|
sherlock/transient_catalogue_crossmatch.py
|
transient_catalogue_crossmatch.match
|
def match(self):
"""
*match the transients against the sherlock-catalogues according to the search algorithm and return matches alongside the predicted classification(s)*
**Return:**
- ``classification`` -- the crossmatch results and classifications assigned to the transients
See the class docstring for usage.
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check redendering of this docstring
"""
self.log.debug('starting the ``match`` method')
classifications = []
# COUNT NUMBER OF TRANSIENT TO CROSSMATCH
numberOfTransients = len(self.transients)
count = 0
# GRAB SEARCH ALGORITHM
sa = self.settings["search algorithm"]
# FOR EACH TRANSIENT SOURCE IN THE LIST ...
allCatalogueMatches = []
catalogueMatches = []
nonSynonymTransients = self.transients[:]
# SYNONYM SEARCHES
# ITERATE THROUGH SEARCH ALGORITHM IN ORDER
# PRESENTED IN THE SETTINGS FILE
brightnessFilters = ["bright", "faint", "general"]
for search_name, searchPara in sa.iteritems():
for bf in brightnessFilters:
if bf not in searchPara:
continue
if "synonym" not in searchPara[bf] or searchPara[bf]["synonym"] == False:
continue
self.log.debug(""" searching: %(search_name)s""" % locals())
if "physical radius kpc" in searchPara[bf]:
# THE PHYSICAL SEPARATION SEARCHES
self.log.debug(
'checking physical distance crossmatches in %(search_name)s' % locals())
catalogueMatches = self.physical_separation_crossmatch_against_catalogue(
objectList=self.transients,
searchPara=searchPara,
search_name=search_name + " physical",
brightnessFilter=bf,
classificationType="synonym"
)
else:
# THE ANGULAR SEPARATION SEARCHES
self.log.debug(
'Crossmatching against %(search_name)s' % locals())
# RENAMED from searchCatalogue
catalogueMatches = self.angular_crossmatch_against_catalogue(
objectList=self.transients,
searchPara=searchPara,
search_name=search_name + " angular",
brightnessFilter=bf,
classificationType="synonym"
)
# ADD CLASSIFICATION AND CROSSMATCHES IF FOUND
if catalogueMatches:
allCatalogueMatches = allCatalogueMatches + catalogueMatches
synonymIDs = []
synonymIDs[:] = [xm["transient_object_id"]
for xm in allCatalogueMatches]
nonSynonymTransients = []
nonSynonymTransients[:] = [
t for t in self.transients if t["id"] not in synonymIDs]
# ASSOCIATION SEARCHES
# ITERATE THROUGH SEARCH ALGORITHM IN ORDER
# PRESENTED IN THE SETTINGS FILE
if len(nonSynonymTransients) > 0:
for search_name, searchPara in sa.iteritems():
self.log.debug(""" searching: %(search_name)s""" % locals())
for bf in brightnessFilters:
if bf not in searchPara:
continue
if "association" not in searchPara[bf] or searchPara[bf]["association"] == False:
continue
if "physical radius kpc" in searchPara[bf]:
# THE PHYSICAL SEPARATION SEARCHES
self.log.debug(
'checking physical distance crossmatches in %(search_name)s' % locals())
catalogueMatches = self.physical_separation_crossmatch_against_catalogue(
objectList=nonSynonymTransients,
searchPara=searchPara,
search_name=search_name + " physical",
brightnessFilter=bf,
classificationType="association"
)
else:
# THE ANGULAR SEPARATION SEARCHES
self.log.debug(
'Crossmatching against %(search_name)s' % locals())
# RENAMED from searchCatalogue
catalogueMatches = self.angular_crossmatch_against_catalogue(
objectList=nonSynonymTransients,
searchPara=searchPara,
search_name=search_name + " angular",
brightnessFilter=bf,
classificationType="association"
)
# ADD CLASSIFICATION AND CROSSMATCHES IF FOUND
if catalogueMatches:
allCatalogueMatches = allCatalogueMatches + catalogueMatches
catalogueMatches = []
associationIDs = []
associationIDs[:] = [xm["transient_object_id"]
for xm in allCatalogueMatches]
nonAssociationTransients = []
nonAssociationTransients[:] = [
t for t in self.transients if t["id"] not in associationIDs]
# ANNOTATION SEARCHES
# ITERATE THROUGH SEARCH ALGORITHM IN ORDER
# PRESENTED IN THE SETTINGS FILE
brightnessFilters = ["bright", "faint", "general"]
for search_name, searchPara in sa.iteritems():
for bf in brightnessFilters:
if bf not in searchPara:
continue
if "annotation" not in searchPara[bf] or searchPara[bf]["annotation"] == False:
continue
self.log.debug(""" searching: %(search_name)s""" % locals())
if "physical radius kpc" in searchPara[bf]:
# THE PHYSICAL SEPARATION SEARCHES
self.log.debug(
'checking physical distance crossmatches in %(search_name)s' % locals())
if bf in searchPara:
catalogueMatches = self.physical_separation_crossmatch_against_catalogue(
objectList=nonAssociationTransients,
searchPara=searchPara,
search_name=search_name + " physical",
brightnessFilter=bf,
classificationType="annotation"
)
else:
# THE ANGULAR SEPARATION SEARCHES
self.log.debug(
'Crossmatching against %(search_name)s' % locals())
# RENAMED from searchCatalogue
if bf in searchPara:
catalogueMatches = self.angular_crossmatch_against_catalogue(
objectList=nonAssociationTransients,
searchPara=searchPara,
search_name=search_name + " angular",
brightnessFilter=bf,
classificationType="annotation"
)
# ADD CLASSIFICATION AND CROSSMATCHES IF FOUND
if catalogueMatches:
allCatalogueMatches = allCatalogueMatches + catalogueMatches
self.log.debug('completed the ``match`` method')
return allCatalogueMatches
|
python
|
def match(self):
"""
*match the transients against the sherlock-catalogues according to the search algorithm and return matches alongside the predicted classification(s)*
**Return:**
- ``classification`` -- the crossmatch results and classifications assigned to the transients
See the class docstring for usage.
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check redendering of this docstring
"""
self.log.debug('starting the ``match`` method')
classifications = []
# COUNT NUMBER OF TRANSIENT TO CROSSMATCH
numberOfTransients = len(self.transients)
count = 0
# GRAB SEARCH ALGORITHM
sa = self.settings["search algorithm"]
# FOR EACH TRANSIENT SOURCE IN THE LIST ...
allCatalogueMatches = []
catalogueMatches = []
nonSynonymTransients = self.transients[:]
# SYNONYM SEARCHES
# ITERATE THROUGH SEARCH ALGORITHM IN ORDER
# PRESENTED IN THE SETTINGS FILE
brightnessFilters = ["bright", "faint", "general"]
for search_name, searchPara in sa.iteritems():
for bf in brightnessFilters:
if bf not in searchPara:
continue
if "synonym" not in searchPara[bf] or searchPara[bf]["synonym"] == False:
continue
self.log.debug(""" searching: %(search_name)s""" % locals())
if "physical radius kpc" in searchPara[bf]:
# THE PHYSICAL SEPARATION SEARCHES
self.log.debug(
'checking physical distance crossmatches in %(search_name)s' % locals())
catalogueMatches = self.physical_separation_crossmatch_against_catalogue(
objectList=self.transients,
searchPara=searchPara,
search_name=search_name + " physical",
brightnessFilter=bf,
classificationType="synonym"
)
else:
# THE ANGULAR SEPARATION SEARCHES
self.log.debug(
'Crossmatching against %(search_name)s' % locals())
# RENAMED from searchCatalogue
catalogueMatches = self.angular_crossmatch_against_catalogue(
objectList=self.transients,
searchPara=searchPara,
search_name=search_name + " angular",
brightnessFilter=bf,
classificationType="synonym"
)
# ADD CLASSIFICATION AND CROSSMATCHES IF FOUND
if catalogueMatches:
allCatalogueMatches = allCatalogueMatches + catalogueMatches
synonymIDs = []
synonymIDs[:] = [xm["transient_object_id"]
for xm in allCatalogueMatches]
nonSynonymTransients = []
nonSynonymTransients[:] = [
t for t in self.transients if t["id"] not in synonymIDs]
# ASSOCIATION SEARCHES
# ITERATE THROUGH SEARCH ALGORITHM IN ORDER
# PRESENTED IN THE SETTINGS FILE
if len(nonSynonymTransients) > 0:
for search_name, searchPara in sa.iteritems():
self.log.debug(""" searching: %(search_name)s""" % locals())
for bf in brightnessFilters:
if bf not in searchPara:
continue
if "association" not in searchPara[bf] or searchPara[bf]["association"] == False:
continue
if "physical radius kpc" in searchPara[bf]:
# THE PHYSICAL SEPARATION SEARCHES
self.log.debug(
'checking physical distance crossmatches in %(search_name)s' % locals())
catalogueMatches = self.physical_separation_crossmatch_against_catalogue(
objectList=nonSynonymTransients,
searchPara=searchPara,
search_name=search_name + " physical",
brightnessFilter=bf,
classificationType="association"
)
else:
# THE ANGULAR SEPARATION SEARCHES
self.log.debug(
'Crossmatching against %(search_name)s' % locals())
# RENAMED from searchCatalogue
catalogueMatches = self.angular_crossmatch_against_catalogue(
objectList=nonSynonymTransients,
searchPara=searchPara,
search_name=search_name + " angular",
brightnessFilter=bf,
classificationType="association"
)
# ADD CLASSIFICATION AND CROSSMATCHES IF FOUND
if catalogueMatches:
allCatalogueMatches = allCatalogueMatches + catalogueMatches
catalogueMatches = []
associationIDs = []
associationIDs[:] = [xm["transient_object_id"]
for xm in allCatalogueMatches]
nonAssociationTransients = []
nonAssociationTransients[:] = [
t for t in self.transients if t["id"] not in associationIDs]
# ANNOTATION SEARCHES
# ITERATE THROUGH SEARCH ALGORITHM IN ORDER
# PRESENTED IN THE SETTINGS FILE
brightnessFilters = ["bright", "faint", "general"]
for search_name, searchPara in sa.iteritems():
for bf in brightnessFilters:
if bf not in searchPara:
continue
if "annotation" not in searchPara[bf] or searchPara[bf]["annotation"] == False:
continue
self.log.debug(""" searching: %(search_name)s""" % locals())
if "physical radius kpc" in searchPara[bf]:
# THE PHYSICAL SEPARATION SEARCHES
self.log.debug(
'checking physical distance crossmatches in %(search_name)s' % locals())
if bf in searchPara:
catalogueMatches = self.physical_separation_crossmatch_against_catalogue(
objectList=nonAssociationTransients,
searchPara=searchPara,
search_name=search_name + " physical",
brightnessFilter=bf,
classificationType="annotation"
)
else:
# THE ANGULAR SEPARATION SEARCHES
self.log.debug(
'Crossmatching against %(search_name)s' % locals())
# RENAMED from searchCatalogue
if bf in searchPara:
catalogueMatches = self.angular_crossmatch_against_catalogue(
objectList=nonAssociationTransients,
searchPara=searchPara,
search_name=search_name + " angular",
brightnessFilter=bf,
classificationType="annotation"
)
# ADD CLASSIFICATION AND CROSSMATCHES IF FOUND
if catalogueMatches:
allCatalogueMatches = allCatalogueMatches + catalogueMatches
self.log.debug('completed the ``match`` method')
return allCatalogueMatches
|
[
"def",
"match",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``match`` method'",
")",
"classifications",
"=",
"[",
"]",
"# COUNT NUMBER OF TRANSIENT TO CROSSMATCH",
"numberOfTransients",
"=",
"len",
"(",
"self",
".",
"transients",
")",
"count",
"=",
"0",
"# GRAB SEARCH ALGORITHM",
"sa",
"=",
"self",
".",
"settings",
"[",
"\"search algorithm\"",
"]",
"# FOR EACH TRANSIENT SOURCE IN THE LIST ...",
"allCatalogueMatches",
"=",
"[",
"]",
"catalogueMatches",
"=",
"[",
"]",
"nonSynonymTransients",
"=",
"self",
".",
"transients",
"[",
":",
"]",
"# SYNONYM SEARCHES",
"# ITERATE THROUGH SEARCH ALGORITHM IN ORDER",
"# PRESENTED IN THE SETTINGS FILE",
"brightnessFilters",
"=",
"[",
"\"bright\"",
",",
"\"faint\"",
",",
"\"general\"",
"]",
"for",
"search_name",
",",
"searchPara",
"in",
"sa",
".",
"iteritems",
"(",
")",
":",
"for",
"bf",
"in",
"brightnessFilters",
":",
"if",
"bf",
"not",
"in",
"searchPara",
":",
"continue",
"if",
"\"synonym\"",
"not",
"in",
"searchPara",
"[",
"bf",
"]",
"or",
"searchPara",
"[",
"bf",
"]",
"[",
"\"synonym\"",
"]",
"==",
"False",
":",
"continue",
"self",
".",
"log",
".",
"debug",
"(",
"\"\"\" searching: %(search_name)s\"\"\"",
"%",
"locals",
"(",
")",
")",
"if",
"\"physical radius kpc\"",
"in",
"searchPara",
"[",
"bf",
"]",
":",
"# THE PHYSICAL SEPARATION SEARCHES",
"self",
".",
"log",
".",
"debug",
"(",
"'checking physical distance crossmatches in %(search_name)s'",
"%",
"locals",
"(",
")",
")",
"catalogueMatches",
"=",
"self",
".",
"physical_separation_crossmatch_against_catalogue",
"(",
"objectList",
"=",
"self",
".",
"transients",
",",
"searchPara",
"=",
"searchPara",
",",
"search_name",
"=",
"search_name",
"+",
"\" physical\"",
",",
"brightnessFilter",
"=",
"bf",
",",
"classificationType",
"=",
"\"synonym\"",
")",
"else",
":",
"# THE ANGULAR SEPARATION SEARCHES",
"self",
".",
"log",
".",
"debug",
"(",
"'Crossmatching against %(search_name)s'",
"%",
"locals",
"(",
")",
")",
"# RENAMED from searchCatalogue",
"catalogueMatches",
"=",
"self",
".",
"angular_crossmatch_against_catalogue",
"(",
"objectList",
"=",
"self",
".",
"transients",
",",
"searchPara",
"=",
"searchPara",
",",
"search_name",
"=",
"search_name",
"+",
"\" angular\"",
",",
"brightnessFilter",
"=",
"bf",
",",
"classificationType",
"=",
"\"synonym\"",
")",
"# ADD CLASSIFICATION AND CROSSMATCHES IF FOUND",
"if",
"catalogueMatches",
":",
"allCatalogueMatches",
"=",
"allCatalogueMatches",
"+",
"catalogueMatches",
"synonymIDs",
"=",
"[",
"]",
"synonymIDs",
"[",
":",
"]",
"=",
"[",
"xm",
"[",
"\"transient_object_id\"",
"]",
"for",
"xm",
"in",
"allCatalogueMatches",
"]",
"nonSynonymTransients",
"=",
"[",
"]",
"nonSynonymTransients",
"[",
":",
"]",
"=",
"[",
"t",
"for",
"t",
"in",
"self",
".",
"transients",
"if",
"t",
"[",
"\"id\"",
"]",
"not",
"in",
"synonymIDs",
"]",
"# ASSOCIATION SEARCHES",
"# ITERATE THROUGH SEARCH ALGORITHM IN ORDER",
"# PRESENTED IN THE SETTINGS FILE",
"if",
"len",
"(",
"nonSynonymTransients",
")",
">",
"0",
":",
"for",
"search_name",
",",
"searchPara",
"in",
"sa",
".",
"iteritems",
"(",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"\"\" searching: %(search_name)s\"\"\"",
"%",
"locals",
"(",
")",
")",
"for",
"bf",
"in",
"brightnessFilters",
":",
"if",
"bf",
"not",
"in",
"searchPara",
":",
"continue",
"if",
"\"association\"",
"not",
"in",
"searchPara",
"[",
"bf",
"]",
"or",
"searchPara",
"[",
"bf",
"]",
"[",
"\"association\"",
"]",
"==",
"False",
":",
"continue",
"if",
"\"physical radius kpc\"",
"in",
"searchPara",
"[",
"bf",
"]",
":",
"# THE PHYSICAL SEPARATION SEARCHES",
"self",
".",
"log",
".",
"debug",
"(",
"'checking physical distance crossmatches in %(search_name)s'",
"%",
"locals",
"(",
")",
")",
"catalogueMatches",
"=",
"self",
".",
"physical_separation_crossmatch_against_catalogue",
"(",
"objectList",
"=",
"nonSynonymTransients",
",",
"searchPara",
"=",
"searchPara",
",",
"search_name",
"=",
"search_name",
"+",
"\" physical\"",
",",
"brightnessFilter",
"=",
"bf",
",",
"classificationType",
"=",
"\"association\"",
")",
"else",
":",
"# THE ANGULAR SEPARATION SEARCHES",
"self",
".",
"log",
".",
"debug",
"(",
"'Crossmatching against %(search_name)s'",
"%",
"locals",
"(",
")",
")",
"# RENAMED from searchCatalogue",
"catalogueMatches",
"=",
"self",
".",
"angular_crossmatch_against_catalogue",
"(",
"objectList",
"=",
"nonSynonymTransients",
",",
"searchPara",
"=",
"searchPara",
",",
"search_name",
"=",
"search_name",
"+",
"\" angular\"",
",",
"brightnessFilter",
"=",
"bf",
",",
"classificationType",
"=",
"\"association\"",
")",
"# ADD CLASSIFICATION AND CROSSMATCHES IF FOUND",
"if",
"catalogueMatches",
":",
"allCatalogueMatches",
"=",
"allCatalogueMatches",
"+",
"catalogueMatches",
"catalogueMatches",
"=",
"[",
"]",
"associationIDs",
"=",
"[",
"]",
"associationIDs",
"[",
":",
"]",
"=",
"[",
"xm",
"[",
"\"transient_object_id\"",
"]",
"for",
"xm",
"in",
"allCatalogueMatches",
"]",
"nonAssociationTransients",
"=",
"[",
"]",
"nonAssociationTransients",
"[",
":",
"]",
"=",
"[",
"t",
"for",
"t",
"in",
"self",
".",
"transients",
"if",
"t",
"[",
"\"id\"",
"]",
"not",
"in",
"associationIDs",
"]",
"# ANNOTATION SEARCHES",
"# ITERATE THROUGH SEARCH ALGORITHM IN ORDER",
"# PRESENTED IN THE SETTINGS FILE",
"brightnessFilters",
"=",
"[",
"\"bright\"",
",",
"\"faint\"",
",",
"\"general\"",
"]",
"for",
"search_name",
",",
"searchPara",
"in",
"sa",
".",
"iteritems",
"(",
")",
":",
"for",
"bf",
"in",
"brightnessFilters",
":",
"if",
"bf",
"not",
"in",
"searchPara",
":",
"continue",
"if",
"\"annotation\"",
"not",
"in",
"searchPara",
"[",
"bf",
"]",
"or",
"searchPara",
"[",
"bf",
"]",
"[",
"\"annotation\"",
"]",
"==",
"False",
":",
"continue",
"self",
".",
"log",
".",
"debug",
"(",
"\"\"\" searching: %(search_name)s\"\"\"",
"%",
"locals",
"(",
")",
")",
"if",
"\"physical radius kpc\"",
"in",
"searchPara",
"[",
"bf",
"]",
":",
"# THE PHYSICAL SEPARATION SEARCHES",
"self",
".",
"log",
".",
"debug",
"(",
"'checking physical distance crossmatches in %(search_name)s'",
"%",
"locals",
"(",
")",
")",
"if",
"bf",
"in",
"searchPara",
":",
"catalogueMatches",
"=",
"self",
".",
"physical_separation_crossmatch_against_catalogue",
"(",
"objectList",
"=",
"nonAssociationTransients",
",",
"searchPara",
"=",
"searchPara",
",",
"search_name",
"=",
"search_name",
"+",
"\" physical\"",
",",
"brightnessFilter",
"=",
"bf",
",",
"classificationType",
"=",
"\"annotation\"",
")",
"else",
":",
"# THE ANGULAR SEPARATION SEARCHES",
"self",
".",
"log",
".",
"debug",
"(",
"'Crossmatching against %(search_name)s'",
"%",
"locals",
"(",
")",
")",
"# RENAMED from searchCatalogue",
"if",
"bf",
"in",
"searchPara",
":",
"catalogueMatches",
"=",
"self",
".",
"angular_crossmatch_against_catalogue",
"(",
"objectList",
"=",
"nonAssociationTransients",
",",
"searchPara",
"=",
"searchPara",
",",
"search_name",
"=",
"search_name",
"+",
"\" angular\"",
",",
"brightnessFilter",
"=",
"bf",
",",
"classificationType",
"=",
"\"annotation\"",
")",
"# ADD CLASSIFICATION AND CROSSMATCHES IF FOUND",
"if",
"catalogueMatches",
":",
"allCatalogueMatches",
"=",
"allCatalogueMatches",
"+",
"catalogueMatches",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``match`` method'",
")",
"return",
"allCatalogueMatches"
] |
*match the transients against the sherlock-catalogues according to the search algorithm and return matches alongside the predicted classification(s)*
**Return:**
- ``classification`` -- the crossmatch results and classifications assigned to the transients
See the class docstring for usage.
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check redendering of this docstring
|
[
"*",
"match",
"the",
"transients",
"against",
"the",
"sherlock",
"-",
"catalogues",
"according",
"to",
"the",
"search",
"algorithm",
"and",
"return",
"matches",
"alongside",
"the",
"predicted",
"classification",
"(",
"s",
")",
"*"
] |
train
|
https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/transient_catalogue_crossmatch.py#L88-L262
|
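`match` walks the `search algorithm` block of the settings three times, once per classification type (synonym, association, annotation), and each named search carries bright/faint/general sub-dictionaries whose keys decide whether that search runs in a given pass and whether it is an angular or physical-separation crossmatch. Below is a hedged sketch of the dictionary shape implied by the key lookups in the `match` row above and the `angular_crossmatch_against_catalogue` row that follows; the keys mirror those lookups, every value is illustrative:

```python
# Hedged sketch: the settings structure implied by the key lookups in match() and
# angular_crossmatch_against_catalogue(). Keys mirror the code; values are illustrative.
settings = {
    "synonym radius arcsec": 0.5,
    "search algorithm": {
        "ned_d spec sn": {                                  # search_name from the docstring example
            "database table": "tcs_view_transient_ned_d",   # illustrative catalogue view
            "mag column": "mag",                            # optional; enables magnitude filtering
            "general": {
                "angular radius arcsec": 60.0,
                "synonym": "SN",                # classification used in the synonym pass
                "association": "SN",            # classification used in the association pass
                "annotation": False,            # skip this search in the annotation pass
                "physical radius kpc": 50.0,    # presence switches to a physical-separation search
                "match nearest source only": True,
            },
            "bright": {
                "mag limit": 15.0,
                "angular radius arcsec": 20.0,
                "synonym": False,
                "association": "BS",
                "annotation": False,
            },
        },
    },
}
```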
thespacedoctor/sherlock
|
sherlock/transient_catalogue_crossmatch.py
|
transient_catalogue_crossmatch.angular_crossmatch_against_catalogue
|
def angular_crossmatch_against_catalogue(
self,
objectList,
searchPara={},
search_name="",
brightnessFilter=False,
physicalSearch=False,
classificationType=False
):
"""*perform an angular separation crossmatch against a given catalogue in the database and annotate the crossmatch with some value added parameters (distances, physical separations, sub-type of transient etc)*
**Key Arguments:**
- ``objectList`` -- the list of transient locations to match against the crossmatch catalogue
- ``searchPara`` -- the search parameters for this individual search as lifted from the search algorithm in the sherlock settings file
- ``search_name`` -- the name of the search as given in the sherlock settings file
- ``brightnessFilter`` -- is this search to be constrained by magnitude of the catalogue sources? Default *False*. [bright|faint|general]
- ``physicalSearch`` -- is this angular search a sub-part of a physical separation search
- ``classificationType`` -- synonym, association or annotation. Default *False*
**Return:**
- matchedObjects -- any sources matched against the object
**Usage:**
Take a list of transients from somewhere
.. code-block:: python
transients = [
{'ps1_designation': u'PS1-14aef',
'name': u'4L3Piiq',
'detection_list_id': 2,
'local_comments': u'',
'ra': 0.02548233704918263,
'followup_id': 2065412L,
'dec': -4.284933417540423,
'id': 1000006110041705700L,
'object_classification': 0L
},
{'ps1_designation': u'PS1-13dcr',
'name': u'3I3Phzx',
'detection_list_id': 2,
'local_comments': u'',
'ra': 4.754236999477372,
'followup_id': 1140386L,
'dec': 28.276703631398625,
'id': 1001901011281636100L,
'object_classification': 0L
},
{'ps1_designation': u'PS1-13dhc',
'name': u'3I3Pixd',
'detection_list_id': 2,
'local_comments': u'',
'ra': 1.3324973428505413,
'followup_id': 1202386L,
'dec': 32.98869220595689,
'id': 1000519791325919200L,
'object_classification': 0L
}
]
Then run the ``angular_crossmatch_against_catalogue`` method to crossmatch against the catalogues and return results:
.. code-block:: python
# ANGULAR CONESEARCH ON CATALOGUE
search_name = "ned_d spec sn"
searchPara = self.settings["search algorithm"][search_name]
matchedObjects = xmatcher.angular_crossmatch_against_catalogue(
objectList=transients,
searchPara=searchPara,
search_name=search_name
)
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check redendering of this docstring
"""
self.log.debug(
'starting the ``angular_crossmatch_against_catalogue`` method')
self.log.info("STARTING %s SEARCH" %
(search_name,))
start_time = time.time()
# DEFAULTS
# print search_name, classificationType
magnitudeLimitFilter = None
upperMagnitudeLimit = False
lowerMagnitudeLimit = False
catalogueName = searchPara["database table"]
if not "mag column" in searchPara:
searchPara["mag column"] = None
if brightnessFilter:
if "mag column" in searchPara and searchPara["mag column"]:
magnitudeLimitFilter = self.colMaps[
catalogueName][searchPara["mag column"] + "ColName"]
theseSearchPara = searchPara[brightnessFilter]
else:
theseSearchPara = searchPara
# EXTRACT PARAMETERS FROM ARGUMENTS & SETTINGS FILE
if classificationType == "synonym":
radius = self.settings["synonym radius arcsec"]
matchedType = theseSearchPara["synonym"]
elif classificationType == "association":
radius = theseSearchPara["angular radius arcsec"]
matchedType = theseSearchPara["association"]
elif classificationType == "annotation":
radius = theseSearchPara["angular radius arcsec"]
matchedType = theseSearchPara["annotation"]
if brightnessFilter == "faint":
upperMagnitudeLimit = theseSearchPara["mag limit"]
elif brightnessFilter == "bright":
lowerMagnitudeLimit = theseSearchPara["mag limit"]
elif brightnessFilter == "general":
if "faint" in searchPara:
lowerMagnitudeLimit = searchPara["faint"]["mag limit"]
if "bright" in searchPara:
upperMagnitudeLimit = searchPara["bright"]["mag limit"]
# VARIABLES
matchedObjects = []
matchSubset = []
transRAs = []
transRAs[:] = [t['ra'] for t in objectList]
transDecs = []
transDecs[:] = [t['dec'] for t in objectList]
if len(transRAs) == 0:
return []
cs = catalogue_conesearch(
log=self.log,
ra=transRAs,
dec=transDecs,
radiusArcsec=radius,
colMaps=self.colMaps,
tableName=catalogueName,
dbConn=self.dbConn,
nearestOnly=False,
physicalSearch=physicalSearch,
upperMagnitudeLimit=upperMagnitudeLimit,
lowerMagnitudeLimit=lowerMagnitudeLimit,
magnitudeLimitFilter=magnitudeLimitFilter
)
# catalogueMatches ARE ORDERED BY ANGULAR SEPARATION
indices, catalogueMatches = cs.search()
count = 1
annotatedcatalogueMatches = []
for i, xm in zip(indices, catalogueMatches):
# CALCULATE PHYSICAL PARAMETERS ... IF WE CAN
if "cmSepArcsec" in xm:
xm["separationArcsec"] = xm["cmSepArcsec"]
# CALCULATE SEPARATION IN ARCSEC
calculator = separations(
log=self.log,
ra1=objectList[i]["ra"],
dec1=objectList[i]["dec"],
ra2=xm["ra"],
dec2=xm["dec"]
)
angularSeparation, north, east = calculator.get()
xm["northSeparationArcsec"] = north
xm["eastSeparationArcsec"] = east
del xm["cmSepArcsec"]
xm["association_type"] = matchedType
xm["catalogue_view_name"] = catalogueName
xm["transient_object_id"] = objectList[i]["id"]
xm["catalogue_table_name"] = self.colMaps[
catalogueName]["description"]
xm["catalogue_table_id"] = self.colMaps[
catalogueName]["table_id"]
xm["catalogue_view_id"] = self.colMaps[
catalogueName]["id"]
if classificationType == "synonym":
xm["classificationReliability"] = 1
elif classificationType == "association":
xm["classificationReliability"] = 2
elif classificationType == "annotation":
xm["classificationReliability"] = 3
xm = self._annotate_crossmatch_with_value_added_parameters(
crossmatchDict=xm,
catalogueName=catalogueName,
searchPara=theseSearchPara,
search_name=search_name
)
annotatedcatalogueMatches.append(xm)
catalogueMatches = annotatedcatalogueMatches
# IF BRIGHT STAR SEARCH
if brightnessFilter == "bright" and "star" in search_name:
catalogueMatches = self._bright_star_match(
matchedObjects=catalogueMatches,
catalogueName=catalogueName,
lowerMagnitudeLimit=lowerMagnitudeLimit,
magnitudeLimitFilter=searchPara["mag column"]
)
if brightnessFilter == "general" and "galaxy" in search_name and "galaxy-like" not in search_name and "physical radius kpc" not in theseSearchPara:
catalogueMatches = self._galaxy_association_cuts(
matchedObjects=catalogueMatches,
catalogueName=catalogueName,
lowerMagnitudeLimit=lowerMagnitudeLimit,
upperMagnitudeLimit=upperMagnitudeLimit,
magnitudeLimitFilter=searchPara["mag column"]
)
if "match nearest source only" in theseSearchPara and theseSearchPara["match nearest source only"] == True and len(catalogueMatches):
nearestMatches = []
transList = []
for c in catalogueMatches:
if c["transient_object_id"] not in transList:
transList.append(c["transient_object_id"])
nearestMatches.append(c)
catalogueMatches = nearestMatches
self.log.debug(
'completed the ``angular_crossmatch_against_catalogue`` method')
self.log.debug("FINISHED %s SEARCH IN %0.5f s" %
(search_name, time.time() - start_time,))
return catalogueMatches
|
python
|
def angular_crossmatch_against_catalogue(
self,
objectList,
searchPara={},
search_name="",
brightnessFilter=False,
physicalSearch=False,
classificationType=False
):
"""*perform an angular separation crossmatch against a given catalogue in the database and annotate the crossmatch with some value added parameters (distances, physical separations, sub-type of transient etc)*
**Key Arguments:**
- ``objectList`` -- the list of transient locations to match against the crossmatch catalogue
- ``searchPara`` -- the search parameters for this individual search as lifted from the search algorithm in the sherlock settings file
- ``search_name`` -- the name of the search as given in the sherlock settings file
- ``brightnessFilter`` -- is this search to be constrained by magnitude of the catalogue sources? Default *False*. [bright|faint|general]
- ``physicalSearch`` -- is this angular search a sub-part of a physical separation search
- ``classificationType`` -- synonym, association or annotation. Default *False*
**Return:**
- matchedObjects -- any sources matched against the object
**Usage:**
Take a list of transients from somewhere
.. code-block:: python
transients = [
{'ps1_designation': u'PS1-14aef',
'name': u'4L3Piiq',
'detection_list_id': 2,
'local_comments': u'',
'ra': 0.02548233704918263,
'followup_id': 2065412L,
'dec': -4.284933417540423,
'id': 1000006110041705700L,
'object_classification': 0L
},
{'ps1_designation': u'PS1-13dcr',
'name': u'3I3Phzx',
'detection_list_id': 2,
'local_comments': u'',
'ra': 4.754236999477372,
'followup_id': 1140386L,
'dec': 28.276703631398625,
'id': 1001901011281636100L,
'object_classification': 0L
},
{'ps1_designation': u'PS1-13dhc',
'name': u'3I3Pixd',
'detection_list_id': 2,
'local_comments': u'',
'ra': 1.3324973428505413,
'followup_id': 1202386L,
'dec': 32.98869220595689,
'id': 1000519791325919200L,
'object_classification': 0L
}
]
Then run the ``angular_crossmatch_against_catalogue`` method to crossmatch against the catalogues and return results:
.. code-block:: python
# ANGULAR CONESEARCH ON CATALOGUE
search_name = "ned_d spec sn"
searchPara = self.settings["search algorithm"][search_name]
matchedObjects = xmatcher.angular_crossmatch_against_catalogue(
objectList=transients,
searchPara=searchPara,
search_name=search_name
)
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check redendering of this docstring
"""
self.log.debug(
'starting the ``angular_crossmatch_against_catalogue`` method')
self.log.info("STARTING %s SEARCH" %
(search_name,))
start_time = time.time()
# DEFAULTS
# print search_name, classificationType
magnitudeLimitFilter = None
upperMagnitudeLimit = False
lowerMagnitudeLimit = False
catalogueName = searchPara["database table"]
if not "mag column" in searchPara:
searchPara["mag column"] = None
if brightnessFilter:
if "mag column" in searchPara and searchPara["mag column"]:
magnitudeLimitFilter = self.colMaps[
catalogueName][searchPara["mag column"] + "ColName"]
theseSearchPara = searchPara[brightnessFilter]
else:
theseSearchPara = searchPara
# EXTRACT PARAMETERS FROM ARGUMENTS & SETTINGS FILE
if classificationType == "synonym":
radius = self.settings["synonym radius arcsec"]
matchedType = theseSearchPara["synonym"]
elif classificationType == "association":
radius = theseSearchPara["angular radius arcsec"]
matchedType = theseSearchPara["association"]
elif classificationType == "annotation":
radius = theseSearchPara["angular radius arcsec"]
matchedType = theseSearchPara["annotation"]
if brightnessFilter == "faint":
upperMagnitudeLimit = theseSearchPara["mag limit"]
elif brightnessFilter == "bright":
lowerMagnitudeLimit = theseSearchPara["mag limit"]
elif brightnessFilter == "general":
if "faint" in searchPara:
lowerMagnitudeLimit = searchPara["faint"]["mag limit"]
if "bright" in searchPara:
upperMagnitudeLimit = searchPara["bright"]["mag limit"]
# VARIABLES
matchedObjects = []
matchSubset = []
transRAs = []
transRAs[:] = [t['ra'] for t in objectList]
transDecs = []
transDecs[:] = [t['dec'] for t in objectList]
if len(transRAs) == 0:
return []
cs = catalogue_conesearch(
log=self.log,
ra=transRAs,
dec=transDecs,
radiusArcsec=radius,
colMaps=self.colMaps,
tableName=catalogueName,
dbConn=self.dbConn,
nearestOnly=False,
physicalSearch=physicalSearch,
upperMagnitudeLimit=upperMagnitudeLimit,
lowerMagnitudeLimit=lowerMagnitudeLimit,
magnitudeLimitFilter=magnitudeLimitFilter
)
# catalogueMatches ARE ORDERED BY ANGULAR SEPARATION
indices, catalogueMatches = cs.search()
count = 1
annotatedcatalogueMatches = []
for i, xm in zip(indices, catalogueMatches):
# CALCULATE PHYSICAL PARAMETERS ... IF WE CAN
if "cmSepArcsec" in xm:
xm["separationArcsec"] = xm["cmSepArcsec"]
# CALCULATE SEPARATION IN ARCSEC
calculator = separations(
log=self.log,
ra1=objectList[i]["ra"],
dec1=objectList[i]["dec"],
ra2=xm["ra"],
dec2=xm["dec"]
)
angularSeparation, north, east = calculator.get()
xm["northSeparationArcsec"] = north
xm["eastSeparationArcsec"] = east
del xm["cmSepArcsec"]
xm["association_type"] = matchedType
xm["catalogue_view_name"] = catalogueName
xm["transient_object_id"] = objectList[i]["id"]
xm["catalogue_table_name"] = self.colMaps[
catalogueName]["description"]
xm["catalogue_table_id"] = self.colMaps[
catalogueName]["table_id"]
xm["catalogue_view_id"] = self.colMaps[
catalogueName]["id"]
if classificationType == "synonym":
xm["classificationReliability"] = 1
elif classificationType == "association":
xm["classificationReliability"] = 2
elif classificationType == "annotation":
xm["classificationReliability"] = 3
xm = self._annotate_crossmatch_with_value_added_parameters(
crossmatchDict=xm,
catalogueName=catalogueName,
searchPara=theseSearchPara,
search_name=search_name
)
annotatedcatalogueMatches.append(xm)
catalogueMatches = annotatedcatalogueMatches
# IF BRIGHT STAR SEARCH
if brightnessFilter == "bright" and "star" in search_name:
catalogueMatches = self._bright_star_match(
matchedObjects=catalogueMatches,
catalogueName=catalogueName,
lowerMagnitudeLimit=lowerMagnitudeLimit,
magnitudeLimitFilter=searchPara["mag column"]
)
if brightnessFilter == "general" and "galaxy" in search_name and "galaxy-like" not in search_name and "physical radius kpc" not in theseSearchPara:
catalogueMatches = self._galaxy_association_cuts(
matchedObjects=catalogueMatches,
catalogueName=catalogueName,
lowerMagnitudeLimit=lowerMagnitudeLimit,
upperMagnitudeLimit=upperMagnitudeLimit,
magnitudeLimitFilter=searchPara["mag column"]
)
if "match nearest source only" in theseSearchPara and theseSearchPara["match nearest source only"] == True and len(catalogueMatches):
nearestMatches = []
transList = []
for c in catalogueMatches:
if c["transient_object_id"] not in transList:
transList.append(c["transient_object_id"])
nearestMatches.append(c)
catalogueMatches = nearestMatches
self.log.debug(
'completed the ``angular_crossmatch_against_catalogue`` method')
self.log.debug("FINISHED %s SEARCH IN %0.5f s" %
(search_name, time.time() - start_time,))
return catalogueMatches
|
[
"def",
"angular_crossmatch_against_catalogue",
"(",
"self",
",",
"objectList",
",",
"searchPara",
"=",
"{",
"}",
",",
"search_name",
"=",
"\"\"",
",",
"brightnessFilter",
"=",
"False",
",",
"physicalSearch",
"=",
"False",
",",
"classificationType",
"=",
"False",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``angular_crossmatch_against_catalogue`` method'",
")",
"self",
".",
"log",
".",
"info",
"(",
"\"STARTING %s SEARCH\"",
"%",
"(",
"search_name",
",",
")",
")",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"# DEFAULTS",
"# print search_name, classificationType",
"magnitudeLimitFilter",
"=",
"None",
"upperMagnitudeLimit",
"=",
"False",
"lowerMagnitudeLimit",
"=",
"False",
"catalogueName",
"=",
"searchPara",
"[",
"\"database table\"",
"]",
"if",
"not",
"\"mag column\"",
"in",
"searchPara",
":",
"searchPara",
"[",
"\"mag column\"",
"]",
"=",
"None",
"if",
"brightnessFilter",
":",
"if",
"\"mag column\"",
"in",
"searchPara",
"and",
"searchPara",
"[",
"\"mag column\"",
"]",
":",
"magnitudeLimitFilter",
"=",
"self",
".",
"colMaps",
"[",
"catalogueName",
"]",
"[",
"searchPara",
"[",
"\"mag column\"",
"]",
"+",
"\"ColName\"",
"]",
"theseSearchPara",
"=",
"searchPara",
"[",
"brightnessFilter",
"]",
"else",
":",
"theseSearchPara",
"=",
"searchPara",
"# EXTRACT PARAMETERS FROM ARGUMENTS & SETTINGS FILE",
"if",
"classificationType",
"==",
"\"synonym\"",
":",
"radius",
"=",
"self",
".",
"settings",
"[",
"\"synonym radius arcsec\"",
"]",
"matchedType",
"=",
"theseSearchPara",
"[",
"\"synonym\"",
"]",
"elif",
"classificationType",
"==",
"\"association\"",
":",
"radius",
"=",
"theseSearchPara",
"[",
"\"angular radius arcsec\"",
"]",
"matchedType",
"=",
"theseSearchPara",
"[",
"\"association\"",
"]",
"elif",
"classificationType",
"==",
"\"annotation\"",
":",
"radius",
"=",
"theseSearchPara",
"[",
"\"angular radius arcsec\"",
"]",
"matchedType",
"=",
"theseSearchPara",
"[",
"\"annotation\"",
"]",
"if",
"brightnessFilter",
"==",
"\"faint\"",
":",
"upperMagnitudeLimit",
"=",
"theseSearchPara",
"[",
"\"mag limit\"",
"]",
"elif",
"brightnessFilter",
"==",
"\"bright\"",
":",
"lowerMagnitudeLimit",
"=",
"theseSearchPara",
"[",
"\"mag limit\"",
"]",
"elif",
"brightnessFilter",
"==",
"\"general\"",
":",
"if",
"\"faint\"",
"in",
"searchPara",
":",
"lowerMagnitudeLimit",
"=",
"searchPara",
"[",
"\"faint\"",
"]",
"[",
"\"mag limit\"",
"]",
"if",
"\"bright\"",
"in",
"searchPara",
":",
"upperMagnitudeLimit",
"=",
"searchPara",
"[",
"\"bright\"",
"]",
"[",
"\"mag limit\"",
"]",
"# VARIABLES",
"matchedObjects",
"=",
"[",
"]",
"matchSubset",
"=",
"[",
"]",
"transRAs",
"=",
"[",
"]",
"transRAs",
"[",
":",
"]",
"=",
"[",
"t",
"[",
"'ra'",
"]",
"for",
"t",
"in",
"objectList",
"]",
"transDecs",
"=",
"[",
"]",
"transDecs",
"[",
":",
"]",
"=",
"[",
"t",
"[",
"'dec'",
"]",
"for",
"t",
"in",
"objectList",
"]",
"if",
"len",
"(",
"transRAs",
")",
"==",
"0",
":",
"return",
"[",
"]",
"cs",
"=",
"catalogue_conesearch",
"(",
"log",
"=",
"self",
".",
"log",
",",
"ra",
"=",
"transRAs",
",",
"dec",
"=",
"transDecs",
",",
"radiusArcsec",
"=",
"radius",
",",
"colMaps",
"=",
"self",
".",
"colMaps",
",",
"tableName",
"=",
"catalogueName",
",",
"dbConn",
"=",
"self",
".",
"dbConn",
",",
"nearestOnly",
"=",
"False",
",",
"physicalSearch",
"=",
"physicalSearch",
",",
"upperMagnitudeLimit",
"=",
"upperMagnitudeLimit",
",",
"lowerMagnitudeLimit",
"=",
"lowerMagnitudeLimit",
",",
"magnitudeLimitFilter",
"=",
"magnitudeLimitFilter",
")",
"# catalogueMatches ARE ORDERED BY ANGULAR SEPARATION",
"indices",
",",
"catalogueMatches",
"=",
"cs",
".",
"search",
"(",
")",
"count",
"=",
"1",
"annotatedcatalogueMatches",
"=",
"[",
"]",
"for",
"i",
",",
"xm",
"in",
"zip",
"(",
"indices",
",",
"catalogueMatches",
")",
":",
"# CALCULATE PHYSICAL PARAMETERS ... IF WE CAN",
"if",
"\"cmSepArcsec\"",
"in",
"xm",
":",
"xm",
"[",
"\"separationArcsec\"",
"]",
"=",
"xm",
"[",
"\"cmSepArcsec\"",
"]",
"# CALCULATE SEPARATION IN ARCSEC",
"calculator",
"=",
"separations",
"(",
"log",
"=",
"self",
".",
"log",
",",
"ra1",
"=",
"objectList",
"[",
"i",
"]",
"[",
"\"ra\"",
"]",
",",
"dec1",
"=",
"objectList",
"[",
"i",
"]",
"[",
"\"dec\"",
"]",
",",
"ra2",
"=",
"xm",
"[",
"\"ra\"",
"]",
",",
"dec2",
"=",
"xm",
"[",
"\"dec\"",
"]",
")",
"angularSeparation",
",",
"north",
",",
"east",
"=",
"calculator",
".",
"get",
"(",
")",
"xm",
"[",
"\"northSeparationArcsec\"",
"]",
"=",
"north",
"xm",
"[",
"\"eastSeparationArcsec\"",
"]",
"=",
"east",
"del",
"xm",
"[",
"\"cmSepArcsec\"",
"]",
"xm",
"[",
"\"association_type\"",
"]",
"=",
"matchedType",
"xm",
"[",
"\"catalogue_view_name\"",
"]",
"=",
"catalogueName",
"xm",
"[",
"\"transient_object_id\"",
"]",
"=",
"objectList",
"[",
"i",
"]",
"[",
"\"id\"",
"]",
"xm",
"[",
"\"catalogue_table_name\"",
"]",
"=",
"self",
".",
"colMaps",
"[",
"catalogueName",
"]",
"[",
"\"description\"",
"]",
"xm",
"[",
"\"catalogue_table_id\"",
"]",
"=",
"self",
".",
"colMaps",
"[",
"catalogueName",
"]",
"[",
"\"table_id\"",
"]",
"xm",
"[",
"\"catalogue_view_id\"",
"]",
"=",
"self",
".",
"colMaps",
"[",
"catalogueName",
"]",
"[",
"\"id\"",
"]",
"if",
"classificationType",
"==",
"\"synonym\"",
":",
"xm",
"[",
"\"classificationReliability\"",
"]",
"=",
"1",
"elif",
"classificationType",
"==",
"\"association\"",
":",
"xm",
"[",
"\"classificationReliability\"",
"]",
"=",
"2",
"elif",
"classificationType",
"==",
"\"annotation\"",
":",
"xm",
"[",
"\"classificationReliability\"",
"]",
"=",
"3",
"xm",
"=",
"self",
".",
"_annotate_crossmatch_with_value_added_parameters",
"(",
"crossmatchDict",
"=",
"xm",
",",
"catalogueName",
"=",
"catalogueName",
",",
"searchPara",
"=",
"theseSearchPara",
",",
"search_name",
"=",
"search_name",
")",
"annotatedcatalogueMatches",
".",
"append",
"(",
"xm",
")",
"catalogueMatches",
"=",
"annotatedcatalogueMatches",
"# IF BRIGHT STAR SEARCH",
"if",
"brightnessFilter",
"==",
"\"bright\"",
"and",
"\"star\"",
"in",
"search_name",
":",
"catalogueMatches",
"=",
"self",
".",
"_bright_star_match",
"(",
"matchedObjects",
"=",
"catalogueMatches",
",",
"catalogueName",
"=",
"catalogueName",
",",
"lowerMagnitudeLimit",
"=",
"lowerMagnitudeLimit",
",",
"magnitudeLimitFilter",
"=",
"searchPara",
"[",
"\"mag column\"",
"]",
")",
"if",
"brightnessFilter",
"==",
"\"general\"",
"and",
"\"galaxy\"",
"in",
"search_name",
"and",
"\"galaxy-like\"",
"not",
"in",
"search_name",
"and",
"\"physical radius kpc\"",
"not",
"in",
"theseSearchPara",
":",
"catalogueMatches",
"=",
"self",
".",
"_galaxy_association_cuts",
"(",
"matchedObjects",
"=",
"catalogueMatches",
",",
"catalogueName",
"=",
"catalogueName",
",",
"lowerMagnitudeLimit",
"=",
"lowerMagnitudeLimit",
",",
"upperMagnitudeLimit",
"=",
"upperMagnitudeLimit",
",",
"magnitudeLimitFilter",
"=",
"searchPara",
"[",
"\"mag column\"",
"]",
")",
"if",
"\"match nearest source only\"",
"in",
"theseSearchPara",
"and",
"theseSearchPara",
"[",
"\"match nearest source only\"",
"]",
"==",
"True",
"and",
"len",
"(",
"catalogueMatches",
")",
":",
"nearestMatches",
"=",
"[",
"]",
"transList",
"=",
"[",
"]",
"for",
"c",
"in",
"catalogueMatches",
":",
"if",
"c",
"[",
"\"transient_object_id\"",
"]",
"not",
"in",
"transList",
":",
"transList",
".",
"append",
"(",
"c",
"[",
"\"transient_object_id\"",
"]",
")",
"nearestMatches",
".",
"append",
"(",
"c",
")",
"catalogueMatches",
"=",
"nearestMatches",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``angular_crossmatch_against_catalogue`` method'",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"FINISHED %s SEARCH IN %0.5f s\"",
"%",
"(",
"search_name",
",",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
",",
")",
")",
"return",
"catalogueMatches"
] |
*perform an angular separation crossmatch against a given catalogue in the database and annotate the crossmatch with some value added parameters (distances, physical separations, sub-type of transient etc)*
**Key Arguments:**
- ``objectList`` -- the list of transient locations to match against the crossmatch catalogue
- ``searchPara`` -- the search parameters for this individual search as lifted from the search algorithm in the sherlock settings file
- ``search_name`` -- the name of the search as given in the sherlock settings file
- ``brightnessFilter`` -- is this search to be constrained by magnitude of the catalogue sources? Default *False*. [bright|faint|general]
- ``physicalSearch`` -- is this angular search a sub-part of a physical separation search
- ``classificationType`` -- synonym, association or annotation. Default *False*
**Return:**
- matchedObjects -- any sources matched against the object
**Usage:**
Take a list of transients from somewhere
.. code-block:: python
transients = [
{'ps1_designation': u'PS1-14aef',
'name': u'4L3Piiq',
'detection_list_id': 2,
'local_comments': u'',
'ra': 0.02548233704918263,
'followup_id': 2065412L,
'dec': -4.284933417540423,
'id': 1000006110041705700L,
'object_classification': 0L
},
{'ps1_designation': u'PS1-13dcr',
'name': u'3I3Phzx',
'detection_list_id': 2,
'local_comments': u'',
'ra': 4.754236999477372,
'followup_id': 1140386L,
'dec': 28.276703631398625,
'id': 1001901011281636100L,
'object_classification': 0L
},
{'ps1_designation': u'PS1-13dhc',
'name': u'3I3Pixd',
'detection_list_id': 2,
'local_comments': u'',
'ra': 1.3324973428505413,
'followup_id': 1202386L,
'dec': 32.98869220595689,
'id': 1000519791325919200L,
'object_classification': 0L
}
]
Then run the ``angular_crossmatch_against_catalogue`` method to crossmatch against the catalogues and return results:
.. code-block:: python
# ANGULAR CONESEARCH ON CATALOGUE
search_name = "ned_d spec sn"
searchPara = self.settings["search algorithm"][search_name]
matchedObjects = xmatcher.angular_crossmatch_against_catalogue(
objectList=transients,
searchPara=searchPara,
search_name=search_name
)
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
|
[
"*",
"perform",
"an",
"angular",
"separation",
"crossmatch",
"against",
"a",
"given",
"catalogue",
"in",
"the",
"database",
"and",
"annotate",
"the",
"crossmatch",
"with",
"some",
"value",
"added",
"parameters",
"(",
"distances",
"physical",
"separations",
"sub",
"-",
"type",
"of",
"transient",
"etc",
")",
"*"
] |
train
|
https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/transient_catalogue_crossmatch.py#L264-L512
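A minimal standalone sketch of the "match nearest source only" filtering applied at the end of the method above: because the conesearch returns matches ordered by angular separation, keeping the first match seen per transient id keeps the nearest one. The function name and the dictionary shape here are illustrative, not part of the sherlock API.

def keep_nearest_only(catalogueMatches):
    # matches are assumed pre-sorted by separation, nearest first
    seen = set()
    nearest = []
    for match in catalogueMatches:
        tid = match["transient_object_id"]
        if tid not in seen:
            seen.add(tid)
            nearest.append(match)
    return nearest

# illustrative values only
matches = [
    {"transient_object_id": 1, "separationArcsec": 0.4},
    {"transient_object_id": 1, "separationArcsec": 2.1},
    {"transient_object_id": 2, "separationArcsec": 1.0},
]
print(keep_nearest_only(matches))  # keeps the 0.4" and 1.0" rows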
|
thespacedoctor/sherlock
|
sherlock/transient_catalogue_crossmatch.py
|
transient_catalogue_crossmatch._annotate_crossmatch_with_value_added_parameters
|
def _annotate_crossmatch_with_value_added_parameters(
self,
crossmatchDict,
catalogueName,
searchPara,
search_name):
"""*annotate each crossmatch with physical parameters such are distances etc*
**Key Arguments:**
- ``crossmatchDict`` -- the crossmatch dictionary
- ``catalogueName`` -- the name of the catalogue the crossmatch results from
- ``searchPara`` -- the search parameters for this individual search as lifted from the search algorithm in the sherlock settings file
- ``search_name`` -- the name of the search as given in the sherlock settings file
**Return:**
- ``crossmatchDict`` -- the annotated crossmatch dictionary
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug(
'starting the ``_annotate_crossmatch_with_value_added_parameters`` method')
redshift = None
z = None
scale = None
distance = None
distance_modulus = None
major_axis_arcsec = None
direct_distance = None
direct_distance_scale = None
direct_distance_modulus = None
# IF THERE'S A REDSHIFT, CALCULATE PHYSICAL PARAMETERS
if 'z' in crossmatchDict:
# THE CATALOGUE HAS A REDSHIFT COLUMN
redshift = crossmatchDict['z']
elif 'photoZ' in crossmatchDict:
redshift = crossmatchDict['photoZ']
if redshift and redshift > 0.0:
# CALCULATE DISTANCE MODULUS, ETC
c = converter(log=self.log)
dists = c.redshift_to_distance(
z=redshift,
WM=0.3,
WV=0.7,
H0=70.0
)
if dists:
z = dists['z']
scale = dists["da_scale"]
distance = dists["dl_mpc"]
distance_modulus = dists["dmod"]
# ADD MAJOR AXIS VALUE
if "or within semi major axis" in searchPara and searchPara["or within semi major axis"] == True and "semiMajor" in crossmatchDict and crossmatchDict["semiMajor"]:
major_axis_arcsec = crossmatchDict[
"semiMajor"] * self.colMaps[catalogueName]["semiMajorToArcsec"]
if "semiMajor" in crossmatchDict:
del crossmatchDict["semiMajor"]
# ADD DISTANCE VALUES
if "distance" in crossmatchDict and crossmatchDict["distance"]:
direct_distance = crossmatchDict["distance"]
direct_distance_scale = direct_distance / 206.264806
direct_distance_modulus = 5 * \
math.log10(direct_distance * 1e6) - 5
# crossmatchDict['z'] = z
crossmatchDict['scale'] = scale
crossmatchDict['distance'] = distance
crossmatchDict['distance_modulus'] = distance_modulus
crossmatchDict['major_axis_arcsec'] = major_axis_arcsec
crossmatchDict['direct_distance'] = direct_distance
crossmatchDict['direct_distance_scale'] = direct_distance_scale
crossmatchDict['direct_distance_modulus'] = direct_distance_modulus
crossmatchDict['catalogue_object_type'] = self.colMaps[
catalogueName]["object_type"]
crossmatchDict["search_name"] = search_name
crossmatchDict["raDeg"] = crossmatchDict["ra"]
crossmatchDict["decDeg"] = crossmatchDict["dec"]
del crossmatchDict["ra"]
del crossmatchDict["dec"]
crossmatchDict["original_search_radius_arcsec"] = searchPara[
"angular radius arcsec"]
physical_separation_kpc = None
# CALCULATE MOST ACCURATE PHYSICAL SEPARATION
if crossmatchDict["direct_distance_scale"]:
physical_separation_kpc = crossmatchDict[
"direct_distance_scale"] * crossmatchDict["separationArcsec"]
elif crossmatchDict["scale"]:
physical_separation_kpc = crossmatchDict[
"scale"] * crossmatchDict["separationArcsec"]
crossmatchDict["physical_separation_kpc"] = physical_separation_kpc
self.log.debug(
'completed the ``_annotate_crossmatch_with_value_added_parameters`` method')
return crossmatchDict
|
python
|
def _annotate_crossmatch_with_value_added_parameters(
self,
crossmatchDict,
catalogueName,
searchPara,
search_name):
"""*annotate each crossmatch with physical parameters such are distances etc*
**Key Arguments:**
- ``crossmatchDict`` -- the crossmatch dictionary
- ``catalogueName`` -- the name of the catalogue the crossmatch results from
- ``searchPara`` -- the search parameters for this individual search as lifted from the search algorithm in the sherlock settings file
- ``search_name`` -- the name of the search as given in the sherlock settings file
**Return:**
- ``crossmatchDict`` -- the annotated crossmatch dictionary
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug(
'starting the ``_annotate_crossmatch_with_value_added_parameters`` method')
redshift = None
z = None
scale = None
distance = None
distance_modulus = None
major_axis_arcsec = None
direct_distance = None
direct_distance_scale = None
direct_distance_modulus = None
# IF THERE'S A REDSHIFT, CALCULATE PHYSICAL PARAMETERS
if 'z' in crossmatchDict:
# THE CATALOGUE HAS A REDSHIFT COLUMN
redshift = crossmatchDict['z']
elif 'photoZ' in crossmatchDict:
redshift = crossmatchDict['photoZ']
if redshift and redshift > 0.0:
# CALCULATE DISTANCE MODULUS, ETC
c = converter(log=self.log)
dists = c.redshift_to_distance(
z=redshift,
WM=0.3,
WV=0.7,
H0=70.0
)
if dists:
z = dists['z']
scale = dists["da_scale"]
distance = dists["dl_mpc"]
distance_modulus = dists["dmod"]
# ADD MAJOR AXIS VALUE
if "or within semi major axis" in searchPara and searchPara["or within semi major axis"] == True and "semiMajor" in crossmatchDict and crossmatchDict["semiMajor"]:
major_axis_arcsec = crossmatchDict[
"semiMajor"] * self.colMaps[catalogueName]["semiMajorToArcsec"]
if "semiMajor" in crossmatchDict:
del crossmatchDict["semiMajor"]
# ADD DISTANCE VALUES
if "distance" in crossmatchDict and crossmatchDict["distance"]:
direct_distance = crossmatchDict["distance"]
direct_distance_scale = direct_distance / 206.264806
direct_distance_modulus = 5 * \
math.log10(direct_distance * 1e6) - 5
# crossmatchDict['z'] = z
crossmatchDict['scale'] = scale
crossmatchDict['distance'] = distance
crossmatchDict['distance_modulus'] = distance_modulus
crossmatchDict['major_axis_arcsec'] = major_axis_arcsec
crossmatchDict['direct_distance'] = direct_distance
crossmatchDict['direct_distance_scale'] = direct_distance_scale
crossmatchDict['direct_distance_modulus'] = direct_distance_modulus
crossmatchDict['catalogue_object_type'] = self.colMaps[
catalogueName]["object_type"]
crossmatchDict["search_name"] = search_name
crossmatchDict["raDeg"] = crossmatchDict["ra"]
crossmatchDict["decDeg"] = crossmatchDict["dec"]
del crossmatchDict["ra"]
del crossmatchDict["dec"]
crossmatchDict["original_search_radius_arcsec"] = searchPara[
"angular radius arcsec"]
physical_separation_kpc = None
# CALCULATE MOST ACCURATE PHYSICAL SEPARATION
if crossmatchDict["direct_distance_scale"]:
physical_separation_kpc = crossmatchDict[
"direct_distance_scale"] * crossmatchDict["separationArcsec"]
elif crossmatchDict["scale"]:
physical_separation_kpc = crossmatchDict[
"scale"] * crossmatchDict["separationArcsec"]
crossmatchDict["physical_separation_kpc"] = physical_separation_kpc
self.log.debug(
'completed the ``_annotate_crossmatch_with_value_added_parameters`` method')
return crossmatchDict
|
[
"def",
"_annotate_crossmatch_with_value_added_parameters",
"(",
"self",
",",
"crossmatchDict",
",",
"catalogueName",
",",
"searchPara",
",",
"search_name",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_annotate_crossmatch_with_value_added_parameters`` method'",
")",
"redshift",
"=",
"None",
"z",
"=",
"None",
"scale",
"=",
"None",
"distance",
"=",
"None",
"distance_modulus",
"=",
"None",
"major_axis_arcsec",
"=",
"None",
"direct_distance",
"=",
"None",
"direct_distance_scale",
"=",
"None",
"direct_distance_modulus",
"=",
"None",
"# IF THERE'S A REDSHIFT, CALCULATE PHYSICAL PARAMETERS",
"if",
"'z'",
"in",
"crossmatchDict",
":",
"# THE CATALOGUE HAS A REDSHIFT COLUMN",
"redshift",
"=",
"crossmatchDict",
"[",
"'z'",
"]",
"elif",
"'photoZ'",
"in",
"crossmatchDict",
":",
"redshift",
"=",
"crossmatchDict",
"[",
"'photoZ'",
"]",
"if",
"redshift",
"and",
"redshift",
">",
"0.0",
":",
"# CALCULATE DISTANCE MODULUS, ETC",
"c",
"=",
"converter",
"(",
"log",
"=",
"self",
".",
"log",
")",
"dists",
"=",
"c",
".",
"redshift_to_distance",
"(",
"z",
"=",
"redshift",
",",
"WM",
"=",
"0.3",
",",
"WV",
"=",
"0.7",
",",
"H0",
"=",
"70.0",
")",
"if",
"dists",
":",
"z",
"=",
"dists",
"[",
"'z'",
"]",
"scale",
"=",
"dists",
"[",
"\"da_scale\"",
"]",
"distance",
"=",
"dists",
"[",
"\"dl_mpc\"",
"]",
"distance_modulus",
"=",
"dists",
"[",
"\"dmod\"",
"]",
"# ADD MAJOR AXIS VALUE",
"if",
"\"or within semi major axis\"",
"in",
"searchPara",
"and",
"searchPara",
"[",
"\"or within semi major axis\"",
"]",
"==",
"True",
"and",
"\"semiMajor\"",
"in",
"crossmatchDict",
"and",
"crossmatchDict",
"[",
"\"semiMajor\"",
"]",
":",
"major_axis_arcsec",
"=",
"crossmatchDict",
"[",
"\"semiMajor\"",
"]",
"*",
"self",
".",
"colMaps",
"[",
"catalogueName",
"]",
"[",
"\"semiMajorToArcsec\"",
"]",
"if",
"\"semiMajor\"",
"in",
"crossmatchDict",
":",
"del",
"crossmatchDict",
"[",
"\"semiMajor\"",
"]",
"# ADD DISTANCE VALUES",
"if",
"\"distance\"",
"in",
"crossmatchDict",
"and",
"crossmatchDict",
"[",
"\"distance\"",
"]",
":",
"direct_distance",
"=",
"crossmatchDict",
"[",
"\"distance\"",
"]",
"direct_distance_scale",
"=",
"direct_distance",
"/",
"206.264806",
"direct_distance_modulus",
"=",
"5",
"*",
"math",
".",
"log10",
"(",
"direct_distance",
"*",
"1e6",
")",
"-",
"5",
"# crossmatchDict['z'] = z",
"crossmatchDict",
"[",
"'scale'",
"]",
"=",
"scale",
"crossmatchDict",
"[",
"'distance'",
"]",
"=",
"distance",
"crossmatchDict",
"[",
"'distance_modulus'",
"]",
"=",
"distance_modulus",
"crossmatchDict",
"[",
"'major_axis_arcsec'",
"]",
"=",
"major_axis_arcsec",
"crossmatchDict",
"[",
"'direct_distance'",
"]",
"=",
"direct_distance",
"crossmatchDict",
"[",
"'direct_distance_scale'",
"]",
"=",
"direct_distance_scale",
"crossmatchDict",
"[",
"'direct_distance_modulus'",
"]",
"=",
"direct_distance_modulus",
"crossmatchDict",
"[",
"'catalogue_object_type'",
"]",
"=",
"self",
".",
"colMaps",
"[",
"catalogueName",
"]",
"[",
"\"object_type\"",
"]",
"crossmatchDict",
"[",
"\"search_name\"",
"]",
"=",
"search_name",
"crossmatchDict",
"[",
"\"raDeg\"",
"]",
"=",
"crossmatchDict",
"[",
"\"ra\"",
"]",
"crossmatchDict",
"[",
"\"decDeg\"",
"]",
"=",
"crossmatchDict",
"[",
"\"dec\"",
"]",
"del",
"crossmatchDict",
"[",
"\"ra\"",
"]",
"del",
"crossmatchDict",
"[",
"\"dec\"",
"]",
"crossmatchDict",
"[",
"\"original_search_radius_arcsec\"",
"]",
"=",
"searchPara",
"[",
"\"angular radius arcsec\"",
"]",
"physical_separation_kpc",
"=",
"None",
"# CALCULATE MOST ACCURATE PHYSICAL SEPARATION",
"if",
"crossmatchDict",
"[",
"\"direct_distance_scale\"",
"]",
":",
"physical_separation_kpc",
"=",
"crossmatchDict",
"[",
"\"direct_distance_scale\"",
"]",
"*",
"crossmatchDict",
"[",
"\"separationArcsec\"",
"]",
"elif",
"crossmatchDict",
"[",
"\"scale\"",
"]",
":",
"physical_separation_kpc",
"=",
"crossmatchDict",
"[",
"\"scale\"",
"]",
"*",
"crossmatchDict",
"[",
"\"separationArcsec\"",
"]",
"crossmatchDict",
"[",
"\"physical_separation_kpc\"",
"]",
"=",
"physical_separation_kpc",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_annotate_crossmatch_with_value_added_parameters`` method'",
")",
"return",
"crossmatchDict"
] |
*annotate each crossmatch with physical parameters such as distances etc*
**Key Arguments:**
- ``crossmatchDict`` -- the crossmatch dictionary
- ``catalogueName`` -- the name of the catalogue the crossmatch results from
- ``searchPara`` -- the search parameters for this individual search as lifted from the search algorithm in the sherlock settings file
- ``search_name`` -- the name of the search as given in the sherlock settings file
**Return:**
- ``crossmatchDict`` -- the annotated crossmatch dictionary
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
|
[
"*",
"annotate",
"each",
"crossmatch",
"with",
"physical",
"parameters",
"such",
"are",
"distances",
"etc",
"*"
] |
train
|
https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/transient_catalogue_crossmatch.py#L514-L621
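A short standalone sketch of the direct-distance annotations computed in the method above: the kpc-per-arcsec scale comes from dividing the distance in Mpc by 206.264806, and the distance modulus from 5 * log10(distance in pc) - 5. The constants mirror the code; only the function name is invented here.

import math

def direct_distance_annotations(distance_mpc):
    # kpc per arcsec under the small-angle approximation
    scale = distance_mpc / 206.264806
    # distance modulus, with the distance converted from Mpc to pc
    distance_modulus = 5 * math.log10(distance_mpc * 1e6) - 5
    return scale, distance_modulus

# e.g. a host galaxy at 20 Mpc: ~0.097 kpc/arcsec and a modulus of ~31.5
print(direct_distance_annotations(20.0))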
|
thespacedoctor/sherlock
|
sherlock/transient_catalogue_crossmatch.py
|
transient_catalogue_crossmatch._bright_star_match
|
def _bright_star_match(
self,
matchedObjects,
catalogueName,
magnitudeLimitFilter,
lowerMagnitudeLimit):
"""*perform a bright star match on the crossmatch results if required by the catalogue search*
**Key Arguments:**
- ``matchedObjects`` -- the list of matched sources from the catalogue crossmatch
- ``catalogueName`` -- the name of the catalogue the crossmatch results from
- ``magnitudeLimitFilter`` -- the name of the column containing the magnitude to filter on
- ``lowerMagnitudeLimit`` -- the lower magnitude limit to match bright stars against
**Return:**
- ``brightStarMatches`` -- the trimmed matched sources (bright stars associations only)
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``_bright_star_match`` method')
import decimal
decimal.getcontext().prec = 10
# MATCH BRIGHT STAR ASSOCIATIONS
brightStarMatches = []
for row in matchedObjects:
mag = decimal.Decimal(row[magnitudeLimitFilter])
if mag and mag < lowerMagnitudeLimit:
sep = decimal.Decimal(row["separationArcsec"])
if sep < decimal.Decimal(decimal.Decimal(10)**(-decimal.Decimal(0.2) * mag + decimal.Decimal(3.7))) and sep < 20.:
brightStarMatches.append(row)
self.log.debug('completed the ``_bright_star_match`` method')
return brightStarMatches
|
python
|
def _bright_star_match(
self,
matchedObjects,
catalogueName,
magnitudeLimitFilter,
lowerMagnitudeLimit):
"""*perform a bright star match on the crossmatch results if required by the catalogue search*
**Key Arguments:**
- ``matchedObjects`` -- the list of matched sources from the catalogue crossmatch
- ``catalogueName`` -- the name of the catalogue the crossmatch results from
- ``magnitudeLimitFilter`` -- the name of the column containing the magnitude to filter on
- ``lowerMagnitudeLimit`` -- the lower magnitude limit to match bright stars against
**Return:**
- ``brightStarMatches`` -- the trimmed matched sources (bright stars associations only)
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``_bright_star_match`` method')
import decimal
decimal.getcontext().prec = 10
# MATCH BRIGHT STAR ASSOCIATIONS
brightStarMatches = []
for row in matchedObjects:
mag = decimal.Decimal(row[magnitudeLimitFilter])
if mag and mag < lowerMagnitudeLimit:
sep = decimal.Decimal(row["separationArcsec"])
if sep < decimal.Decimal(decimal.Decimal(10)**(-decimal.Decimal(0.2) * mag + decimal.Decimal(3.7))) and sep < 20.:
brightStarMatches.append(row)
self.log.debug('completed the ``_bright_star_match`` method')
return brightStarMatches
|
[
"def",
"_bright_star_match",
"(",
"self",
",",
"matchedObjects",
",",
"catalogueName",
",",
"magnitudeLimitFilter",
",",
"lowerMagnitudeLimit",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_bright_star_match`` method'",
")",
"import",
"decimal",
"decimal",
".",
"getcontext",
"(",
")",
".",
"prec",
"=",
"10",
"# MATCH BRIGHT STAR ASSOCIATIONS",
"brightStarMatches",
"=",
"[",
"]",
"for",
"row",
"in",
"matchedObjects",
":",
"mag",
"=",
"decimal",
".",
"Decimal",
"(",
"row",
"[",
"magnitudeLimitFilter",
"]",
")",
"if",
"mag",
"and",
"mag",
"<",
"lowerMagnitudeLimit",
":",
"sep",
"=",
"decimal",
".",
"Decimal",
"(",
"row",
"[",
"\"separationArcsec\"",
"]",
")",
"if",
"sep",
"<",
"decimal",
".",
"Decimal",
"(",
"decimal",
".",
"Decimal",
"(",
"10",
")",
"**",
"(",
"-",
"decimal",
".",
"Decimal",
"(",
"0.2",
")",
"*",
"mag",
"+",
"decimal",
".",
"Decimal",
"(",
"3.7",
")",
")",
")",
"and",
"sep",
"<",
"20.",
":",
"brightStarMatches",
".",
"append",
"(",
"row",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_bright_star_match`` method'",
")",
"return",
"brightStarMatches"
] |
*perform a bright star match on the crossmatch results if required by the catalogue search*
**Key Arguments:**
- ``matchedObjects`` -- the list of matched sources from the catalogue crossmatch
- ``catalogueName`` -- the name of the catalogue the crossmatch results from
- ``magnitudeLimitFilter`` -- the name of the column containing the magnitude to filter on
- ``lowerMagnitudeLimit`` -- the lower magnitude limit to match bright stars against
**Return:**
- ``brightStarMatches`` -- the trimmed matched sources (bright stars associations only)
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
|
[
"*",
"perform",
"a",
"bright",
"star",
"match",
"on",
"the",
"crossmatch",
"results",
"if",
"required",
"by",
"the",
"catalogue",
"search",
"*"
] |
train
|
https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/transient_catalogue_crossmatch.py#L623-L665
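The bright-star cut in the method above reduces to a simple magnitude-dependent radius. A hedged standalone sketch follows, using plain floats instead of decimal.Decimal and an invented function name.

def passes_bright_star_cut(mag, separation_arcsec):
    # a match survives if it sits inside 10**(-0.2*mag + 3.7) arcsec
    # and inside a hard 20 arcsec ceiling
    radius = 10 ** (-0.2 * mag + 3.7)
    return separation_arcsec < radius and separation_arcsec < 20.0

# e.g. a 5th-magnitude star has a nominal radius of ~500 arcsec,
# but the 20 arcsec ceiling still applies
print(passes_bright_star_cut(5.0, 15.0))  # True
print(passes_bright_star_cut(5.0, 25.0))  # False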
|
thespacedoctor/sherlock
|
sherlock/transient_catalogue_crossmatch.py
|
transient_catalogue_crossmatch._galaxy_association_cuts
|
def _galaxy_association_cuts(
self,
matchedObjects,
catalogueName,
magnitudeLimitFilter,
upperMagnitudeLimit,
lowerMagnitudeLimit):
"""*perform a bright star match on the crossmatch results if required by the catalogue search*
**Key Arguments:**
- ``matchedObjects`` -- the list of matched sources from the catalogue crossmatch
- ``catalogueName`` -- the name of the catalogue the crossmatch results from
- ``magnitudeLimitFilter`` -- the name of the column containing the magnitude to filter on
- ``lowerMagnitudeLimit`` -- the lower magnitude limit to match general galaxies against
- ``upperMagnitudeLimit`` -- the upper magnitude limit to match general galaxies against
**Return:**
- ``galaxyMatches`` -- the trimmed matched sources (associated galaxies only)
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``_galaxy_association_cuts`` method')
import decimal
decimal.getcontext().prec = 10
# APPLY GALAXY ASSOCIATION CUTS
galaxyMatches = []
for row in matchedObjects:
if not magnitudeLimitFilter or row[magnitudeLimitFilter] == None:
galaxyMatches.append(row)
else:
mag = decimal.Decimal(row[magnitudeLimitFilter])
if mag and mag < lowerMagnitudeLimit and mag > upperMagnitudeLimit:
sep = decimal.Decimal(row["separationArcsec"])
if sep < decimal.Decimal(decimal.Decimal(10)**(decimal.Decimal((decimal.Decimal(25.) - mag) / decimal.Decimal(6.)))):
galaxyMatches.append(row)
self.log.debug('completed the ``_galaxy_association_cuts`` method')
return galaxyMatches
|
python
|
def _galaxy_association_cuts(
self,
matchedObjects,
catalogueName,
magnitudeLimitFilter,
upperMagnitudeLimit,
lowerMagnitudeLimit):
"""*perform a bright star match on the crossmatch results if required by the catalogue search*
**Key Arguments:**
- ``matchedObjects`` -- the list of matched sources from the catalogue crossmatch
- ``catalogueName`` -- the name of the catalogue the crossmatch results from
- ``magnitudeLimitFilter`` -- the name of the column containing the magnitude to filter on
- ``lowerMagnitudeLimit`` -- the lower magnitude limit to match general galaxies against
- ``upperMagnitudeLimit`` -- the upper magnitude limit to match general galaxies against
**Return:**
- ``galaxyMatches`` -- the trimmed matched sources (associated galaxies only)
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``_galaxy_association_cuts`` method')
import decimal
decimal.getcontext().prec = 10
# APPLY GALAXY ASSOCIATION CUTS
galaxyMatches = []
for row in matchedObjects:
if not magnitudeLimitFilter or row[magnitudeLimitFilter] == None:
galaxyMatches.append(row)
else:
mag = decimal.Decimal(row[magnitudeLimitFilter])
if mag and mag < lowerMagnitudeLimit and mag > upperMagnitudeLimit:
sep = decimal.Decimal(row["separationArcsec"])
if sep < decimal.Decimal(decimal.Decimal(10)**(decimal.Decimal((decimal.Decimal(25.) - mag) / decimal.Decimal(6.)))):
galaxyMatches.append(row)
self.log.debug('completed the ``_galaxy_association_cuts`` method')
return galaxyMatches
|
[
"def",
"_galaxy_association_cuts",
"(",
"self",
",",
"matchedObjects",
",",
"catalogueName",
",",
"magnitudeLimitFilter",
",",
"upperMagnitudeLimit",
",",
"lowerMagnitudeLimit",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_galaxy_association_cuts`` method'",
")",
"import",
"decimal",
"decimal",
".",
"getcontext",
"(",
")",
".",
"prec",
"=",
"10",
"# MATCH BRIGHT STAR ASSOCIATIONS",
"galaxyMatches",
"=",
"[",
"]",
"for",
"row",
"in",
"matchedObjects",
":",
"if",
"not",
"magnitudeLimitFilter",
"or",
"row",
"[",
"magnitudeLimitFilter",
"]",
"==",
"None",
":",
"galaxyMatches",
".",
"append",
"(",
"row",
")",
"else",
":",
"mag",
"=",
"decimal",
".",
"Decimal",
"(",
"row",
"[",
"magnitudeLimitFilter",
"]",
")",
"if",
"mag",
"and",
"mag",
"<",
"lowerMagnitudeLimit",
"and",
"mag",
">",
"upperMagnitudeLimit",
":",
"sep",
"=",
"decimal",
".",
"Decimal",
"(",
"row",
"[",
"\"separationArcsec\"",
"]",
")",
"if",
"sep",
"<",
"decimal",
".",
"Decimal",
"(",
"decimal",
".",
"Decimal",
"(",
"10",
")",
"**",
"(",
"decimal",
".",
"Decimal",
"(",
"(",
"decimal",
".",
"Decimal",
"(",
"25.",
")",
"-",
"mag",
")",
"/",
"decimal",
".",
"Decimal",
"(",
"6.",
")",
")",
")",
")",
":",
"galaxyMatches",
".",
"append",
"(",
"row",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_galaxy_association_cuts`` method'",
")",
"return",
"galaxyMatches"
] |
*perform galaxy association cuts on the crossmatch results if required by the catalogue search*
**Key Arguments:**
- ``matchedObjects`` -- the list of matched sources from the catalogue crossmatch
- ``catalogueName`` -- the name of the catalogue the crossmatch results from
- ``magnitudeLimitFilter`` -- the name of the column containing the magnitude to filter on
- ``lowerMagnitudeLimit`` -- the lower magnitude limit to match general galaxies against
- ``upperMagnitudeLimit`` -- the upper magnitude limit to match general galaxies against
**Return:**
- ``galaxyMatches`` -- the trimmed matched sources (associated galaxies only)
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
|
[
"*",
"perform",
"a",
"bright",
"star",
"match",
"on",
"the",
"crossmatch",
"results",
"if",
"required",
"by",
"the",
"catalogue",
"search",
"*"
] |
train
|
https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/transient_catalogue_crossmatch.py#L667-L714
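The general galaxy cut above likewise reduces to a magnitude-dependent radius of 10**((25 - mag) / 6) arcsec, and sources with no catalogued magnitude pass automatically. A simplified sketch follows; the upper/lower magnitude-limit bracketing of the original method is omitted for brevity and the function name is invented.

def passes_galaxy_cut(mag, separation_arcsec):
    if mag is None:
        # no magnitude available, so the match is kept
        return True
    return separation_arcsec < 10 ** ((25.0 - mag) / 6.0)

# e.g. a 19th-magnitude galaxy is associated out to 10 arcsec
print(passes_galaxy_cut(19.0, 8.0))   # True
print(passes_galaxy_cut(19.0, 12.0))  # False
print(passes_galaxy_cut(None, 50.0))  # True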
|
thespacedoctor/sherlock
|
sherlock/transient_catalogue_crossmatch.py
|
transient_catalogue_crossmatch.physical_separation_crossmatch_against_catalogue
|
def physical_separation_crossmatch_against_catalogue(
self,
objectList,
searchPara,
search_name,
brightnessFilter=False,
classificationType=False
):
"""*perform an physical separation crossmatch against a given catalogue in the database*
This search is basically the same as the angular separation search except extra filtering is done to exclude sources outside the physical search radius (matched sources require distance info to calulate physical separations)
**Key Arguments:**
- ``objectList`` -- transients to be crossmatched
- ``searchPara`` -- parameters of the search (from settings file)
- ``search_name`` -- the name of the search
- ``brightnessFilter`` -- is this search to be constrained by magnitude of the catalogue sources? Default *False*. [bright|faint|general]
- ``classificationType`` -- synonym, association or annotation. Default *False*
**Return:**
- matchedObjects -- any sources matched against the object
To run a physical separation crossmatch, run in a similar way to the angular separation crossmatch:
**Usage:**
.. code-block:: python
search_name = "ned spec sn"
searchPara = self.settings["search algorithm"][search_name]
matchedObjects = xmatcher.physical_separation_crossmatch_against_catalogue(
objectList=transients,
searchPara=searchPara,
search_name=search_name
)
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug(
'starting the ``physical_separation_crossmatch_against_catalogue`` method')
start_time = time.time()
bf = brightnessFilter
# SETUP PARAMETERS
tableName = searchPara["database table"]
if bf:
angularRadius = searchPara[bf]["angular radius arcsec"]
physicalRadius = searchPara[bf]["physical radius kpc"]
matchedType = searchPara[bf][classificationType]
if "match nearest source only" in searchPara[bf]:
nearestOnly = searchPara[bf]["match nearest source only"]
else:
nearestOnly = False
else:
angularRadius = searchPara["angular radius arcsec"]
physicalRadius = searchPara["physical radius kpc"]
matchedType = searchPara[classificationType]
if "match nearest source only" in searchPara:
nearestOnly = searchPara["match nearest source only"]
else:
nearestOnly = False
matchedObjects = []
matchSubset = []
# RETURN ALL ANGULAR MATCHES BEFORE RETURNING NEAREST PHYSICAL SEARCH
tmpSearchPara = dict(searchPara)
tmpSearchPara["match nearest source only"] = False
# ANGULAR CONESEARCH ON CATALOGUE - RETURN ALL MATCHES
catalogueMatches = self.angular_crossmatch_against_catalogue(
objectList=objectList,
searchPara=tmpSearchPara,
search_name=search_name,
physicalSearch=True,
brightnessFilter=brightnessFilter,
classificationType=classificationType
)
# OK - WE HAVE SOME ANGULAR SEPARATION MATCHES. NOW SEARCH THROUGH THESE FOR MATCHES WITH
# A PHYSICAL SEPARATION WITHIN THE PHYSICAL RADIUS.
if catalogueMatches:
for row in catalogueMatches:
thisMatch = False
physical_separation_kpc = row["physical_separation_kpc"]
newsearch_name = search_name
# FIRST CHECK FOR MAJOR AXIS MEASUREMENT
# BYPASS NED FAULTY AXES MEASUREMENTS:
# https://gist.github.com/search?utf8=%E2%9C%93&q=user%3Athespacedoctor+ned
if row["major_axis_arcsec"] and ("ned" not in search_name or (row["unkMag"] and row["unkMag"] < 20.)):
if row["separationArcsec"] < row["major_axis_arcsec"] * self.settings["galaxy radius stetch factor"]:
thisMatch = True
newsearch_name = newsearch_name + \
" (within %s * major axis)" % (
self.settings["galaxy radius stetch factor"],)
newAngularSep = row[
"major_axis_arcsec"] * self.settings["galaxy radius stetch factor"]
else:
continue
# NOW CHECK FOR A DIRECT DISTANCE MEASUREMENT
elif row["direct_distance_scale"] and physical_separation_kpc < physicalRadius:
if row["separationArcsec"] > 300.:
continue
thisMatch = True
newsearch_name = newsearch_name + " (direct distance)"
newAngularSep = physicalRadius / \
row["direct_distance_scale"]
# NEW CHECK FOR A REDSHIFT DISTANCE
elif row["scale"] and physical_separation_kpc < physicalRadius:
thisMatch = True
newsearch_name = newsearch_name + " (redshift distance)"
newAngularSep = physicalRadius / row["scale"]
if thisMatch == True:
row["physical_separation_kpc"] = physical_separation_kpc
row["original_search_radius_arcsec"] = newAngularSep
if physical_separation_kpc:
self.log.debug(
"\t\tPhysical separation = %.2f kpc" % (physical_separation_kpc,))
row["search_name"] = newsearch_name
matchSubset.append(row)
if matchSubset:
from operator import itemgetter
matchSubset = sorted(matchSubset, key=itemgetter(
'physical_separation_kpc'), reverse=False)
if nearestOnly == True:
theseMatches = matchSubset[0]
else:
theseMatches = matchSubset
matchedObjects = matchSubset
self.log.debug(
'completed the ``physical_separation_crossmatch_against_catalogue`` method')
self.log.debug("FINISHED %s SEARCH IN %0.5f s" %
(search_name, time.time() - start_time,))
return matchedObjects
|
python
|
def physical_separation_crossmatch_against_catalogue(
self,
objectList,
searchPara,
search_name,
brightnessFilter=False,
classificationType=False
):
"""*perform an physical separation crossmatch against a given catalogue in the database*
This search is basically the same as the angular separation search except extra filtering is done to exclude sources outside the physical search radius (matched sources require distance info to calulate physical separations)
**Key Arguments:**
- ``objectList`` -- transients to be crossmatched
- ``searchPara`` -- parameters of the search (from settings file)
- ``search_name`` -- the name of the search
- ``brightnessFilter`` -- is this search to be constrained by magnitude of the catalogue sources? Default *False*. [bright|faint|general]
- ``classificationType`` -- synonym, association or annotation. Default *False*
**Return:**
- matchedObjects -- any sources matched against the object
To run a physical separation crossmatch, run in a similar way to the angular separation crossmatch:
**Usage:**
.. code-block:: python
search_name = "ned spec sn"
searchPara = self.settings["search algorithm"][search_name]
matchedObjects = xmatcher.physical_separation_crossmatch_against_catalogue(
objectList=transients,
searchPara=searchPara,
search_name=search_name
)
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug(
'starting the ``physical_separation_crossmatch_against_catalogue`` method')
start_time = time.time()
bf = brightnessFilter
# SETUP PARAMETERS
tableName = searchPara["database table"]
if bf:
angularRadius = searchPara[bf]["angular radius arcsec"]
physicalRadius = searchPara[bf]["physical radius kpc"]
matchedType = searchPara[bf][classificationType]
if "match nearest source only" in searchPara[bf]:
nearestOnly = searchPara[bf]["match nearest source only"]
else:
nearestOnly = False
else:
angularRadius = searchPara["angular radius arcsec"]
physicalRadius = searchPara["physical radius kpc"]
matchedType = searchPara[classificationType]
if "match nearest source only" in searchPara:
nearestOnly = searchPara["match nearest source only"]
else:
nearestOnly = False
matchedObjects = []
matchSubset = []
# RETURN ALL ANGULAR MATCHES BEFORE RETURNING NEAREST PHYSICAL SEARCH
tmpSearchPara = dict(searchPara)
tmpSearchPara["match nearest source only"] = False
# ANGULAR CONESEARCH ON CATALOGUE - RETURN ALL MATCHES
catalogueMatches = self.angular_crossmatch_against_catalogue(
objectList=objectList,
searchPara=tmpSearchPara,
search_name=search_name,
physicalSearch=True,
brightnessFilter=brightnessFilter,
classificationType=classificationType
)
# OK - WE HAVE SOME ANGULAR SEPARATION MATCHES. NOW SEARCH THROUGH THESE FOR MATCHES WITH
# A PHYSICAL SEPARATION WITHIN THE PHYSICAL RADIUS.
if catalogueMatches:
for row in catalogueMatches:
thisMatch = False
physical_separation_kpc = row["physical_separation_kpc"]
newsearch_name = search_name
# FIRST CHECK FOR MAJOR AXIS MEASUREMENT
# BYPASS NED FAULTY AXES MEASUREMENTS:
# https://gist.github.com/search?utf8=%E2%9C%93&q=user%3Athespacedoctor+ned
if row["major_axis_arcsec"] and ("ned" not in search_name or (row["unkMag"] and row["unkMag"] < 20.)):
if row["separationArcsec"] < row["major_axis_arcsec"] * self.settings["galaxy radius stetch factor"]:
thisMatch = True
newsearch_name = newsearch_name + \
" (within %s * major axis)" % (
self.settings["galaxy radius stetch factor"],)
newAngularSep = row[
"major_axis_arcsec"] * self.settings["galaxy radius stetch factor"]
else:
continue
# NOW CHECK FOR A DIRECT DISTANCE MEASUREMENT
elif row["direct_distance_scale"] and physical_separation_kpc < physicalRadius:
if row["separationArcsec"] > 300.:
continue
thisMatch = True
newsearch_name = newsearch_name + " (direct distance)"
newAngularSep = physicalRadius / \
row["direct_distance_scale"]
# NEW CHECK FOR A REDSHIFT DISTANCE
elif row["scale"] and physical_separation_kpc < physicalRadius:
thisMatch = True
newsearch_name = newsearch_name + " (redshift distance)"
newAngularSep = physicalRadius / row["scale"]
if thisMatch == True:
row["physical_separation_kpc"] = physical_separation_kpc
row["original_search_radius_arcsec"] = newAngularSep
if physical_separation_kpc:
self.log.debug(
"\t\tPhysical separation = %.2f kpc" % (physical_separation_kpc,))
row["search_name"] = newsearch_name
matchSubset.append(row)
if matchSubset:
from operator import itemgetter
matchSubset = sorted(matchSubset, key=itemgetter(
'physical_separation_kpc'), reverse=False)
if nearestOnly == True:
theseMatches = matchSubset[0]
else:
theseMatches = matchSubset
matchedObjects = matchSubset
self.log.debug(
'completed the ``physical_separation_crossmatch_against_catalogue`` method')
self.log.debug("FINISHED %s SEARCH IN %0.5f s" %
(search_name, time.time() - start_time,))
return matchedObjects
|
[
"def",
"physical_separation_crossmatch_against_catalogue",
"(",
"self",
",",
"objectList",
",",
"searchPara",
",",
"search_name",
",",
"brightnessFilter",
"=",
"False",
",",
"classificationType",
"=",
"False",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``physical_separation_crossmatch_against_catalogue`` method'",
")",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"bf",
"=",
"brightnessFilter",
"# SETUP PARAMETERS",
"tableName",
"=",
"searchPara",
"[",
"\"database table\"",
"]",
"if",
"bf",
":",
"angularRadius",
"=",
"searchPara",
"[",
"bf",
"]",
"[",
"\"angular radius arcsec\"",
"]",
"physicalRadius",
"=",
"searchPara",
"[",
"bf",
"]",
"[",
"\"physical radius kpc\"",
"]",
"matchedType",
"=",
"searchPara",
"[",
"bf",
"]",
"[",
"classificationType",
"]",
"if",
"\"match nearest source only\"",
"in",
"searchPara",
"[",
"bf",
"]",
":",
"nearestOnly",
"=",
"searchPara",
"[",
"bf",
"]",
"[",
"\"match nearest source only\"",
"]",
"else",
":",
"nearestOnly",
"=",
"False",
"else",
":",
"angularRadius",
"=",
"searchPara",
"[",
"\"angular radius arcsec\"",
"]",
"physicalRadius",
"=",
"searchPara",
"[",
"\"physical radius kpc\"",
"]",
"matchedType",
"=",
"searchPara",
"[",
"classificationType",
"]",
"if",
"\"match nearest source only\"",
"in",
"searchPara",
":",
"nearestOnly",
"=",
"searchPara",
"[",
"\"match nearest source only\"",
"]",
"else",
":",
"nearestOnly",
"=",
"False",
"matchedObjects",
"=",
"[",
"]",
"matchSubset",
"=",
"[",
"]",
"# RETURN ALL ANGULAR MATCHES BEFORE RETURNING NEAREST PHYSICAL SEARCH",
"tmpSearchPara",
"=",
"dict",
"(",
"searchPara",
")",
"tmpSearchPara",
"[",
"\"match nearest source only\"",
"]",
"=",
"False",
"# ANGULAR CONESEARCH ON CATALOGUE - RETURN ALL MATCHES",
"catalogueMatches",
"=",
"self",
".",
"angular_crossmatch_against_catalogue",
"(",
"objectList",
"=",
"objectList",
",",
"searchPara",
"=",
"tmpSearchPara",
",",
"search_name",
"=",
"search_name",
",",
"physicalSearch",
"=",
"True",
",",
"brightnessFilter",
"=",
"brightnessFilter",
",",
"classificationType",
"=",
"classificationType",
")",
"# OK - WE HAVE SOME ANGULAR SEPARATION MATCHES. NOW SEARCH THROUGH THESE FOR MATCHES WITH",
"# A PHYSICAL SEPARATION WITHIN THE PHYSICAL RADIUS.",
"if",
"catalogueMatches",
":",
"for",
"row",
"in",
"catalogueMatches",
":",
"thisMatch",
"=",
"False",
"physical_separation_kpc",
"=",
"row",
"[",
"\"physical_separation_kpc\"",
"]",
"newsearch_name",
"=",
"search_name",
"# FIRST CHECK FOR MAJOR AXIS MEASUREMENT",
"# BYPASS NED FAULTY AXES MEASUREMENTS:",
"# https://gist.github.com/search?utf8=%E2%9C%93&q=user%3Athespacedoctor+ned",
"if",
"row",
"[",
"\"major_axis_arcsec\"",
"]",
"and",
"(",
"\"ned\"",
"not",
"in",
"search_name",
"or",
"(",
"row",
"[",
"\"unkMag\"",
"]",
"and",
"row",
"[",
"\"unkMag\"",
"]",
"<",
"20.",
")",
")",
":",
"if",
"row",
"[",
"\"separationArcsec\"",
"]",
"<",
"row",
"[",
"\"major_axis_arcsec\"",
"]",
"*",
"self",
".",
"settings",
"[",
"\"galaxy radius stetch factor\"",
"]",
":",
"thisMatch",
"=",
"True",
"newsearch_name",
"=",
"newsearch_name",
"+",
"\" (within %s * major axis)\"",
"%",
"(",
"self",
".",
"settings",
"[",
"\"galaxy radius stetch factor\"",
"]",
",",
")",
"newAngularSep",
"=",
"row",
"[",
"\"major_axis_arcsec\"",
"]",
"*",
"self",
".",
"settings",
"[",
"\"galaxy radius stetch factor\"",
"]",
"else",
":",
"continue",
"# NOW CHECK FOR A DIRECT DISTANCE MEASUREMENT",
"elif",
"row",
"[",
"\"direct_distance_scale\"",
"]",
"and",
"physical_separation_kpc",
"<",
"physicalRadius",
":",
"if",
"row",
"[",
"\"separationArcsec\"",
"]",
">",
"300.",
":",
"continue",
"thisMatch",
"=",
"True",
"newsearch_name",
"=",
"newsearch_name",
"+",
"\" (direct distance)\"",
"newAngularSep",
"=",
"physicalRadius",
"/",
"row",
"[",
"\"direct_distance_scale\"",
"]",
"# NEW CHECK FOR A REDSHIFT DISTANCE",
"elif",
"row",
"[",
"\"scale\"",
"]",
"and",
"physical_separation_kpc",
"<",
"physicalRadius",
":",
"thisMatch",
"=",
"True",
"newsearch_name",
"=",
"newsearch_name",
"+",
"\" (redshift distance)\"",
"newAngularSep",
"=",
"physicalRadius",
"/",
"row",
"[",
"\"scale\"",
"]",
"if",
"thisMatch",
"==",
"True",
":",
"row",
"[",
"\"physical_separation_kpc\"",
"]",
"=",
"physical_separation_kpc",
"row",
"[",
"\"original_search_radius_arcsec\"",
"]",
"=",
"newAngularSep",
"if",
"physical_separation_kpc",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"\\t\\tPhysical separation = %.2f kpc\"",
"%",
"(",
"physical_separation_kpc",
",",
")",
")",
"row",
"[",
"\"search_name\"",
"]",
"=",
"newsearch_name",
"matchSubset",
".",
"append",
"(",
"row",
")",
"if",
"matchSubset",
":",
"from",
"operator",
"import",
"itemgetter",
"matchSubset",
"=",
"sorted",
"(",
"matchSubset",
",",
"key",
"=",
"itemgetter",
"(",
"'physical_separation_kpc'",
")",
",",
"reverse",
"=",
"False",
")",
"if",
"nearestOnly",
"==",
"True",
":",
"theseMatches",
"=",
"matchSubset",
"[",
"0",
"]",
"else",
":",
"theseMatches",
"=",
"matchSubset",
"matchedObjects",
"=",
"matchSubset",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``physical_separation_crossmatch_against_catalogue`` method'",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"FINISHED %s SEARCH IN %0.5f s\"",
"%",
"(",
"search_name",
",",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
",",
")",
")",
"return",
"matchedObjects"
] |
*perform a physical separation crossmatch against a given catalogue in the database*
This search is basically the same as the angular separation search except extra filtering is done to exclude sources outside the physical search radius (matched sources require distance info to calculate physical separations)
**Key Arguments:**
- ``objectList`` -- transients to be crossmatched
- ``searchPara`` -- parameters of the search (from settings file)
- ``search_name`` -- the name of the search
- ``brightnessFilter`` -- is this search to be constrained by magnitude of the catalogue sources? Default *False*. [bright|faint|general]
- ``classificationType`` -- synonym, association or annotation. Default *False*
**Return:**
- matchedObjects -- any sources matched against the object
To run a physical separation crossmatch, run in a similar way to the angular separation crossmatch:
**Usage:**
.. code-block:: python
search_name = "ned spec sn"
searchPara = self.settings["search algorithm"][search_name]
matchedObjects = xmatcher.physical_separation_crossmatch_against_catalogue(
objectList=transients,
searchPara=searchPara,
search_name=search_name
)
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
|
[
"*",
"perform",
"an",
"physical",
"separation",
"crossmatch",
"against",
"a",
"given",
"catalogue",
"in",
"the",
"database",
"*"
] |
train
|
https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/transient_catalogue_crossmatch.py#L716-L866
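When a match carries a kpc-per-arcsec scale, the method above converts the physical search radius back into an angular one for reporting. A minimal sketch of that conversion, with an invented function name:

def physical_to_angular_radius(physical_radius_kpc, scale_kpc_per_arcsec):
    # e.g. a 50 kpc radius at 0.097 kpc/arcsec is roughly 515 arcsec
    return physical_radius_kpc / scale_kpc_per_arcsec

print(physical_to_angular_radius(50.0, 0.097))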
|
pytroll/posttroll
|
posttroll/publisher.py
|
get_own_ip
|
def get_own_ip():
"""Get the host's ip number.
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
sock.connect(("8.8.8.8", 80))
except socket.gaierror:
ip_ = "127.0.0.1"
else:
ip_ = sock.getsockname()[0]
finally:
sock.close()
return ip_
|
python
|
def get_own_ip():
"""Get the host's ip number.
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
sock.connect(("8.8.8.8", 80))
except socket.gaierror:
ip_ = "127.0.0.1"
else:
ip_ = sock.getsockname()[0]
finally:
sock.close()
return ip_
|
[
"def",
"get_own_ip",
"(",
")",
":",
"sock",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_DGRAM",
")",
"try",
":",
"sock",
".",
"connect",
"(",
"(",
"\"8.8.8.8\"",
",",
"80",
")",
")",
"except",
"socket",
".",
"gaierror",
":",
"ip_",
"=",
"\"127.0.0.1\"",
"else",
":",
"ip_",
"=",
"sock",
".",
"getsockname",
"(",
")",
"[",
"0",
"]",
"finally",
":",
"sock",
".",
"close",
"(",
")",
"return",
"ip_"
] |
Get the host's ip number.
|
[
"Get",
"the",
"host",
"s",
"ip",
"number",
"."
] |
train
|
https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/publisher.py#L41-L53
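The UDP socket trick in get_own_ip never sends a packet: connecting a datagram socket only asks the OS which local interface would route to the given address, and getsockname then reports that interface's IP. A small usage sketch:

import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
    sock.connect(("8.8.8.8", 80))   # no traffic is generated
    print(sock.getsockname()[0])    # the outward-facing local IP
finally:
    sock.close()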
|
pytroll/posttroll
|
posttroll/publisher.py
|
Publisher.send
|
def send(self, msg):
"""Send the given message.
"""
with self._pub_lock:
self.publish.send_string(msg)
return self
|
python
|
def send(self, msg):
"""Send the given message.
"""
with self._pub_lock:
self.publish.send_string(msg)
return self
|
[
"def",
"send",
"(",
"self",
",",
"msg",
")",
":",
"with",
"self",
".",
"_pub_lock",
":",
"self",
".",
"publish",
".",
"send_string",
"(",
"msg",
")",
"return",
"self"
] |
Send the given message.
|
[
"Send",
"the",
"given",
"message",
"."
] |
train
|
https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/publisher.py#L119-L124
|
pytroll/posttroll
|
posttroll/publisher.py
|
Publisher.stop
|
def stop(self):
"""Stop the publisher.
"""
self.publish.setsockopt(zmq.LINGER, 1)
self.publish.close()
return self
|
python
|
def stop(self):
"""Stop the publisher.
"""
self.publish.setsockopt(zmq.LINGER, 1)
self.publish.close()
return self
|
[
"def",
"stop",
"(",
"self",
")",
":",
"self",
".",
"publish",
".",
"setsockopt",
"(",
"zmq",
".",
"LINGER",
",",
"1",
")",
"self",
".",
"publish",
".",
"close",
"(",
")",
"return",
"self"
] |
Stop the publisher.
|
[
"Stop",
"the",
"publisher",
"."
] |
train
|
https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/publisher.py#L126-L131
|
pytroll/posttroll
|
posttroll/publisher.py
|
Publisher.heartbeat
|
def heartbeat(self, min_interval=0):
"""Send a heartbeat ... but only if *min_interval* seconds has passed
since last beat.
"""
if not self._heartbeat:
self._heartbeat = _PublisherHeartbeat(self)
self._heartbeat(min_interval)
|
python
|
def heartbeat(self, min_interval=0):
"""Send a heartbeat ... but only if *min_interval* seconds has passed
since last beat.
"""
if not self._heartbeat:
self._heartbeat = _PublisherHeartbeat(self)
self._heartbeat(min_interval)
|
[
"def",
"heartbeat",
"(",
"self",
",",
"min_interval",
"=",
"0",
")",
":",
"if",
"not",
"self",
".",
"_heartbeat",
":",
"self",
".",
"_heartbeat",
"=",
"_PublisherHeartbeat",
"(",
"self",
")",
"self",
".",
"_heartbeat",
"(",
"min_interval",
")"
] |
Send a heartbeat ... but only if *min_interval* seconds has passed
since last beat.
|
[
"Send",
"a",
"heartbeat",
"...",
"but",
"only",
"if",
"*",
"min_interval",
"*",
"seconds",
"has",
"passed",
"since",
"last",
"beat",
"."
] |
train
|
https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/publisher.py#L133-L139
|
pytroll/posttroll
|
posttroll/publisher.py
|
NoisyPublisher.start
|
def start(self):
"""Start the publisher.
"""
pub_addr = "tcp://*:" + str(self._port)
self._publisher = self._publisher_class(pub_addr, self._name)
LOGGER.debug("entering publish %s", str(self._publisher.destination))
addr = ("tcp://" + str(get_own_ip()) + ":" +
str(self._publisher.port_number))
self._broadcaster = sendaddressservice(self._name, addr,
self._aliases,
self._broadcast_interval,
self._nameservers).start()
return self._publisher
|
python
|
def start(self):
"""Start the publisher.
"""
pub_addr = "tcp://*:" + str(self._port)
self._publisher = self._publisher_class(pub_addr, self._name)
LOGGER.debug("entering publish %s", str(self._publisher.destination))
addr = ("tcp://" + str(get_own_ip()) + ":" +
str(self._publisher.port_number))
self._broadcaster = sendaddressservice(self._name, addr,
self._aliases,
self._broadcast_interval,
self._nameservers).start()
return self._publisher
|
[
"def",
"start",
"(",
"self",
")",
":",
"pub_addr",
"=",
"\"tcp://*:\"",
"+",
"str",
"(",
"self",
".",
"_port",
")",
"self",
".",
"_publisher",
"=",
"self",
".",
"_publisher_class",
"(",
"pub_addr",
",",
"self",
".",
"_name",
")",
"LOGGER",
".",
"debug",
"(",
"\"entering publish %s\"",
",",
"str",
"(",
"self",
".",
"_publisher",
".",
"destination",
")",
")",
"addr",
"=",
"(",
"\"tcp://\"",
"+",
"str",
"(",
"get_own_ip",
"(",
")",
")",
"+",
"\":\"",
"+",
"str",
"(",
"self",
".",
"_publisher",
".",
"port_number",
")",
")",
"self",
".",
"_broadcaster",
"=",
"sendaddressservice",
"(",
"self",
".",
"_name",
",",
"addr",
",",
"self",
".",
"_aliases",
",",
"self",
".",
"_broadcast_interval",
",",
"self",
".",
"_nameservers",
")",
".",
"start",
"(",
")",
"return",
"self",
".",
"_publisher"
] |
Start the publisher.
|
[
"Start",
"the",
"publisher",
"."
] |
train
|
https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/publisher.py#L197-L209
|
pytroll/posttroll
|
posttroll/publisher.py
|
NoisyPublisher.stop
|
def stop(self):
"""Stop the publisher.
"""
LOGGER.debug("exiting publish")
if self._publisher is not None:
self._publisher.stop()
self._publisher = None
if self._broadcaster is not None:
self._broadcaster.stop()
self._broadcaster = None
|
python
|
def stop(self):
"""Stop the publisher.
"""
LOGGER.debug("exiting publish")
if self._publisher is not None:
self._publisher.stop()
self._publisher = None
if self._broadcaster is not None:
self._broadcaster.stop()
self._broadcaster = None
|
[
"def",
"stop",
"(",
"self",
")",
":",
"LOGGER",
".",
"debug",
"(",
"\"exiting publish\"",
")",
"if",
"self",
".",
"_publisher",
"is",
"not",
"None",
":",
"self",
".",
"_publisher",
".",
"stop",
"(",
")",
"self",
".",
"_publisher",
"=",
"None",
"if",
"self",
".",
"_broadcaster",
"is",
"not",
"None",
":",
"self",
".",
"_broadcaster",
".",
"stop",
"(",
")",
"self",
".",
"_broadcaster",
"=",
"None"
] |
Stop the publisher.
|
[
"Stop",
"the",
"publisher",
"."
] |
train
|
https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/publisher.py#L216-L225
|
dfm/transit
|
transit/simple.py
|
SimpleSystem.light_curve
|
def light_curve(self, t, texp=0.0, tol=1e-8, maxdepth=4):
"""
Get the light curve evaluated at a list of times using the current
model.
:param t:
The times where the light curve should be evaluated (in days).
:param tol:
The stopping criterion for the exposure time integration.
:param maxdepth:
The maximum recursion depth of the exposure time integrator.
"""
t = np.atleast_1d(t)
return CythonSolver().simple_light_curve(self._get_params(),
t, texp, tol, maxdepth)
|
python
|
def light_curve(self, t, texp=0.0, tol=1e-8, maxdepth=4):
"""
Get the light curve evaluated at a list of times using the current
model.
:param t:
The times where the light curve should be evaluated (in days).
:param tol:
The stopping criterion for the exposure time integration.
:param maxdepth:
The maximum recursion depth of the exposure time integrator.
"""
t = np.atleast_1d(t)
return CythonSolver().simple_light_curve(self._get_params(),
t, texp, tol, maxdepth)
|
[
"def",
"light_curve",
"(",
"self",
",",
"t",
",",
"texp",
"=",
"0.0",
",",
"tol",
"=",
"1e-8",
",",
"maxdepth",
"=",
"4",
")",
":",
"t",
"=",
"np",
".",
"atleast_1d",
"(",
"t",
")",
"return",
"CythonSolver",
"(",
")",
".",
"simple_light_curve",
"(",
"self",
".",
"_get_params",
"(",
")",
",",
"t",
",",
"texp",
",",
"tol",
",",
"maxdepth",
")"
] |
Get the light curve evaluated at a list of times using the current
model.
:param t:
The times where the light curve should be evaluated (in days).
:param tol:
The stopping criterion for the exposure time integration.
:param maxdepth:
The maximum recursion depth of the exposure time integrator.
|
[
"Get",
"the",
"light",
"curve",
"evaluated",
"at",
"a",
"list",
"of",
"times",
"using",
"the",
"current",
"model",
"."
] |
train
|
https://github.com/dfm/transit/blob/482d99b506657fa3fd54a388f9c6be13b9e57bce/transit/simple.py#L68-L85
|
dfm/transit
|
transit/simple.py
|
SimpleSystem.light_curve_gradient
|
def light_curve_gradient(self, t, texp=0.0, tol=1e-8, maxdepth=4):
"""
Get the light curve evaluated at a list of times using the current
model.
:param t:
The times where the light curve should be evaluated (in days).
:param tol:
The stopping criterion for the exposure time integration.
:param maxdepth:
The maximum recursion depth of the exposure time integrator.
"""
t = np.atleast_1d(t)
f, df = CythonSolver().simple_gradient(self._get_params(),
t, texp, tol, maxdepth)
return f, df
|
python
|
def light_curve_gradient(self, t, texp=0.0, tol=1e-8, maxdepth=4):
"""
Get the light curve evaluated at a list of times using the current
model.
:param t:
The times where the light curve should be evaluated (in days).
:param tol:
The stopping criterion for the exposure time integration.
:param maxdepth:
The maximum recursion depth of the exposure time integrator.
"""
t = np.atleast_1d(t)
f, df = CythonSolver().simple_gradient(self._get_params(),
t, texp, tol, maxdepth)
return f, df
|
[
"def",
"light_curve_gradient",
"(",
"self",
",",
"t",
",",
"texp",
"=",
"0.0",
",",
"tol",
"=",
"1e-8",
",",
"maxdepth",
"=",
"4",
")",
":",
"t",
"=",
"np",
".",
"atleast_1d",
"(",
"t",
")",
"f",
",",
"df",
"=",
"CythonSolver",
"(",
")",
".",
"simple_gradient",
"(",
"self",
".",
"_get_params",
"(",
")",
",",
"t",
",",
"texp",
",",
"tol",
",",
"maxdepth",
")",
"return",
"f",
",",
"df"
] |
Get the light curve evaluated at a list of times using the current
model.
:param t:
The times where the light curve should be evaluated (in days).
:param tol:
The stopping criterion for the exposure time integration.
:param maxdepth:
The maximum recursion depth of the exposure time integrator.
|
[
"Get",
"the",
"light",
"curve",
"evaluated",
"at",
"a",
"list",
"of",
"times",
"using",
"the",
"current",
"model",
"."
] |
train
|
https://github.com/dfm/transit/blob/482d99b506657fa3fd54a388f9c6be13b9e57bce/transit/simple.py#L87-L106
|
zeroSteiner/smoke-zephyr
|
smoke_zephyr/utilities.py
|
configure_stream_logger
|
def configure_stream_logger(logger='', level=None, formatter='%(levelname)-8s %(message)s'):
"""
Configure the default stream handler for logging messages to the console,
remove other logging handlers, and enable capturing warnings.
.. versionadded:: 1.3.0
:param str logger: The logger to add the stream handler for.
:param level: The level to set the logger to, will default to WARNING if no level is specified.
:type level: None, int, str
:param formatter: The format to use for logging messages to the console.
:type formatter: str, :py:class:`logging.Formatter`
:return: The new configured stream handler.
:rtype: :py:class:`logging.StreamHandler`
"""
level = level or logging.WARNING
if isinstance(level, str):
level = getattr(logging, level, None)
if level is None:
raise ValueError('invalid log level: ' + level)
root_logger = logging.getLogger('')
for handler in root_logger.handlers:
root_logger.removeHandler(handler)
logging.getLogger(logger).setLevel(logging.DEBUG)
console_log_handler = logging.StreamHandler()
console_log_handler.setLevel(level)
if isinstance(formatter, str):
formatter = logging.Formatter(formatter)
elif not isinstance(formatter, logging.Formatter):
raise TypeError('formatter must be an instance of logging.Formatter')
console_log_handler.setFormatter(formatter)
logging.getLogger(logger).addHandler(console_log_handler)
logging.captureWarnings(True)
return console_log_handler
|
python
|
def configure_stream_logger(logger='', level=None, formatter='%(levelname)-8s %(message)s'):
"""
Configure the default stream handler for logging messages to the console,
remove other logging handlers, and enable capturing warnings.
.. versionadded:: 1.3.0
:param str logger: The logger to add the stream handler for.
:param level: The level to set the logger to, will default to WARNING if no level is specified.
:type level: None, int, str
:param formatter: The format to use for logging messages to the console.
:type formatter: str, :py:class:`logging.Formatter`
:return: The new configured stream handler.
:rtype: :py:class:`logging.StreamHandler`
"""
level = level or logging.WARNING
if isinstance(level, str):
level = getattr(logging, level, None)
if level is None:
raise ValueError('invalid log level: ' + level)
root_logger = logging.getLogger('')
for handler in root_logger.handlers:
root_logger.removeHandler(handler)
logging.getLogger(logger).setLevel(logging.DEBUG)
console_log_handler = logging.StreamHandler()
console_log_handler.setLevel(level)
if isinstance(formatter, str):
formatter = logging.Formatter(formatter)
elif not isinstance(formatter, logging.Formatter):
raise TypeError('formatter must be an instance of logging.Formatter')
console_log_handler.setFormatter(formatter)
logging.getLogger(logger).addHandler(console_log_handler)
logging.captureWarnings(True)
return console_log_handler
|
[
"def",
"configure_stream_logger",
"(",
"logger",
"=",
"''",
",",
"level",
"=",
"None",
",",
"formatter",
"=",
"'%(levelname)-8s %(message)s'",
")",
":",
"level",
"=",
"level",
"or",
"logging",
".",
"WARNING",
"if",
"isinstance",
"(",
"level",
",",
"str",
")",
":",
"level",
"=",
"getattr",
"(",
"logging",
",",
"level",
",",
"None",
")",
"if",
"level",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'invalid log level: '",
"+",
"level",
")",
"root_logger",
"=",
"logging",
".",
"getLogger",
"(",
"''",
")",
"for",
"handler",
"in",
"root_logger",
".",
"handlers",
":",
"root_logger",
".",
"removeHandler",
"(",
"handler",
")",
"logging",
".",
"getLogger",
"(",
"logger",
")",
".",
"setLevel",
"(",
"logging",
".",
"DEBUG",
")",
"console_log_handler",
"=",
"logging",
".",
"StreamHandler",
"(",
")",
"console_log_handler",
".",
"setLevel",
"(",
"level",
")",
"if",
"isinstance",
"(",
"formatter",
",",
"str",
")",
":",
"formatter",
"=",
"logging",
".",
"Formatter",
"(",
"formatter",
")",
"elif",
"not",
"isinstance",
"(",
"formatter",
",",
"logging",
".",
"Formatter",
")",
":",
"raise",
"TypeError",
"(",
"'formatter must be an instance of logging.Formatter'",
")",
"console_log_handler",
".",
"setFormatter",
"(",
"formatter",
")",
"logging",
".",
"getLogger",
"(",
"logger",
")",
".",
"addHandler",
"(",
"console_log_handler",
")",
"logging",
".",
"captureWarnings",
"(",
"True",
")",
"return",
"console_log_handler"
] |
Configure the default stream handler for logging messages to the console,
remove other logging handlers, and enable capturing warnings.
.. versionadded:: 1.3.0
:param str logger: The logger to add the stream handler for.
:param level: The level to set the logger to, will default to WARNING if no level is specified.
:type level: None, int, str
:param formatter: The format to use for logging messages to the console.
:type formatter: str, :py:class:`logging.Formatter`
:return: The new configured stream handler.
:rtype: :py:class:`logging.StreamHandler`
|
[
"Configure",
"the",
"default",
"stream",
"handler",
"for",
"logging",
"messages",
"to",
"the",
"console",
"remove",
"other",
"logging",
"handlers",
"and",
"enable",
"capturing",
"warnings",
"."
] |
train
|
https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/utilities.py#L420-L454
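A short usage sketch, assuming the function is importable as smoke_zephyr.utilities.configure_stream_logger (inferred from the file path above).

import logging
from smoke_zephyr.utilities import configure_stream_logger  # assumed import path

# Install a console handler at INFO level; the handler is returned so it
# can be tuned afterwards.
handler = configure_stream_logger(level='INFO')
logging.getLogger(__name__).info('console logging is configured')
handler.setLevel(logging.DEBUG)  # increase verbosity later if desired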
|
zeroSteiner/smoke-zephyr
|
smoke_zephyr/utilities.py
|
download
|
def download(url, filename=None):
"""
Download a file from a url and save it to disk.
:param str url: The URL to fetch the file from.
:param str filename: The destination file to write the data to.
"""
# requirements os, shutil, urllib.parse, urllib.request
if not filename:
url_parts = urllib.parse.urlparse(url)
filename = os.path.basename(url_parts.path)
url_h = urllib.request.urlopen(url)
with open(filename, 'wb') as file_h:
shutil.copyfileobj(url_h, file_h)
url_h.close()
return
|
python
|
def download(url, filename=None):
"""
Download a file from a url and save it to disk.
:param str url: The URL to fetch the file from.
:param str filename: The destination file to write the data to.
"""
# requirements os, shutil, urllib.parse, urllib.request
if not filename:
url_parts = urllib.parse.urlparse(url)
filename = os.path.basename(url_parts.path)
url_h = urllib.request.urlopen(url)
with open(filename, 'wb') as file_h:
shutil.copyfileobj(url_h, file_h)
url_h.close()
return
|
[
"def",
"download",
"(",
"url",
",",
"filename",
"=",
"None",
")",
":",
"# requirements os, shutil, urllib.parse, urllib.request",
"if",
"not",
"filename",
":",
"url_parts",
"=",
"urllib",
".",
"parse",
".",
"urlparse",
"(",
"url",
")",
"filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"url_parts",
".",
"path",
")",
"url_h",
"=",
"urllib",
".",
"request",
".",
"urlopen",
"(",
"url",
")",
"with",
"open",
"(",
"filename",
",",
"'wb'",
")",
"as",
"file_h",
":",
"shutil",
".",
"copyfileobj",
"(",
"url_h",
",",
"file_h",
")",
"url_h",
".",
"close",
"(",
")",
"return"
] |
Download a file from a url and save it to disk.
:param str url: The URL to fetch the file from.
:param str filename: The destination file to write the data to.
|
[
"Download",
"a",
"file",
"from",
"a",
"url",
"and",
"save",
"it",
"to",
"disk",
"."
] |
train
|
https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/utilities.py#L456-L471
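A usage sketch, assuming the import path smoke_zephyr.utilities; the URL is only a placeholder.

from smoke_zephyr.utilities import download  # assumed import path

# Saves the response body to 'index.html' (the basename of the URL path)
# in the current working directory.
download('https://example.com/index.html')

# An explicit destination file name can also be given.
download('https://example.com/index.html', filename='copy.html')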
|
zeroSteiner/smoke-zephyr
|
smoke_zephyr/utilities.py
|
format_bytes_size
|
def format_bytes_size(val):
"""
Take a number of bytes and convert it to a human readable number.
:param int val: The number of bytes to format.
:return: The size in a human readable format.
:rtype: str
"""
if not val:
return '0 bytes'
for sz_name in ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']:
if val < 1024.0:
return "{0:.2f} {1}".format(val, sz_name)
val /= 1024.0
raise OverflowError()
|
python
|
def format_bytes_size(val):
"""
Take a number of bytes and convert it to a human readable number.
:param int val: The number of bytes to format.
:return: The size in a human readable format.
:rtype: str
"""
if not val:
return '0 bytes'
for sz_name in ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']:
if val < 1024.0:
return "{0:.2f} {1}".format(val, sz_name)
val /= 1024.0
raise OverflowError()
|
[
"def",
"format_bytes_size",
"(",
"val",
")",
":",
"if",
"not",
"val",
":",
"return",
"'0 bytes'",
"for",
"sz_name",
"in",
"[",
"'bytes'",
",",
"'KB'",
",",
"'MB'",
",",
"'GB'",
",",
"'TB'",
",",
"'PB'",
",",
"'EB'",
"]",
":",
"if",
"val",
"<",
"1024.0",
":",
"return",
"\"{0:.2f} {1}\"",
".",
"format",
"(",
"val",
",",
"sz_name",
")",
"val",
"/=",
"1024.0",
"raise",
"OverflowError",
"(",
")"
] |
Take a number of bytes and convert it to a human readable number.
:param int val: The number of bytes to format.
:return: The size in a human readable format.
:rtype: str
|
[
"Take",
"a",
"number",
"of",
"bytes",
"and",
"convert",
"it",
"to",
"a",
"human",
"readable",
"number",
"."
] |
train
|
https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/utilities.py#L485-L499
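A usage sketch with the outputs implied by the code above, assuming the import path smoke_zephyr.utilities.

from smoke_zephyr.utilities import format_bytes_size  # assumed import path

print(format_bytes_size(0))              # '0 bytes'
print(format_bytes_size(512))            # '512.00 bytes'
print(format_bytes_size(2048))           # '2.00 KB'
print(format_bytes_size(5 * 1024 ** 3))  # '5.00 GB'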
|
zeroSteiner/smoke-zephyr
|
smoke_zephyr/utilities.py
|
grep
|
def grep(expression, file, flags=0, invert=False):
"""
Search a file and return a list of all lines that match a regular expression.
:param str expression: The regex to search for.
:param file: The file to search in.
:type file: str, file
:param int flags: The regex flags to use when searching.
:param bool invert: Select non matching lines instead.
:return: All the matching lines.
:rtype: list
"""
# requirements = re
if isinstance(file, str):
file = open(file)
lines = []
for line in file:
if bool(re.search(expression, line, flags=flags)) ^ invert:
lines.append(line)
return lines
|
python
|
def grep(expression, file, flags=0, invert=False):
"""
Search a file and return a list of all lines that match a regular expression.
:param str expression: The regex to search for.
:param file: The file to search in.
:type file: str, file
:param int flags: The regex flags to use when searching.
:param bool invert: Select non matching lines instead.
:return: All the matching lines.
:rtype: list
"""
# requirements = re
if isinstance(file, str):
file = open(file)
lines = []
for line in file:
if bool(re.search(expression, line, flags=flags)) ^ invert:
lines.append(line)
return lines
|
[
"def",
"grep",
"(",
"expression",
",",
"file",
",",
"flags",
"=",
"0",
",",
"invert",
"=",
"False",
")",
":",
"# requirements = re",
"if",
"isinstance",
"(",
"file",
",",
"str",
")",
":",
"file",
"=",
"open",
"(",
"file",
")",
"lines",
"=",
"[",
"]",
"for",
"line",
"in",
"file",
":",
"if",
"bool",
"(",
"re",
".",
"search",
"(",
"expression",
",",
"line",
",",
"flags",
"=",
"flags",
")",
")",
"^",
"invert",
":",
"lines",
".",
"append",
"(",
"line",
")",
"return",
"lines"
] |
Search a file and return a list of all lines that match a regular expression.
:param str expression: The regex to search for.
:param file: The file to search in.
:type file: str, file
:param int flags: The regex flags to use when searching.
:param bool invert: Select non matching lines instead.
:return: All the matching lines.
:rtype: list
|
[
"Search",
"a",
"file",
"and",
"return",
"a",
"list",
"of",
"all",
"lines",
"that",
"match",
"a",
"regular",
"expression",
"."
] |
train
|
https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/utilities.py#L501-L520
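A self-contained usage sketch, assuming the import path smoke_zephyr.utilities; it writes a small sample file first so the calls can run as-is.

import re
from smoke_zephyr.utilities import grep  # assumed import path

# Build a small file to search through.
with open('sample.txt', 'w') as file_h:
    file_h.write('alpha\nBeta\ngamma\n')

print(grep('^gamma', 'sample.txt'))                       # ['gamma\n']
print(grep('^gamma', 'sample.txt', invert=True))          # ['alpha\n', 'Beta\n']
print(grep('^beta$', 'sample.txt', flags=re.IGNORECASE))  # ['Beta\n']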
|
zeroSteiner/smoke-zephyr
|
smoke_zephyr/utilities.py
|
open_uri
|
def open_uri(uri):
"""
Open a URI in a platform intelligent way. On Windows this will use
'cmd.exe /c start' and on Linux this will use gvfs-open or xdg-open
depending on which is available. If no suitable application can be
found to open the URI, a RuntimeError will be raised.
.. versionadded:: 1.3.0
:param str uri: The URI to open.
"""
close_fds = True
startupinfo = None
proc_args = []
if sys.platform.startswith('win'):
proc_args.append(which('cmd.exe'))
proc_args.append('/c')
proc_args.append('start')
uri = uri.replace('&', '^&')
close_fds = False
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
elif which('gvfs-open'):
proc_args.append(which('gvfs-open'))
elif which('xdg-open'):
proc_args.append(which('xdg-open'))
else:
raise RuntimeError('could not find suitable application to open uri')
proc_args.append(uri)
proc_h = subprocess.Popen(proc_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=close_fds, startupinfo=startupinfo)
return proc_h.wait() == 0
|
python
|
def open_uri(uri):
"""
Open a URI in a platform intelligent way. On Windows this will use
'cmd.exe /c start' and on Linux this will use gvfs-open or xdg-open
depending on which is available. If no suitable application can be
found to open the URI, a RuntimeError will be raised.
.. versionadded:: 1.3.0
:param str uri: The URI to open.
"""
close_fds = True
startupinfo = None
proc_args = []
if sys.platform.startswith('win'):
proc_args.append(which('cmd.exe'))
proc_args.append('/c')
proc_args.append('start')
uri = uri.replace('&', '^&')
close_fds = False
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
elif which('gvfs-open'):
proc_args.append(which('gvfs-open'))
elif which('xdg-open'):
proc_args.append(which('xdg-open'))
else:
raise RuntimeError('could not find suitable application to open uri')
proc_args.append(uri)
proc_h = subprocess.Popen(proc_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=close_fds, startupinfo=startupinfo)
return proc_h.wait() == 0
|
[
"def",
"open_uri",
"(",
"uri",
")",
":",
"close_fds",
"=",
"True",
"startupinfo",
"=",
"None",
"proc_args",
"=",
"[",
"]",
"if",
"sys",
".",
"platform",
".",
"startswith",
"(",
"'win'",
")",
":",
"proc_args",
".",
"append",
"(",
"which",
"(",
"'cmd.exe'",
")",
")",
"proc_args",
".",
"append",
"(",
"'/c'",
")",
"proc_args",
".",
"append",
"(",
"'start'",
")",
"uri",
"=",
"uri",
".",
"replace",
"(",
"'&'",
",",
"'^&'",
")",
"close_fds",
"=",
"False",
"startupinfo",
"=",
"subprocess",
".",
"STARTUPINFO",
"(",
")",
"startupinfo",
".",
"dwFlags",
"|=",
"subprocess",
".",
"STARTF_USESHOWWINDOW",
"startupinfo",
".",
"wShowWindow",
"=",
"subprocess",
".",
"SW_HIDE",
"elif",
"which",
"(",
"'gvfs-open'",
")",
":",
"proc_args",
".",
"append",
"(",
"which",
"(",
"'gvfs-open'",
")",
")",
"elif",
"which",
"(",
"'xdg-open'",
")",
":",
"proc_args",
".",
"append",
"(",
"which",
"(",
"'xdg-open'",
")",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"'could not find suitable application to open uri'",
")",
"proc_args",
".",
"append",
"(",
"uri",
")",
"proc_h",
"=",
"subprocess",
".",
"Popen",
"(",
"proc_args",
",",
"stdin",
"=",
"subprocess",
".",
"PIPE",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
",",
"close_fds",
"=",
"close_fds",
",",
"startupinfo",
"=",
"startupinfo",
")",
"return",
"proc_h",
".",
"wait",
"(",
")",
"==",
"0"
] |
Open a URI in a platform intelligent way. On Windows this will use
'cmd.exe /c start' and on Linux this will use gvfs-open or xdg-open
depending on which is available. If no suitable application can be
found to open the URI, a RuntimeError will be raised.
.. versionadded:: 1.3.0
:param str uri: The URI to open.
|
[
"Open",
"a",
"URI",
"in",
"a",
"platform",
"intelligent",
"way",
".",
"On",
"Windows",
"this",
"will",
"use",
"cmd",
".",
"exe",
"/",
"c",
"start",
"and",
"on",
"Linux",
"this",
"will",
"use",
"gvfs",
"-",
"open",
"or",
"xdg",
"-",
"open",
"depending",
"on",
"which",
"is",
"available",
".",
"If",
"no",
"suitable",
"application",
"can",
"be",
"found",
"to",
"open",
"the",
"URI",
"a",
"RuntimeError",
"will",
"be",
"raised",
"."
] |
train
|
https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/utilities.py#L533-L564
|
zeroSteiner/smoke-zephyr
|
smoke_zephyr/utilities.py
|
parse_case_snake_to_camel
|
def parse_case_snake_to_camel(snake, upper_first=True):
"""
Convert a string from snake_case to CamelCase.
:param str snake: The snake_case string to convert.
:param bool upper_first: Whether or not to capitalize the first
character of the string.
:return: The CamelCase version of string.
:rtype: str
"""
snake = snake.split('_')
first_part = snake[0]
if upper_first:
first_part = first_part.title()
return first_part + ''.join(word.title() for word in snake[1:])
|
python
|
def parse_case_snake_to_camel(snake, upper_first=True):
"""
Convert a string from snake_case to CamelCase.
:param str snake: The snake_case string to convert.
:param bool upper_first: Whether or not to capitalize the first
character of the string.
:return: The CamelCase version of string.
:rtype: str
"""
snake = snake.split('_')
first_part = snake[0]
if upper_first:
first_part = first_part.title()
return first_part + ''.join(word.title() for word in snake[1:])
|
[
"def",
"parse_case_snake_to_camel",
"(",
"snake",
",",
"upper_first",
"=",
"True",
")",
":",
"snake",
"=",
"snake",
".",
"split",
"(",
"'_'",
")",
"first_part",
"=",
"snake",
"[",
"0",
"]",
"if",
"upper_first",
":",
"first_part",
"=",
"first_part",
".",
"title",
"(",
")",
"return",
"first_part",
"+",
"''",
".",
"join",
"(",
"word",
".",
"title",
"(",
")",
"for",
"word",
"in",
"snake",
"[",
"1",
":",
"]",
")"
] |
Convert a string from snake_case to CamelCase.
:param str snake: The snake_case string to convert.
:param bool upper_first: Whether or not to capitalize the first
character of the string.
:return: The CamelCase version of string.
:rtype: str
|
[
"Convert",
"a",
"string",
"from",
"snake_case",
"to",
"CamelCase",
"."
] |
train
|
https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/utilities.py#L577-L591
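A usage sketch assuming the import path smoke_zephyr.utilities.

from smoke_zephyr.utilities import parse_case_snake_to_camel  # assumed import path

print(parse_case_snake_to_camel('job_manager'))                     # 'JobManager'
print(parse_case_snake_to_camel('job_manager', upper_first=False))  # 'jobManager'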
|
zeroSteiner/smoke-zephyr
|
smoke_zephyr/utilities.py
|
parse_server
|
def parse_server(server, default_port):
"""
Convert a server string to a tuple suitable for passing to connect, for
example converting 'www.google.com:443' to ('www.google.com', 443).
:param str server: The server string to convert.
:param int default_port: The port to use in case one is not specified
in the server string.
:return: The parsed server information.
:rtype: tuple
"""
server = server.rsplit(':', 1)
host = server[0]
if host.startswith('[') and host.endswith(']'):
host = host[1:-1]
if len(server) == 1:
return (host, default_port)
port = server[1]
if not port:
port = default_port
else:
port = int(port)
return (host, port)
|
python
|
def parse_server(server, default_port):
"""
Convert a server string to a tuple suitable for passing to connect, for
example converting 'www.google.com:443' to ('www.google.com', 443).
:param str server: The server string to convert.
:param int default_port: The port to use in case one is not specified
in the server string.
:return: The parsed server information.
:rtype: tuple
"""
server = server.rsplit(':', 1)
host = server[0]
if host.startswith('[') and host.endswith(']'):
host = host[1:-1]
if len(server) == 1:
return (host, default_port)
port = server[1]
if not port:
port = default_port
else:
port = int(port)
return (host, port)
|
[
"def",
"parse_server",
"(",
"server",
",",
"default_port",
")",
":",
"server",
"=",
"server",
".",
"rsplit",
"(",
"':'",
",",
"1",
")",
"host",
"=",
"server",
"[",
"0",
"]",
"if",
"host",
".",
"startswith",
"(",
"'['",
")",
"and",
"host",
".",
"endswith",
"(",
"']'",
")",
":",
"host",
"=",
"host",
"[",
"1",
":",
"-",
"1",
"]",
"if",
"len",
"(",
"server",
")",
"==",
"1",
":",
"return",
"(",
"host",
",",
"default_port",
")",
"port",
"=",
"server",
"[",
"1",
"]",
"if",
"not",
"port",
":",
"port",
"=",
"default_port",
"else",
":",
"port",
"=",
"int",
"(",
"port",
")",
"return",
"(",
"host",
",",
"port",
")"
] |
Convert a server string to a tuple suitable for passing to connect, for
example converting 'www.google.com:443' to ('www.google.com', 443).
:param str server: The server string to convert.
:param int default_port: The port to use in case one is not specified
in the server string.
:return: The parsed server information.
:rtype: tuple
|
[
"Convert",
"a",
"server",
"string",
"to",
"a",
"tuple",
"suitable",
"for",
"passing",
"to",
"connect",
"for",
"example",
"converting",
"www",
".",
"google",
".",
"com",
":",
"443",
"to",
"(",
"www",
".",
"google",
".",
"com",
"443",
")",
"."
] |
train
|
https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/utilities.py#L593-L615
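A usage sketch assuming the import path smoke_zephyr.utilities; note that bracketed IPv6 hosts are unwrapped.

from smoke_zephyr.utilities import parse_server  # assumed import path

print(parse_server('www.google.com:443', 80))  # ('www.google.com', 443)
print(parse_server('www.google.com', 80))      # ('www.google.com', 80)
print(parse_server('[::1]:8080', 80))          # ('::1', 8080)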
|
zeroSteiner/smoke-zephyr
|
smoke_zephyr/utilities.py
|
parse_timespan
|
def parse_timespan(timedef):
"""
Convert a string timespan definition to seconds, for example converting
'1m30s' to 90. If *timedef* is already an int, the value will be returned
unmodified.
:param timedef: The timespan definition to convert to seconds.
:type timedef: int, str
:return: The converted value in seconds.
:rtype: int
"""
if isinstance(timedef, int):
return timedef
converter_order = ('w', 'd', 'h', 'm', 's')
converters = {
'w': 604800,
'd': 86400,
'h': 3600,
'm': 60,
's': 1
}
timedef = timedef.lower()
if timedef.isdigit():
return int(timedef)
elif len(timedef) == 0:
return 0
seconds = -1
for spec in converter_order:
timedef = timedef.split(spec)
if len(timedef) == 1:
timedef = timedef[0]
continue
elif len(timedef) > 2 or not timedef[0].isdigit():
seconds = -1
break
adjustment = converters[spec]
seconds = max(seconds, 0)
seconds += (int(timedef[0]) * adjustment)
timedef = timedef[1]
if not len(timedef):
break
if seconds < 0:
raise ValueError('invalid time format')
return seconds
|
python
|
def parse_timespan(timedef):
"""
Convert a string timespan definition to seconds, for example converting
'1m30s' to 90. If *timedef* is already an int, the value will be returned
unmodified.
:param timedef: The timespan definition to convert to seconds.
:type timedef: int, str
:return: The converted value in seconds.
:rtype: int
"""
if isinstance(timedef, int):
return timedef
converter_order = ('w', 'd', 'h', 'm', 's')
converters = {
'w': 604800,
'd': 86400,
'h': 3600,
'm': 60,
's': 1
}
timedef = timedef.lower()
if timedef.isdigit():
return int(timedef)
elif len(timedef) == 0:
return 0
seconds = -1
for spec in converter_order:
timedef = timedef.split(spec)
if len(timedef) == 1:
timedef = timedef[0]
continue
elif len(timedef) > 2 or not timedef[0].isdigit():
seconds = -1
break
adjustment = converters[spec]
seconds = max(seconds, 0)
seconds += (int(timedef[0]) * adjustment)
timedef = timedef[1]
if not len(timedef):
break
if seconds < 0:
raise ValueError('invalid time format')
return seconds
|
[
"def",
"parse_timespan",
"(",
"timedef",
")",
":",
"if",
"isinstance",
"(",
"timedef",
",",
"int",
")",
":",
"return",
"timedef",
"converter_order",
"=",
"(",
"'w'",
",",
"'d'",
",",
"'h'",
",",
"'m'",
",",
"'s'",
")",
"converters",
"=",
"{",
"'w'",
":",
"604800",
",",
"'d'",
":",
"86400",
",",
"'h'",
":",
"3600",
",",
"'m'",
":",
"60",
",",
"'s'",
":",
"1",
"}",
"timedef",
"=",
"timedef",
".",
"lower",
"(",
")",
"if",
"timedef",
".",
"isdigit",
"(",
")",
":",
"return",
"int",
"(",
"timedef",
")",
"elif",
"len",
"(",
"timedef",
")",
"==",
"0",
":",
"return",
"0",
"seconds",
"=",
"-",
"1",
"for",
"spec",
"in",
"converter_order",
":",
"timedef",
"=",
"timedef",
".",
"split",
"(",
"spec",
")",
"if",
"len",
"(",
"timedef",
")",
"==",
"1",
":",
"timedef",
"=",
"timedef",
"[",
"0",
"]",
"continue",
"elif",
"len",
"(",
"timedef",
")",
">",
"2",
"or",
"not",
"timedef",
"[",
"0",
"]",
".",
"isdigit",
"(",
")",
":",
"seconds",
"=",
"-",
"1",
"break",
"adjustment",
"=",
"converters",
"[",
"spec",
"]",
"seconds",
"=",
"max",
"(",
"seconds",
",",
"0",
")",
"seconds",
"+=",
"(",
"int",
"(",
"timedef",
"[",
"0",
"]",
")",
"*",
"adjustment",
")",
"timedef",
"=",
"timedef",
"[",
"1",
"]",
"if",
"not",
"len",
"(",
"timedef",
")",
":",
"break",
"if",
"seconds",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"'invalid time format'",
")",
"return",
"seconds"
] |
Convert a string timespan definition to seconds, for example converting
'1m30s' to 90. If *timedef* is already an int, the value will be returned
unmodified.
:param timedef: The timespan definition to convert to seconds.
:type timedef: int, str
:return: The converted value in seconds.
:rtype: int
|
[
"Convert",
"a",
"string",
"timespan",
"definition",
"to",
"seconds",
"for",
"example",
"converting",
"1m30s",
"to",
"90",
".",
"If",
"*",
"timedef",
"*",
"is",
"already",
"an",
"int",
"the",
"value",
"will",
"be",
"returned",
"unmodified",
"."
] |
train
|
https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/utilities.py#L617-L660
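A usage sketch assuming the import path smoke_zephyr.utilities; the expected values follow directly from the conversion table in the code above.

from smoke_zephyr.utilities import parse_timespan  # assumed import path

print(parse_timespan('1m30s'))  # 90
print(parse_timespan('2h'))     # 7200
print(parse_timespan('1w2d'))   # 777600
print(parse_timespan(45))       # 45 (ints pass through unmodified)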
|
zeroSteiner/smoke-zephyr
|
smoke_zephyr/utilities.py
|
parse_to_slug
|
def parse_to_slug(words, maxlen=24):
"""
Parse a string into a slug format suitable for use in URLs and other
character restricted applications. Only utf-8 strings are supported at this
time.
:param str words: The words to parse.
:param int maxlen: The maximum length of the slug.
:return: The parsed words as a slug.
:rtype: str
"""
slug = ''
maxlen = min(maxlen, len(words))
for c in words:
if len(slug) == maxlen:
break
c = ord(c)
if c == 0x27:
continue
elif c >= 0x30 and c <= 0x39:
slug += chr(c)
elif c >= 0x41 and c <= 0x5a:
slug += chr(c + 0x20)
elif c >= 0x61 and c <= 0x7a:
slug += chr(c)
elif len(slug) and slug[-1] != '-':
slug += '-'
if len(slug) and slug[-1] == '-':
slug = slug[:-1]
return slug
|
python
|
def parse_to_slug(words, maxlen=24):
"""
Parse a string into a slug format suitable for use in URLs and other
character restricted applications. Only utf-8 strings are supported at this
time.
:param str words: The words to parse.
:param int maxlen: The maximum length of the slug.
:return: The parsed words as a slug.
:rtype: str
"""
slug = ''
maxlen = min(maxlen, len(words))
for c in words:
if len(slug) == maxlen:
break
c = ord(c)
if c == 0x27:
continue
elif c >= 0x30 and c <= 0x39:
slug += chr(c)
elif c >= 0x41 and c <= 0x5a:
slug += chr(c + 0x20)
elif c >= 0x61 and c <= 0x7a:
slug += chr(c)
elif len(slug) and slug[-1] != '-':
slug += '-'
if len(slug) and slug[-1] == '-':
slug = slug[:-1]
return slug
|
[
"def",
"parse_to_slug",
"(",
"words",
",",
"maxlen",
"=",
"24",
")",
":",
"slug",
"=",
"''",
"maxlen",
"=",
"min",
"(",
"maxlen",
",",
"len",
"(",
"words",
")",
")",
"for",
"c",
"in",
"words",
":",
"if",
"len",
"(",
"slug",
")",
"==",
"maxlen",
":",
"break",
"c",
"=",
"ord",
"(",
"c",
")",
"if",
"c",
"==",
"0x27",
":",
"continue",
"elif",
"c",
">=",
"0x30",
"and",
"c",
"<=",
"0x39",
":",
"slug",
"+=",
"chr",
"(",
"c",
")",
"elif",
"c",
">=",
"0x41",
"and",
"c",
"<=",
"0x5a",
":",
"slug",
"+=",
"chr",
"(",
"c",
"+",
"0x20",
")",
"elif",
"c",
">=",
"0x61",
"and",
"c",
"<=",
"0x7a",
":",
"slug",
"+=",
"chr",
"(",
"c",
")",
"elif",
"len",
"(",
"slug",
")",
"and",
"slug",
"[",
"-",
"1",
"]",
"!=",
"'-'",
":",
"slug",
"+=",
"'-'",
"if",
"len",
"(",
"slug",
")",
"and",
"slug",
"[",
"-",
"1",
"]",
"==",
"'-'",
":",
"slug",
"=",
"slug",
"[",
":",
"-",
"1",
"]",
"return",
"slug"
] |
Parse a string into a slug format suitable for use in URLs and other
character restricted applications. Only utf-8 strings are supported at this
time.
:param str words: The words to parse.
:param int maxlen: The maximum length of the slug.
:return: The parsed words as a slug.
:rtype: str
|
[
"Parse",
"a",
"string",
"into",
"a",
"slug",
"format",
"suitable",
"for",
"use",
"in",
"URLs",
"and",
"other",
"character",
"restricted",
"applications",
".",
"Only",
"utf",
"-",
"8",
"strings",
"are",
"supported",
"at",
"this",
"time",
"."
] |
train
|
https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/utilities.py#L662-L691
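A usage sketch assuming the import path smoke_zephyr.utilities.

from smoke_zephyr.utilities import parse_to_slug  # assumed import path

print(parse_to_slug("Hello, World!"))                        # 'hello-world'
print(parse_to_slug("Smoke Zephyr's utilities", maxlen=10))  # 'smoke-zeph'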
|
zeroSteiner/smoke-zephyr
|
smoke_zephyr/utilities.py
|
random_string_alphanumeric
|
def random_string_alphanumeric(size):
"""
Generate a random string of *size* length consisting of mixed case letters
and numbers. This function is not meant for cryptographic purposes.
:param int size: The length of the string to return.
:return: A string consisting of random characters.
:rtype: str
"""
# requirements = random, string
return ''.join(random.choice(string.ascii_letters + string.digits) for x in range(size))
|
python
|
def random_string_alphanumeric(size):
"""
Generate a random string of *size* length consisting of mixed case letters
and numbers. This function is not meant for cryptographic purposes.
:param int size: The length of the string to return.
:return: A string consisting of random characters.
:rtype: str
"""
# requirements = random, string
return ''.join(random.choice(string.ascii_letters + string.digits) for x in range(size))
|
[
"def",
"random_string_alphanumeric",
"(",
"size",
")",
":",
"# requirements = random, string",
"return",
"''",
".",
"join",
"(",
"random",
".",
"choice",
"(",
"string",
".",
"ascii_letters",
"+",
"string",
".",
"digits",
")",
"for",
"x",
"in",
"range",
"(",
"size",
")",
")"
] |
Generate a random string of *size* length consisting of mixed case letters
and numbers. This function is not meant for cryptographic purposes.
:param int size: The length of the string to return.
:return: A string consisting of random characters.
:rtype: str
|
[
"Generate",
"a",
"random",
"string",
"of",
"*",
"size",
"*",
"length",
"consisting",
"of",
"mixed",
"case",
"letters",
"and",
"numbers",
".",
"This",
"function",
"is",
"not",
"meant",
"for",
"cryptographic",
"purposes",
"."
] |
train
|
https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/utilities.py#L693-L703
|
zeroSteiner/smoke-zephyr
|
smoke_zephyr/utilities.py
|
random_string_lower_numeric
|
def random_string_lower_numeric(size):
"""
Generate a random string of *size* length consisting of lowercase letters
and numbers. This function is not meant for cryptographic purposes.
:param int size: The length of the string to return.
:return: A string consisting of random characters.
:rtype: str
"""
# requirements = random, string
return ''.join(random.choice(string.ascii_lowercase + string.digits) for x in range(size))
|
python
|
def random_string_lower_numeric(size):
"""
Generate a random string of *size* length consisting of lowercase letters
and numbers. This function is not meant for cryptographic purposes.
:param int size: The length of the string to return.
:return: A string consisting of random characters.
:rtype: str
"""
# requirements = random, string
return ''.join(random.choice(string.ascii_lowercase + string.digits) for x in range(size))
|
[
"def",
"random_string_lower_numeric",
"(",
"size",
")",
":",
"# requirements = random, string",
"return",
"''",
".",
"join",
"(",
"random",
".",
"choice",
"(",
"string",
".",
"ascii_lowercase",
"+",
"string",
".",
"digits",
")",
"for",
"x",
"in",
"range",
"(",
"size",
")",
")"
] |
Generate a random string of *size* length consisting of lowercase letters
and numbers. This function is not meant for cryptographic purposes.
:param int size: The length of the string to return.
:return: A string consisting of random characters.
:rtype: str
|
[
"Generate",
"a",
"random",
"string",
"of",
"*",
"size",
"*",
"length",
"consisting",
"of",
"lowercase",
"letters",
"and",
"numbers",
".",
"This",
"function",
"is",
"not",
"meant",
"for",
"cryptographic",
"purposes",
"."
] |
train
|
https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/utilities.py#L705-L715
|
zeroSteiner/smoke-zephyr
|
smoke_zephyr/utilities.py
|
selection_collision
|
def selection_collision(selections, poolsize):
"""
Calculate the probability that two random values selected from an arbitrary
sized pool of unique values will be equal. This is commonly known as the
"Birthday Problem".
:param int selections: The number of random selections.
:param int poolsize: The number of unique random values in the pool to choose from.
:rtype: float
:return: The chance that a collision will occur as a percentage.
"""
# requirements = sys
probability = 100.0
poolsize = float(poolsize)
for i in range(selections):
probability = probability * (poolsize - i) / poolsize
probability = (100.0 - probability)
return probability
|
python
|
def selection_collision(selections, poolsize):
"""
Calculate the probability that two random values selected from an arbitrary
sized pool of unique values will be equal. This is commonly known as the
"Birthday Problem".
:param int selections: The number of random selections.
:param int poolsize: The number of unique random values in the pool to choose from.
:rtype: float
:return: The chance that a collision will occur as a percentage.
"""
# requirements = sys
probability = 100.0
poolsize = float(poolsize)
for i in range(selections):
probability = probability * (poolsize - i) / poolsize
probability = (100.0 - probability)
return probability
|
[
"def",
"selection_collision",
"(",
"selections",
",",
"poolsize",
")",
":",
"# requirments = sys",
"probability",
"=",
"100.0",
"poolsize",
"=",
"float",
"(",
"poolsize",
")",
"for",
"i",
"in",
"range",
"(",
"selections",
")",
":",
"probability",
"=",
"probability",
"*",
"(",
"poolsize",
"-",
"i",
")",
"/",
"poolsize",
"probability",
"=",
"(",
"100.0",
"-",
"probability",
")",
"return",
"probability"
] |
Calculate the probability that two random values selected from an arbitrary
sized pool of unique values will be equal. This is commonly known as the
"Birthday Problem".
:param int selections: The number of random selections.
:param int poolsize: The number of unique random values in the pool to choose from.
:rtype: float
:return: The chance that a collision will occur as a percentage.
|
[
"Calculate",
"the",
"probability",
"that",
"two",
"random",
"values",
"selected",
"from",
"an",
"arbitrary",
"sized",
"pool",
"of",
"unique",
"values",
"will",
"be",
"equal",
".",
"This",
"is",
"commonly",
"known",
"as",
"the",
"Birthday",
"Problem",
"."
] |
train
|
https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/utilities.py#L717-L734
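A usage sketch assuming the import path smoke_zephyr.utilities; 23 selections from a pool of 365 is the classic birthday problem.

from smoke_zephyr.utilities import selection_collision  # assumed import path

# Roughly a 50.7% chance that at least two of 23 random birthdays collide.
print(round(selection_collision(23, 365), 1))  # 50.7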
|
zeroSteiner/smoke-zephyr
|
smoke_zephyr/utilities.py
|
unique
|
def unique(seq, key=None):
"""
Create a unique list or tuple from a provided list or tuple and preserve the
order.
:param seq: The list or tuple to preserve unique items from.
:type seq: list, tuple
:param key: If key is provided it will be called during the
comparison process.
:type key: function, None
"""
if key is None:
key = lambda x: x
preserved_type = type(seq)
if preserved_type not in (list, tuple):
raise TypeError("unique argument 1 must be list or tuple, not {0}".format(preserved_type.__name__))
seen = []
result = []
for item in seq:
marker = key(item)
if marker in seen:
continue
seen.append(marker)
result.append(item)
return preserved_type(result)
|
python
|
def unique(seq, key=None):
"""
Create a unique list or tuple from a provided list or tuple and preserve the
order.
:param seq: The list or tuple to preserve unique items from.
:type seq: list, tuple
:param key: If key is provided it will be called during the
comparison process.
:type key: function, None
"""
if key is None:
key = lambda x: x
preserved_type = type(seq)
if preserved_type not in (list, tuple):
raise TypeError("unique argument 1 must be list or tuple, not {0}".format(preserved_type.__name__))
seen = []
result = []
for item in seq:
marker = key(item)
if marker in seen:
continue
seen.append(marker)
result.append(item)
return preserved_type(result)
|
[
"def",
"unique",
"(",
"seq",
",",
"key",
"=",
"None",
")",
":",
"if",
"key",
"is",
"None",
":",
"key",
"=",
"lambda",
"x",
":",
"x",
"preserved_type",
"=",
"type",
"(",
"seq",
")",
"if",
"preserved_type",
"not",
"in",
"(",
"list",
",",
"tuple",
")",
":",
"raise",
"TypeError",
"(",
"\"unique argument 1 must be list or tuple, not {0}\"",
".",
"format",
"(",
"preserved_type",
".",
"__name__",
")",
")",
"seen",
"=",
"[",
"]",
"result",
"=",
"[",
"]",
"for",
"item",
"in",
"seq",
":",
"marker",
"=",
"key",
"(",
"item",
")",
"if",
"marker",
"in",
"seen",
":",
"continue",
"seen",
".",
"append",
"(",
"marker",
")",
"result",
".",
"append",
"(",
"item",
")",
"return",
"preserved_type",
"(",
"result",
")"
] |
Create a unique list or tuple from a provided list or tuple and preserve the
order.
:param seq: The list or tuple to preserve unique items from.
:type seq: list, tuple
:param key: If key is provided it will be called during the
comparison process.
:type key: function, None
|
[
"Create",
"a",
"unique",
"list",
"or",
"tuple",
"from",
"a",
"provided",
"list",
"or",
"tuple",
"and",
"preserve",
"the",
"order",
"."
] |
train
|
https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/utilities.py#L748-L772
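A usage sketch assuming the import path smoke_zephyr.utilities; the input type (list or tuple) is preserved in the result.

from smoke_zephyr.utilities import unique  # assumed import path

print(unique([3, 1, 3, 2, 1]))                 # [3, 1, 2]
print(unique(('a', 'A', 'b'), key=str.lower))  # ('a', 'b')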
|
zeroSteiner/smoke-zephyr
|
smoke_zephyr/utilities.py
|
weighted_choice
|
def weighted_choice(choices, weight):
"""
Make a random selection from the specified choices. Apply the *weight*
function to each to return a positive integer representing shares of
selection pool the choice should receive. The *weight* function is passed a
single argument of the choice from the *choices* iterable.
:param choices: The choices to select from.
:type choices: list, tuple
:param weight: The function used for gathering weight information for choices.
:type weight: function
:return: A randomly selected choice from the provided *choices*.
"""
# requirements = random
weights = []
# get weight values for each of the choices
for choice in choices:
choice_weight = weight(choice)
if not (isinstance(choice_weight, int) and choice_weight > 0):
raise TypeError('weight results must be positive integers')
weights.append(choice_weight)
# make a selection within the acceptable range
selection = random.randint(0, sum(weights) - 1)
# find and return the corresponding choice
for idx, choice in enumerate(choices):
if selection < sum(weights[:idx + 1]):
return choice
raise RuntimeError('no selection could be made')
|
python
|
def weighted_choice(choices, weight):
"""
Make a random selection from the specified choices. Apply the *weight*
function to each to return a positive integer representing shares of
selection pool the choice should receive. The *weight* function is passed a
single argument of the choice from the *choices* iterable.
:param choices: The choices to select from.
:type choices: list, tuple
:param weight: The function used for gathering weight information for choices.
:type weight: function
:return: A randomly selected choice from the provided *choices*.
"""
# requirements = random
weights = []
# get weight values for each of the choices
for choice in choices:
choice_weight = weight(choice)
if not (isinstance(choice_weight, int) and choice_weight > 0):
raise TypeError('weight results must be positive integers')
weights.append(choice_weight)
# make a selection within the acceptable range
selection = random.randint(0, sum(weights) - 1)
# find and return the corresponding choice
for idx, choice in enumerate(choices):
if selection < sum(weights[:idx + 1]):
return choice
raise RuntimeError('no selection could be made')
|
[
"def",
"weighted_choice",
"(",
"choices",
",",
"weight",
")",
":",
"# requirements = random",
"weights",
"=",
"[",
"]",
"# get weight values for each of the choices",
"for",
"choice",
"in",
"choices",
":",
"choice_weight",
"=",
"weight",
"(",
"choice",
")",
"if",
"not",
"(",
"isinstance",
"(",
"choice_weight",
",",
"int",
")",
"and",
"choice_weight",
">",
"0",
")",
":",
"raise",
"TypeError",
"(",
"'weight results must be positive integers'",
")",
"weights",
".",
"append",
"(",
"choice_weight",
")",
"# make a selection within the acceptable range",
"selection",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"sum",
"(",
"weights",
")",
"-",
"1",
")",
"# find and return the corresponding choice",
"for",
"idx",
",",
"choice",
"in",
"enumerate",
"(",
"choices",
")",
":",
"if",
"selection",
"<",
"sum",
"(",
"weights",
"[",
":",
"idx",
"+",
"1",
"]",
")",
":",
"return",
"choice",
"raise",
"RuntimeError",
"(",
"'no selection could be made'",
")"
] |
Make a random selection from the specified choices. Apply the *weight*
function to each to return a positive integer representing shares of
selection pool the choice should receive. The *weight* function is passed a
single argument of the choice from the *choices* iterable.
:param choices: The choices to select from.
:type choices: list, tuple
:param weight: The function used for gathering weight information for choices.
:type weight: function
:return: A randomly selected choice from the provided *choices*.
|
[
"Make",
"a",
"random",
"selection",
"from",
"the",
"specified",
"choices",
".",
"Apply",
"the",
"*",
"weight",
"*",
"function",
"to",
"each",
"to",
"return",
"a",
"positive",
"integer",
"representing",
"shares",
"of",
"selection",
"pool",
"the",
"choice",
"should",
"received",
".",
"The",
"*",
"weight",
"*",
"function",
"is",
"passed",
"a",
"single",
"argument",
"of",
"the",
"choice",
"from",
"the",
"*",
"choices",
"*",
"iterable",
"."
] |
train
|
https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/utilities.py#L774-L803
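A usage sketch assuming the import path smoke_zephyr.utilities; the weight callable must return a positive integer for every choice.

from smoke_zephyr.utilities import weighted_choice  # assumed import path

servers = [
    {'name': 'primary', 'shares': 3},
    {'name': 'backup', 'shares': 1},
]
# 'primary' is selected roughly three times as often as 'backup'.
choice = weighted_choice(servers, lambda server: server['shares'])
print(choice['name'])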
|
zeroSteiner/smoke-zephyr
|
smoke_zephyr/utilities.py
|
xfrange
|
def xfrange(start, stop=None, step=1):
"""
Iterate through an arithmetic progression.
:param start: Starting number.
:type start: float, int, long
:param stop: Stopping number.
:type stop: float, int, long
:param step: Stepping size.
:type step: float, int, long
"""
if stop is None:
stop = start
start = 0.0
start = float(start)
while start < stop:
yield start
start += step
|
python
|
def xfrange(start, stop=None, step=1):
"""
Iterate through an arithmetic progression.
:param start: Starting number.
:type start: float, int, long
:param stop: Stopping number.
:type stop: float, int, long
:param step: Stepping size.
:type step: float, int, long
"""
if stop is None:
stop = start
start = 0.0
start = float(start)
while start < stop:
yield start
start += step
|
[
"def",
"xfrange",
"(",
"start",
",",
"stop",
"=",
"None",
",",
"step",
"=",
"1",
")",
":",
"if",
"stop",
"is",
"None",
":",
"stop",
"=",
"start",
"start",
"=",
"0.0",
"start",
"=",
"float",
"(",
"start",
")",
"while",
"start",
"<",
"stop",
":",
"yield",
"start",
"start",
"+=",
"step"
] |
Iterate through an arithmetic progression.
:param start: Starting number.
:type start: float, int, long
:param stop: Stopping number.
:type stop: float, int, long
:param step: Stepping size.
:type step: float, int, long
|
[
"Iterate",
"through",
"an",
"arithmetic",
"progression",
"."
] |
train
|
https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/utilities.py#L824-L841
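A usage sketch assuming the import path smoke_zephyr.utilities; unlike range(), the step may be a float.

from smoke_zephyr.utilities import xfrange  # assumed import path

print(list(xfrange(0, 1, 0.25)))  # [0.0, 0.25, 0.5, 0.75]
print(list(xfrange(3)))           # [0.0, 1.0, 2.0]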
|
zeroSteiner/smoke-zephyr
|
smoke_zephyr/utilities.py
|
Cache.cache_clean
|
def cache_clean(self):
"""
Remove expired items from the cache.
"""
now = time.time()
keys_for_removal = collections.deque()
for key, (_, expiration) in self.__cache.items():
if expiration < now:
keys_for_removal.append(key)
for key in keys_for_removal:
del self.__cache[key]
|
python
|
def cache_clean(self):
"""
Remove expired items from the cache.
"""
now = time.time()
keys_for_removal = collections.deque()
for key, (_, expiration) in self.__cache.items():
if expiration < now:
keys_for_removal.append(key)
for key in keys_for_removal:
del self.__cache[key]
|
[
"def",
"cache_clean",
"(",
"self",
")",
":",
"now",
"=",
"time",
".",
"time",
"(",
")",
"keys_for_removal",
"=",
"collections",
".",
"deque",
"(",
")",
"for",
"key",
",",
"(",
"_",
",",
"expiration",
")",
"in",
"self",
".",
"__cache",
".",
"items",
"(",
")",
":",
"if",
"expiration",
"<",
"now",
":",
"keys_for_removal",
".",
"append",
"(",
"key",
")",
"for",
"key",
"in",
"keys_for_removal",
":",
"del",
"self",
".",
"__cache",
"[",
"key",
"]"
] |
Remove expired items from the cache.
|
[
"Remove",
"expired",
"items",
"from",
"the",
"cache",
"."
] |
train
|
https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/utilities.py#L210-L220
|
zeroSteiner/smoke-zephyr
|
smoke_zephyr/utilities.py
|
SectionConfigParser.set
|
def set(self, option, value):
"""
Set an option to an arbitrary value.
:param str option: The name of the option to set.
:param value: The value to set the option to.
"""
self.config_parser.set(self.section_name, option, value)
|
python
|
def set(self, option, value):
"""
Set an option to an arbitrary value.
:param str option: The name of the option to set.
:param value: The value to set the option to.
"""
self.config_parser.set(self.section_name, option, value)
|
[
"def",
"set",
"(",
"self",
",",
"option",
",",
"value",
")",
":",
"self",
".",
"config_parser",
".",
"set",
"(",
"self",
".",
"section_name",
",",
"option",
",",
"value",
")"
] |
Set an option to an arbitrary value.
:param str option: The name of the option to set.
:param value: The value to set the option to.
|
[
"Set",
"an",
"option",
"to",
"an",
"arbitrary",
"value",
"."
] |
train
|
https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/utilities.py#L396-L403
|
dusktreader/py-buzz
|
buzz/__init__.py
|
Buzz.reformat_exception
|
def reformat_exception(cls, message, err, *format_args, **format_kwds):
"""
Reformats an exception by adding a message to it and reporting the
original exception name and message
"""
final_message = message.format(*format_args, **format_kwds)
final_message = "{} -- {}: {}".format(
final_message,
type(err).__name__,
str(err),
)
final_message = cls.sanitize_errstr(final_message)
return final_message
|
python
|
def reformat_exception(cls, message, err, *format_args, **format_kwds):
"""
Reformats an exception by adding a message to it and reporting the
original exception name and message
"""
final_message = message.format(*format_args, **format_kwds)
final_message = "{} -- {}: {}".format(
final_message,
type(err).__name__,
str(err),
)
final_message = cls.sanitize_errstr(final_message)
return final_message
|
[
"def",
"reformat_exception",
"(",
"cls",
",",
"message",
",",
"err",
",",
"*",
"format_args",
",",
"*",
"*",
"format_kwds",
")",
":",
"final_message",
"=",
"message",
".",
"format",
"(",
"*",
"format_args",
",",
"*",
"*",
"format_kwds",
")",
"final_message",
"=",
"\"{} -- {}: {}\"",
".",
"format",
"(",
"final_message",
",",
"type",
"(",
"err",
")",
".",
"__name__",
",",
"str",
"(",
"err",
")",
",",
")",
"final_message",
"=",
"cls",
".",
"sanitize_errstr",
"(",
"final_message",
")",
"return",
"final_message"
] |
Reformats an exception by adding a message to it and reporting the
original exception name and message
|
[
"Reformats",
"an",
"exception",
"by",
"adding",
"a",
"message",
"to",
"it",
"and",
"reporting",
"the",
"original",
"exception",
"name",
"and",
"message"
] |
train
|
https://github.com/dusktreader/py-buzz/blob/f2fd97abe158a1688188647992a5be6531058ec3/buzz/__init__.py#L52-L64
|
dusktreader/py-buzz
|
buzz/__init__.py
|
Buzz.handle_errors
|
def handle_errors(
cls, message, *format_args,
re_raise=True, exception_class=Exception,
do_finally=None, do_except=None, do_else=None,
**format_kwds
):
"""
provides a context manager that will intercept exceptions and repackage
them as Buzz instances with a message attached:
.. code-block:: python
with Buzz.handle_errors("It didn't work"):
some_code_that_might_raise_an_exception()
:param: message: The message to attach to the raised Buzz
:param: format_args: Format arguments. Follows str.format conv.
:param: format_kwds: Format keyword args. Follows str.format conv.
:param: re_raise: If true, the re-packaged exception will be
raised
:param: exception_class: Limits the class of exceptions that will be
re-packaged as a Buzz exception.
Any other exception types will not be caught
and re-packaged.
Defaults to Exception (will handle all
exceptions)
:param: do_finally: A function that should always be called at the
end of the block. Should take no parameters
:param: do_except: A function that should be called only if there
was an exception. Should take the raised
exception as its first parameter, the final
message for the exception that will be raised
as its second, and the traceback as its third
:param: do_else: A function that should be called only if there
were no exceptions encountered
"""
try:
yield
except exception_class as err:
try:
final_message = cls.reformat_exception(
message, err, *format_args, **format_kwds
)
except Exception as msg_err:
raise cls(
"Failed while formatting message: {}".format(repr(msg_err))
)
trace = cls.get_traceback()
if do_except is not None:
do_except(err, final_message, trace)
if re_raise:
raise cls(final_message).with_traceback(trace)
else:
if do_else is not None:
do_else()
finally:
if do_finally is not None:
do_finally()
|
python
|
def handle_errors(
cls, message, *format_args,
re_raise=True, exception_class=Exception,
do_finally=None, do_except=None, do_else=None,
**format_kwds
):
"""
provides a context manager that will intercept exceptions and repackage
them as Buzz instances with a message attached:
.. code-block:: python
with Buzz.handle_errors("It didn't work"):
some_code_that_might_raise_an_exception()
:param: message: The message to attach to the raised Buzz
:param: format_args: Format arguments. Follows str.format conv.
:param: format_kwds: Format keyword args. Follows str.format conv.
:param: re_raise: If true, the re-packaged exception will be
raised
:param: exception_class: Limits the class of exceptions that will be
re-packaged as a Buzz exception.
Any other exception types will not be caught
and re-packaged.
Defaults to Exception (will handle all
exceptions)
:param: do_finally: A function that should always be called at the
end of the block. Should take no parameters
:param: do_except: A function that should be called only if there
was an exception. Should take the raised
exception as its first parameter, the final
message for the exception that will be raised
as its second, and the traceback as its third
:param: do_else: A function taht should be called only if there
were no exceptions encountered
"""
try:
yield
except exception_class as err:
try:
final_message = cls.reformat_exception(
message, err, *format_args, **format_kwds
)
except Exception as msg_err:
raise cls(
"Failed while formatting message: {}".format(repr(msg_err))
)
trace = cls.get_traceback()
if do_except is not None:
do_except(err, final_message, trace)
if re_raise:
raise cls(final_message).with_traceback(trace)
else:
if do_else is not None:
do_else()
finally:
if do_finally is not None:
do_finally()
|
[
"def",
"handle_errors",
"(",
"cls",
",",
"message",
",",
"*",
"format_args",
",",
"re_raise",
"=",
"True",
",",
"exception_class",
"=",
"Exception",
",",
"do_finally",
"=",
"None",
",",
"do_except",
"=",
"None",
",",
"do_else",
"=",
"None",
",",
"*",
"*",
"format_kwds",
")",
":",
"try",
":",
"yield",
"except",
"exception_class",
"as",
"err",
":",
"try",
":",
"final_message",
"=",
"cls",
".",
"reformat_exception",
"(",
"message",
",",
"err",
",",
"*",
"format_args",
",",
"*",
"*",
"format_kwds",
")",
"except",
"Exception",
"as",
"msg_err",
":",
"raise",
"cls",
"(",
"\"Failed while formatting message: {}\"",
".",
"format",
"(",
"repr",
"(",
"msg_err",
")",
")",
")",
"trace",
"=",
"cls",
".",
"get_traceback",
"(",
")",
"if",
"do_except",
"is",
"not",
"None",
":",
"do_except",
"(",
"err",
",",
"final_message",
",",
"trace",
")",
"if",
"re_raise",
":",
"raise",
"cls",
"(",
"final_message",
")",
".",
"with_traceback",
"(",
"trace",
")",
"else",
":",
"if",
"do_else",
"is",
"not",
"None",
":",
"do_else",
"(",
")",
"finally",
":",
"if",
"do_finally",
"is",
"not",
"None",
":",
"do_finally",
"(",
")"
] |
provides a context manager that will intercept exceptions and repackage
them as Buzz instances with a message attached:
.. code-block:: python
with Buzz.handle_errors("It didn't work"):
some_code_that_might_raise_an_exception()
:param: message: The message to attach to the raised Buzz
:param: format_args: Format arguments. Follows str.format conv.
:param: format_kwds: Format keyword args. Follows str.format conv.
:param: re_raise: If true, the re-packaged exception will be
raised
:param: exception_class: Limits the class of exceptions that will be
re-packaged as a Buzz exception.
Any other exception types will not be caught
and re-packaged.
Defaults to Exception (will handle all
exceptions)
:param: do_finally: A function that should always be called at the
end of the block. Should take no parameters
:param: do_except: A function that should be called only if there
was an exception. Should take the raised
exception as its first parameter, the final
message for the exception that will be raised
as its second, and the traceback as its third
:param: do_else: A function taht should be called only if there
were no exceptions encountered
|
[
"provides",
"a",
"context",
"manager",
"that",
"will",
"intercept",
"exceptions",
"and",
"repackage",
"them",
"as",
"Buzz",
"instances",
"with",
"a",
"message",
"attached",
":"
] |
train
|
https://github.com/dusktreader/py-buzz/blob/f2fd97abe158a1688188647992a5be6531058ec3/buzz/__init__.py#L75-L133
|
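A short sketch of the `handle_errors` context manager from the record above; it follows the usage shown in its docstring and assumes `Buzz` is importable from the `buzz` package:

from buzz import Buzz

def cleanup():
    print("cleanup always runs")   # passed as do_finally, so it runs either way

try:
    with Buzz.handle_errors("loading settings failed", do_finally=cleanup):
        raise ValueError("bad value")
except Buzz as err:
    # the ValueError is repackaged, e.g. "loading settings failed -- ValueError: bad value"
    print(err)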
dusktreader/py-buzz
|
buzz/__init__.py
|
Buzz.require_condition
|
def require_condition(cls, expr, message, *format_args, **format_kwds):
"""
used to assert a certain state. If the expression renders a false
value, an exception will be raised with the supplied message
:param: message: The failure message to attach to the raised Buzz
:param: expr: A boolean value indicating an evaluated expression
:param: format_args: Format arguments. Follows str.format convention
:param: format_kwds: Format keyword args. Follows str.format convetion
"""
if not expr:
raise cls(message, *format_args, **format_kwds)
|
python
|
def require_condition(cls, expr, message, *format_args, **format_kwds):
"""
used to assert a certain state. If the expression renders a false
value, an exception will be raised with the supplied message
:param: message: The failure message to attach to the raised Buzz
:param: expr: A boolean value indicating an evaluated expression
:param: format_args: Format arguments. Follows str.format convention
:param: format_kwds: Format keyword args. Follows str.format convetion
"""
if not expr:
raise cls(message, *format_args, **format_kwds)
|
[
"def",
"require_condition",
"(",
"cls",
",",
"expr",
",",
"message",
",",
"*",
"format_args",
",",
"*",
"*",
"format_kwds",
")",
":",
"if",
"not",
"expr",
":",
"raise",
"cls",
"(",
"message",
",",
"*",
"format_args",
",",
"*",
"*",
"format_kwds",
")"
] |
used to assert a certain state. If the expression renders a false
value, an exception will be raised with the supplied message
:param: message: The failure message to attach to the raised Buzz
:param: expr: A boolean value indicating an evaluated expression
:param: format_args: Format arguments. Follows str.format convention
:param: format_kwds: Format keyword args. Follows str.format convetion
|
[
"used",
"to",
"assert",
"a",
"certain",
"state",
".",
"If",
"the",
"expression",
"renders",
"a",
"false",
"value",
"an",
"exception",
"will",
"be",
"raised",
"with",
"the",
"supplied",
"message"
] |
train
|
https://github.com/dusktreader/py-buzz/blob/f2fd97abe158a1688188647992a5be6531058ec3/buzz/__init__.py#L136-L147
|
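`require_condition` is a one-line guard; a minimal sketch, again assuming `Buzz` (or a subclass of it) is the importable exception type:

from buzz import Buzz

class RateError(Buzz):
    pass

def set_rate(rate):
    # raises RateError("rate must be positive, got -3") when the check fails
    RateError.require_condition(rate > 0, "rate must be positive, got {}", rate)
    return rate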
ozgurgunes/django-manifest
|
manifest/accounts/managers.py
|
AccountActivationManager.create_user
|
def create_user(self, username, email, password, active=False,
send_email=True):
"""
A simple wrapper that creates a new :class:`User`.
:param username:
String containing the username of the new user.
:param email:
String containing the email address of the new user.
:param password:
String containing the password for the new user.
:param active:
Boolean that defines if the user requires activation by clicking
on a link in an email. Defauts to ``True``.
:param send_email:
Boolean that defines if the user should be send an email. You
could set this to ``False`` when you want to create a user in
your own code, but don't want the user to activate through email.
:return: :class:`User` instance representing the new user.
"""
user = super(AccountActivationManager, self).create_user(username, email, password)
if isinstance(user.username, str):
username = user.username.encode('utf-8')
salt, activation_key = generate_sha1(username)
user.is_active = active
user.activation_key = activation_key
user.save(using=self._db)
if send_email:
user.send_activation_email()
return user
|
python
|
def create_user(self, username, email, password, active=False,
send_email=True):
"""
A simple wrapper that creates a new :class:`User`.
:param username:
String containing the username of the new user.
:param email:
String containing the email address of the new user.
:param password:
String containing the password for the new user.
:param active:
Boolean that defines if the user requires activation by clicking
on a link in an email. Defauts to ``True``.
:param send_email:
Boolean that defines if the user should be send an email. You
could set this to ``False`` when you want to create a user in
your own code, but don't want the user to activate through email.
:return: :class:`User` instance representing the new user.
"""
user = super(AccountActivationManager, self).create_user(username, email, password)
if isinstance(user.username, str):
username = user.username.encode('utf-8')
salt, activation_key = generate_sha1(username)
user.is_active = active
user.activation_key = activation_key
user.save(using=self._db)
if send_email:
user.send_activation_email()
return user
|
[
"def",
"create_user",
"(",
"self",
",",
"username",
",",
"email",
",",
"password",
",",
"active",
"=",
"False",
",",
"send_email",
"=",
"True",
")",
":",
"user",
"=",
"super",
"(",
"AccountActivationManager",
",",
"self",
")",
".",
"create_user",
"(",
"username",
",",
"email",
",",
"password",
")",
"if",
"isinstance",
"(",
"user",
".",
"username",
",",
"str",
")",
":",
"username",
"=",
"user",
".",
"username",
".",
"encode",
"(",
"'utf-8'",
")",
"salt",
",",
"activation_key",
"=",
"generate_sha1",
"(",
"username",
")",
"user",
".",
"is_active",
"=",
"active",
"user",
".",
"activation_key",
"=",
"activation_key",
"user",
".",
"save",
"(",
"using",
"=",
"self",
".",
"_db",
")",
"if",
"send_email",
":",
"user",
".",
"send_activation_email",
"(",
")",
"return",
"user"
] |
A simple wrapper that creates a new :class:`User`.
:param username:
String containing the username of the new user.
:param email:
String containing the email address of the new user.
:param password:
String containing the password for the new user.
:param active:
Boolean that defines if the user requires activation by clicking
on a link in an email. Defauts to ``True``.
:param send_email:
Boolean that defines if the user should be send an email. You
could set this to ``False`` when you want to create a user in
your own code, but don't want the user to activate through email.
:return: :class:`User` instance representing the new user.
|
[
"A",
"simple",
"wrapper",
"that",
"creates",
"a",
"new",
":",
"class",
":",
"User",
"."
] |
train
|
https://github.com/ozgurgunes/django-manifest/blob/9873bbf2a475b76284ad7e36b2b26c92131e72dd/manifest/accounts/managers.py#L23-L62
|
ozgurgunes/django-manifest
|
manifest/accounts/managers.py
|
AccountActivationManager.activate_user
|
def activate_user(self, username, activation_key):
"""
Activate an :class:`User` by supplying a valid ``activation_key``.
If the key is valid and an user is found, activates the user and
return it. Also sends the ``activation_complete`` signal.
:param activation_key:
String containing the secret SHA1 for a valid activation.
:return:
The newly activated :class:`User` or ``False`` if not successful.
"""
if SHA1_RE.search(activation_key):
try:
user = self.get(activation_key=activation_key)
except self.model.DoesNotExist:
return False
if not user.activation_key_expired():
user.activation_key = defaults.ACCOUNTS_ACTIVATED
user.is_active = True
user.save(using=self._db)
# Send the activation_complete signal
accounts_signals.activation_complete.send(sender=None,
user=user)
return user
return False
|
python
|
def activate_user(self, username, activation_key):
"""
Activate an :class:`User` by supplying a valid ``activation_key``.
If the key is valid and an user is found, activates the user and
return it. Also sends the ``activation_complete`` signal.
:param activation_key:
String containing the secret SHA1 for a valid activation.
:return:
The newly activated :class:`User` or ``False`` if not successful.
"""
if SHA1_RE.search(activation_key):
try:
user = self.get(activation_key=activation_key)
except self.model.DoesNotExist:
return False
if not user.activation_key_expired():
user.activation_key = defaults.ACCOUNTS_ACTIVATED
user.is_active = True
user.save(using=self._db)
# Send the activation_complete signal
accounts_signals.activation_complete.send(sender=None,
user=user)
return user
return False
|
[
"def",
"activate_user",
"(",
"self",
",",
"username",
",",
"activation_key",
")",
":",
"if",
"SHA1_RE",
".",
"search",
"(",
"activation_key",
")",
":",
"try",
":",
"user",
"=",
"self",
".",
"get",
"(",
"activation_key",
"=",
"activation_key",
")",
"except",
"self",
".",
"model",
".",
"DoesNotExist",
":",
"return",
"False",
"if",
"not",
"user",
".",
"activation_key_expired",
"(",
")",
":",
"user",
".",
"activation_key",
"=",
"defaults",
".",
"ACCOUNTS_ACTIVATED",
"user",
".",
"is_active",
"=",
"True",
"user",
".",
"save",
"(",
"using",
"=",
"self",
".",
"_db",
")",
"# Send the activation_complete signal",
"accounts_signals",
".",
"activation_complete",
".",
"send",
"(",
"sender",
"=",
"None",
",",
"user",
"=",
"user",
")",
"return",
"user",
"return",
"False"
] |
Activate an :class:`User` by supplying a valid ``activation_key``.
If the key is valid and an user is found, activates the user and
return it. Also sends the ``activation_complete`` signal.
:param activation_key:
String containing the secret SHA1 for a valid activation.
:return:
The newly activated :class:`User` or ``False`` if not successful.
|
[
"Activate",
"an",
":",
"class",
":",
"User",
"by",
"supplying",
"a",
"valid",
"activation_key",
"."
] |
train
|
https://github.com/ozgurgunes/django-manifest/blob/9873bbf2a475b76284ad7e36b2b26c92131e72dd/manifest/accounts/managers.py#L64-L91
|
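The activation key checked above is a 40-character hexadecimal SHA1 digest; a small illustration of the kind of gate `SHA1_RE` provides (the exact pattern used by the package is an assumption):

import hashlib
import random
import re

SHA1_RE = re.compile(r"^[a-f0-9]{40}$")   # assumed equivalent of the module-level pattern

key = hashlib.sha1(str(random.random()).encode("utf-8")).hexdigest()
assert SHA1_RE.search(key) is not None    # a well-formed key passes the regex gate
assert SHA1_RE.search("not-a-key") is None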
ozgurgunes/django-manifest
|
manifest/accounts/managers.py
|
AccountActivationManager.delete_expired_users
|
def delete_expired_users(self):
"""
Checks for expired users and delete's the ``User`` associated with
it. Skips if the user ``is_staff``.
:return: A list containing the deleted users.
"""
deleted_users = []
for user in self.filter(is_staff=False, is_active=False):
if user.activation_key_expired():
deleted_users.append(user)
user.delete()
return deleted_users
|
python
|
def delete_expired_users(self):
"""
Checks for expired users and delete's the ``User`` associated with
it. Skips if the user ``is_staff``.
:return: A list containing the deleted users.
"""
deleted_users = []
for user in self.filter(is_staff=False, is_active=False):
if user.activation_key_expired():
deleted_users.append(user)
user.delete()
return deleted_users
|
[
"def",
"delete_expired_users",
"(",
"self",
")",
":",
"deleted_users",
"=",
"[",
"]",
"for",
"user",
"in",
"self",
".",
"filter",
"(",
"is_staff",
"=",
"False",
",",
"is_active",
"=",
"False",
")",
":",
"if",
"user",
".",
"activation_key_expired",
"(",
")",
":",
"deleted_users",
".",
"append",
"(",
"user",
")",
"user",
".",
"delete",
"(",
")",
"return",
"deleted_users"
] |
Checks for expired users and delete's the ``User`` associated with
it. Skips if the user ``is_staff``.
:return: A list containing the deleted users.
|
[
"Checks",
"for",
"expired",
"users",
"and",
"delete",
"s",
"the",
"User",
"associated",
"with",
"it",
".",
"Skips",
"if",
"the",
"user",
"is_staff",
"."
] |
train
|
https://github.com/ozgurgunes/django-manifest/blob/9873bbf2a475b76284ad7e36b2b26c92131e72dd/manifest/accounts/managers.py#L93-L106
|
ozgurgunes/django-manifest
|
manifest/accounts/managers.py
|
EmailConfirmationManager.confirm_email
|
def confirm_email(self, username, confirmation_key):
"""
Confirm an email address by checking a ``confirmation_key``.
A valid ``confirmation_key`` will set the newly wanted email address
as the current email address. Returns the user after success or
``False`` when the confirmation key is invalid.
:param confirmation_key:
String containing the secret SHA1 that is used for verification.
:return:
The verified :class:`User` or ``False`` if not successful.
"""
if SHA1_RE.search(confirmation_key):
try:
user = self.select_related().get(username=username,
email_confirmation_key=confirmation_key,
email_unconfirmed__isnull=False)
except self.model.DoesNotExist:
return False
else:
user.email = user.email_unconfirmed
user.email_unconfirmed, user.email_confirmation_key = '',''
user.save(using=self._db)
# Send the confirmation_complete signal
accounts_signals.confirmation_complete.send(sender=None,
user=user)
return user
return False
|
python
|
def confirm_email(self, username, confirmation_key):
"""
Confirm an email address by checking a ``confirmation_key``.
A valid ``confirmation_key`` will set the newly wanted email address
as the current email address. Returns the user after success or
``False`` when the confirmation key is invalid.
:param confirmation_key:
String containing the secret SHA1 that is used for verification.
:return:
The verified :class:`User` or ``False`` if not successful.
"""
if SHA1_RE.search(confirmation_key):
try:
user = self.select_related().get(username=username,
email_confirmation_key=confirmation_key,
email_unconfirmed__isnull=False)
except self.model.DoesNotExist:
return False
else:
user.email = user.email_unconfirmed
user.email_unconfirmed, user.email_confirmation_key = '',''
user.save(using=self._db)
# Send the confirmation_complete signal
accounts_signals.confirmation_complete.send(sender=None,
user=user)
return user
return False
|
[
"def",
"confirm_email",
"(",
"self",
",",
"username",
",",
"confirmation_key",
")",
":",
"if",
"SHA1_RE",
".",
"search",
"(",
"confirmation_key",
")",
":",
"try",
":",
"user",
"=",
"self",
".",
"select_related",
"(",
")",
".",
"get",
"(",
"username",
"=",
"username",
",",
"email_confirmation_key",
"=",
"confirmation_key",
",",
"email_unconfirmed__isnull",
"=",
"False",
")",
"except",
"self",
".",
"model",
".",
"DoesNotExist",
":",
"return",
"False",
"else",
":",
"user",
".",
"email",
"=",
"user",
".",
"email_unconfirmed",
"user",
".",
"email_unconfirmed",
",",
"user",
".",
"email_confirmation_key",
"=",
"''",
",",
"''",
"user",
".",
"save",
"(",
"using",
"=",
"self",
".",
"_db",
")",
"# Send the confirmation_complete signal",
"accounts_signals",
".",
"confirmation_complete",
".",
"send",
"(",
"sender",
"=",
"None",
",",
"user",
"=",
"user",
")",
"return",
"user",
"return",
"False"
] |
Confirm an email address by checking a ``confirmation_key``.
A valid ``confirmation_key`` will set the newly wanted email address
as the current email address. Returns the user after success or
``False`` when the confirmation key is invalid.
:param confirmation_key:
String containing the secret SHA1 that is used for verification.
:return:
The verified :class:`User` or ``False`` if not successful.
|
[
"Confirm",
"an",
"email",
"address",
"by",
"checking",
"a",
"confirmation_key",
"."
] |
train
|
https://github.com/ozgurgunes/django-manifest/blob/9873bbf2a475b76284ad7e36b2b26c92131e72dd/manifest/accounts/managers.py#L113-L143
|
ozgurgunes/django-manifest
|
manifest/accounts/managers.py
|
UserProfileManager.get_visible_profiles
|
def get_visible_profiles(self, user=None):
"""
Returns all the visible profiles available to this user.
For now keeps it simple by just applying the cases when a user is not
active, a user has it's profile closed to everyone or a user only
allows registered users to view their profile.
:param user:
A Django :class:`User` instance.
:return:
All profiles that are visible to this user.
"""
profiles = self.select_related().all()
filter_kwargs = {'is_active': True}
profiles = profiles.filter(**filter_kwargs)
if user and isinstance(user, AnonymousUser):
profiles = []
return profiles
|
python
|
def get_visible_profiles(self, user=None):
"""
Returns all the visible profiles available to this user.
For now keeps it simple by just applying the cases when a user is not
active, a user has it's profile closed to everyone or a user only
allows registered users to view their profile.
:param user:
A Django :class:`User` instance.
:return:
All profiles that are visible to this user.
"""
profiles = self.select_related().all()
filter_kwargs = {'is_active': True}
profiles = profiles.filter(**filter_kwargs)
if user and isinstance(user, AnonymousUser):
profiles = []
return profiles
|
[
"def",
"get_visible_profiles",
"(",
"self",
",",
"user",
"=",
"None",
")",
":",
"profiles",
"=",
"self",
".",
"select_related",
"(",
")",
".",
"all",
"(",
")",
"filter_kwargs",
"=",
"{",
"'is_active'",
":",
"True",
"}",
"profiles",
"=",
"profiles",
".",
"filter",
"(",
"*",
"*",
"filter_kwargs",
")",
"if",
"user",
"and",
"isinstance",
"(",
"user",
",",
"AnonymousUser",
")",
":",
"profiles",
"=",
"[",
"]",
"return",
"profiles"
] |
Returns all the visible profiles available to this user.
For now keeps it simple by just applying the cases when a user is not
active, a user has it's profile closed to everyone or a user only
allows registered users to view their profile.
:param user:
A Django :class:`User` instance.
:return:
All profiles that are visible to this user.
|
[
"Returns",
"all",
"the",
"visible",
"profiles",
"available",
"to",
"this",
"user",
"."
] |
train
|
https://github.com/ozgurgunes/django-manifest/blob/9873bbf2a475b76284ad7e36b2b26c92131e72dd/manifest/accounts/managers.py#L151-L173
|
Nic30/sphinx-hwt
|
sphinx_hwt/sphinx_hwt.py
|
SchematicLink.visit_html
|
def visit_html(self, node):
"""
Generate html elements and schematic json
"""
parentClsNode = node.parent.parent
assert parentClsNode.attributes['objtype'] == 'class'
assert parentClsNode.attributes['domain'] == 'py'
sign = node.parent.parent.children[0]
assert isinstance(sign, desc_signature)
absolute_name = sign.attributes['ids'][0]
_construct = node["constructor_fn "]
serialno = node["serialno"]
try:
if _construct is None:
unitCls = generic_import(absolute_name)
if not issubclass(unitCls, Unit):
raise AssertionError(
"Can not use hwt-schematic sphinx directive and create schematic"
" for %s because it is not subclass of %r" % (absolute_name, Unit))
u = unitCls()
else:
assert len(_construct) > 0 and RE_IS_ID.match(_construct), _construct
_absolute_name = []
assert ".." not in absolute_name, absolute_name
for n in absolute_name.split(sep=".")[:-1]:
if n != "":
_absolute_name.append(n)
_absolute_name.append(_construct)
constructor_fn = generic_import(_absolute_name)
u = constructor_fn()
if not isinstance(u, Unit):
raise AssertionError(
"Can not use hwt-schematic sphinx directive and create schematic"
" for %s because function did not returned instance of %r, (%r)" % (
_absolute_name, Unit, u))
schem_file = SchematicPaths.get_sch_file_name_absolute(
self.document, absolute_name, serialno)
makedirs(path.dirname(schem_file), exist_ok=True)
with open(schem_file, "w") as f:
synthesised(u, DEFAULT_PLATFORM)
g = UnitToLNode(u, optimizations=DEFAULT_LAYOUT_OPTIMIZATIONS)
idStore = ElkIdStore()
data = g.toElkJson(idStore)
json.dump(data, f)
viewer = SchematicPaths.get_sch_viewer_link(self.document)
sch_name = SchematicPaths.get_sch_file_name(
self.document, absolute_name, serialno)
ref = nodes.reference(text=_("schematic"), # internal=False,
refuri="%s?schematic=%s" % (
viewer,
path.join(SchematicPaths.SCHEMATIC_DIR_PREFIX,
sch_name)))
node += ref
except Exception as e:
logging.error(e, exc_info=True)
raise Exception(
"Error occured while processing of %s" % absolute_name)
|
python
|
def visit_html(self, node):
"""
Generate html elements and schematic json
"""
parentClsNode = node.parent.parent
assert parentClsNode.attributes['objtype'] == 'class'
assert parentClsNode.attributes['domain'] == 'py'
sign = node.parent.parent.children[0]
assert isinstance(sign, desc_signature)
absolute_name = sign.attributes['ids'][0]
_construct = node["constructor_fn "]
serialno = node["serialno"]
try:
if _construct is None:
unitCls = generic_import(absolute_name)
if not issubclass(unitCls, Unit):
raise AssertionError(
"Can not use hwt-schematic sphinx directive and create schematic"
" for %s because it is not subclass of %r" % (absolute_name, Unit))
u = unitCls()
else:
assert len(_construct) > 0 and RE_IS_ID.match(_construct), _construct
_absolute_name = []
assert ".." not in absolute_name, absolute_name
for n in absolute_name.split(sep=".")[:-1]:
if n != "":
_absolute_name.append(n)
_absolute_name.append(_construct)
constructor_fn = generic_import(_absolute_name)
u = constructor_fn()
if not isinstance(u, Unit):
raise AssertionError(
"Can not use hwt-schematic sphinx directive and create schematic"
" for %s because function did not returned instance of %r, (%r)" % (
_absolute_name, Unit, u))
schem_file = SchematicPaths.get_sch_file_name_absolute(
self.document, absolute_name, serialno)
makedirs(path.dirname(schem_file), exist_ok=True)
with open(schem_file, "w") as f:
synthesised(u, DEFAULT_PLATFORM)
g = UnitToLNode(u, optimizations=DEFAULT_LAYOUT_OPTIMIZATIONS)
idStore = ElkIdStore()
data = g.toElkJson(idStore)
json.dump(data, f)
viewer = SchematicPaths.get_sch_viewer_link(self.document)
sch_name = SchematicPaths.get_sch_file_name(
self.document, absolute_name, serialno)
ref = nodes.reference(text=_("schematic"), # internal=False,
refuri="%s?schematic=%s" % (
viewer,
path.join(SchematicPaths.SCHEMATIC_DIR_PREFIX,
sch_name)))
node += ref
except Exception as e:
logging.error(e, exc_info=True)
raise Exception(
"Error occured while processing of %s" % absolute_name)
|
[
"def",
"visit_html",
"(",
"self",
",",
"node",
")",
":",
"parentClsNode",
"=",
"node",
".",
"parent",
".",
"parent",
"assert",
"parentClsNode",
".",
"attributes",
"[",
"'objtype'",
"]",
"==",
"'class'",
"assert",
"parentClsNode",
".",
"attributes",
"[",
"'domain'",
"]",
"==",
"'py'",
"sign",
"=",
"node",
".",
"parent",
".",
"parent",
".",
"children",
"[",
"0",
"]",
"assert",
"isinstance",
"(",
"sign",
",",
"desc_signature",
")",
"absolute_name",
"=",
"sign",
".",
"attributes",
"[",
"'ids'",
"]",
"[",
"0",
"]",
"_construct",
"=",
"node",
"[",
"\"constructor_fn \"",
"]",
"serialno",
"=",
"node",
"[",
"\"serialno\"",
"]",
"try",
":",
"if",
"_construct",
"is",
"None",
":",
"unitCls",
"=",
"generic_import",
"(",
"absolute_name",
")",
"if",
"not",
"issubclass",
"(",
"unitCls",
",",
"Unit",
")",
":",
"raise",
"AssertionError",
"(",
"\"Can not use hwt-schematic sphinx directive and create schematic\"",
"\" for %s because it is not subclass of %r\"",
"%",
"(",
"absolute_name",
",",
"Unit",
")",
")",
"u",
"=",
"unitCls",
"(",
")",
"else",
":",
"assert",
"len",
"(",
"_construct",
")",
">",
"0",
"and",
"RE_IS_ID",
".",
"match",
"(",
"_construct",
")",
",",
"_construct",
"_absolute_name",
"=",
"[",
"]",
"assert",
"\"..\"",
"not",
"in",
"absolute_name",
",",
"absolute_name",
"for",
"n",
"in",
"absolute_name",
".",
"split",
"(",
"sep",
"=",
"\".\"",
")",
"[",
":",
"-",
"1",
"]",
":",
"if",
"n",
"!=",
"\"\"",
":",
"_absolute_name",
".",
"append",
"(",
"n",
")",
"_absolute_name",
".",
"append",
"(",
"_construct",
")",
"constructor_fn",
"=",
"generic_import",
"(",
"_absolute_name",
")",
"u",
"=",
"constructor_fn",
"(",
")",
"if",
"not",
"isinstance",
"(",
"u",
",",
"Unit",
")",
":",
"raise",
"AssertionError",
"(",
"\"Can not use hwt-schematic sphinx directive and create schematic\"",
"\" for %s because function did not returned instance of %r, (%r)\"",
"%",
"(",
"_absolute_name",
",",
"Unit",
",",
"u",
")",
")",
"schem_file",
"=",
"SchematicPaths",
".",
"get_sch_file_name_absolute",
"(",
"self",
".",
"document",
",",
"absolute_name",
",",
"serialno",
")",
"makedirs",
"(",
"path",
".",
"dirname",
"(",
"schem_file",
")",
",",
"exist_ok",
"=",
"True",
")",
"with",
"open",
"(",
"schem_file",
",",
"\"w\"",
")",
"as",
"f",
":",
"synthesised",
"(",
"u",
",",
"DEFAULT_PLATFORM",
")",
"g",
"=",
"UnitToLNode",
"(",
"u",
",",
"optimizations",
"=",
"DEFAULT_LAYOUT_OPTIMIZATIONS",
")",
"idStore",
"=",
"ElkIdStore",
"(",
")",
"data",
"=",
"g",
".",
"toElkJson",
"(",
"idStore",
")",
"json",
".",
"dump",
"(",
"data",
",",
"f",
")",
"viewer",
"=",
"SchematicPaths",
".",
"get_sch_viewer_link",
"(",
"self",
".",
"document",
")",
"sch_name",
"=",
"SchematicPaths",
".",
"get_sch_file_name",
"(",
"self",
".",
"document",
",",
"absolute_name",
",",
"serialno",
")",
"ref",
"=",
"nodes",
".",
"reference",
"(",
"text",
"=",
"_",
"(",
"\"schematic\"",
")",
",",
"# internal=False,",
"refuri",
"=",
"\"%s?schematic=%s\"",
"%",
"(",
"viewer",
",",
"path",
".",
"join",
"(",
"SchematicPaths",
".",
"SCHEMATIC_DIR_PREFIX",
",",
"sch_name",
")",
")",
")",
"node",
"+=",
"ref",
"except",
"Exception",
"as",
"e",
":",
"logging",
".",
"error",
"(",
"e",
",",
"exc_info",
"=",
"True",
")",
"raise",
"Exception",
"(",
"\"Error occured while processing of %s\"",
"%",
"absolute_name",
")"
] |
Generate html elements and schematic json
|
[
"Generate",
"html",
"elements",
"and",
"schematic",
"json"
] |
train
|
https://github.com/Nic30/sphinx-hwt/blob/3aee09f467be74433ae2a6b2de55c6b90e5920ae/sphinx_hwt/sphinx_hwt.py#L94-L155
|
Nic30/sphinx-hwt
|
setup.py
|
build_npm.run
|
def run(self):
has_npm = npm_installation_check()
if has_npm:
run_npm_install()
else:
print("Warning: npm not installed using prebuilded js files!",
file=sys.stderr)
"""
Download npm packages required by package.json and extract required
files from them
"""
for js in JS_FILES:
downloaded_js_name = os.path.join(TOP_DIR, js)
installed_js_name = os.path.join(TOP_DIR, "sphinx_hwt", "html", js)
if has_npm:
assert os.path.exists(downloaded_js_name), downloaded_js_name
os.makedirs(os.path.dirname(installed_js_name), exist_ok=True)
copyfile(downloaded_js_name, installed_js_name)
print("copy generated from NPM packages", installed_js_name)
else:
if os.path.exists(installed_js_name):
print("using prebuilded", installed_js_name)
else:
raise Exception("Can not find npm,"
" which is required for the installation "
"and this is pacpage has not js prebuilded")
|
python
|
def run(self):
has_npm = npm_installation_check()
if has_npm:
run_npm_install()
else:
print("Warning: npm not installed using prebuilded js files!",
file=sys.stderr)
"""
Download npm packages required by package.json and extract required
files from them
"""
for js in JS_FILES:
downloaded_js_name = os.path.join(TOP_DIR, js)
installed_js_name = os.path.join(TOP_DIR, "sphinx_hwt", "html", js)
if has_npm:
assert os.path.exists(downloaded_js_name), downloaded_js_name
os.makedirs(os.path.dirname(installed_js_name), exist_ok=True)
copyfile(downloaded_js_name, installed_js_name)
print("copy generated from NPM packages", installed_js_name)
else:
if os.path.exists(installed_js_name):
print("using prebuilded", installed_js_name)
else:
raise Exception("Can not find npm,"
" which is required for the installation "
"and this is pacpage has not js prebuilded")
|
[
"def",
"run",
"(",
"self",
")",
":",
"has_npm",
"=",
"npm_installation_check",
"(",
")",
"if",
"has_npm",
":",
"run_npm_install",
"(",
")",
"else",
":",
"print",
"(",
"\"Warning: npm not installed using prebuilded js files!\"",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"for",
"js",
"in",
"JS_FILES",
":",
"downloaded_js_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"TOP_DIR",
",",
"js",
")",
"installed_js_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"TOP_DIR",
",",
"\"sphinx_hwt\"",
",",
"\"html\"",
",",
"js",
")",
"if",
"has_npm",
":",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"downloaded_js_name",
")",
",",
"downloaded_js_name",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"installed_js_name",
")",
",",
"exist_ok",
"=",
"True",
")",
"copyfile",
"(",
"downloaded_js_name",
",",
"installed_js_name",
")",
"print",
"(",
"\"copy generated from NPM packages\"",
",",
"installed_js_name",
")",
"else",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"installed_js_name",
")",
":",
"print",
"(",
"\"using prebuilded\"",
",",
"installed_js_name",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"Can not find npm,\"",
"\" which is required for the installation \"",
"\"and this is pacpage has not js prebuilded\"",
")"
] |
Download npm packages required by package.json and extract required
files from them
|
[
"Download",
"npm",
"packages",
"required",
"by",
"package",
".",
"json",
"and",
"extract",
"required",
"files",
"from",
"them"
] |
train
|
https://github.com/Nic30/sphinx-hwt/blob/3aee09f467be74433ae2a6b2de55c6b90e5920ae/setup.py#L81-L106
|
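A hypothetical sketch of the two helpers the `run` command above relies on (`npm_installation_check` and `run_npm_install`); their real implementations in `setup.py` are not shown in the record, so both bodies here are assumptions:

import shutil
import subprocess

def npm_installation_check():
    # assumed behaviour: report whether an `npm` executable is available on PATH
    return shutil.which("npm") is not None

def run_npm_install():
    # assumed behaviour: install the packages declared in package.json
    subprocess.check_call(["npm", "install"])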
dfm/transit
|
transit/transit.py
|
Central.density
|
def density(self):
"""Stellar density in CGS units
"""
r = self.radius * _Rsun
m = self.mass * _Msun
return 0.75 * m / (np.pi * r * r * r)
|
python
|
def density(self):
"""Stellar density in CGS units
"""
r = self.radius * _Rsun
m = self.mass * _Msun
return 0.75 * m / (np.pi * r * r * r)
|
[
"def",
"density",
"(",
"self",
")",
":",
"r",
"=",
"self",
".",
"radius",
"*",
"_Rsun",
"m",
"=",
"self",
".",
"mass",
"*",
"_Msun",
"return",
"0.75",
"*",
"m",
"/",
"(",
"np",
".",
"pi",
"*",
"r",
"*",
"r",
"*",
"r",
")"
] |
Stellar density in CGS units
|
[
"Stellar",
"density",
"in",
"CGS",
"units"
] |
train
|
https://github.com/dfm/transit/blob/482d99b506657fa3fd54a388f9c6be13b9e57bce/transit/transit.py#L101-L106
|
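The `density` property above is simply 3M/(4πR³) in CGS units; a standalone sketch with assumed values for the module constants `_Rsun` and `_Msun`:

import numpy as np

_Rsun = 6.957e10    # cm, assumed value of the module constant
_Msun = 1.989e33    # g, assumed value of the module constant

radius, mass = 1.0, 1.0                    # stellar radius and mass in solar units
r, m = radius * _Rsun, mass * _Msun
rho = 0.75 * m / (np.pi * r ** 3)          # ~1.4 g/cm^3 for solar values
print(rho)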
dfm/transit
|
transit/transit.py
|
Body.duration
|
def duration(self):
"""
The approximate duration of the transit :math:`T_\mathrm{tot}` from
Equation (14) in Winn (2010).
"""
self._check_ps()
rstar = self.system.central.radius
k = self.r/rstar
dur = self.period / np.pi
arg = rstar/self.a * np.sqrt((1+k)**2 - self.b**2)
arg /= np.sin(np.radians(self.incl))
dur *= np.arcsin(arg)
if self.e > 0.0:
dur *= np.sqrt(1 - self.e**2) / (1 + self.e * np.sin(self.omega))
return dur
|
python
|
def duration(self):
"""
The approximate duration of the transit :math:`T_\mathrm{tot}` from
Equation (14) in Winn (2010).
"""
self._check_ps()
rstar = self.system.central.radius
k = self.r/rstar
dur = self.period / np.pi
arg = rstar/self.a * np.sqrt((1+k)**2 - self.b**2)
arg /= np.sin(np.radians(self.incl))
dur *= np.arcsin(arg)
if self.e > 0.0:
dur *= np.sqrt(1 - self.e**2) / (1 + self.e * np.sin(self.omega))
return dur
|
[
"def",
"duration",
"(",
"self",
")",
":",
"self",
".",
"_check_ps",
"(",
")",
"rstar",
"=",
"self",
".",
"system",
".",
"central",
".",
"radius",
"k",
"=",
"self",
".",
"r",
"/",
"rstar",
"dur",
"=",
"self",
".",
"period",
"/",
"np",
".",
"pi",
"arg",
"=",
"rstar",
"/",
"self",
".",
"a",
"*",
"np",
".",
"sqrt",
"(",
"(",
"1",
"+",
"k",
")",
"**",
"2",
"-",
"self",
".",
"b",
"**",
"2",
")",
"arg",
"/=",
"np",
".",
"sin",
"(",
"np",
".",
"radians",
"(",
"self",
".",
"incl",
")",
")",
"dur",
"*=",
"np",
".",
"arcsin",
"(",
"arg",
")",
"if",
"self",
".",
"e",
">",
"0.0",
":",
"dur",
"*=",
"np",
".",
"sqrt",
"(",
"1",
"-",
"self",
".",
"e",
"**",
"2",
")",
"/",
"(",
"1",
"+",
"self",
".",
"e",
"*",
"np",
".",
"sin",
"(",
"self",
".",
"omega",
")",
")",
"return",
"dur"
] |
The approximate duration of the transit :math:`T_\mathrm{tot}` from
Equation (14) in Winn (2010).
|
[
"The",
"approximate",
"duration",
"of",
"the",
"transit",
":",
"math",
":",
"T_",
"\\",
"mathrm",
"{",
"tot",
"}",
"from",
"Equation",
"(",
"14",
")",
"in",
"Winn",
"(",
"2010",
")",
"."
] |
train
|
https://github.com/dfm/transit/blob/482d99b506657fa3fd54a388f9c6be13b9e57bce/transit/transit.py#L348-L364
|
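Written out, the expression computed by `duration` above (Winn 2010, Eq. 14, with the eccentricity correction applied when e > 0) is:

T_\mathrm{tot} = \frac{P}{\pi}\,
    \arcsin\!\left[\frac{R_\star}{a}\,
    \frac{\sqrt{(1 + k)^2 - b^2}}{\sin i}\right]
    \times \frac{\sqrt{1 - e^2}}{1 + e \sin\omega},
    \qquad k = \frac{R_p}{R_\star}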
dfm/transit
|
transit/transit.py
|
System.add_body
|
def add_body(self, body):
"""
Add a :class:`Body` to the system. This function also sets the
``system`` attribute of the body.
:param body:
The :class:`Body` to add.
"""
body.system = self
self.bodies.append(body)
self.unfrozen = np.concatenate((
self.unfrozen[:-2], np.zeros(7, dtype=bool), self.unfrozen[-2:]
))
|
python
|
def add_body(self, body):
"""
Add a :class:`Body` to the system. This function also sets the
``system`` attribute of the body.
:param body:
The :class:`Body` to add.
"""
body.system = self
self.bodies.append(body)
self.unfrozen = np.concatenate((
self.unfrozen[:-2], np.zeros(7, dtype=bool), self.unfrozen[-2:]
))
|
[
"def",
"add_body",
"(",
"self",
",",
"body",
")",
":",
"body",
".",
"system",
"=",
"self",
"self",
".",
"bodies",
".",
"append",
"(",
"body",
")",
"self",
".",
"unfrozen",
"=",
"np",
".",
"concatenate",
"(",
"(",
"self",
".",
"unfrozen",
"[",
":",
"-",
"2",
"]",
",",
"np",
".",
"zeros",
"(",
"7",
",",
"dtype",
"=",
"bool",
")",
",",
"self",
".",
"unfrozen",
"[",
"-",
"2",
":",
"]",
")",
")"
] |
Add a :class:`Body` to the system. This function also sets the
``system`` attribute of the body.
:param body:
The :class:`Body` to add.
|
[
"Add",
"a",
":",
"class",
":",
"Body",
"to",
"the",
"system",
".",
"This",
"function",
"also",
"sets",
"the",
"system",
"attribute",
"of",
"the",
"body",
"."
] |
train
|
https://github.com/dfm/transit/blob/482d99b506657fa3fd54a388f9c6be13b9e57bce/transit/transit.py#L410-L423
|
dfm/transit
|
transit/transit.py
|
System.light_curve
|
def light_curve(self, t, texp=0.0, tol=1e-8, maxdepth=4, use_batman=False):
"""
Get the light curve evaluated at a list of times using the current
model.
:param t:
The times where the light curve should be evaluated (in days).
:param tol:
The stopping criterion for the exposure time integration.
:param maxdepth:
The maximum recursion depth of the exposure time integrator.
"""
t = np.atleast_1d(t)
if len(self.bodies) == 0:
return self.central.flux + np.zeros_like(t)
return CythonSolver().kepler_light_curve(len(self.bodies),
self._get_params(),
t, texp, tol, maxdepth,
use_batman=use_batman)
|
python
|
def light_curve(self, t, texp=0.0, tol=1e-8, maxdepth=4, use_batman=False):
"""
Get the light curve evaluated at a list of times using the current
model.
:param t:
The times where the light curve should be evaluated (in days).
:param tol:
The stopping criterion for the exposure time integration.
:param maxdepth:
The maximum recursion depth of the exposure time integrator.
"""
t = np.atleast_1d(t)
if len(self.bodies) == 0:
return self.central.flux + np.zeros_like(t)
return CythonSolver().kepler_light_curve(len(self.bodies),
self._get_params(),
t, texp, tol, maxdepth,
use_batman=use_batman)
|
[
"def",
"light_curve",
"(",
"self",
",",
"t",
",",
"texp",
"=",
"0.0",
",",
"tol",
"=",
"1e-8",
",",
"maxdepth",
"=",
"4",
",",
"use_batman",
"=",
"False",
")",
":",
"t",
"=",
"np",
".",
"atleast_1d",
"(",
"t",
")",
"if",
"len",
"(",
"self",
".",
"bodies",
")",
"==",
"0",
":",
"return",
"self",
".",
"central",
".",
"flux",
"+",
"np",
".",
"zeros_like",
"(",
"t",
")",
"return",
"CythonSolver",
"(",
")",
".",
"kepler_light_curve",
"(",
"len",
"(",
"self",
".",
"bodies",
")",
",",
"self",
".",
"_get_params",
"(",
")",
",",
"t",
",",
"texp",
",",
"tol",
",",
"maxdepth",
",",
"use_batman",
"=",
"use_batman",
")"
] |
Get the light curve evaluated at a list of times using the current
model.
:param t:
The times where the light curve should be evaluated (in days).
:param tol:
The stopping criterion for the exposure time integration.
:param maxdepth:
The maximum recursion depth of the exposure time integrator.
|
[
"Get",
"the",
"light",
"curve",
"evaluated",
"at",
"a",
"list",
"of",
"times",
"using",
"the",
"current",
"model",
"."
] |
train
|
https://github.com/dfm/transit/blob/482d99b506657fa3fd54a388f9c6be13b9e57bce/transit/transit.py#L428-L450
|
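A hedged usage sketch for `light_curve`: it relies only on the signature documented above plus the `central.flux` attribute referenced in the code; how the `System` itself is assembled is assumed to follow the `add_body` record:

import numpy as np

def max_transit_depth(system, span_days=10.0, n=1000):
    # Evaluate the model over `span_days` and report the deepest dip relative
    # to the out-of-transit flux. `system` is assumed to be a transit.System
    # with a central star and at least one body already attached.
    t = np.linspace(0.0, span_days, n)
    flux = system.light_curve(t, texp=0.0, tol=1e-8, maxdepth=4)
    return 1.0 - flux.min() / system.central.flux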
dfm/transit
|
transit/transit.py
|
System.light_curve_gradient
|
def light_curve_gradient(self, t, texp=0.0, tol=1e-8, maxdepth=4):
"""
Get the light curve evaluated at a list of times using the current
model.
:param t:
The times where the light curve should be evaluated (in days).
:param tol:
The stopping criterion for the exposure time integration.
:param maxdepth:
The maximum recursion depth of the exposure time integrator.
"""
t = np.atleast_1d(t)
if len(self.bodies) == 0:
grad = np.zeros((len(t), 5), dtype=float)
grad[:, 0] = 1.0
return self.central.flux + np.zeros_like(t), grad[:, self.unfrozen]
f, df = CythonSolver().kepler_gradient(len(self.bodies),
self._get_params(),
t, texp, tol, maxdepth)
return f, df[:, self.unfrozen].T
|
python
|
def light_curve_gradient(self, t, texp=0.0, tol=1e-8, maxdepth=4):
"""
Get the light curve evaluated at a list of times using the current
model.
:param t:
The times where the light curve should be evaluated (in days).
:param tol:
The stopping criterion for the exposure time integration.
:param maxdepth:
The maximum recursion depth of the exposure time integrator.
"""
t = np.atleast_1d(t)
if len(self.bodies) == 0:
grad = np.zeros((len(t), 5), dtype=float)
grad[:, 0] = 1.0
return self.central.flux + np.zeros_like(t), grad[:, self.unfrozen]
f, df = CythonSolver().kepler_gradient(len(self.bodies),
self._get_params(),
t, texp, tol, maxdepth)
return f, df[:, self.unfrozen].T
|
[
"def",
"light_curve_gradient",
"(",
"self",
",",
"t",
",",
"texp",
"=",
"0.0",
",",
"tol",
"=",
"1e-8",
",",
"maxdepth",
"=",
"4",
")",
":",
"t",
"=",
"np",
".",
"atleast_1d",
"(",
"t",
")",
"if",
"len",
"(",
"self",
".",
"bodies",
")",
"==",
"0",
":",
"grad",
"=",
"np",
".",
"zeros",
"(",
"(",
"len",
"(",
"t",
")",
",",
"5",
")",
",",
"dtype",
"=",
"float",
")",
"grad",
"[",
":",
",",
"0",
"]",
"=",
"1.0",
"return",
"self",
".",
"central",
".",
"flux",
"+",
"np",
".",
"zeros_like",
"(",
"t",
")",
",",
"grad",
"[",
":",
",",
"self",
".",
"unfrozen",
"]",
"f",
",",
"df",
"=",
"CythonSolver",
"(",
")",
".",
"kepler_gradient",
"(",
"len",
"(",
"self",
".",
"bodies",
")",
",",
"self",
".",
"_get_params",
"(",
")",
",",
"t",
",",
"texp",
",",
"tol",
",",
"maxdepth",
")",
"return",
"f",
",",
"df",
"[",
":",
",",
"self",
".",
"unfrozen",
"]",
".",
"T"
] |
Get the light curve evaluated at a list of times using the current
model.
:param t:
The times where the light curve should be evaluated (in days).
:param tol:
The stopping criterion for the exposure time integration.
:param maxdepth:
The maximum recursion depth of the exposure time integrator.
|
[
"Get",
"the",
"light",
"curve",
"evaluated",
"at",
"a",
"list",
"of",
"times",
"using",
"the",
"current",
"model",
"."
] |
train
|
https://github.com/dfm/transit/blob/482d99b506657fa3fd54a388f9c6be13b9e57bce/transit/transit.py#L452-L476
|
ARMmbed/autoversion
|
scripts/tag_and_release.py
|
main
|
def main():
"""Tags the current repository
and commits changes to news files
"""
# see:
# https://packaging.python.org/tutorials/distributing-packages/#uploading-your-project-to-pypi
twine_repo = os.getenv('TWINE_REPOSITORY_URL') or os.getenv('TWINE_REPOSITORY')
print('tagging and releasing to %s as %s' % (
twine_repo,
os.getenv('TWINE_USERNAME')
))
if not twine_repo:
raise Exception('cannot release to implicit pypi repository. explicitly set the repo/url.')
version = subprocess.check_output(['pipenv', 'run', 'python', 'setup.py', '--version']).decode().strip()
if 'dev' in version:
raise Exception('cannot release unversioned project: %s' % version)
print('Preparing environment')
subprocess.check_call(['git', 'config', '--global', 'user.name', 'monty-bot'])
subprocess.check_call(['git', 'config', '--global', 'user.email', 'monty-bot@arm.com'])
url = subprocess.check_output(['git', 'remote', 'get-url', 'origin'])
branch_name = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
new_url = git_url_ssh_to_https(url.decode())
subprocess.check_call(['git', 'remote', 'set-url', 'origin', new_url])
branch_spec = 'origin/%s' % branch_name.decode('utf-8').strip()
subprocess.check_call(['git', 'branch', '--set-upstream-to', branch_spec])
print('Committing the changelog & version')
subprocess.check_call(['git', 'add', 'src/auto_version/__version__.py'])
subprocess.check_call(['git', 'add', 'CHANGELOG.md'])
subprocess.check_call(['git', 'add', 'docs/news/*'])
message = ':checkered_flag: :newspaper: Releasing version %s\n[skip ci]' % version
subprocess.check_call(['git', 'commit', '-m', message])
print('Tagging the project')
subprocess.check_call(['git', 'tag', '-a', version, '-m', 'Release %s' % version])
print('Pushing changes back to GitHub')
subprocess.check_call(['git', 'push', '--follow-tags'])
print('Marking this commit as latest')
subprocess.check_call(['git', 'tag', '-f', 'latest'])
subprocess.check_call(['git', 'push', '-f', '--tags'])
print('Generating a release package')
subprocess.check_call(
['pipenv', 'run', 'python', 'setup.py', 'clean', '--all', 'bdist_wheel', '--dist-dir', 'release-dist'])
print('Uploading to PyPI')
subprocess.check_call(['pipenv', 'run', 'python', '-m', 'twine', 'upload', 'release-dist/*'])
print('Done.')
|
python
|
def main():
"""Tags the current repository
and commits changes to news files
"""
# see:
# https://packaging.python.org/tutorials/distributing-packages/#uploading-your-project-to-pypi
twine_repo = os.getenv('TWINE_REPOSITORY_URL') or os.getenv('TWINE_REPOSITORY')
print('tagging and releasing to %s as %s' % (
twine_repo,
os.getenv('TWINE_USERNAME')
))
if not twine_repo:
raise Exception('cannot release to implicit pypi repository. explicitly set the repo/url.')
version = subprocess.check_output(['pipenv', 'run', 'python', 'setup.py', '--version']).decode().strip()
if 'dev' in version:
raise Exception('cannot release unversioned project: %s' % version)
print('Preparing environment')
subprocess.check_call(['git', 'config', '--global', 'user.name', 'monty-bot'])
subprocess.check_call(['git', 'config', '--global', 'user.email', 'monty-bot@arm.com'])
url = subprocess.check_output(['git', 'remote', 'get-url', 'origin'])
branch_name = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
new_url = git_url_ssh_to_https(url.decode())
subprocess.check_call(['git', 'remote', 'set-url', 'origin', new_url])
branch_spec = 'origin/%s' % branch_name.decode('utf-8').strip()
subprocess.check_call(['git', 'branch', '--set-upstream-to', branch_spec])
print('Committing the changelog & version')
subprocess.check_call(['git', 'add', 'src/auto_version/__version__.py'])
subprocess.check_call(['git', 'add', 'CHANGELOG.md'])
subprocess.check_call(['git', 'add', 'docs/news/*'])
message = ':checkered_flag: :newspaper: Releasing version %s\n[skip ci]' % version
subprocess.check_call(['git', 'commit', '-m', message])
print('Tagging the project')
subprocess.check_call(['git', 'tag', '-a', version, '-m', 'Release %s' % version])
print('Pushing changes back to GitHub')
subprocess.check_call(['git', 'push', '--follow-tags'])
print('Marking this commit as latest')
subprocess.check_call(['git', 'tag', '-f', 'latest'])
subprocess.check_call(['git', 'push', '-f', '--tags'])
print('Generating a release package')
subprocess.check_call(
['pipenv', 'run', 'python', 'setup.py', 'clean', '--all', 'bdist_wheel', '--dist-dir', 'release-dist'])
print('Uploading to PyPI')
subprocess.check_call(['pipenv', 'run', 'python', '-m', 'twine', 'upload', 'release-dist/*'])
print('Done.')
|
[
"def",
"main",
"(",
")",
":",
"# see:",
"# https://packaging.python.org/tutorials/distributing-packages/#uploading-your-project-to-pypi",
"twine_repo",
"=",
"os",
".",
"getenv",
"(",
"'TWINE_REPOSITORY_URL'",
")",
"or",
"os",
".",
"getenv",
"(",
"'TWINE_REPOSITORY'",
")",
"print",
"(",
"'tagging and releasing to %s as %s'",
"%",
"(",
"twine_repo",
",",
"os",
".",
"getenv",
"(",
"'TWINE_USERNAME'",
")",
")",
")",
"if",
"not",
"twine_repo",
":",
"raise",
"Exception",
"(",
"'cannot release to implicit pypi repository. explicitly set the repo/url.'",
")",
"version",
"=",
"subprocess",
".",
"check_output",
"(",
"[",
"'pipenv'",
",",
"'run'",
",",
"'python'",
",",
"'setup.py'",
",",
"'--version'",
"]",
")",
".",
"decode",
"(",
")",
".",
"strip",
"(",
")",
"if",
"'dev'",
"in",
"version",
":",
"raise",
"Exception",
"(",
"'cannot release unversioned project: %s'",
"%",
"version",
")",
"print",
"(",
"'Preparing environment'",
")",
"subprocess",
".",
"check_call",
"(",
"[",
"'git'",
",",
"'config'",
",",
"'--global'",
",",
"'user.name'",
",",
"'monty-bot'",
"]",
")",
"subprocess",
".",
"check_call",
"(",
"[",
"'git'",
",",
"'config'",
",",
"'--global'",
",",
"'user.email'",
",",
"'monty-bot@arm.com'",
"]",
")",
"url",
"=",
"subprocess",
".",
"check_output",
"(",
"[",
"'git'",
",",
"'remote'",
",",
"'get-url'",
",",
"'origin'",
"]",
")",
"branch_name",
"=",
"subprocess",
".",
"check_output",
"(",
"[",
"'git'",
",",
"'rev-parse'",
",",
"'--abbrev-ref'",
",",
"'HEAD'",
"]",
")",
"new_url",
"=",
"git_url_ssh_to_https",
"(",
"url",
".",
"decode",
"(",
")",
")",
"subprocess",
".",
"check_call",
"(",
"[",
"'git'",
",",
"'remote'",
",",
"'set-url'",
",",
"'origin'",
",",
"new_url",
"]",
")",
"branch_spec",
"=",
"'origin/%s'",
"%",
"branch_name",
".",
"decode",
"(",
"'utf-8'",
")",
".",
"strip",
"(",
")",
"subprocess",
".",
"check_call",
"(",
"[",
"'git'",
",",
"'branch'",
",",
"'--set-upstream-to'",
",",
"branch_spec",
"]",
")",
"print",
"(",
"'Committing the changelog & version'",
")",
"subprocess",
".",
"check_call",
"(",
"[",
"'git'",
",",
"'add'",
",",
"'src/auto_version/__version__.py'",
"]",
")",
"subprocess",
".",
"check_call",
"(",
"[",
"'git'",
",",
"'add'",
",",
"'CHANGELOG.md'",
"]",
")",
"subprocess",
".",
"check_call",
"(",
"[",
"'git'",
",",
"'add'",
",",
"'docs/news/*'",
"]",
")",
"message",
"=",
"':checkered_flag: :newspaper: Releasing version %s\\n[skip ci]'",
"%",
"version",
"subprocess",
".",
"check_call",
"(",
"[",
"'git'",
",",
"'commit'",
",",
"'-m'",
",",
"message",
"]",
")",
"print",
"(",
"'Tagging the project'",
")",
"subprocess",
".",
"check_call",
"(",
"[",
"'git'",
",",
"'tag'",
",",
"'-a'",
",",
"version",
",",
"'-m'",
",",
"'Release %s'",
"%",
"version",
"]",
")",
"print",
"(",
"'Pushing changes back to GitHub'",
")",
"subprocess",
".",
"check_call",
"(",
"[",
"'git'",
",",
"'push'",
",",
"'--follow-tags'",
"]",
")",
"print",
"(",
"'Marking this commit as latest'",
")",
"subprocess",
".",
"check_call",
"(",
"[",
"'git'",
",",
"'tag'",
",",
"'-f'",
",",
"'latest'",
"]",
")",
"subprocess",
".",
"check_call",
"(",
"[",
"'git'",
",",
"'push'",
",",
"'-f'",
",",
"'--tags'",
"]",
")",
"print",
"(",
"'Generating a release package'",
")",
"subprocess",
".",
"check_call",
"(",
"[",
"'pipenv'",
",",
"'run'",
",",
"'python'",
",",
"'setup.py'",
",",
"'clean'",
",",
"'--all'",
",",
"'bdist_wheel'",
",",
"'--dist-dir'",
",",
"'release-dist'",
"]",
")",
"print",
"(",
"'Uploading to PyPI'",
")",
"subprocess",
".",
"check_call",
"(",
"[",
"'pipenv'",
",",
"'run'",
",",
"'python'",
",",
"'-m'",
",",
"'twine'",
",",
"'upload'",
",",
"'release-dist/*'",
"]",
")",
"print",
"(",
"'Done.'",
")"
] |
Tags the current repository
and commits changes to news files
|
[
"Tags",
"the",
"current",
"repository"
] |
train
|
https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/scripts/tag_and_release.py#L39-L86
|
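The release script above depends on a `git_url_ssh_to_https` helper that is not shown in the record; a hypothetical sketch of what such a conversion can look like (the real helper may also embed push credentials):

def git_url_ssh_to_https(url):
    # hypothetical: rewrite "git@github.com:ARMmbed/autoversion.git"
    # into "https://github.com/ARMmbed/autoversion.git"
    url = url.strip()
    if url.startswith("git@"):
        host, _, path = url[len("git@"):].partition(":")
        return "https://%s/%s" % (host, path)
    return url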
pytroll/posttroll
|
posttroll/__init__.py
|
get_context
|
def get_context():
"""Provide the context to use.
This function takes care of creating new contexts in case of forks.
"""
pid = os.getpid()
if pid not in context:
context[pid] = zmq.Context()
logger.debug('renewed context for PID %d', pid)
return context[pid]
|
python
|
def get_context():
"""Provide the context to use.
This function takes care of creating new contexts in case of forks.
"""
pid = os.getpid()
if pid not in context:
context[pid] = zmq.Context()
logger.debug('renewed context for PID %d', pid)
return context[pid]
|
[
"def",
"get_context",
"(",
")",
":",
"pid",
"=",
"os",
".",
"getpid",
"(",
")",
"if",
"pid",
"not",
"in",
"context",
":",
"context",
"[",
"pid",
"]",
"=",
"zmq",
".",
"Context",
"(",
")",
"logger",
".",
"debug",
"(",
"'renewed context for PID %d'",
",",
"pid",
")",
"return",
"context",
"[",
"pid",
"]"
] |
Provide the context to use.
This function takes care of creating new contexts in case of forks.
|
[
"Provide",
"the",
"context",
"to",
"use",
"."
] |
train
|
https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/__init__.py#L37-L46
|
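A small sketch of why `get_context` keys contexts by PID: after a fork the child sees a different `os.getpid()` and therefore gets a fresh `zmq.Context` instead of reusing the parent's (POSIX-only example):

import os
from posttroll import get_context

parent_ctx = get_context()          # created and cached under the parent PID
pid = os.fork()
if pid == 0:                        # child process
    child_ctx = get_context()       # new PID, so a new Context is created
    assert child_ctx is not parent_ctx
    os._exit(0)
os.waitpid(pid, 0)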
pytroll/posttroll
|
posttroll/__init__.py
|
strp_isoformat
|
def strp_isoformat(strg):
"""Decode an ISO formatted string to a datetime object.
Allow a time-string without microseconds.
We handle input like: 2011-11-14T12:51:25.123456
"""
if isinstance(strg, datetime):
return strg
if len(strg) < 19 or len(strg) > 26:
if len(strg) > 30:
strg = strg[:30] + '...'
raise ValueError("Invalid ISO formatted time string '%s'"%strg)
if strg.find(".") == -1:
strg += '.000000'
if sys.version[0:3] >= '2.6':
return datetime.strptime(strg, "%Y-%m-%dT%H:%M:%S.%f")
else:
dat, mis = strg.split(".")
dat = datetime.strptime(dat, "%Y-%m-%dT%H:%M:%S")
mis = int(float('.' + mis)*1000000)
return dat.replace(microsecond=mis)
|
python
|
def strp_isoformat(strg):
"""Decode an ISO formatted string to a datetime object.
Allow a time-string without microseconds.
We handle input like: 2011-11-14T12:51:25.123456
"""
if isinstance(strg, datetime):
return strg
if len(strg) < 19 or len(strg) > 26:
if len(strg) > 30:
strg = strg[:30] + '...'
raise ValueError("Invalid ISO formatted time string '%s'"%strg)
if strg.find(".") == -1:
strg += '.000000'
if sys.version[0:3] >= '2.6':
return datetime.strptime(strg, "%Y-%m-%dT%H:%M:%S.%f")
else:
dat, mis = strg.split(".")
dat = datetime.strptime(dat, "%Y-%m-%dT%H:%M:%S")
mis = int(float('.' + mis)*1000000)
return dat.replace(microsecond=mis)
|
[
"def",
"strp_isoformat",
"(",
"strg",
")",
":",
"if",
"isinstance",
"(",
"strg",
",",
"datetime",
")",
":",
"return",
"strg",
"if",
"len",
"(",
"strg",
")",
"<",
"19",
"or",
"len",
"(",
"strg",
")",
">",
"26",
":",
"if",
"len",
"(",
"strg",
")",
">",
"30",
":",
"strg",
"=",
"strg",
"[",
":",
"30",
"]",
"+",
"'...'",
"raise",
"ValueError",
"(",
"\"Invalid ISO formatted time string '%s'\"",
"%",
"strg",
")",
"if",
"strg",
".",
"find",
"(",
"\".\"",
")",
"==",
"-",
"1",
":",
"strg",
"+=",
"'.000000'",
"if",
"sys",
".",
"version",
"[",
"0",
":",
"3",
"]",
">=",
"'2.6'",
":",
"return",
"datetime",
".",
"strptime",
"(",
"strg",
",",
"\"%Y-%m-%dT%H:%M:%S.%f\"",
")",
"else",
":",
"dat",
",",
"mis",
"=",
"strg",
".",
"split",
"(",
"\".\"",
")",
"dat",
"=",
"datetime",
".",
"strptime",
"(",
"dat",
",",
"\"%Y-%m-%dT%H:%M:%S\"",
")",
"mis",
"=",
"int",
"(",
"float",
"(",
"'.'",
"+",
"mis",
")",
"*",
"1000000",
")",
"return",
"dat",
".",
"replace",
"(",
"microsecond",
"=",
"mis",
")"
] |
Decode an ISO formatted string to a datetime object.
Allow a time-string without microseconds.
We handle input like: 2011-11-14T12:51:25.123456
|
[
"Decode",
"an",
"ISO",
"formatted",
"string",
"to",
"a",
"datetime",
"object",
".",
"Allow",
"a",
"time",
"-",
"string",
"without",
"microseconds",
"."
] |
train
|
https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/__init__.py#L49-L69
|
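A minimal usage sketch for `strp_isoformat`, using the example string from its docstring (import path taken from the record's `posttroll/__init__.py`):

from posttroll import strp_isoformat

dt = strp_isoformat("2011-11-14T12:51:25.123456")   # datetime with microseconds
dt_no_us = strp_isoformat("2011-11-14T12:51:25")    # microseconds default to 0
assert dt.microsecond == 123456 and dt_no_us.microsecond == 0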
Clinical-Genomics/trailblazer
|
trailblazer/cli/delete.py
|
delete
|
def delete(context, force, yes, analysis_id):
"""Delete an analysis log from the database."""
analysis_obj = context.obj['store'].analysis(analysis_id)
if analysis_obj is None:
print(click.style('analysis log not found', fg='red'))
context.abort()
print(click.style(f"{analysis_obj.family}: {analysis_obj.status}"))
if analysis_obj.is_temp:
if yes or click.confirm(f"remove analysis log?"):
analysis_obj.delete()
context.obj['store'].commit()
print(click.style(f"analysis deleted: {analysis_obj.family}", fg='blue'))
else:
if analysis_obj.is_deleted:
print(click.style(f"{analysis_obj.family}: already deleted", fg='red'))
context.abort()
if Path(analysis_obj.out_dir).exists():
root_dir = context.obj['store'].families_dir
family_dir = analysis_obj.out_dir
if not force and (len(family_dir) <= len(root_dir) or root_dir not in family_dir):
print(click.style(f"unknown analysis output dir: {analysis_obj.out_dir}", fg='red'))
print(click.style("use '--force' to override"))
context.abort()
if yes or click.confirm(f"remove analysis output: {analysis_obj.out_dir}?"):
shutil.rmtree(analysis_obj.out_dir, ignore_errors=True)
analysis_obj.is_deleted = True
context.obj['store'].commit()
print(click.style(f"analysis deleted: {analysis_obj.family}", fg='blue'))
else:
print(click.style(f"analysis output doesn't exist: {analysis_obj.out_dir}", fg='red'))
context.abort()
|
python
|
def delete(context, force, yes, analysis_id):
"""Delete an analysis log from the database."""
analysis_obj = context.obj['store'].analysis(analysis_id)
if analysis_obj is None:
print(click.style('analysis log not found', fg='red'))
context.abort()
print(click.style(f"{analysis_obj.family}: {analysis_obj.status}"))
if analysis_obj.is_temp:
if yes or click.confirm(f"remove analysis log?"):
analysis_obj.delete()
context.obj['store'].commit()
print(click.style(f"analysis deleted: {analysis_obj.family}", fg='blue'))
else:
if analysis_obj.is_deleted:
print(click.style(f"{analysis_obj.family}: already deleted", fg='red'))
context.abort()
if Path(analysis_obj.out_dir).exists():
root_dir = context.obj['store'].families_dir
family_dir = analysis_obj.out_dir
if not force and (len(family_dir) <= len(root_dir) or root_dir not in family_dir):
print(click.style(f"unknown analysis output dir: {analysis_obj.out_dir}", fg='red'))
print(click.style("use '--force' to override"))
context.abort()
if yes or click.confirm(f"remove analysis output: {analysis_obj.out_dir}?"):
shutil.rmtree(analysis_obj.out_dir, ignore_errors=True)
analysis_obj.is_deleted = True
context.obj['store'].commit()
print(click.style(f"analysis deleted: {analysis_obj.family}", fg='blue'))
else:
print(click.style(f"analysis output doesn't exist: {analysis_obj.out_dir}", fg='red'))
context.abort()
|
[
"def",
"delete",
"(",
"context",
",",
"force",
",",
"yes",
",",
"analysis_id",
")",
":",
"analysis_obj",
"=",
"context",
".",
"obj",
"[",
"'store'",
"]",
".",
"analysis",
"(",
"analysis_id",
")",
"if",
"analysis_obj",
"is",
"None",
":",
"print",
"(",
"click",
".",
"style",
"(",
"'analysis log not found'",
",",
"fg",
"=",
"'red'",
")",
")",
"context",
".",
"abort",
"(",
")",
"print",
"(",
"click",
".",
"style",
"(",
"f\"{analysis_obj.family}: {analysis_obj.status}\"",
")",
")",
"if",
"analysis_obj",
".",
"is_temp",
":",
"if",
"yes",
"or",
"click",
".",
"confirm",
"(",
"f\"remove analysis log?\"",
")",
":",
"analysis_obj",
".",
"delete",
"(",
")",
"context",
".",
"obj",
"[",
"'store'",
"]",
".",
"commit",
"(",
")",
"print",
"(",
"click",
".",
"style",
"(",
"f\"analysis deleted: {analysis_obj.family}\"",
",",
"fg",
"=",
"'blue'",
")",
")",
"else",
":",
"if",
"analysis_obj",
".",
"is_deleted",
":",
"print",
"(",
"click",
".",
"style",
"(",
"f\"{analysis_obj.family}: already deleted\"",
",",
"fg",
"=",
"'red'",
")",
")",
"context",
".",
"abort",
"(",
")",
"if",
"Path",
"(",
"analysis_obj",
".",
"out_dir",
")",
".",
"exists",
"(",
")",
":",
"root_dir",
"=",
"context",
".",
"obj",
"[",
"'store'",
"]",
".",
"families_dir",
"family_dir",
"=",
"analysis_obj",
".",
"out_dir",
"if",
"not",
"force",
"and",
"(",
"len",
"(",
"family_dir",
")",
"<=",
"len",
"(",
"root_dir",
")",
"or",
"root_dir",
"not",
"in",
"family_dir",
")",
":",
"print",
"(",
"click",
".",
"style",
"(",
"f\"unknown analysis output dir: {analysis_obj.out_dir}\"",
",",
"fg",
"=",
"'red'",
")",
")",
"print",
"(",
"click",
".",
"style",
"(",
"\"use '--force' to override\"",
")",
")",
"context",
".",
"abort",
"(",
")",
"if",
"yes",
"or",
"click",
".",
"confirm",
"(",
"f\"remove analysis output: {analysis_obj.out_dir}?\"",
")",
":",
"shutil",
".",
"rmtree",
"(",
"analysis_obj",
".",
"out_dir",
",",
"ignore_errors",
"=",
"True",
")",
"analysis_obj",
".",
"is_deleted",
"=",
"True",
"context",
".",
"obj",
"[",
"'store'",
"]",
".",
"commit",
"(",
")",
"print",
"(",
"click",
".",
"style",
"(",
"f\"analysis deleted: {analysis_obj.family}\"",
",",
"fg",
"=",
"'blue'",
")",
")",
"else",
":",
"print",
"(",
"click",
".",
"style",
"(",
"f\"analysis output doesn't exist: {analysis_obj.out_dir}\"",
",",
"fg",
"=",
"'red'",
")",
")",
"context",
".",
"abort",
"(",
")"
] |
Delete an analysis log from the database.
|
[
"Delete",
"an",
"analysis",
"log",
"from",
"the",
"database",
"."
] |
train
|
https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/cli/delete.py#L12-L46
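A minimal sketch of driving this command through Click's test runner. The option names (`--yes`, `--force`), the import path, and the `store` object are assumptions; only the command's parameters and its `context.obj['store']` lookup come from the code above.

.. code-block:: python

    # sketch only: assumes `store` is a configured trailblazer store exposing
    # .analysis(), .commit() and .families_dir, and that the Click decorators
    # (not shown above) expose `force` and `yes` as --force/--yes flags.
    from click.testing import CliRunner
    from trailblazer.cli.delete import delete  # assumed import path

    runner = CliRunner()
    result = runner.invoke(delete, ['--yes', '1'], obj={'store': store})
    print(result.output)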
|
thespacedoctor/sherlock
|
sherlock/imports/_base_importer.py
|
_base_importer.add_data_to_database_table
|
def add_data_to_database_table(
self,
dictList,
createStatement=False):
"""*Import data in the list of dictionaries in the requested database table*
        Also adds HTMIDs and updates the sherlock-catalogue database helper table with the time-stamp of when the imported catalogue was last updated
**Key Arguments:**
- ``dictList`` - a list of dictionaries containing all the rows in the catalogue to be imported
- ``createStatement`` - the table's mysql create statement (used to generate table if it does not yet exist in database). Default *False*
**Usage:**
.. code-block:: python
self.add_data_to_database_table(
dictList=dictList,
createStatement=createStatement
)
.. todo ::
- Write a checklist for creating a new sherlock database importer
"""
self.log.debug('starting the ``add_data_to_database_table`` method')
if len(dictList) == 0:
return
myPid = self.myPid
dbTableName = self.dbTableName
if createStatement:
writequery(
log=self.log,
sqlQuery=createStatement,
dbConn=self.cataloguesDbConn,
)
insert_list_of_dictionaries_into_database_tables(
dbConn=self.cataloguesDbConn,
log=self.log,
dictList=dictList,
dbTableName=dbTableName,
uniqueKeyList=[],
dateModified=True,
dateCreated=True,
batchSize=10000,
replace=True,
dbSettings=self.settings["database settings"][
"static catalogues"]
)
self._add_htmids_to_database_table()
cleaner = database_cleaner(
log=self.log,
settings=self.settings
)
cleaner._update_tcs_helper_catalogue_tables_info_with_new_tables()
self._update_database_helper_table()
print """Now:
- [ ] edit the `%(dbTableName)s` row in the sherlock catalogues database adding relevant column mappings, catalogue version number etc
- [ ] retire any previous version of this catlogue in the database. Renaming the catalogue-table by appending `legacy_` and also change the name in the `tcs_helper_catalogue_tables_info` table
- [ ] dupliate views from the previous catalogue version to point towards the new version and then delete the old views
- [ ] run the command `sherlock clean [-s <pathToSettingsFile>]` to clean up helper tables
- [ ] switch out the old catalogue table/views in your sherlock search algorithms in the yaml settings files
- [ ] run a test batch of transients to make sure catalogue is installed as expected
""" % locals()
self.log.debug('completed the ``add_data_to_database_table`` method')
return None
|
python
|
def add_data_to_database_table(
self,
dictList,
createStatement=False):
"""*Import data in the list of dictionaries in the requested database table*
        Also adds HTMIDs and updates the sherlock-catalogue database helper table with the time-stamp of when the imported catalogue was last updated
**Key Arguments:**
- ``dictList`` - a list of dictionaries containing all the rows in the catalogue to be imported
- ``createStatement`` - the table's mysql create statement (used to generate table if it does not yet exist in database). Default *False*
**Usage:**
.. code-block:: python
self.add_data_to_database_table(
dictList=dictList,
createStatement=createStatement
)
.. todo ::
- Write a checklist for creating a new sherlock database importer
"""
self.log.debug('starting the ``add_data_to_database_table`` method')
if len(dictList) == 0:
return
myPid = self.myPid
dbTableName = self.dbTableName
if createStatement:
writequery(
log=self.log,
sqlQuery=createStatement,
dbConn=self.cataloguesDbConn,
)
insert_list_of_dictionaries_into_database_tables(
dbConn=self.cataloguesDbConn,
log=self.log,
dictList=dictList,
dbTableName=dbTableName,
uniqueKeyList=[],
dateModified=True,
dateCreated=True,
batchSize=10000,
replace=True,
dbSettings=self.settings["database settings"][
"static catalogues"]
)
self._add_htmids_to_database_table()
cleaner = database_cleaner(
log=self.log,
settings=self.settings
)
cleaner._update_tcs_helper_catalogue_tables_info_with_new_tables()
self._update_database_helper_table()
print """Now:
- [ ] edit the `%(dbTableName)s` row in the sherlock catalogues database adding relevant column mappings, catalogue version number etc
- [ ] retire any previous version of this catlogue in the database. Renaming the catalogue-table by appending `legacy_` and also change the name in the `tcs_helper_catalogue_tables_info` table
- [ ] dupliate views from the previous catalogue version to point towards the new version and then delete the old views
- [ ] run the command `sherlock clean [-s <pathToSettingsFile>]` to clean up helper tables
- [ ] switch out the old catalogue table/views in your sherlock search algorithms in the yaml settings files
- [ ] run a test batch of transients to make sure catalogue is installed as expected
""" % locals()
self.log.debug('completed the ``add_data_to_database_table`` method')
return None
|
[
"def",
"add_data_to_database_table",
"(",
"self",
",",
"dictList",
",",
"createStatement",
"=",
"False",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``add_data_to_database_table`` method'",
")",
"if",
"len",
"(",
"dictList",
")",
"==",
"0",
":",
"return",
"myPid",
"=",
"self",
".",
"myPid",
"dbTableName",
"=",
"self",
".",
"dbTableName",
"if",
"createStatement",
":",
"writequery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"createStatement",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
",",
")",
"insert_list_of_dictionaries_into_database_tables",
"(",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
",",
"log",
"=",
"self",
".",
"log",
",",
"dictList",
"=",
"dictList",
",",
"dbTableName",
"=",
"dbTableName",
",",
"uniqueKeyList",
"=",
"[",
"]",
",",
"dateModified",
"=",
"True",
",",
"dateCreated",
"=",
"True",
",",
"batchSize",
"=",
"10000",
",",
"replace",
"=",
"True",
",",
"dbSettings",
"=",
"self",
".",
"settings",
"[",
"\"database settings\"",
"]",
"[",
"\"static catalogues\"",
"]",
")",
"self",
".",
"_add_htmids_to_database_table",
"(",
")",
"cleaner",
"=",
"database_cleaner",
"(",
"log",
"=",
"self",
".",
"log",
",",
"settings",
"=",
"self",
".",
"settings",
")",
"cleaner",
".",
"_update_tcs_helper_catalogue_tables_info_with_new_tables",
"(",
")",
"self",
".",
"_update_database_helper_table",
"(",
")",
"print",
"\"\"\"Now:\n\n- [ ] edit the `%(dbTableName)s` row in the sherlock catalogues database adding relevant column mappings, catalogue version number etc\n- [ ] retire any previous version of this catlogue in the database. Renaming the catalogue-table by appending `legacy_` and also change the name in the `tcs_helper_catalogue_tables_info` table\n- [ ] dupliate views from the previous catalogue version to point towards the new version and then delete the old views\n- [ ] run the command `sherlock clean [-s <pathToSettingsFile>]` to clean up helper tables\n- [ ] switch out the old catalogue table/views in your sherlock search algorithms in the yaml settings files\n- [ ] run a test batch of transients to make sure catalogue is installed as expected\n\n\"\"\"",
"%",
"locals",
"(",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``add_data_to_database_table`` method'",
")",
"return",
"None"
] |
*Import data in the list of dictionaries in the requested database table*
    Also adds HTMIDs and updates the sherlock-catalogue database helper table with the time-stamp of when the imported catalogue was last updated
**Key Arguments:**
- ``dictList`` - a list of dictionaries containing all the rows in the catalogue to be imported
- ``createStatement`` - the table's mysql create statement (used to generate table if it does not yet exist in database). Default *False*
**Usage:**
.. code-block:: python
self.add_data_to_database_table(
dictList=dictList,
createStatement=createStatement
)
.. todo ::
- Write a checklist for creating a new sherlock database importer
|
[
"*",
"Import",
"data",
"in",
"the",
"list",
"of",
"dictionaries",
"in",
"the",
"requested",
"database",
"table",
"*"
] |
train
|
https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/imports/_base_importer.py#L135-L211
|
thespacedoctor/sherlock
|
sherlock/imports/_base_importer.py
|
_base_importer._add_htmids_to_database_table
|
def _add_htmids_to_database_table(
self):
"""*Add HTMIDs to database table once all the data has been imported (HTM Levels 10,13,16)*
**Usage:**
.. code-block:: python
self._add_htmids_to_database_table()
"""
self.log.debug('starting the ``add_htmids_to_database_table`` method')
tableName = self.dbTableName
self.log.info("Adding HTMIds to %(tableName)s" % locals())
add_htm_ids_to_mysql_database_table(
raColName=self.raColName,
declColName=self.declColName,
tableName=self.dbTableName,
dbConn=self.cataloguesDbConn,
log=self.log,
primaryIdColumnName=self.primaryIdColumnName
)
self.log.debug('completed the ``add_htmids_to_database_table`` method')
return None
|
python
|
def _add_htmids_to_database_table(
self):
"""*Add HTMIDs to database table once all the data has been imported (HTM Levels 10,13,16)*
**Usage:**
.. code-block:: python
self._add_htmids_to_database_table()
"""
self.log.debug('starting the ``add_htmids_to_database_table`` method')
tableName = self.dbTableName
self.log.info("Adding HTMIds to %(tableName)s" % locals())
add_htm_ids_to_mysql_database_table(
raColName=self.raColName,
declColName=self.declColName,
tableName=self.dbTableName,
dbConn=self.cataloguesDbConn,
log=self.log,
primaryIdColumnName=self.primaryIdColumnName
)
self.log.debug('completed the ``add_htmids_to_database_table`` method')
return None
|
[
"def",
"_add_htmids_to_database_table",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``add_htmids_to_database_table`` method'",
")",
"tableName",
"=",
"self",
".",
"dbTableName",
"self",
".",
"log",
".",
"info",
"(",
"\"Adding HTMIds to %(tableName)s\"",
"%",
"locals",
"(",
")",
")",
"add_htm_ids_to_mysql_database_table",
"(",
"raColName",
"=",
"self",
".",
"raColName",
",",
"declColName",
"=",
"self",
".",
"declColName",
",",
"tableName",
"=",
"self",
".",
"dbTableName",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
",",
"log",
"=",
"self",
".",
"log",
",",
"primaryIdColumnName",
"=",
"self",
".",
"primaryIdColumnName",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``add_htmids_to_database_table`` method'",
")",
"return",
"None"
] |
*Add HTMIDs to database table once all the data has been imported (HTM Levels 10,13,16)*
**Usage:**
.. code-block:: python
self._add_htmids_to_database_table()
|
[
"*",
"Add",
"HTMIDs",
"to",
"database",
"table",
"once",
"all",
"the",
"data",
"has",
"been",
"imported",
"(",
"HTM",
"Levels",
"10",
"13",
"16",
")",
"*"
] |
train
|
https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/imports/_base_importer.py#L213-L239
|
thespacedoctor/sherlock
|
sherlock/imports/_base_importer.py
|
_base_importer._update_database_helper_table
|
def _update_database_helper_table(
self):
"""*Update the sherlock catalogues database helper table with the time-stamp of when this catlogue was last updated*
**Usage:**
.. code-block:: python
self._update_database_helper_table()
"""
self.log.debug('starting the ``_update_database_helper_table`` method')
tableName = self.dbTableName
sqlQuery = u"""
update tcs_helper_catalogue_tables_info set last_updated = now() where table_name = "%(tableName)s";
""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
self.log.debug(
'completed the ``_update_database_helper_table`` method')
return None
|
python
|
def _update_database_helper_table(
self):
"""*Update the sherlock catalogues database helper table with the time-stamp of when this catlogue was last updated*
**Usage:**
.. code-block:: python
self._update_database_helper_table()
"""
self.log.debug('starting the ``_update_database_helper_table`` method')
tableName = self.dbTableName
sqlQuery = u"""
update tcs_helper_catalogue_tables_info set last_updated = now() where table_name = "%(tableName)s";
""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
self.log.debug(
'completed the ``_update_database_helper_table`` method')
return None
|
[
"def",
"_update_database_helper_table",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_update_database_helper_table`` method'",
")",
"tableName",
"=",
"self",
".",
"dbTableName",
"sqlQuery",
"=",
"u\"\"\"\n update tcs_helper_catalogue_tables_info set last_updated = now() where table_name = \"%(tableName)s\";\n \"\"\"",
"%",
"locals",
"(",
")",
"writequery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
",",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_update_database_helper_table`` method'",
")",
"return",
"None"
] |
*Update the sherlock catalogues database helper table with the time-stamp of when this catalogue was last updated*
**Usage:**
.. code-block:: python
self._update_database_helper_table()
|
[
"*",
"Update",
"the",
"sherlock",
"catalogues",
"database",
"helper",
"table",
"with",
"the",
"time",
"-",
"stamp",
"of",
"when",
"this",
"catlogue",
"was",
"last",
"updated",
"*"
] |
train
|
https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/imports/_base_importer.py#L241-L267
|
contentful-labs/contentful.py
|
contentful/cda/resources.py
|
Array.resolve_links
|
def resolve_links(self):
"""Attempt to resolve all internal links (locally).
In case the linked resources are found either as members of the array or within
the `includes` element, those will be replaced and reference the actual resources.
No network calls will be performed.
"""
for resource in self.items_mapped['Entry'].values():
for dct in [getattr(resource, '_cf_cda', {}), resource.fields]:
for k, v in dct.items():
if isinstance(v, ResourceLink):
resolved = self._resolve_resource_link(v)
if resolved is not None:
dct[k] = resolved
elif isinstance(v, (MultipleAssets, MultipleEntries, list)):
for idx, ele in enumerate(v):
if not isinstance(ele, ResourceLink):
break
resolved = self._resolve_resource_link(ele)
if resolved is not None:
v[idx] = resolved
|
python
|
def resolve_links(self):
"""Attempt to resolve all internal links (locally).
In case the linked resources are found either as members of the array or within
the `includes` element, those will be replaced and reference the actual resources.
No network calls will be performed.
"""
for resource in self.items_mapped['Entry'].values():
for dct in [getattr(resource, '_cf_cda', {}), resource.fields]:
for k, v in dct.items():
if isinstance(v, ResourceLink):
resolved = self._resolve_resource_link(v)
if resolved is not None:
dct[k] = resolved
elif isinstance(v, (MultipleAssets, MultipleEntries, list)):
for idx, ele in enumerate(v):
if not isinstance(ele, ResourceLink):
break
resolved = self._resolve_resource_link(ele)
if resolved is not None:
v[idx] = resolved
|
[
"def",
"resolve_links",
"(",
"self",
")",
":",
"for",
"resource",
"in",
"self",
".",
"items_mapped",
"[",
"'Entry'",
"]",
".",
"values",
"(",
")",
":",
"for",
"dct",
"in",
"[",
"getattr",
"(",
"resource",
",",
"'_cf_cda'",
",",
"{",
"}",
")",
",",
"resource",
".",
"fields",
"]",
":",
"for",
"k",
",",
"v",
"in",
"dct",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"ResourceLink",
")",
":",
"resolved",
"=",
"self",
".",
"_resolve_resource_link",
"(",
"v",
")",
"if",
"resolved",
"is",
"not",
"None",
":",
"dct",
"[",
"k",
"]",
"=",
"resolved",
"elif",
"isinstance",
"(",
"v",
",",
"(",
"MultipleAssets",
",",
"MultipleEntries",
",",
"list",
")",
")",
":",
"for",
"idx",
",",
"ele",
"in",
"enumerate",
"(",
"v",
")",
":",
"if",
"not",
"isinstance",
"(",
"ele",
",",
"ResourceLink",
")",
":",
"break",
"resolved",
"=",
"self",
".",
"_resolve_resource_link",
"(",
"ele",
")",
"if",
"resolved",
"is",
"not",
"None",
":",
"v",
"[",
"idx",
"]",
"=",
"resolved"
] |
Attempt to resolve all internal links (locally).
In case the linked resources are found either as members of the array or within
the `includes` element, those will be replaced and reference the actual resources.
No network calls will be performed.
|
[
"Attempt",
"to",
"resolve",
"all",
"internal",
"links",
"(",
"locally",
")",
"."
] |
train
|
https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/resources.py#L90-L111
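A short usage sketch: the method mutates the array in place, so it is typically called right after deserializing a CDA response. How `array` is obtained is an assumption; the method call and the `items_mapped` attribute come from the code above.

.. code-block:: python

    # sketch: `array` is assumed to be an Array built from a response whose
    # entries still hold ResourceLink values and whose `includes` section
    # carries the linked resources.
    array.resolve_links()  # no network calls; links are swapped in place

    # after resolution, linked fields point at real Entry/Asset objects
    for entry in array.items_mapped['Entry'].values():
        print(entry.fields)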
|
ozgurgunes/django-manifest
|
manifest/accounts/backends.py
|
AuthenticationBackend.authenticate
|
def authenticate(self, request, identification, password=None,
check_password=True):
"""
Authenticates a user through the combination email/username with
password.
:param identification:
A string containing the username or email of the user that is
trying to authenticate.
:password:
Optional string containing the password for the user.
:param check_password:
Boolean that defines if the password should be checked for this
user. Always keep this ``True``. This is only used by accounts at
activation when a user opens a page with a secret hash.
:return: The logged in :class:`User`.
"""
UserModel = get_user_model()
try:
validators.validate_email(identification)
try:
user = UserModel.objects.get(email__iexact=identification)
except UserModel.DoesNotExist:
return None
except:
validators.ValidationError
try:
user = UserModel.objects.get(username__iexact=identification)
except UserModel.DoesNotExist:
return None
if check_password:
if user.check_password(password):
return user
return None
else:
return user
|
python
|
def authenticate(self, request, identification, password=None,
check_password=True):
"""
Authenticates a user through the combination email/username with
password.
:param identification:
A string containing the username or email of the user that is
trying to authenticate.
:password:
Optional string containing the password for the user.
:param check_password:
Boolean that defines if the password should be checked for this
user. Always keep this ``True``. This is only used by accounts at
activation when a user opens a page with a secret hash.
:return: The logged in :class:`User`.
"""
UserModel = get_user_model()
try:
validators.validate_email(identification)
try:
user = UserModel.objects.get(email__iexact=identification)
except UserModel.DoesNotExist:
return None
except:
validators.ValidationError
try:
user = UserModel.objects.get(username__iexact=identification)
except UserModel.DoesNotExist:
return None
if check_password:
if user.check_password(password):
return user
return None
else:
return user
|
[
"def",
"authenticate",
"(",
"self",
",",
"request",
",",
"identification",
",",
"password",
"=",
"None",
",",
"check_password",
"=",
"True",
")",
":",
"UserModel",
"=",
"get_user_model",
"(",
")",
"try",
":",
"validators",
".",
"validate_email",
"(",
"identification",
")",
"try",
":",
"user",
"=",
"UserModel",
".",
"objects",
".",
"get",
"(",
"email__iexact",
"=",
"identification",
")",
"except",
"UserModel",
".",
"DoesNotExist",
":",
"return",
"None",
"except",
":",
"validators",
".",
"ValidationError",
"try",
":",
"user",
"=",
"UserModel",
".",
"objects",
".",
"get",
"(",
"username__iexact",
"=",
"identification",
")",
"except",
"UserModel",
".",
"DoesNotExist",
":",
"return",
"None",
"if",
"check_password",
":",
"if",
"user",
".",
"check_password",
"(",
"password",
")",
":",
"return",
"user",
"return",
"None",
"else",
":",
"return",
"user"
] |
Authenticates a user through the combination email/username with
password.
:param identification:
A string containing the username or email of the user that is
trying to authenticate.
:password:
Optional string containing the password for the user.
:param check_password:
Boolean that defines if the password should be checked for this
user. Always keep this ``True``. This is only used by accounts at
activation when a user opens a page with a secret hash.
:return: The logged in :class:`User`.
|
[
"Authenticates",
"a",
"user",
"through",
"the",
"combination",
"email",
"/",
"username",
"with",
"password",
"."
] |
train
|
https://github.com/ozgurgunes/django-manifest/blob/9873bbf2a475b76284ad7e36b2b26c92131e72dd/manifest/accounts/backends.py#L16-L55
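A brief sketch of exercising the backend directly; the credentials below are made up. The same call works with either an email address or a username, because the backend first tries email validation and then falls back to a username lookup.

.. code-block:: python

    # sketch: instantiating the backend directly, outside Django's normal
    # authenticate() machinery; user data below is purely illustrative.
    backend = AuthenticationBackend()

    by_email = backend.authenticate(request=None,
                                    identification='jane@example.com',
                                    password='s3cret')
    by_username = backend.authenticate(request=None,
                                       identification='jane',
                                       password='s3cret')
    # both return the User on success, or None when the lookup or the
    # password check fails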
|
Clinical-Genomics/trailblazer
|
trailblazer/store/api.py
|
BaseHandler.find_analysis
|
def find_analysis(self, family, started_at, status):
"""Find a single analysis."""
query = self.Analysis.query.filter_by(
family=family,
started_at=started_at,
status=status,
)
return query.first()
|
python
|
def find_analysis(self, family, started_at, status):
"""Find a single analysis."""
query = self.Analysis.query.filter_by(
family=family,
started_at=started_at,
status=status,
)
return query.first()
|
[
"def",
"find_analysis",
"(",
"self",
",",
"family",
",",
"started_at",
",",
"status",
")",
":",
"query",
"=",
"self",
".",
"Analysis",
".",
"query",
".",
"filter_by",
"(",
"family",
"=",
"family",
",",
"started_at",
"=",
"started_at",
",",
"status",
"=",
"status",
",",
")",
"return",
"query",
".",
"first",
"(",
")"
] |
Find a single analysis.
|
[
"Find",
"a",
"single",
"analysis",
"."
] |
train
|
https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/store/api.py#L26-L33
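A usage sketch, assuming `store` is a configured handler instance; the family name and timestamp are illustrative.

.. code-block:: python

    import datetime as dt

    # look up one analysis by its (family, started_at, status) combination
    analysis = store.find_analysis(
        family='crazygoat',                        # made-up family id
        started_at=dt.datetime(2018, 1, 1, 12, 0),
        status='completed',
    )
    if analysis is None:
        print('no matching analysis')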
|
Clinical-Genomics/trailblazer
|
trailblazer/store/api.py
|
BaseHandler.analyses
|
def analyses(self, *, family: str=None, query: str=None, status: str=None, deleted: bool=None,
temp: bool=False, before: dt.datetime=None, is_visible: bool=None):
"""Fetch analyses form the database."""
analysis_query = self.Analysis.query
if family:
analysis_query = analysis_query.filter_by(family=family)
elif query:
analysis_query = analysis_query.filter(sqa.or_(
self.Analysis.family.like(f"%{query}%"),
self.Analysis.status.like(f"%{query}%"),
))
if status:
analysis_query = analysis_query.filter_by(status=status)
if isinstance(deleted, bool):
analysis_query = analysis_query.filter_by(is_deleted=deleted)
if temp:
analysis_query = analysis_query.filter(self.Analysis.status.in_(TEMP_STATUSES))
if before:
analysis_query = analysis_query.filter(self.Analysis.started_at < before)
if is_visible is not None:
analysis_query = analysis_query.filter_by(is_visible=is_visible)
return analysis_query.order_by(self.Analysis.started_at.desc())
|
python
|
def analyses(self, *, family: str=None, query: str=None, status: str=None, deleted: bool=None,
temp: bool=False, before: dt.datetime=None, is_visible: bool=None):
"""Fetch analyses form the database."""
analysis_query = self.Analysis.query
if family:
analysis_query = analysis_query.filter_by(family=family)
elif query:
analysis_query = analysis_query.filter(sqa.or_(
self.Analysis.family.like(f"%{query}%"),
self.Analysis.status.like(f"%{query}%"),
))
if status:
analysis_query = analysis_query.filter_by(status=status)
if isinstance(deleted, bool):
analysis_query = analysis_query.filter_by(is_deleted=deleted)
if temp:
analysis_query = analysis_query.filter(self.Analysis.status.in_(TEMP_STATUSES))
if before:
analysis_query = analysis_query.filter(self.Analysis.started_at < before)
if is_visible is not None:
analysis_query = analysis_query.filter_by(is_visible=is_visible)
return analysis_query.order_by(self.Analysis.started_at.desc())
|
[
"def",
"analyses",
"(",
"self",
",",
"*",
",",
"family",
":",
"str",
"=",
"None",
",",
"query",
":",
"str",
"=",
"None",
",",
"status",
":",
"str",
"=",
"None",
",",
"deleted",
":",
"bool",
"=",
"None",
",",
"temp",
":",
"bool",
"=",
"False",
",",
"before",
":",
"dt",
".",
"datetime",
"=",
"None",
",",
"is_visible",
":",
"bool",
"=",
"None",
")",
":",
"analysis_query",
"=",
"self",
".",
"Analysis",
".",
"query",
"if",
"family",
":",
"analysis_query",
"=",
"analysis_query",
".",
"filter_by",
"(",
"family",
"=",
"family",
")",
"elif",
"query",
":",
"analysis_query",
"=",
"analysis_query",
".",
"filter",
"(",
"sqa",
".",
"or_",
"(",
"self",
".",
"Analysis",
".",
"family",
".",
"like",
"(",
"f\"%{query}%\"",
")",
",",
"self",
".",
"Analysis",
".",
"status",
".",
"like",
"(",
"f\"%{query}%\"",
")",
",",
")",
")",
"if",
"status",
":",
"analysis_query",
"=",
"analysis_query",
".",
"filter_by",
"(",
"status",
"=",
"status",
")",
"if",
"isinstance",
"(",
"deleted",
",",
"bool",
")",
":",
"analysis_query",
"=",
"analysis_query",
".",
"filter_by",
"(",
"is_deleted",
"=",
"deleted",
")",
"if",
"temp",
":",
"analysis_query",
"=",
"analysis_query",
".",
"filter",
"(",
"self",
".",
"Analysis",
".",
"status",
".",
"in_",
"(",
"TEMP_STATUSES",
")",
")",
"if",
"before",
":",
"analysis_query",
"=",
"analysis_query",
".",
"filter",
"(",
"self",
".",
"Analysis",
".",
"started_at",
"<",
"before",
")",
"if",
"is_visible",
"is",
"not",
"None",
":",
"analysis_query",
"=",
"analysis_query",
".",
"filter_by",
"(",
"is_visible",
"=",
"is_visible",
")",
"return",
"analysis_query",
".",
"order_by",
"(",
"self",
".",
"Analysis",
".",
"started_at",
".",
"desc",
"(",
")",
")"
] |
Fetch analyses from the database.
|
[
"Fetch",
"analyses",
"form",
"the",
"database",
"."
] |
train
|
https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/store/api.py#L35-L56
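A usage sketch combining a few of the keyword filters; `store` is assumed to be a configured handler and the cut-off date is illustrative. The method returns an ordered SQLAlchemy query, so it can be further limited or iterated.

.. code-block:: python

    import datetime as dt

    # newest first: failed, non-deleted analyses started before the cut-off
    failed = store.analyses(status='failed',
                            deleted=False,
                            before=dt.datetime(2018, 6, 1))
    for analysis in failed.limit(10):
        print(analysis.family, analysis.started_at)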
|
Clinical-Genomics/trailblazer
|
trailblazer/store/api.py
|
BaseHandler.analysis
|
def analysis(self, analysis_id: int) -> models.Analysis:
"""Get a single analysis."""
return self.Analysis.query.get(analysis_id)
|
python
|
def analysis(self, analysis_id: int) -> models.Analysis:
"""Get a single analysis."""
return self.Analysis.query.get(analysis_id)
|
[
"def",
"analysis",
"(",
"self",
",",
"analysis_id",
":",
"int",
")",
"->",
"models",
".",
"Analysis",
":",
"return",
"self",
".",
"Analysis",
".",
"query",
".",
"get",
"(",
"analysis_id",
")"
] |
Get a single analysis.
|
[
"Get",
"a",
"single",
"analysis",
"."
] |
train
|
https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/store/api.py#L58-L60
|
Clinical-Genomics/trailblazer
|
trailblazer/store/api.py
|
BaseHandler.track_update
|
def track_update(self):
"""Update the lastest updated date in the database."""
metadata = self.info()
metadata.updated_at = dt.datetime.now()
self.commit()
|
python
|
def track_update(self):
"""Update the lastest updated date in the database."""
metadata = self.info()
metadata.updated_at = dt.datetime.now()
self.commit()
|
[
"def",
"track_update",
"(",
"self",
")",
":",
"metadata",
"=",
"self",
".",
"info",
"(",
")",
"metadata",
".",
"updated_at",
"=",
"dt",
".",
"datetime",
".",
"now",
"(",
")",
"self",
".",
"commit",
"(",
")"
] |
Update the latest updated date in the database.
|
[
"Update",
"the",
"lastest",
"updated",
"date",
"in",
"the",
"database",
"."
] |
train
|
https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/store/api.py#L62-L66
|
Clinical-Genomics/trailblazer
|
trailblazer/store/api.py
|
BaseHandler.is_running
|
def is_running(self, family: str) -> bool:
"""Check if an analysis is currently running/pending for a family."""
latest_analysis = self.analyses(family=family).first()
return latest_analysis and latest_analysis.status in TEMP_STATUSES
|
python
|
def is_running(self, family: str) -> bool:
"""Check if an analysis is currently running/pending for a family."""
latest_analysis = self.analyses(family=family).first()
return latest_analysis and latest_analysis.status in TEMP_STATUSES
|
[
"def",
"is_running",
"(",
"self",
",",
"family",
":",
"str",
")",
"->",
"bool",
":",
"latest_analysis",
"=",
"self",
".",
"analyses",
"(",
"family",
"=",
"family",
")",
".",
"first",
"(",
")",
"return",
"latest_analysis",
"and",
"latest_analysis",
".",
"status",
"in",
"TEMP_STATUSES"
] |
Check if an analysis is currently running/pending for a family.
|
[
"Check",
"if",
"an",
"analysis",
"is",
"currently",
"running",
"/",
"pending",
"for",
"a",
"family",
"."
] |
train
|
https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/store/api.py#L68-L71
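A usage sketch; the family id is illustrative and `store` is assumed to be a configured handler.

.. code-block:: python

    # guard against kicking off a duplicate run for the same family
    if store.is_running('crazygoat'):
        print('latest analysis is still pending/running')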
|
Clinical-Genomics/trailblazer
|
trailblazer/store/api.py
|
BaseHandler.add_pending
|
def add_pending(self, family: str, email: str=None) -> models.Analysis:
"""Add pending entry for an analysis."""
started_at = dt.datetime.now()
new_log = self.Analysis(family=family, status='pending', started_at=started_at)
new_log.user = self.user(email) if email else None
self.add_commit(new_log)
return new_log
|
python
|
def add_pending(self, family: str, email: str=None) -> models.Analysis:
"""Add pending entry for an analysis."""
started_at = dt.datetime.now()
new_log = self.Analysis(family=family, status='pending', started_at=started_at)
new_log.user = self.user(email) if email else None
self.add_commit(new_log)
return new_log
|
[
"def",
"add_pending",
"(",
"self",
",",
"family",
":",
"str",
",",
"email",
":",
"str",
"=",
"None",
")",
"->",
"models",
".",
"Analysis",
":",
"started_at",
"=",
"dt",
".",
"datetime",
".",
"now",
"(",
")",
"new_log",
"=",
"self",
".",
"Analysis",
"(",
"family",
"=",
"family",
",",
"status",
"=",
"'pending'",
",",
"started_at",
"=",
"started_at",
")",
"new_log",
".",
"user",
"=",
"self",
".",
"user",
"(",
"email",
")",
"if",
"email",
"else",
"None",
"self",
".",
"add_commit",
"(",
"new_log",
")",
"return",
"new_log"
] |
Add pending entry for an analysis.
|
[
"Add",
"pending",
"entry",
"for",
"an",
"analysis",
"."
] |
train
|
https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/store/api.py#L77-L83
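A usage sketch, assuming a configured `store`; the family id and email are illustrative.

.. code-block:: python

    # register a pending log before the pipeline is actually submitted;
    # the email only links the log to an existing user record (or None)
    new_log = store.add_pending('crazygoat', email='jane@example.com')
    print(new_log.status)      # 'pending'
    print(new_log.started_at)  # timestamp captured at creation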
|
Clinical-Genomics/trailblazer
|
trailblazer/store/api.py
|
BaseHandler.add_user
|
def add_user(self, name: str, email: str) -> models.User:
"""Add a new user to the database."""
new_user = self.User(name=name, email=email)
self.add_commit(new_user)
return new_user
|
python
|
def add_user(self, name: str, email: str) -> models.User:
"""Add a new user to the database."""
new_user = self.User(name=name, email=email)
self.add_commit(new_user)
return new_user
|
[
"def",
"add_user",
"(",
"self",
",",
"name",
":",
"str",
",",
"email",
":",
"str",
")",
"->",
"models",
".",
"User",
":",
"new_user",
"=",
"self",
".",
"User",
"(",
"name",
"=",
"name",
",",
"email",
"=",
"email",
")",
"self",
".",
"add_commit",
"(",
"new_user",
")",
"return",
"new_user"
] |
Add a new user to the database.
|
[
"Add",
"a",
"new",
"user",
"to",
"the",
"database",
"."
] |
train
|
https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/store/api.py#L85-L89
|
Clinical-Genomics/trailblazer
|
trailblazer/store/api.py
|
BaseHandler.user
|
def user(self, email: str) -> models.User:
"""Fetch a user from the database."""
return self.User.query.filter_by(email=email).first()
|
python
|
def user(self, email: str) -> models.User:
"""Fetch a user from the database."""
return self.User.query.filter_by(email=email).first()
|
[
"def",
"user",
"(",
"self",
",",
"email",
":",
"str",
")",
"->",
"models",
".",
"User",
":",
"return",
"self",
".",
"User",
".",
"query",
".",
"filter_by",
"(",
"email",
"=",
"email",
")",
".",
"first",
"(",
")"
] |
Fetch a user from the database.
|
[
"Fetch",
"a",
"user",
"from",
"the",
"database",
"."
] |
train
|
https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/store/api.py#L91-L93
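A usage sketch tying `user` and `add_user` together, assuming a configured `store`; the name and email are illustrative.

.. code-block:: python

    # look the user up by email first, create the record only if it is missing
    user_obj = store.user('jane@example.com')
    if user_obj is None:
        user_obj = store.add_user(name='Jane Doe', email='jane@example.com')
    print(user_obj.name, user_obj.email)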
|
Clinical-Genomics/trailblazer
|
trailblazer/store/api.py
|
BaseHandler.aggregate_failed
|
def aggregate_failed(self) -> List:
"""Count the number of failed jobs per category (name)."""
categories = self.session.query(
self.Job.name.label('name'),
sqa.func.count(self.Job.id).label('count')
).filter(self.Job.status != 'cancelled').group_by(self.Job.name).all()
data = [{'name': category.name, 'count': category.count} for category in categories]
return data
|
python
|
def aggregate_failed(self) -> List:
"""Count the number of failed jobs per category (name)."""
categories = self.session.query(
self.Job.name.label('name'),
sqa.func.count(self.Job.id).label('count')
).filter(self.Job.status != 'cancelled').group_by(self.Job.name).all()
data = [{'name': category.name, 'count': category.count} for category in categories]
return data
|
[
"def",
"aggregate_failed",
"(",
"self",
")",
"->",
"List",
":",
"categories",
"=",
"self",
".",
"session",
".",
"query",
"(",
"self",
".",
"Job",
".",
"name",
".",
"label",
"(",
"'name'",
")",
",",
"sqa",
".",
"func",
".",
"count",
"(",
"self",
".",
"Job",
".",
"id",
")",
".",
"label",
"(",
"'count'",
")",
")",
".",
"filter",
"(",
"self",
".",
"Job",
".",
"status",
"!=",
"'cancelled'",
")",
".",
"group_by",
"(",
"self",
".",
"Job",
".",
"name",
")",
".",
"all",
"(",
")",
"data",
"=",
"[",
"{",
"'name'",
":",
"category",
".",
"name",
",",
"'count'",
":",
"category",
".",
"count",
"}",
"for",
"category",
"in",
"categories",
"]",
"return",
"data"
] |
Count the number of failed jobs per category (name).
|
[
"Count",
"the",
"number",
"of",
"failed",
"jobs",
"per",
"category",
"(",
"name",
")",
"."
] |
train
|
https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/store/api.py#L95-L102
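A usage sketch, assuming a configured `store`; each returned dict carries a `name` and a `count` key, ready to serialize for a dashboard payload.

.. code-block:: python

    for row in store.aggregate_failed():
        print(f"{row['name']}: {row['count']}")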
|
thespacedoctor/sherlock
|
sherlock/imports/ned.py
|
ned.ingest
|
def ingest(self):
"""*Perform conesearches of the online NED database and import the results into a the sherlock-database*
The code:
1. uses the list of transient coordinates and queries NED for the results within the given search radius
2. Creates the `tcs_cat_ned_stream` table if it doesn't exist
3. Adds the resulting NED IDs/Names to the `tcs_cat_ned_stream` table
4. Updates the NED query history table
        5. Queries NED via NED IDs for the remaining source metadata to be added to the `tcs_cat_ned_stream` table
**Usage:**
Having setup the NED object with a coordinate list and cone-search radius, run the `ingest()` method
.. code-block:: python
stream.ingest()
.. todo ::
- check sublime snippet exists
- clip any useful text to docs mindmap
            - regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``ingest`` method')
if not self.radiusArcsec:
self.log.error(
'please give a radius in arcsec with which to preform the initial NED conesearch' % locals())
sys.exit(0)
# VARIABLES
# SIZE OF NUMBER OF ROWS TO INSERT INTO DATABASE TABLE AT ANY ONE GO
self.databaseInsertbatchSize = 10000
# THE DATABASE TABLE TO STREAM THE NED DATA INTO
self.dbTableName = "tcs_cat_ned_stream"
dictList = self._create_dictionary_of_ned()
tableName = self.dbTableName
createStatement = """CREATE TABLE IF NOT EXISTS `%(tableName)s` (
`primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
`ned_name` varchar(150) NOT NULL,
`redshift` double DEFAULT NULL,
`dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
`dateLastModified` datetime DEFAULT CURRENT_TIMESTAMP,
`updated` varchar(45) DEFAULT '0',
`major_diameter_arcmin` double DEFAULT NULL,
`ned_notes` varchar(700) DEFAULT NULL,
`object_type` varchar(100) DEFAULT NULL,
`redshift_err` double DEFAULT NULL,
`redshift_quality` varchar(100) DEFAULT NULL,
`magnitude_filter` varchar(10) DEFAULT NULL,
`minor_diameter_arcmin` double DEFAULT NULL,
`morphology` varchar(50) DEFAULT NULL,
`hierarchy` varchar(50) DEFAULT NULL,
`galaxy_morphology` varchar(50) DEFAULT NULL,
`radio_morphology` varchar(50) DEFAULT NULL,
`activity_type` varchar(50) DEFAULT NULL,
`raDeg` double DEFAULT NULL,
`decDeg` double DEFAULT NULL,
`eb_v` double DEFAULT NULL,
`htm16ID` bigint(20) DEFAULT NULL,
`download_error` tinyint(1) DEFAULT '0',
`htm10ID` bigint(20) DEFAULT NULL,
`htm13ID` bigint(20) DEFAULT NULL,
PRIMARY KEY (`primaryId`),
UNIQUE KEY `ned_name` (`ned_name`),
KEY `idx_htm16ID` (`htm16ID`),
KEY `raDeg` (`raDeg`),
KEY `downloadError` (`download_error`),
KEY `idx_htm10ID` (`htm10ID`),
KEY `idx_htm13ID` (`htm13ID`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
self.add_data_to_database_table(
dictList=dictList,
createStatement=createStatement
)
self._update_ned_query_history()
self._download_ned_source_metadata()
self.log.debug('completed the ``ingest`` method')
return None
|
python
|
def ingest(self):
"""*Perform conesearches of the online NED database and import the results into a the sherlock-database*
The code:
1. uses the list of transient coordinates and queries NED for the results within the given search radius
2. Creates the `tcs_cat_ned_stream` table if it doesn't exist
3. Adds the resulting NED IDs/Names to the `tcs_cat_ned_stream` table
4. Updates the NED query history table
        5. Queries NED via NED IDs for the remaining source metadata to be added to the `tcs_cat_ned_stream` table
**Usage:**
Having setup the NED object with a coordinate list and cone-search radius, run the `ingest()` method
.. code-block:: python
stream.ingest()
.. todo ::
- check sublime snippet exists
- clip any useful text to docs mindmap
            - regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``ingest`` method')
if not self.radiusArcsec:
self.log.error(
'please give a radius in arcsec with which to preform the initial NED conesearch' % locals())
sys.exit(0)
# VARIABLES
# SIZE OF NUMBER OF ROWS TO INSERT INTO DATABASE TABLE AT ANY ONE GO
self.databaseInsertbatchSize = 10000
# THE DATABASE TABLE TO STREAM THE NED DATA INTO
self.dbTableName = "tcs_cat_ned_stream"
dictList = self._create_dictionary_of_ned()
tableName = self.dbTableName
createStatement = """CREATE TABLE IF NOT EXISTS `%(tableName)s` (
`primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
`ned_name` varchar(150) NOT NULL,
`redshift` double DEFAULT NULL,
`dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
`dateLastModified` datetime DEFAULT CURRENT_TIMESTAMP,
`updated` varchar(45) DEFAULT '0',
`major_diameter_arcmin` double DEFAULT NULL,
`ned_notes` varchar(700) DEFAULT NULL,
`object_type` varchar(100) DEFAULT NULL,
`redshift_err` double DEFAULT NULL,
`redshift_quality` varchar(100) DEFAULT NULL,
`magnitude_filter` varchar(10) DEFAULT NULL,
`minor_diameter_arcmin` double DEFAULT NULL,
`morphology` varchar(50) DEFAULT NULL,
`hierarchy` varchar(50) DEFAULT NULL,
`galaxy_morphology` varchar(50) DEFAULT NULL,
`radio_morphology` varchar(50) DEFAULT NULL,
`activity_type` varchar(50) DEFAULT NULL,
`raDeg` double DEFAULT NULL,
`decDeg` double DEFAULT NULL,
`eb_v` double DEFAULT NULL,
`htm16ID` bigint(20) DEFAULT NULL,
`download_error` tinyint(1) DEFAULT '0',
`htm10ID` bigint(20) DEFAULT NULL,
`htm13ID` bigint(20) DEFAULT NULL,
PRIMARY KEY (`primaryId`),
UNIQUE KEY `ned_name` (`ned_name`),
KEY `idx_htm16ID` (`htm16ID`),
KEY `raDeg` (`raDeg`),
KEY `downloadError` (`download_error`),
KEY `idx_htm10ID` (`htm10ID`),
KEY `idx_htm13ID` (`htm13ID`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
self.add_data_to_database_table(
dictList=dictList,
createStatement=createStatement
)
self._update_ned_query_history()
self._download_ned_source_metadata()
self.log.debug('completed the ``ingest`` method')
return None
|
[
"def",
"ingest",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``ingest`` method'",
")",
"if",
"not",
"self",
".",
"radiusArcsec",
":",
"self",
".",
"log",
".",
"error",
"(",
"'please give a radius in arcsec with which to preform the initial NED conesearch'",
"%",
"locals",
"(",
")",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"# VARIABLES",
"# SIZE OF NUMBER OF ROWS TO INSERT INTO DATABASE TABLE AT ANY ONE GO",
"self",
".",
"databaseInsertbatchSize",
"=",
"10000",
"# THE DATABASE TABLE TO STREAM THE NED DATA INTO",
"self",
".",
"dbTableName",
"=",
"\"tcs_cat_ned_stream\"",
"dictList",
"=",
"self",
".",
"_create_dictionary_of_ned",
"(",
")",
"tableName",
"=",
"self",
".",
"dbTableName",
"createStatement",
"=",
"\"\"\"CREATE TABLE IF NOT EXISTS `%(tableName)s` (\n `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',\n `ned_name` varchar(150) NOT NULL,\n `redshift` double DEFAULT NULL,\n `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,\n `dateLastModified` datetime DEFAULT CURRENT_TIMESTAMP,\n `updated` varchar(45) DEFAULT '0',\n `major_diameter_arcmin` double DEFAULT NULL,\n `ned_notes` varchar(700) DEFAULT NULL,\n `object_type` varchar(100) DEFAULT NULL,\n `redshift_err` double DEFAULT NULL,\n `redshift_quality` varchar(100) DEFAULT NULL,\n `magnitude_filter` varchar(10) DEFAULT NULL,\n `minor_diameter_arcmin` double DEFAULT NULL,\n `morphology` varchar(50) DEFAULT NULL,\n `hierarchy` varchar(50) DEFAULT NULL,\n `galaxy_morphology` varchar(50) DEFAULT NULL,\n `radio_morphology` varchar(50) DEFAULT NULL,\n `activity_type` varchar(50) DEFAULT NULL,\n `raDeg` double DEFAULT NULL,\n `decDeg` double DEFAULT NULL,\n `eb_v` double DEFAULT NULL,\n `htm16ID` bigint(20) DEFAULT NULL,\n `download_error` tinyint(1) DEFAULT '0',\n `htm10ID` bigint(20) DEFAULT NULL,\n `htm13ID` bigint(20) DEFAULT NULL,\n PRIMARY KEY (`primaryId`),\n UNIQUE KEY `ned_name` (`ned_name`),\n KEY `idx_htm16ID` (`htm16ID`),\n KEY `raDeg` (`raDeg`),\n KEY `downloadError` (`download_error`),\n KEY `idx_htm10ID` (`htm10ID`),\n KEY `idx_htm13ID` (`htm13ID`)\n) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;\n\"\"\"",
"%",
"locals",
"(",
")",
"self",
".",
"add_data_to_database_table",
"(",
"dictList",
"=",
"dictList",
",",
"createStatement",
"=",
"createStatement",
")",
"self",
".",
"_update_ned_query_history",
"(",
")",
"self",
".",
"_download_ned_source_metadata",
"(",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``ingest`` method'",
")",
"return",
"None"
] |
*Perform conesearches of the online NED database and import the results into the sherlock-database*
The code:
1. uses the list of transient coordinates and queries NED for the results within the given search radius
2. Creates the `tcs_cat_ned_stream` table if it doesn't exist
3. Adds the resulting NED IDs/Names to the `tcs_cat_ned_stream` table
4. Updates the NED query history table
    5. Queries NED via NED IDs for the remaining source metadata to be added to the `tcs_cat_ned_stream` table
**Usage:**
Having setup the NED object with a coordinate list and cone-search radius, run the `ingest()` method
.. code-block:: python
stream.ingest()
.. todo ::
- check sublime snippet exists
- clip any useful text to docs mindmap
        - regenerate the docs and check rendering of this docstring
|
[
"*",
"Perform",
"conesearches",
"of",
"the",
"online",
"NED",
"database",
"and",
"import",
"the",
"results",
"into",
"a",
"the",
"sherlock",
"-",
"database",
"*"
] |
train
|
https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/imports/ned.py#L79-L167
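A sketch of driving the importer end to end. The constructor keyword names below are assumptions inferred from the attributes the method reads (`self.coordinateList`, `self.radiusArcsec`, `self.settings`); they are not a documented signature.

.. code-block:: python

    # sketch only: `ned` is the importer class from sherlock/imports/ned.py
    # shown above; `log` and `settings` are assumed to be a configured logger
    # and a sherlock settings dictionary.
    coordinates = ["23.2323 -43.2343", "45.343 65.324"]  # "ra dec" strings

    stream = ned(
        log=log,
        settings=settings,
        coordinateList=coordinates,
        radiusArcsec=300,
    )
    stream.ingest()  # conesearch NED and populate tcs_cat_ned_stream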
|
thespacedoctor/sherlock
|
sherlock/imports/ned.py
|
ned._create_dictionary_of_ned
|
def _create_dictionary_of_ned(
self):
"""*Create a list of dictionaries containing all the object ids (NED names) in the ned stream*
**Return:**
- ``dictList`` - a list of dictionaries containing all the object ids (NED names) in the ned stream
**Usage:**
.. code-block:: python
dictList = stream._create_dictionary_of_ned()
"""
self.log.debug(
'starting the ``_create_dictionary_of_ned`` method')
# GET THE NAMES (UNIQUE IDS) OF THE SOURCES WITHIN THE CONESEARCH FROM
# NED
names, searchParams = conesearch(
log=self.log,
radiusArcsec=self.radiusArcsec,
nearestOnly=False,
unclassified=True,
quiet=False,
listOfCoordinates=self.coordinateList,
outputFilePath=False,
verbose=False
).get_crossmatch_names()
dictList = []
dictList[:] = [{"ned_name": n} for n in names]
self.log.debug(
'completed the ``_create_dictionary_of_ned`` method')
return dictList
|
python
|
def _create_dictionary_of_ned(
self):
"""*Create a list of dictionaries containing all the object ids (NED names) in the ned stream*
**Return:**
- ``dictList`` - a list of dictionaries containing all the object ids (NED names) in the ned stream
**Usage:**
.. code-block:: python
dictList = stream._create_dictionary_of_ned()
"""
self.log.debug(
'starting the ``_create_dictionary_of_ned`` method')
# GET THE NAMES (UNIQUE IDS) OF THE SOURCES WITHIN THE CONESEARCH FROM
# NED
names, searchParams = conesearch(
log=self.log,
radiusArcsec=self.radiusArcsec,
nearestOnly=False,
unclassified=True,
quiet=False,
listOfCoordinates=self.coordinateList,
outputFilePath=False,
verbose=False
).get_crossmatch_names()
dictList = []
dictList[:] = [{"ned_name": n} for n in names]
self.log.debug(
'completed the ``_create_dictionary_of_ned`` method')
return dictList
|
[
"def",
"_create_dictionary_of_ned",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_create_dictionary_of_ned`` method'",
")",
"# GET THE NAMES (UNIQUE IDS) OF THE SOURCES WITHIN THE CONESEARCH FROM",
"# NED",
"names",
",",
"searchParams",
"=",
"conesearch",
"(",
"log",
"=",
"self",
".",
"log",
",",
"radiusArcsec",
"=",
"self",
".",
"radiusArcsec",
",",
"nearestOnly",
"=",
"False",
",",
"unclassified",
"=",
"True",
",",
"quiet",
"=",
"False",
",",
"listOfCoordinates",
"=",
"self",
".",
"coordinateList",
",",
"outputFilePath",
"=",
"False",
",",
"verbose",
"=",
"False",
")",
".",
"get_crossmatch_names",
"(",
")",
"dictList",
"=",
"[",
"]",
"dictList",
"[",
":",
"]",
"=",
"[",
"{",
"\"ned_name\"",
":",
"n",
"}",
"for",
"n",
"in",
"names",
"]",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_create_dictionary_of_ned`` method'",
")",
"return",
"dictList"
] |
*Create a list of dictionaries containing all the object ids (NED names) in the ned stream*
**Return:**
- ``dictList`` - a list of dictionaries containing all the object ids (NED names) in the ned stream
**Usage:**
.. code-block:: python
dictList = stream._create_dictionary_of_ned()
|
[
"*",
"Create",
"a",
"list",
"of",
"dictionaries",
"containing",
"all",
"the",
"object",
"ids",
"(",
"NED",
"names",
")",
"in",
"the",
"ned",
"stream",
"*"
] |
train
|
https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/imports/ned.py#L169-L203
|
thespacedoctor/sherlock
|
sherlock/imports/ned.py
|
ned._update_ned_query_history
|
def _update_ned_query_history(
self):
"""*Update the database helper table to give details of the ned cone searches performed*
*Usage:*
.. code-block:: python
stream._update_ned_query_history()
"""
self.log.debug('starting the ``_update_ned_query_history`` method')
myPid = self.myPid
# ASTROCALC UNIT CONVERTER OBJECT
converter = unit_conversion(
log=self.log
)
# UPDATE THE DATABASE HELPER TABLE TO GIVE DETAILS OF THE NED CONE
# SEARCHES PERFORMED
dataList = []
for i, coord in enumerate(self.coordinateList):
if isinstance(coord, str):
ra = coord.split(" ")[0]
dec = coord.split(" ")[1]
elif isinstance(coord, tuple) or isinstance(coord, list):
ra = coord[0]
dec = coord[1]
dataList.append(
{"raDeg": ra,
"decDeg": dec,
"arcsecRadius": self.radiusArcsec}
)
if len(dataList) == 0:
return None
# CREATE TABLE IF NOT EXIST
createStatement = """CREATE TABLE IF NOT EXISTS `tcs_helper_ned_query_history` (
`primaryId` bigint(20) NOT NULL AUTO_INCREMENT,
`raDeg` double DEFAULT NULL,
`decDeg` double DEFAULT NULL,
`dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
`dateLastModified` datetime DEFAULT CURRENT_TIMESTAMP,
`updated` varchar(45) DEFAULT '0',
`arcsecRadius` int(11) DEFAULT NULL,
`dateQueried` datetime DEFAULT CURRENT_TIMESTAMP,
`htm16ID` bigint(20) DEFAULT NULL,
`htm13ID` int(11) DEFAULT NULL,
`htm10ID` int(11) DEFAULT NULL,
PRIMARY KEY (`primaryId`),
KEY `idx_htm16ID` (`htm16ID`),
KEY `dateQueried` (`dateQueried`),
KEY `dateHtm16` (`dateQueried`,`htm16ID`),
KEY `idx_htm10ID` (`htm10ID`),
KEY `idx_htm13ID` (`htm13ID`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;
"""
writequery(
log=self.log,
sqlQuery=createStatement,
dbConn=self.cataloguesDbConn
)
# USE dbSettings TO ACTIVATE MULTIPROCESSING
insert_list_of_dictionaries_into_database_tables(
dbConn=self.cataloguesDbConn,
log=self.log,
dictList=dataList,
dbTableName="tcs_helper_ned_query_history",
uniqueKeyList=[],
dateModified=True,
batchSize=10000,
replace=True,
dbSettings=self.settings["database settings"][
"static catalogues"]
)
# INDEX THE TABLE FOR LATER SEARCHES
add_htm_ids_to_mysql_database_table(
raColName="raDeg",
declColName="decDeg",
tableName="tcs_helper_ned_query_history",
dbConn=self.cataloguesDbConn,
log=self.log,
primaryIdColumnName="primaryId"
)
self.log.debug('completed the ``_update_ned_query_history`` method')
return None
|
python
|
def _update_ned_query_history(
self):
"""*Update the database helper table to give details of the ned cone searches performed*
*Usage:*
.. code-block:: python
stream._update_ned_query_history()
"""
self.log.debug('starting the ``_update_ned_query_history`` method')
myPid = self.myPid
# ASTROCALC UNIT CONVERTER OBJECT
converter = unit_conversion(
log=self.log
)
# UPDATE THE DATABASE HELPER TABLE TO GIVE DETAILS OF THE NED CONE
# SEARCHES PERFORMED
dataList = []
for i, coord in enumerate(self.coordinateList):
if isinstance(coord, str):
ra = coord.split(" ")[0]
dec = coord.split(" ")[1]
elif isinstance(coord, tuple) or isinstance(coord, list):
ra = coord[0]
dec = coord[1]
dataList.append(
{"raDeg": ra,
"decDeg": dec,
"arcsecRadius": self.radiusArcsec}
)
if len(dataList) == 0:
return None
# CREATE TABLE IF NOT EXIST
createStatement = """CREATE TABLE IF NOT EXISTS `tcs_helper_ned_query_history` (
`primaryId` bigint(20) NOT NULL AUTO_INCREMENT,
`raDeg` double DEFAULT NULL,
`decDeg` double DEFAULT NULL,
`dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
`dateLastModified` datetime DEFAULT CURRENT_TIMESTAMP,
`updated` varchar(45) DEFAULT '0',
`arcsecRadius` int(11) DEFAULT NULL,
`dateQueried` datetime DEFAULT CURRENT_TIMESTAMP,
`htm16ID` bigint(20) DEFAULT NULL,
`htm13ID` int(11) DEFAULT NULL,
`htm10ID` int(11) DEFAULT NULL,
PRIMARY KEY (`primaryId`),
KEY `idx_htm16ID` (`htm16ID`),
KEY `dateQueried` (`dateQueried`),
KEY `dateHtm16` (`dateQueried`,`htm16ID`),
KEY `idx_htm10ID` (`htm10ID`),
KEY `idx_htm13ID` (`htm13ID`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;
"""
writequery(
log=self.log,
sqlQuery=createStatement,
dbConn=self.cataloguesDbConn
)
# USE dbSettings TO ACTIVATE MULTIPROCESSING
insert_list_of_dictionaries_into_database_tables(
dbConn=self.cataloguesDbConn,
log=self.log,
dictList=dataList,
dbTableName="tcs_helper_ned_query_history",
uniqueKeyList=[],
dateModified=True,
batchSize=10000,
replace=True,
dbSettings=self.settings["database settings"][
"static catalogues"]
)
# INDEX THE TABLE FOR LATER SEARCHES
add_htm_ids_to_mysql_database_table(
raColName="raDeg",
declColName="decDeg",
tableName="tcs_helper_ned_query_history",
dbConn=self.cataloguesDbConn,
log=self.log,
primaryIdColumnName="primaryId"
)
self.log.debug('completed the ``_update_ned_query_history`` method')
return None
|
[
"def",
"_update_ned_query_history",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_update_ned_query_history`` method'",
")",
"myPid",
"=",
"self",
".",
"myPid",
"# ASTROCALC UNIT CONVERTER OBJECT",
"converter",
"=",
"unit_conversion",
"(",
"log",
"=",
"self",
".",
"log",
")",
"# UPDATE THE DATABASE HELPER TABLE TO GIVE DETAILS OF THE NED CONE",
"# SEARCHES PERFORMED",
"dataList",
"=",
"[",
"]",
"for",
"i",
",",
"coord",
"in",
"enumerate",
"(",
"self",
".",
"coordinateList",
")",
":",
"if",
"isinstance",
"(",
"coord",
",",
"str",
")",
":",
"ra",
"=",
"coord",
".",
"split",
"(",
"\" \"",
")",
"[",
"0",
"]",
"dec",
"=",
"coord",
".",
"split",
"(",
"\" \"",
")",
"[",
"1",
"]",
"elif",
"isinstance",
"(",
"coord",
",",
"tuple",
")",
"or",
"isinstance",
"(",
"coord",
",",
"list",
")",
":",
"ra",
"=",
"coord",
"[",
"0",
"]",
"dec",
"=",
"coord",
"[",
"1",
"]",
"dataList",
".",
"append",
"(",
"{",
"\"raDeg\"",
":",
"ra",
",",
"\"decDeg\"",
":",
"dec",
",",
"\"arcsecRadius\"",
":",
"self",
".",
"radiusArcsec",
"}",
")",
"if",
"len",
"(",
"dataList",
")",
"==",
"0",
":",
"return",
"None",
"# CREATE TABLE IF NOT EXIST",
"createStatement",
"=",
"\"\"\"CREATE TABLE IF NOT EXISTS `tcs_helper_ned_query_history` (\n `primaryId` bigint(20) NOT NULL AUTO_INCREMENT,\n `raDeg` double DEFAULT NULL,\n `decDeg` double DEFAULT NULL,\n `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,\n `dateLastModified` datetime DEFAULT CURRENT_TIMESTAMP,\n `updated` varchar(45) DEFAULT '0',\n `arcsecRadius` int(11) DEFAULT NULL,\n `dateQueried` datetime DEFAULT CURRENT_TIMESTAMP,\n `htm16ID` bigint(20) DEFAULT NULL,\n `htm13ID` int(11) DEFAULT NULL,\n `htm10ID` int(11) DEFAULT NULL,\n PRIMARY KEY (`primaryId`),\n KEY `idx_htm16ID` (`htm16ID`),\n KEY `dateQueried` (`dateQueried`),\n KEY `dateHtm16` (`dateQueried`,`htm16ID`),\n KEY `idx_htm10ID` (`htm10ID`),\n KEY `idx_htm13ID` (`htm13ID`)\n) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;\n \"\"\"",
"writequery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"createStatement",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
")",
"# USE dbSettings TO ACTIVATE MULTIPROCESSING",
"insert_list_of_dictionaries_into_database_tables",
"(",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
",",
"log",
"=",
"self",
".",
"log",
",",
"dictList",
"=",
"dataList",
",",
"dbTableName",
"=",
"\"tcs_helper_ned_query_history\"",
",",
"uniqueKeyList",
"=",
"[",
"]",
",",
"dateModified",
"=",
"True",
",",
"batchSize",
"=",
"10000",
",",
"replace",
"=",
"True",
",",
"dbSettings",
"=",
"self",
".",
"settings",
"[",
"\"database settings\"",
"]",
"[",
"\"static catalogues\"",
"]",
")",
"# INDEX THE TABLE FOR LATER SEARCHES",
"add_htm_ids_to_mysql_database_table",
"(",
"raColName",
"=",
"\"raDeg\"",
",",
"declColName",
"=",
"\"decDeg\"",
",",
"tableName",
"=",
"\"tcs_helper_ned_query_history\"",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
",",
"log",
"=",
"self",
".",
"log",
",",
"primaryIdColumnName",
"=",
"\"primaryId\"",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_update_ned_query_history`` method'",
")",
"return",
"None"
] |
*Update the database helper table to give details of the ned cone searches performed*
*Usage:*
.. code-block:: python
stream._update_ned_query_history()
|
[
"*",
"Update",
"the",
"database",
"helper",
"table",
"to",
"give",
"details",
"of",
"the",
"ned",
"cone",
"searches",
"performed",
"*"
] |
train
|
https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/imports/ned.py#L205-L296
|
thespacedoctor/sherlock
|
sherlock/imports/ned.py
|
ned._download_ned_source_metadata
|
def _download_ned_source_metadata(
self):
"""*Query NED using the names of the NED sources in our local database to retrieve extra metadata*
*Usage:*
.. code-block:: python
stream._download_ned_source_metadata()
"""
self.log.debug('starting the ``_download_ned_source_metadata`` method')
self.dbTableName = "tcs_cat_ned_stream"
total, batches = self._count_ned_sources_in_database_requiring_metadata()
self.log.info(
"%(total)s galaxies require metadata. Need to send %(batches)s batch requests to NED." % locals())
totalBatches = self.batches
thisCount = 0
# FOR EACH BATCH, GET THE GALAXY IDs, QUERY NED AND UPDATE THE DATABASE
# THEN RECOUNT TO DETERMINE IF THERE ARE REMAINING SOURCES TO GRAB
# METADATA FOR
while self.total:
thisCount += 1
self._get_ned_sources_needing_metadata()
self._do_ned_namesearch_queries_and_add_resulting_metadata_to_database(
thisCount)
self._count_ned_sources_in_database_requiring_metadata()
self.log.debug(
'completed the ``_download_ned_source_metadata`` method')
return None
|
python
|
def _download_ned_source_metadata(
self):
"""*Query NED using the names of the NED sources in our local database to retrieve extra metadata*
*Usage:*
.. code-block:: python
stream._download_ned_source_metadata()
"""
self.log.debug('starting the ``_download_ned_source_metadata`` method')
self.dbTableName = "tcs_cat_ned_stream"
total, batches = self._count_ned_sources_in_database_requiring_metadata()
self.log.info(
"%(total)s galaxies require metadata. Need to send %(batches)s batch requests to NED." % locals())
totalBatches = self.batches
thisCount = 0
# FOR EACH BATCH, GET THE GALAXY IDs, QUERY NED AND UPDATE THE DATABASE
# THEN RECOUNT TO DETERMINE IF THERE ARE REMAINING SOURCES TO GRAB
# METADATA FOR
while self.total:
thisCount += 1
self._get_ned_sources_needing_metadata()
self._do_ned_namesearch_queries_and_add_resulting_metadata_to_database(
thisCount)
self._count_ned_sources_in_database_requiring_metadata()
self.log.debug(
'completed the ``_download_ned_source_metadata`` method')
return None
|
[
"def",
"_download_ned_source_metadata",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_download_ned_source_metadata`` method'",
")",
"self",
".",
"dbTableName",
"=",
"\"tcs_cat_ned_stream\"",
"total",
",",
"batches",
"=",
"self",
".",
"_count_ned_sources_in_database_requiring_metadata",
"(",
")",
"self",
".",
"log",
".",
"info",
"(",
"\"%(total)s galaxies require metadata. Need to send %(batches)s batch requests to NED.\"",
"%",
"locals",
"(",
")",
")",
"totalBatches",
"=",
"self",
".",
"batches",
"thisCount",
"=",
"0",
"# FOR EACH BATCH, GET THE GALAXY IDs, QUERY NED AND UPDATE THE DATABASE",
"# THEN RECOUNT TO DETERMINE IF THERE ARE REMAINING SOURCES TO GRAB",
"# METADATA FOR",
"while",
"self",
".",
"total",
":",
"thisCount",
"+=",
"1",
"self",
".",
"_get_ned_sources_needing_metadata",
"(",
")",
"self",
".",
"_do_ned_namesearch_queries_and_add_resulting_metadata_to_database",
"(",
"thisCount",
")",
"self",
".",
"_count_ned_sources_in_database_requiring_metadata",
"(",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_download_ned_source_metadata`` method'",
")",
"return",
"None"
] |
*Query NED using the names of the NED sources in our local database to retrieve extra metadata*
*Usage:*
.. code-block:: python
stream._download_ned_source_metadata()
|
[
"*",
"Query",
"NED",
"using",
"the",
"names",
"of",
"the",
"NED",
"sources",
"in",
"our",
"local",
"database",
"to",
"retrieve",
"extra",
"metadata",
"*"
] |
train
|
https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/imports/ned.py#L298-L332
|
thespacedoctor/sherlock
|
sherlock/imports/ned.py
|
ned._get_ned_sources_needing_metadata
|
def _get_ned_sources_needing_metadata(
self):
"""*Get the names of 50000 or less NED sources that still require metabase in the database*
**Return:**
- ``len(self.theseIds)`` -- the number of NED IDs returned
*Usage:*
.. code-block:: python
numberSources = stream._get_ned_sources_needing_metadata()
"""
self.log.debug(
'starting the ``_get_ned_sources_needing_metadata`` method')
tableName = self.dbTableName
# SELECT THE DATA FROM NED TABLE
sqlQuery = u"""
select ned_name from %(tableName)s where raDeg is null and (download_error != 1 or download_error is null) limit 50000;
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
self.theseIds = []
self.theseIds[:] = [r["ned_name"] for r in rows]
self.log.debug(
'completed the ``_get_ned_sources_needing_metadata`` method')
return len(self.theseIds)
|
python
|
def _get_ned_sources_needing_metadata(
self):
"""*Get the names of 50000 or less NED sources that still require metabase in the database*
**Return:**
- ``len(self.theseIds)`` -- the number of NED IDs returned
*Usage:*
.. code-block:: python
numberSources = stream._get_ned_sources_needing_metadata()
"""
self.log.debug(
'starting the ``_get_ned_sources_needing_metadata`` method')
tableName = self.dbTableName
# SELECT THE DATA FROM NED TABLE
sqlQuery = u"""
select ned_name from %(tableName)s where raDeg is null and (download_error != 1 or download_error is null) limit 50000;
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
self.theseIds = []
self.theseIds[:] = [r["ned_name"] for r in rows]
self.log.debug(
'completed the ``_get_ned_sources_needing_metadata`` method')
return len(self.theseIds)
|
[
"def",
"_get_ned_sources_needing_metadata",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_get_ned_sources_needing_metadata`` method'",
")",
"tableName",
"=",
"self",
".",
"dbTableName",
"# SELECT THE DATA FROM NED TABLE",
"sqlQuery",
"=",
"u\"\"\"\n select ned_name from %(tableName)s where raDeg is null and (download_error != 1 or download_error is null) limit 50000;\n \"\"\"",
"%",
"locals",
"(",
")",
"rows",
"=",
"readquery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
",",
"quiet",
"=",
"False",
")",
"self",
".",
"theseIds",
"=",
"[",
"]",
"self",
".",
"theseIds",
"[",
":",
"]",
"=",
"[",
"r",
"[",
"\"ned_name\"",
"]",
"for",
"r",
"in",
"rows",
"]",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_get_ned_sources_needing_metadata`` method'",
")",
"return",
"len",
"(",
"self",
".",
"theseIds",
")"
] |
*Get the names of 50000 or less NED sources that still require metadata in the database*
**Return:**
- ``len(self.theseIds)`` -- the number of NED IDs returned
*Usage:*
.. code-block:: python
numberSources = stream._get_ned_sources_needing_metadata()
|
[
"*",
"Get",
"the",
"names",
"of",
"50000",
"or",
"less",
"NED",
"sources",
"that",
"still",
"require",
"metabase",
"in",
"the",
"database",
"*"
] |
train
|
https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/imports/ned.py#L334-L369
|
thespacedoctor/sherlock
|
sherlock/imports/ned.py
|
ned._do_ned_namesearch_queries_and_add_resulting_metadata_to_database
|
def _do_ned_namesearch_queries_and_add_resulting_metadata_to_database(
self,
batchCount):
"""*Query NED via name searcha and add result metadata to database*
**Key Arguments:**
- ``batchCount`` - the index number of the batch sent to NED (only needed for printing to STDOUT to give user idea of progress)
*Usage:*
.. code-block:: python
numberSources = stream._do_ned_namesearch_queries_and_add_resulting_metadata_to_database(batchCount=10)
"""
self.log.debug(
'starting the ``_do_ned_namesearch_queries_and_add_resulting_metadata_to_database`` method')
# ASTROCALC UNIT CONVERTER OBJECT
converter = unit_conversion(
log=self.log
)
tableName = self.dbTableName
# QUERY NED WITH BATCH
totalCount = len(self.theseIds)
print "requesting metadata from NED for %(totalCount)s galaxies (batch %(batchCount)s)" % locals()
# QUERY THE ONLINE NED DATABASE USING NEDDY'S NAMESEARCH METHOD
search = namesearch(
log=self.log,
names=self.theseIds,
quiet=True
)
results = search.get()
print "results returned from ned -- starting to add to database" % locals()
# CLEAN THE RETURNED DATA AND UPDATE DATABASE
totalCount = len(results)
count = 0
sqlQuery = ""
dictList = []
for thisDict in results:
thisDict["tableName"] = tableName
count += 1
for k, v in thisDict.iteritems():
if not v or len(v) == 0:
thisDict[k] = "null"
if k in ["major_diameter_arcmin", "minor_diameter_arcmin"] and (":" in v or "?" in v or "<" in v):
thisDict[k] = v.replace(":", "").replace(
"?", "").replace("<", "")
if isinstance(v, str) and '"' in v:
thisDict[k] = v.replace('"', '\\"')
if "Input name not" not in thisDict["input_note"] and "Same object as" not in thisDict["input_note"]:
try:
thisDict["raDeg"] = converter.ra_sexegesimal_to_decimal(
ra=thisDict["ra"]
)
thisDict["decDeg"] = converter.dec_sexegesimal_to_decimal(
dec=thisDict["dec"]
)
except:
name = thisDict["input_name"]
self.log.warning(
"Could not convert the RA & DEC for the %(name)s NED source" % locals())
continue
thisDict["eb_v"] = thisDict["eb-v"]
thisDict["ned_name"] = thisDict["input_name"]
row = {}
for k in ["redshift_quality", "redshift", "hierarchy", "object_type", "major_diameter_arcmin", "morphology", "magnitude_filter", "ned_notes", "eb_v", "raDeg", "radio_morphology", "activity_type", "minor_diameter_arcmin", "decDeg", "redshift_err", "ned_name"]:
if thisDict[k] == "null":
row[k] = None
else:
row[k] = thisDict[k]
dictList.append(row)
self.add_data_to_database_table(
dictList=dictList,
createStatement="""SET SESSION sql_mode="";"""
)
theseIds = ("\", \"").join(self.theseIds)
sqlQuery = u"""
update %(tableName)s set download_error = 1 where ned_name in ("%(theseIds)s");
""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
print "%(count)s/%(totalCount)s galaxy metadata batch entries added to database" % locals()
if count < totalCount:
# Cursor up one line and clear line
sys.stdout.write("\x1b[1A\x1b[2K")
sqlQuery = u"""
update tcs_helper_catalogue_tables_info set last_updated = now() where table_name = "%(tableName)s"
""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
self.log.debug(
'completed the ``_do_ned_namesearch_queries_and_add_resulting_metadata_to_database`` method')
return None
|
python
|
def _do_ned_namesearch_queries_and_add_resulting_metadata_to_database(
self,
batchCount):
"""*Query NED via name searcha and add result metadata to database*
**Key Arguments:**
- ``batchCount`` - the index number of the batch sent to NED (only needed for printing to STDOUT to give user idea of progress)
*Usage:*
.. code-block:: python
numberSources = stream._do_ned_namesearch_queries_and_add_resulting_metadata_to_database(batchCount=10)
"""
self.log.debug(
'starting the ``_do_ned_namesearch_queries_and_add_resulting_metadata_to_database`` method')
# ASTROCALC UNIT CONVERTER OBJECT
converter = unit_conversion(
log=self.log
)
tableName = self.dbTableName
# QUERY NED WITH BATCH
totalCount = len(self.theseIds)
print "requesting metadata from NED for %(totalCount)s galaxies (batch %(batchCount)s)" % locals()
# QUERY THE ONLINE NED DATABASE USING NEDDY'S NAMESEARCH METHOD
search = namesearch(
log=self.log,
names=self.theseIds,
quiet=True
)
results = search.get()
print "results returned from ned -- starting to add to database" % locals()
# CLEAN THE RETURNED DATA AND UPDATE DATABASE
totalCount = len(results)
count = 0
sqlQuery = ""
dictList = []
for thisDict in results:
thisDict["tableName"] = tableName
count += 1
for k, v in thisDict.iteritems():
if not v or len(v) == 0:
thisDict[k] = "null"
if k in ["major_diameter_arcmin", "minor_diameter_arcmin"] and (":" in v or "?" in v or "<" in v):
thisDict[k] = v.replace(":", "").replace(
"?", "").replace("<", "")
if isinstance(v, str) and '"' in v:
thisDict[k] = v.replace('"', '\\"')
if "Input name not" not in thisDict["input_note"] and "Same object as" not in thisDict["input_note"]:
try:
thisDict["raDeg"] = converter.ra_sexegesimal_to_decimal(
ra=thisDict["ra"]
)
thisDict["decDeg"] = converter.dec_sexegesimal_to_decimal(
dec=thisDict["dec"]
)
except:
name = thisDict["input_name"]
self.log.warning(
"Could not convert the RA & DEC for the %(name)s NED source" % locals())
continue
thisDict["eb_v"] = thisDict["eb-v"]
thisDict["ned_name"] = thisDict["input_name"]
row = {}
for k in ["redshift_quality", "redshift", "hierarchy", "object_type", "major_diameter_arcmin", "morphology", "magnitude_filter", "ned_notes", "eb_v", "raDeg", "radio_morphology", "activity_type", "minor_diameter_arcmin", "decDeg", "redshift_err", "ned_name"]:
if thisDict[k] == "null":
row[k] = None
else:
row[k] = thisDict[k]
dictList.append(row)
self.add_data_to_database_table(
dictList=dictList,
createStatement="""SET SESSION sql_mode="";"""
)
theseIds = ("\", \"").join(self.theseIds)
sqlQuery = u"""
update %(tableName)s set download_error = 1 where ned_name in ("%(theseIds)s");
""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
print "%(count)s/%(totalCount)s galaxy metadata batch entries added to database" % locals()
if count < totalCount:
# Cursor up one line and clear line
sys.stdout.write("\x1b[1A\x1b[2K")
sqlQuery = u"""
update tcs_helper_catalogue_tables_info set last_updated = now() where table_name = "%(tableName)s"
""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
self.log.debug(
'completed the ``_do_ned_namesearch_queries_and_add_resulting_metadata_to_database`` method')
return None
|
[
"def",
"_do_ned_namesearch_queries_and_add_resulting_metadata_to_database",
"(",
"self",
",",
"batchCount",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_do_ned_namesearch_queries_and_add_resulting_metadata_to_database`` method'",
")",
"# ASTROCALC UNIT CONVERTER OBJECT",
"converter",
"=",
"unit_conversion",
"(",
"log",
"=",
"self",
".",
"log",
")",
"tableName",
"=",
"self",
".",
"dbTableName",
"# QUERY NED WITH BATCH",
"totalCount",
"=",
"len",
"(",
"self",
".",
"theseIds",
")",
"print",
"\"requesting metadata from NED for %(totalCount)s galaxies (batch %(batchCount)s)\"",
"%",
"locals",
"(",
")",
"# QUERY THE ONLINE NED DATABASE USING NEDDY'S NAMESEARCH METHOD",
"search",
"=",
"namesearch",
"(",
"log",
"=",
"self",
".",
"log",
",",
"names",
"=",
"self",
".",
"theseIds",
",",
"quiet",
"=",
"True",
")",
"results",
"=",
"search",
".",
"get",
"(",
")",
"print",
"\"results returned from ned -- starting to add to database\"",
"%",
"locals",
"(",
")",
"# CLEAN THE RETURNED DATA AND UPDATE DATABASE",
"totalCount",
"=",
"len",
"(",
"results",
")",
"count",
"=",
"0",
"sqlQuery",
"=",
"\"\"",
"dictList",
"=",
"[",
"]",
"for",
"thisDict",
"in",
"results",
":",
"thisDict",
"[",
"\"tableName\"",
"]",
"=",
"tableName",
"count",
"+=",
"1",
"for",
"k",
",",
"v",
"in",
"thisDict",
".",
"iteritems",
"(",
")",
":",
"if",
"not",
"v",
"or",
"len",
"(",
"v",
")",
"==",
"0",
":",
"thisDict",
"[",
"k",
"]",
"=",
"\"null\"",
"if",
"k",
"in",
"[",
"\"major_diameter_arcmin\"",
",",
"\"minor_diameter_arcmin\"",
"]",
"and",
"(",
"\":\"",
"in",
"v",
"or",
"\"?\"",
"in",
"v",
"or",
"\"<\"",
"in",
"v",
")",
":",
"thisDict",
"[",
"k",
"]",
"=",
"v",
".",
"replace",
"(",
"\":\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\"?\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\"<\"",
",",
"\"\"",
")",
"if",
"isinstance",
"(",
"v",
",",
"str",
")",
"and",
"'\"'",
"in",
"v",
":",
"thisDict",
"[",
"k",
"]",
"=",
"v",
".",
"replace",
"(",
"'\"'",
",",
"'\\\\\"'",
")",
"if",
"\"Input name not\"",
"not",
"in",
"thisDict",
"[",
"\"input_note\"",
"]",
"and",
"\"Same object as\"",
"not",
"in",
"thisDict",
"[",
"\"input_note\"",
"]",
":",
"try",
":",
"thisDict",
"[",
"\"raDeg\"",
"]",
"=",
"converter",
".",
"ra_sexegesimal_to_decimal",
"(",
"ra",
"=",
"thisDict",
"[",
"\"ra\"",
"]",
")",
"thisDict",
"[",
"\"decDeg\"",
"]",
"=",
"converter",
".",
"dec_sexegesimal_to_decimal",
"(",
"dec",
"=",
"thisDict",
"[",
"\"dec\"",
"]",
")",
"except",
":",
"name",
"=",
"thisDict",
"[",
"\"input_name\"",
"]",
"self",
".",
"log",
".",
"warning",
"(",
"\"Could not convert the RA & DEC for the %(name)s NED source\"",
"%",
"locals",
"(",
")",
")",
"continue",
"thisDict",
"[",
"\"eb_v\"",
"]",
"=",
"thisDict",
"[",
"\"eb-v\"",
"]",
"thisDict",
"[",
"\"ned_name\"",
"]",
"=",
"thisDict",
"[",
"\"input_name\"",
"]",
"row",
"=",
"{",
"}",
"for",
"k",
"in",
"[",
"\"redshift_quality\"",
",",
"\"redshift\"",
",",
"\"hierarchy\"",
",",
"\"object_type\"",
",",
"\"major_diameter_arcmin\"",
",",
"\"morphology\"",
",",
"\"magnitude_filter\"",
",",
"\"ned_notes\"",
",",
"\"eb_v\"",
",",
"\"raDeg\"",
",",
"\"radio_morphology\"",
",",
"\"activity_type\"",
",",
"\"minor_diameter_arcmin\"",
",",
"\"decDeg\"",
",",
"\"redshift_err\"",
",",
"\"ned_name\"",
"]",
":",
"if",
"thisDict",
"[",
"k",
"]",
"==",
"\"null\"",
":",
"row",
"[",
"k",
"]",
"=",
"None",
"else",
":",
"row",
"[",
"k",
"]",
"=",
"thisDict",
"[",
"k",
"]",
"dictList",
".",
"append",
"(",
"row",
")",
"self",
".",
"add_data_to_database_table",
"(",
"dictList",
"=",
"dictList",
",",
"createStatement",
"=",
"\"\"\"SET SESSION sql_mode=\"\";\"\"\"",
")",
"theseIds",
"=",
"(",
"\"\\\", \\\"\"",
")",
".",
"join",
"(",
"self",
".",
"theseIds",
")",
"sqlQuery",
"=",
"u\"\"\"\n update %(tableName)s set download_error = 1 where ned_name in (\"%(theseIds)s\");\n \"\"\"",
"%",
"locals",
"(",
")",
"writequery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
",",
")",
"print",
"\"%(count)s/%(totalCount)s galaxy metadata batch entries added to database\"",
"%",
"locals",
"(",
")",
"if",
"count",
"<",
"totalCount",
":",
"# Cursor up one line and clear line",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"\\x1b[1A\\x1b[2K\"",
")",
"sqlQuery",
"=",
"u\"\"\"\n update tcs_helper_catalogue_tables_info set last_updated = now() where table_name = \"%(tableName)s\"\n \"\"\"",
"%",
"locals",
"(",
")",
"writequery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
",",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_do_ned_namesearch_queries_and_add_resulting_metadata_to_database`` method'",
")",
"return",
"None"
] |
*Query NED via name search and add result metadata to database*
**Key Arguments:**
- ``batchCount`` - the index number of the batch sent to NED (only needed for printing to STDOUT to give user idea of progress)
*Usage:*
.. code-block:: python
numberSources = stream._do_ned_namesearch_queries_and_add_resulting_metadata_to_database(batchCount=10)
|
[
"*",
"Query",
"NED",
"via",
"name",
"searcha",
"and",
"add",
"result",
"metadata",
"to",
"database",
"*"
] |
train
|
https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/imports/ned.py#L371-L478
|
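Editor's note: the entry above delegates the actual NED lookup to an external package. The following is an illustrative sketch only of that batched name-search step lifted out of the class; the `from neddy import namesearch` import path, the use of a stdlib logger in place of `self.log`, and the object names are all assumptions, not taken from this excerpt.

# Illustrative sketch of the batched NED name-search call used above.
# Assumptions: thespacedoctor's neddy package is installed, its import path
# is `from neddy import namesearch`, and a stdlib logger is accepted where
# the original code passes self.log.
import logging

from neddy import namesearch

log = logging.getLogger("ned-namesearch-demo")

search = namesearch(
    log=log,
    names=["NGC 1365", "M 82"],  # hypothetical object names for illustration
    quiet=True
)
results = search.get()           # one metadata dictionary per resolved name
print(len(results))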
thespacedoctor/sherlock
|
sherlock/imports/ned.py
|
ned._count_ned_sources_in_database_requiring_metadata
|
def _count_ned_sources_in_database_requiring_metadata(
self):
"""*Count the sources in the NED table requiring metadata*
**Return:**
- ``self.total``, ``self.batches`` -- total number of galaxies needing metadata & the number of batches required to be sent to NED
*Usage:*
.. code-block:: python
totalRemaining, numberOfBatches = stream._count_ned_sources_in_database_requiring_metadata()
"""
self.log.debug(
'starting the ``_count_ned_sources_in_database_requiring_metadata`` method')
tableName = self.dbTableName
sqlQuery = u"""
select count(*) as count from %(tableName)s where raDeg is null and (download_error != 1 or download_error is null)
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
self.total = rows[0]["count"]
self.batches = int(self.total / 50000.) + 1
if self.total == 0:
self.batches = 0
self.log.debug(
'completed the ``_count_ned_sources_in_database_requiring_metadata`` method')
return self.total, self.batches
|
python
|
def _count_ned_sources_in_database_requiring_metadata(
self):
"""*Count the sources in the NED table requiring metadata*
**Return:**
- ``self.total``, ``self.batches`` -- total number of galaxies needing metadata & the number of batches required to be sent to NED
*Usage:*
.. code-block:: python
totalRemaining, numberOfBatches = stream._count_ned_sources_in_database_requiring_metadata()
"""
self.log.debug(
'starting the ``_count_ned_sources_in_database_requiring_metadata`` method')
tableName = self.dbTableName
sqlQuery = u"""
select count(*) as count from %(tableName)s where raDeg is null and (download_error != 1 or download_error is null)
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
self.total = rows[0]["count"]
self.batches = int(self.total / 50000.) + 1
if self.total == 0:
self.batches = 0
self.log.debug(
'completed the ``_count_ned_sources_in_database_requiring_metadata`` method')
return self.total, self.batches
|
[
"def",
"_count_ned_sources_in_database_requiring_metadata",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_count_ned_sources_in_database_requiring_metadata`` method'",
")",
"tableName",
"=",
"self",
".",
"dbTableName",
"sqlQuery",
"=",
"u\"\"\"\n select count(*) as count from %(tableName)s where raDeg is null and (download_error != 1 or download_error is null)\n \"\"\"",
"%",
"locals",
"(",
")",
"rows",
"=",
"readquery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
",",
"quiet",
"=",
"False",
")",
"self",
".",
"total",
"=",
"rows",
"[",
"0",
"]",
"[",
"\"count\"",
"]",
"self",
".",
"batches",
"=",
"int",
"(",
"self",
".",
"total",
"/",
"50000.",
")",
"+",
"1",
"if",
"self",
".",
"total",
"==",
"0",
":",
"self",
".",
"batches",
"=",
"0",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_count_ned_sources_in_database_requiring_metadata`` method'",
")",
"return",
"self",
".",
"total",
",",
"self",
".",
"batches"
] |
*Count the sources in the NED table requiring metadata*
**Return:**
- ``self.total``, ``self.batches`` -- total number of galaxies needing metadata & the number of batches required to be sent to NED
*Usage:*
.. code-block:: python
totalRemaining, numberOfBatches = stream._count_ned_sources_in_database_requiring_metadata()
|
[
"*",
"Count",
"the",
"sources",
"in",
"the",
"NED",
"table",
"requiring",
"metadata",
"*"
] |
train
|
https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/imports/ned.py#L480-L515
|
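Editor's note: the batch bookkeeping in the entry above reduces to two lines of arithmetic. This standalone sketch (no database required) reproduces just that calculation so the reported batch counts are easy to check:

# Standalone illustration of the batch-count arithmetic used above.
def count_batches(total, batch_size=50000):
    batches = int(total / float(batch_size)) + 1
    if total == 0:
        batches = 0
    return batches

print(count_batches(0))        # 0 -> nothing left to fetch
print(count_batches(120000))   # 3 -> three NED requests of up to 50000 names each
print(count_batches(50000))    # 2 -> an exact multiple reports one more batch than strictly needed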
pytroll/posttroll
|
posttroll/address_receiver.py
|
AddressReceiver.start
|
def start(self):
"""Start the receiver.
"""
if not self._is_running:
self._do_run = True
self._thread.start()
return self
|
python
|
def start(self):
"""Start the receiver.
"""
if not self._is_running:
self._do_run = True
self._thread.start()
return self
|
[
"def",
"start",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_is_running",
":",
"self",
".",
"_do_run",
"=",
"True",
"self",
".",
"_thread",
".",
"start",
"(",
")",
"return",
"self"
] |
Start the receiver.
|
[
"Start",
"the",
"receiver",
"."
] |
train
|
https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/address_receiver.py#L80-L86
|
pytroll/posttroll
|
posttroll/address_receiver.py
|
AddressReceiver.get
|
def get(self, name=""):
"""Get the address(es).
"""
addrs = []
with self._address_lock:
for metadata in self._addresses.values():
if (name == "" or
(name and name in metadata["service"])):
mda = copy.copy(metadata)
mda["receive_time"] = mda["receive_time"].isoformat()
addrs.append(mda)
LOGGER.debug('return address %s', str(addrs))
return addrs
|
python
|
def get(self, name=""):
"""Get the address(es).
"""
addrs = []
with self._address_lock:
for metadata in self._addresses.values():
if (name == "" or
(name and name in metadata["service"])):
mda = copy.copy(metadata)
mda["receive_time"] = mda["receive_time"].isoformat()
addrs.append(mda)
LOGGER.debug('return address %s', str(addrs))
return addrs
|
[
"def",
"get",
"(",
"self",
",",
"name",
"=",
"\"\"",
")",
":",
"addrs",
"=",
"[",
"]",
"with",
"self",
".",
"_address_lock",
":",
"for",
"metadata",
"in",
"self",
".",
"_addresses",
".",
"values",
"(",
")",
":",
"if",
"(",
"name",
"==",
"\"\"",
"or",
"(",
"name",
"and",
"name",
"in",
"metadata",
"[",
"\"service\"",
"]",
")",
")",
":",
"mda",
"=",
"copy",
".",
"copy",
"(",
"metadata",
")",
"mda",
"[",
"\"receive_time\"",
"]",
"=",
"mda",
"[",
"\"receive_time\"",
"]",
".",
"isoformat",
"(",
")",
"addrs",
".",
"append",
"(",
"mda",
")",
"LOGGER",
".",
"debug",
"(",
"'return address %s'",
",",
"str",
"(",
"addrs",
")",
")",
"return",
"addrs"
] |
Get the address(es).
|
[
"Get",
"the",
"address",
"(",
"es",
")",
"."
] |
train
|
https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/address_receiver.py#L99-L112
|
pytroll/posttroll
|
posttroll/address_receiver.py
|
AddressReceiver._check_age
|
def _check_age(self, pub, min_interval=timedelta(seconds=0)):
"""Check the age of the receiver.
"""
now = datetime.utcnow()
if (now - self._last_age_check) <= min_interval:
return
LOGGER.debug("%s - checking addresses", str(datetime.utcnow()))
self._last_age_check = now
to_del = []
with self._address_lock:
for addr, metadata in self._addresses.items():
atime = metadata["receive_time"]
if now - atime > self._max_age:
mda = {'status': False,
'URI': addr,
'service': metadata['service']}
msg = Message('/address/' + metadata['name'], 'info', mda)
to_del.append(addr)
LOGGER.info("publish remove '%s'", str(msg))
pub.send(msg.encode())
for addr in to_del:
del self._addresses[addr]
|
python
|
def _check_age(self, pub, min_interval=timedelta(seconds=0)):
"""Check the age of the receiver.
"""
now = datetime.utcnow()
if (now - self._last_age_check) <= min_interval:
return
LOGGER.debug("%s - checking addresses", str(datetime.utcnow()))
self._last_age_check = now
to_del = []
with self._address_lock:
for addr, metadata in self._addresses.items():
atime = metadata["receive_time"]
if now - atime > self._max_age:
mda = {'status': False,
'URI': addr,
'service': metadata['service']}
msg = Message('/address/' + metadata['name'], 'info', mda)
to_del.append(addr)
LOGGER.info("publish remove '%s'", str(msg))
pub.send(msg.encode())
for addr in to_del:
del self._addresses[addr]
|
[
"def",
"_check_age",
"(",
"self",
",",
"pub",
",",
"min_interval",
"=",
"timedelta",
"(",
"seconds",
"=",
"0",
")",
")",
":",
"now",
"=",
"datetime",
".",
"utcnow",
"(",
")",
"if",
"(",
"now",
"-",
"self",
".",
"_last_age_check",
")",
"<=",
"min_interval",
":",
"return",
"LOGGER",
".",
"debug",
"(",
"\"%s - checking addresses\"",
",",
"str",
"(",
"datetime",
".",
"utcnow",
"(",
")",
")",
")",
"self",
".",
"_last_age_check",
"=",
"now",
"to_del",
"=",
"[",
"]",
"with",
"self",
".",
"_address_lock",
":",
"for",
"addr",
",",
"metadata",
"in",
"self",
".",
"_addresses",
".",
"items",
"(",
")",
":",
"atime",
"=",
"metadata",
"[",
"\"receive_time\"",
"]",
"if",
"now",
"-",
"atime",
">",
"self",
".",
"_max_age",
":",
"mda",
"=",
"{",
"'status'",
":",
"False",
",",
"'URI'",
":",
"addr",
",",
"'service'",
":",
"metadata",
"[",
"'service'",
"]",
"}",
"msg",
"=",
"Message",
"(",
"'/address/'",
"+",
"metadata",
"[",
"'name'",
"]",
",",
"'info'",
",",
"mda",
")",
"to_del",
".",
"append",
"(",
"addr",
")",
"LOGGER",
".",
"info",
"(",
"\"publish remove '%s'\"",
",",
"str",
"(",
"msg",
")",
")",
"pub",
".",
"send",
"(",
"msg",
".",
"encode",
"(",
")",
")",
"for",
"addr",
"in",
"to_del",
":",
"del",
"self",
".",
"_addresses",
"[",
"addr",
"]"
] |
Check the age of the receiver.
|
[
"Check",
"the",
"age",
"of",
"the",
"receiver",
"."
] |
train
|
https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/address_receiver.py#L114-L136
|
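Editor's note: the expiry rule in `_check_age` above is a generic pattern (drop any entry whose `receive_time` is older than `max_age`). A minimal, standard-library-only sketch of just that pruning logic, with made-up addresses:

# Minimal sketch of the receive_time-based pruning performed in _check_age above.
from datetime import datetime, timedelta

max_age = timedelta(minutes=10)
addresses = {
    "tcp://host-a:9000": {"receive_time": datetime.utcnow()},
    "tcp://host-b:9000": {"receive_time": datetime.utcnow() - timedelta(minutes=30)},
}

now = datetime.utcnow()
to_del = [addr for addr, meta in addresses.items()
          if now - meta["receive_time"] > max_age]
for addr in to_del:
    del addresses[addr]

print(list(addresses))  # only the fresh address ('tcp://host-a:9000') remains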
pytroll/posttroll
|
posttroll/address_receiver.py
|
AddressReceiver._run
|
def _run(self):
"""Run the receiver.
"""
port = broadcast_port
nameservers = []
if self._multicast_enabled:
recv = MulticastReceiver(port).settimeout(2.)
while True:
try:
recv = MulticastReceiver(port).settimeout(2.)
LOGGER.info("Receiver initialized.")
break
except IOError as err:
if err.errno == errno.ENODEV:
LOGGER.error("Receiver initialization failed "
"(no such device). "
"Trying again in %d s",
10)
time.sleep(10)
else:
raise
else:
recv = _SimpleReceiver(port)
nameservers = ["localhost"]
self._is_running = True
with Publish("address_receiver", self._port, ["addresses"],
nameservers=nameservers) as pub:
try:
while self._do_run:
try:
data, fromaddr = recv()
LOGGER.debug("data %s", data)
del fromaddr
except SocketTimeout:
if self._multicast_enabled:
LOGGER.debug("Multicast socket timed out on recv!")
continue
finally:
self._check_age(pub, min_interval=self._max_age / 20)
if self._do_heartbeat:
pub.heartbeat(min_interval=29)
msg = Message.decode(data)
name = msg.subject.split("/")[1]
if(msg.type == 'info' and
msg.subject.lower().startswith(self._subject)):
addr = msg.data["URI"]
msg.data['status'] = True
metadata = copy.copy(msg.data)
metadata["name"] = name
LOGGER.debug('receiving address %s %s %s', str(addr),
str(name), str(metadata))
if addr not in self._addresses:
LOGGER.info("nameserver: publish add '%s'",
str(msg))
pub.send(msg.encode())
self._add(addr, metadata)
finally:
self._is_running = False
recv.close()
|
python
|
def _run(self):
"""Run the receiver.
"""
port = broadcast_port
nameservers = []
if self._multicast_enabled:
recv = MulticastReceiver(port).settimeout(2.)
while True:
try:
recv = MulticastReceiver(port).settimeout(2.)
LOGGER.info("Receiver initialized.")
break
except IOError as err:
if err.errno == errno.ENODEV:
LOGGER.error("Receiver initialization failed "
"(no such device). "
"Trying again in %d s",
10)
time.sleep(10)
else:
raise
else:
recv = _SimpleReceiver(port)
nameservers = ["localhost"]
self._is_running = True
with Publish("address_receiver", self._port, ["addresses"],
nameservers=nameservers) as pub:
try:
while self._do_run:
try:
data, fromaddr = recv()
LOGGER.debug("data %s", data)
del fromaddr
except SocketTimeout:
if self._multicast_enabled:
LOGGER.debug("Multicast socket timed out on recv!")
continue
finally:
self._check_age(pub, min_interval=self._max_age / 20)
if self._do_heartbeat:
pub.heartbeat(min_interval=29)
msg = Message.decode(data)
name = msg.subject.split("/")[1]
if(msg.type == 'info' and
msg.subject.lower().startswith(self._subject)):
addr = msg.data["URI"]
msg.data['status'] = True
metadata = copy.copy(msg.data)
metadata["name"] = name
LOGGER.debug('receiving address %s %s %s', str(addr),
str(name), str(metadata))
if addr not in self._addresses:
LOGGER.info("nameserver: publish add '%s'",
str(msg))
pub.send(msg.encode())
self._add(addr, metadata)
finally:
self._is_running = False
recv.close()
|
[
"def",
"_run",
"(",
"self",
")",
":",
"port",
"=",
"broadcast_port",
"nameservers",
"=",
"[",
"]",
"if",
"self",
".",
"_multicast_enabled",
":",
"recv",
"=",
"MulticastReceiver",
"(",
"port",
")",
".",
"settimeout",
"(",
"2.",
")",
"while",
"True",
":",
"try",
":",
"recv",
"=",
"MulticastReceiver",
"(",
"port",
")",
".",
"settimeout",
"(",
"2.",
")",
"LOGGER",
".",
"info",
"(",
"\"Receiver initialized.\"",
")",
"break",
"except",
"IOError",
"as",
"err",
":",
"if",
"err",
".",
"errno",
"==",
"errno",
".",
"ENODEV",
":",
"LOGGER",
".",
"error",
"(",
"\"Receiver initialization failed \"",
"\"(no such device). \"",
"\"Trying again in %d s\"",
",",
"10",
")",
"time",
".",
"sleep",
"(",
"10",
")",
"else",
":",
"raise",
"else",
":",
"recv",
"=",
"_SimpleReceiver",
"(",
"port",
")",
"nameservers",
"=",
"[",
"\"localhost\"",
"]",
"self",
".",
"_is_running",
"=",
"True",
"with",
"Publish",
"(",
"\"address_receiver\"",
",",
"self",
".",
"_port",
",",
"[",
"\"addresses\"",
"]",
",",
"nameservers",
"=",
"nameservers",
")",
"as",
"pub",
":",
"try",
":",
"while",
"self",
".",
"_do_run",
":",
"try",
":",
"data",
",",
"fromaddr",
"=",
"recv",
"(",
")",
"LOGGER",
".",
"debug",
"(",
"\"data %s\"",
",",
"data",
")",
"del",
"fromaddr",
"except",
"SocketTimeout",
":",
"if",
"self",
".",
"_multicast_enabled",
":",
"LOGGER",
".",
"debug",
"(",
"\"Multicast socket timed out on recv!\"",
")",
"continue",
"finally",
":",
"self",
".",
"_check_age",
"(",
"pub",
",",
"min_interval",
"=",
"self",
".",
"_max_age",
"/",
"20",
")",
"if",
"self",
".",
"_do_heartbeat",
":",
"pub",
".",
"heartbeat",
"(",
"min_interval",
"=",
"29",
")",
"msg",
"=",
"Message",
".",
"decode",
"(",
"data",
")",
"name",
"=",
"msg",
".",
"subject",
".",
"split",
"(",
"\"/\"",
")",
"[",
"1",
"]",
"if",
"(",
"msg",
".",
"type",
"==",
"'info'",
"and",
"msg",
".",
"subject",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"self",
".",
"_subject",
")",
")",
":",
"addr",
"=",
"msg",
".",
"data",
"[",
"\"URI\"",
"]",
"msg",
".",
"data",
"[",
"'status'",
"]",
"=",
"True",
"metadata",
"=",
"copy",
".",
"copy",
"(",
"msg",
".",
"data",
")",
"metadata",
"[",
"\"name\"",
"]",
"=",
"name",
"LOGGER",
".",
"debug",
"(",
"'receiving address %s %s %s'",
",",
"str",
"(",
"addr",
")",
",",
"str",
"(",
"name",
")",
",",
"str",
"(",
"metadata",
")",
")",
"if",
"addr",
"not",
"in",
"self",
".",
"_addresses",
":",
"LOGGER",
".",
"info",
"(",
"\"nameserver: publish add '%s'\"",
",",
"str",
"(",
"msg",
")",
")",
"pub",
".",
"send",
"(",
"msg",
".",
"encode",
"(",
")",
")",
"self",
".",
"_add",
"(",
"addr",
",",
"metadata",
")",
"finally",
":",
"self",
".",
"_is_running",
"=",
"False",
"recv",
".",
"close",
"(",
")"
] |
Run the receiver.
|
[
"Run",
"the",
"receiver",
"."
] |
train
|
https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/address_receiver.py#L138-L198
|
pytroll/posttroll
|
posttroll/address_receiver.py
|
AddressReceiver._add
|
def _add(self, adr, metadata):
"""Add an address.
"""
with self._address_lock:
metadata["receive_time"] = datetime.utcnow()
self._addresses[adr] = metadata
|
python
|
def _add(self, adr, metadata):
"""Add an address.
"""
with self._address_lock:
metadata["receive_time"] = datetime.utcnow()
self._addresses[adr] = metadata
|
[
"def",
"_add",
"(",
"self",
",",
"adr",
",",
"metadata",
")",
":",
"with",
"self",
".",
"_address_lock",
":",
"metadata",
"[",
"\"receive_time\"",
"]",
"=",
"datetime",
".",
"utcnow",
"(",
")",
"self",
".",
"_addresses",
"[",
"adr",
"]",
"=",
"metadata"
] |
Add an address.
|
[
"Add",
"an",
"address",
"."
] |
train
|
https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/address_receiver.py#L200-L205
|
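Editor's note: taken together, the AddressReceiver methods above (start, get, _check_age, _run, _add) suggest the usage below. This is a hedged sketch only; constructing the receiver with no arguments is an assumption not confirmed by this excerpt, and only start() and get() are taken from the code shown.

# Hedged usage sketch for the AddressReceiver shown above.
# Assumption: AddressReceiver() accepts default constructor arguments.
import time

from posttroll.address_receiver import AddressReceiver

receiver = AddressReceiver()       # assumed defaults
receiver.start()                   # spawns the listening thread and returns self
time.sleep(5)                      # give some addresses a chance to arrive
print(receiver.get())              # all known addresses
print(receiver.get("my_service"))  # only addresses whose 'service' field contains the name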
thespacedoctor/sherlock
|
sherlock/imports/ifs.py
|
ifs.ingest
|
def ingest(self):
"""*Import the IFS catalogue into the sherlock-catalogues database*
The method first generates a list of python dictionaries from the IFS datafile, imports this list of dictionaries into a database table and then generates the HTMIDs for that table.
**Usage:**
See class docstring for usage
"""
self.log.debug('starting the ``get`` method')
self.primaryIdColumnName = "primaryId"
self.raColName = "raDeg"
self.declColName = "decDeg"
self.dbTableName = "tcs_cat_ifs_stream"
self.databaseInsertbatchSize = 500
dictList = self._create_dictionary_of_IFS()
tableName = self.dbTableName
createStatement = """
CREATE TABLE `%(tableName)s` (
`primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
`dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
`decDeg` double DEFAULT NULL,
`name` varchar(100) COLLATE utf8_unicode_ci DEFAULT NULL,
`raDeg` double DEFAULT NULL,
`z` double DEFAULT NULL,
`htm16ID` bigint(20) DEFAULT NULL,
`htm10ID` bigint(20) DEFAULT NULL,
`htm13ID` bigint(20) DEFAULT NULL,
`dateLastModified` datetime DEFAULT CURRENT_TIMESTAMP,
`updated` varchar(45) DEFAULT '0',
PRIMARY KEY (`primaryId`),
UNIQUE KEY `radeg_decdeg` (`raDeg`,`decDeg`),
KEY `idx_htm16ID` (`htm16ID`),
KEY `idx_htm10ID` (`htm10ID`),
KEY `idx_htm13ID` (`htm13ID`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;
""" % locals()
self.add_data_to_database_table(
dictList=dictList,
createStatement=createStatement
)
self.log.debug('completed the ``get`` method')
return None
|
python
|
def ingest(self):
"""*Import the IFS catalogue into the sherlock-catalogues database*
The method first generates a list of python dictionaries from the IFS datafile, imports this list of dictionaries into a database table and then generates the HTMIDs for that table.
**Usage:**
See class docstring for usage
"""
self.log.debug('starting the ``get`` method')
self.primaryIdColumnName = "primaryId"
self.raColName = "raDeg"
self.declColName = "decDeg"
self.dbTableName = "tcs_cat_ifs_stream"
self.databaseInsertbatchSize = 500
dictList = self._create_dictionary_of_IFS()
tableName = self.dbTableName
createStatement = """
CREATE TABLE `%(tableName)s` (
`primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
`dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
`decDeg` double DEFAULT NULL,
`name` varchar(100) COLLATE utf8_unicode_ci DEFAULT NULL,
`raDeg` double DEFAULT NULL,
`z` double DEFAULT NULL,
`htm16ID` bigint(20) DEFAULT NULL,
`htm10ID` bigint(20) DEFAULT NULL,
`htm13ID` bigint(20) DEFAULT NULL,
`dateLastModified` datetime DEFAULT CURRENT_TIMESTAMP,
`updated` varchar(45) DEFAULT '0',
PRIMARY KEY (`primaryId`),
UNIQUE KEY `radeg_decdeg` (`raDeg`,`decDeg`),
KEY `idx_htm16ID` (`htm16ID`),
KEY `idx_htm10ID` (`htm10ID`),
KEY `idx_htm13ID` (`htm13ID`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;
""" % locals()
self.add_data_to_database_table(
dictList=dictList,
createStatement=createStatement
)
self.log.debug('completed the ``get`` method')
return None
|
[
"def",
"ingest",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``get`` method'",
")",
"self",
".",
"primaryIdColumnName",
"=",
"\"primaryId\"",
"self",
".",
"raColName",
"=",
"\"raDeg\"",
"self",
".",
"declColName",
"=",
"\"decDeg\"",
"self",
".",
"dbTableName",
"=",
"\"tcs_cat_ifs_stream\"",
"self",
".",
"databaseInsertbatchSize",
"=",
"500",
"dictList",
"=",
"self",
".",
"_create_dictionary_of_IFS",
"(",
")",
"tableName",
"=",
"self",
".",
"dbTableName",
"createStatement",
"=",
"\"\"\"\n CREATE TABLE `%(tableName)s` (\n `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',\n `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,\n `decDeg` double DEFAULT NULL,\n `name` varchar(100) COLLATE utf8_unicode_ci DEFAULT NULL,\n `raDeg` double DEFAULT NULL,\n `z` double DEFAULT NULL,\n `htm16ID` bigint(20) DEFAULT NULL,\n `htm10ID` bigint(20) DEFAULT NULL,\n `htm13ID` bigint(20) DEFAULT NULL,\n `dateLastModified` datetime DEFAULT CURRENT_TIMESTAMP,\n `updated` varchar(45) DEFAULT '0',\n PRIMARY KEY (`primaryId`),\n UNIQUE KEY `radeg_decdeg` (`raDeg`,`decDeg`),\n KEY `idx_htm16ID` (`htm16ID`),\n KEY `idx_htm10ID` (`htm10ID`),\n KEY `idx_htm13ID` (`htm13ID`)\n ) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;\n\"\"\"",
"%",
"locals",
"(",
")",
"self",
".",
"add_data_to_database_table",
"(",
"dictList",
"=",
"dictList",
",",
"createStatement",
"=",
"createStatement",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``get`` method'",
")",
"return",
"None"
] |
*Import the IFS catalogue into the sherlock-catalogues database*
The method first generates a list of python dictionaries from the IFS datafile, imports this list of dictionaries into a database table and then generates the HTMIDs for that table.
**Usage:**
See class docstring for usage
|
[
"*",
"Import",
"the",
"IFS",
"catalogue",
"into",
"the",
"sherlock",
"-",
"catalogues",
"database",
"*"
] |
train
|
https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/imports/ifs.py#L58-L105
|
thespacedoctor/sherlock
|
sherlock/imports/ifs.py
|
ifs._create_dictionary_of_IFS
|
def _create_dictionary_of_IFS(
self):
"""*Generate the list of dictionaries containing all the rows in the IFS stream*
**Return:**
- ``dictList`` - a list of dictionaries containing all the rows in the IFS stream
**Usage:**
.. code-block:: python
from sherlock.imports import IFS
stream = IFS(
log=log,
settings=settings
)
dictList = stream._create_dictionary_of_IFS()
"""
self.log.debug(
'starting the ``_create_dictionary_of_IFS`` method')
# GRAB THE CONTENT OF THE IFS CSV
try:
response = requests.get(
url=self.settings["ifs galaxies url"],
)
thisData = response.content
thisData = thisData.split("\n")
status_code = response.status_code
except requests.exceptions.RequestException:
print 'HTTP Request failed'
sys.exit(0)
dictList = []
columns = ["name", "raDeg", "decDeg", "z"]
for line in thisData:
thisDict = {}
line = line.strip()
line = line.replace("\t", " ")
values = line.split("|")
if len(values) > 3:
thisDict["name"] = values[0].strip()
# ASTROCALC UNIT CONVERTER OBJECT
converter = unit_conversion(
log=self.log
)
try:
raDeg = converter.ra_sexegesimal_to_decimal(
ra=values[1].strip()
)
thisDict["raDeg"] = raDeg
decDeg = converter.dec_sexegesimal_to_decimal(
dec=values[2].strip()
)
thisDict["decDeg"] = decDeg
except:
name = thisDict["name"]
self.log.warning(
'Could not convert the coordinates for IFS source %(name)s. Skipping import of this source.' % locals())
continue
try:
z = float(values[3].strip())
if z > 0.:
thisDict["z"] = float(values[3].strip())
else:
thisDict["z"] = None
except:
thisDict["z"] = None
dictList.append(thisDict)
self.log.debug(
'completed the ``_create_dictionary_of_IFS`` method')
return dictList
|
python
|
def _create_dictionary_of_IFS(
self):
"""*Generate the list of dictionaries containing all the rows in the IFS stream*
**Return:**
- ``dictList`` - a list of dictionaries containing all the rows in the IFS stream
**Usage:**
.. code-block:: python
from sherlock.imports import IFS
stream = IFS(
log=log,
settings=settings
)
dictList = stream._create_dictionary_of_IFS()
"""
self.log.debug(
'starting the ``_create_dictionary_of_IFS`` method')
# GRAB THE CONTENT OF THE IFS CSV
try:
response = requests.get(
url=self.settings["ifs galaxies url"],
)
thisData = response.content
thisData = thisData.split("\n")
status_code = response.status_code
except requests.exceptions.RequestException:
print 'HTTP Request failed'
sys.exit(0)
dictList = []
columns = ["name", "raDeg", "decDeg", "z"]
for line in thisData:
thisDict = {}
line = line.strip()
line = line.replace("\t", " ")
values = line.split("|")
if len(values) > 3:
thisDict["name"] = values[0].strip()
# ASTROCALC UNIT CONVERTER OBJECT
converter = unit_conversion(
log=self.log
)
try:
raDeg = converter.ra_sexegesimal_to_decimal(
ra=values[1].strip()
)
thisDict["raDeg"] = raDeg
decDeg = converter.dec_sexegesimal_to_decimal(
dec=values[2].strip()
)
thisDict["decDeg"] = decDeg
except:
name = thisDict["name"]
self.log.warning(
'Could not convert the coordinates for IFS source %(name)s. Skipping import of this source.' % locals())
continue
try:
z = float(values[3].strip())
if z > 0.:
thisDict["z"] = float(values[3].strip())
else:
thisDict["z"] = None
except:
thisDict["z"] = None
dictList.append(thisDict)
self.log.debug(
'completed the ``_create_dictionary_of_IFS`` method')
return dictList
|
[
"def",
"_create_dictionary_of_IFS",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_create_dictionary_of_IFS`` method'",
")",
"# GRAB THE CONTENT OF THE IFS CSV",
"try",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"url",
"=",
"self",
".",
"settings",
"[",
"\"ifs galaxies url\"",
"]",
",",
")",
"thisData",
"=",
"response",
".",
"content",
"thisData",
"=",
"thisData",
".",
"split",
"(",
"\"\\n\"",
")",
"status_code",
"=",
"response",
".",
"status_code",
"except",
"requests",
".",
"exceptions",
".",
"RequestException",
":",
"print",
"'HTTP Request failed'",
"sys",
".",
"exit",
"(",
"0",
")",
"dictList",
"=",
"[",
"]",
"columns",
"=",
"[",
"\"name\"",
",",
"\"raDeg\"",
",",
"\"decDeg\"",
",",
"\"z\"",
"]",
"for",
"line",
"in",
"thisData",
":",
"thisDict",
"=",
"{",
"}",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"line",
"=",
"line",
".",
"replace",
"(",
"\"\\t\"",
",",
"\" \"",
")",
"values",
"=",
"line",
".",
"split",
"(",
"\"|\"",
")",
"if",
"len",
"(",
"values",
")",
">",
"3",
":",
"thisDict",
"[",
"\"name\"",
"]",
"=",
"values",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"# ASTROCALC UNIT CONVERTER OBJECT",
"converter",
"=",
"unit_conversion",
"(",
"log",
"=",
"self",
".",
"log",
")",
"try",
":",
"raDeg",
"=",
"converter",
".",
"ra_sexegesimal_to_decimal",
"(",
"ra",
"=",
"values",
"[",
"1",
"]",
".",
"strip",
"(",
")",
")",
"thisDict",
"[",
"\"raDeg\"",
"]",
"=",
"raDeg",
"decDeg",
"=",
"converter",
".",
"dec_sexegesimal_to_decimal",
"(",
"dec",
"=",
"values",
"[",
"2",
"]",
".",
"strip",
"(",
")",
")",
"thisDict",
"[",
"\"decDeg\"",
"]",
"=",
"decDeg",
"except",
":",
"name",
"=",
"thisDict",
"[",
"\"name\"",
"]",
"self",
".",
"log",
".",
"warning",
"(",
"'Could not convert the coordinates for IFS source %(name)s. Skipping import of this source.'",
"%",
"locals",
"(",
")",
")",
"continue",
"try",
":",
"z",
"=",
"float",
"(",
"values",
"[",
"3",
"]",
".",
"strip",
"(",
")",
")",
"if",
"z",
">",
"0.",
":",
"thisDict",
"[",
"\"z\"",
"]",
"=",
"float",
"(",
"values",
"[",
"3",
"]",
".",
"strip",
"(",
")",
")",
"else",
":",
"thisDict",
"[",
"\"z\"",
"]",
"=",
"None",
"except",
":",
"thisDict",
"[",
"\"z\"",
"]",
"=",
"None",
"dictList",
".",
"append",
"(",
"thisDict",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_create_dictionary_of_IFS`` method'",
")",
"return",
"dictList"
] |
*Generate the list of dictionaries containing all the rows in the IFS stream*
**Return:**
- ``dictList`` - a list of dictionaries containing all the rows in the IFS stream
**Usage:**
.. code-block:: python
from sherlock.imports import IFS
stream = IFS(
log=log,
settings=settings
)
dictList = stream._create_dictionary_of_IFS()
|
[
"*",
"Generate",
"the",
"list",
"of",
"dictionaries",
"containing",
"all",
"the",
"rows",
"in",
"the",
"IFS",
"stream",
"*"
] |
train
|
https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/imports/ifs.py#L107-L181
|
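Editor's note: the per-line handling in `_create_dictionary_of_IFS` above is a plain pipe-delimited parse. The self-contained sketch below reproduces that handling on a made-up line (the source name and values are hypothetical), with the sexagesimal-to-decimal conversion left out since the real code delegates it to astrocalc.

# Self-contained sketch of the per-line parsing used above, minus the
# coordinate conversion handled by astrocalc's unit_conversion.
sample = "PS15aaa  | 10:00:00.00 | +02:12:00.0 | 0.045"  # hypothetical row

values = [v.strip() for v in sample.replace("\t", " ").split("|")]
if len(values) > 3:
    row = {"name": values[0], "ra_sex": values[1], "dec_sex": values[2]}
    try:
        z = float(values[3])
        row["z"] = z if z > 0. else None   # non-positive redshifts are stored as None
    except ValueError:
        row["z"] = None
    print(row)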
phoemur/wgetter
|
wgetter.py
|
approximate_size
|
def approximate_size(size, a_kilobyte_is_1024_bytes=True):
'''
Humansize.py from Dive into Python3
Mark Pilgrim - http://www.diveintopython3.net/
Copyright (c) 2009, Mark Pilgrim, All rights reserved.
Convert a file size to human-readable form.
Keyword arguments:
size -- file size in bytes
a_kilobyte_is_1024_bytes -- if True (default), use multiples of 1024
if False, use multiples of 1000
Returns: string
'''
size = float(size)
if size < 0:
raise ValueError('number must be non-negative')
multiple = 1024 if a_kilobyte_is_1024_bytes else 1000
for suffix in SUFFIXES[multiple]:
size /= multiple
if size < multiple:
return '{0:.1f}{1}'.format(size, suffix)
raise ValueError('number too large')
|
python
|
def approximate_size(size, a_kilobyte_is_1024_bytes=True):
'''
Humansize.py from Dive into Python3
Mark Pilgrim - http://www.diveintopython3.net/
Copyright (c) 2009, Mark Pilgrim, All rights reserved.
Convert a file size to human-readable form.
Keyword arguments:
size -- file size in bytes
a_kilobyte_is_1024_bytes -- if True (default), use multiples of 1024
if False, use multiples of 1000
Returns: string
'''
size = float(size)
if size < 0:
raise ValueError('number must be non-negative')
multiple = 1024 if a_kilobyte_is_1024_bytes else 1000
for suffix in SUFFIXES[multiple]:
size /= multiple
if size < multiple:
return '{0:.1f}{1}'.format(size, suffix)
raise ValueError('number too large')
|
[
"def",
"approximate_size",
"(",
"size",
",",
"a_kilobyte_is_1024_bytes",
"=",
"True",
")",
":",
"size",
"=",
"float",
"(",
"size",
")",
"if",
"size",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"'number must be non-negative'",
")",
"multiple",
"=",
"1024",
"if",
"a_kilobyte_is_1024_bytes",
"else",
"1000",
"for",
"suffix",
"in",
"SUFFIXES",
"[",
"multiple",
"]",
":",
"size",
"/=",
"multiple",
"if",
"size",
"<",
"multiple",
":",
"return",
"'{0:.1f}{1}'",
".",
"format",
"(",
"size",
",",
"suffix",
")",
"raise",
"ValueError",
"(",
"'number too large'",
")"
] |
Humansize.py from Dive into Python3
Mark Pilgrim - http://www.diveintopython3.net/
Copyright (c) 2009, Mark Pilgrim, All rights reserved.
Convert a file size to human-readable form.
Keyword arguments:
size -- file size in bytes
a_kilobyte_is_1024_bytes -- if True (default), use multiples of 1024
if False, use multiples of 1000
Returns: string
|
[
"Humansize",
".",
"py",
"from",
"Dive",
"into",
"Python3",
"Mark",
"Pilgrim",
"-",
"http",
":",
"//",
"www",
".",
"diveintopython3",
".",
"net",
"/",
"Copyright",
"(",
"c",
")",
"2009",
"Mark",
"Pilgrim",
"All",
"rights",
"reserved",
"."
] |
train
|
https://github.com/phoemur/wgetter/blob/ac182a72480e150e4a800fbade4fbccc29e72b51/wgetter.py#L41-L66
|
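Editor's note: `approximate_size` above depends on a module-level `SUFFIXES` mapping that is not part of this excerpt. Assuming the usual humansize.py definition (an assumption, since the excerpt does not show it), a quick check looks like this:

# Assumed SUFFIXES mapping (humansize.py convention); used by the function above
# but not shown in the excerpt.
SUFFIXES = {1000: ['KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'],
            1024: ['KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']}

# With approximate_size defined as above:
print(approximate_size(1000000000000))         # -> '931.3GiB'
print(approximate_size(1000000000000, False))  # -> '1.0TB'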
phoemur/wgetter
|
wgetter.py
|
get_console_width
|
def get_console_width():
"""Return width of available window area. Autodetection works for
Windows and POSIX platforms. Returns 80 for others
Code from http://bitbucket.org/techtonik/python-pager
"""
if os.name == 'nt':
STD_INPUT_HANDLE = -10
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
# get console handle
from ctypes import windll, Structure, byref
try:
from ctypes.wintypes import SHORT, WORD, DWORD
except ImportError:
# workaround for missing types in Python 2.5
from ctypes import (
c_short as SHORT, c_ushort as WORD, c_ulong as DWORD)
console_handle = windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
# CONSOLE_SCREEN_BUFFER_INFO Structure
class COORD(Structure):
_fields_ = [("X", SHORT), ("Y", SHORT)]
class SMALL_RECT(Structure):
_fields_ = [("Left", SHORT), ("Top", SHORT),
("Right", SHORT), ("Bottom", SHORT)]
class CONSOLE_SCREEN_BUFFER_INFO(Structure):
_fields_ = [("dwSize", COORD),
("dwCursorPosition", COORD),
("wAttributes", WORD),
("srWindow", SMALL_RECT),
("dwMaximumWindowSize", DWORD)]
sbi = CONSOLE_SCREEN_BUFFER_INFO()
ret = windll.kernel32.GetConsoleScreenBufferInfo(
console_handle, byref(sbi))
if ret == 0:
return 0
return sbi.srWindow.Right + 1
elif os.name == 'posix':
from fcntl import ioctl
from termios import TIOCGWINSZ
from array import array
winsize = array("H", [0] * 4)
try:
ioctl(sys.stdout.fileno(), TIOCGWINSZ, winsize)
except IOError:
pass
return (winsize[1], winsize[0])[0]
return 80
|
python
|
def get_console_width():
"""Return width of available window area. Autodetection works for
Windows and POSIX platforms. Returns 80 for others
Code from http://bitbucket.org/techtonik/python-pager
"""
if os.name == 'nt':
STD_INPUT_HANDLE = -10
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
# get console handle
from ctypes import windll, Structure, byref
try:
from ctypes.wintypes import SHORT, WORD, DWORD
except ImportError:
# workaround for missing types in Python 2.5
from ctypes import (
c_short as SHORT, c_ushort as WORD, c_ulong as DWORD)
console_handle = windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
# CONSOLE_SCREEN_BUFFER_INFO Structure
class COORD(Structure):
_fields_ = [("X", SHORT), ("Y", SHORT)]
class SMALL_RECT(Structure):
_fields_ = [("Left", SHORT), ("Top", SHORT),
("Right", SHORT), ("Bottom", SHORT)]
class CONSOLE_SCREEN_BUFFER_INFO(Structure):
_fields_ = [("dwSize", COORD),
("dwCursorPosition", COORD),
("wAttributes", WORD),
("srWindow", SMALL_RECT),
("dwMaximumWindowSize", DWORD)]
sbi = CONSOLE_SCREEN_BUFFER_INFO()
ret = windll.kernel32.GetConsoleScreenBufferInfo(
console_handle, byref(sbi))
if ret == 0:
return 0
return sbi.srWindow.Right + 1
elif os.name == 'posix':
from fcntl import ioctl
from termios import TIOCGWINSZ
from array import array
winsize = array("H", [0] * 4)
try:
ioctl(sys.stdout.fileno(), TIOCGWINSZ, winsize)
except IOError:
pass
return (winsize[1], winsize[0])[0]
return 80
|
[
"def",
"get_console_width",
"(",
")",
":",
"if",
"os",
".",
"name",
"==",
"'nt'",
":",
"STD_INPUT_HANDLE",
"=",
"-",
"10",
"STD_OUTPUT_HANDLE",
"=",
"-",
"11",
"STD_ERROR_HANDLE",
"=",
"-",
"12",
"# get console handle",
"from",
"ctypes",
"import",
"windll",
",",
"Structure",
",",
"byref",
"try",
":",
"from",
"ctypes",
".",
"wintypes",
"import",
"SHORT",
",",
"WORD",
",",
"DWORD",
"except",
"ImportError",
":",
"# workaround for missing types in Python 2.5",
"from",
"ctypes",
"import",
"(",
"c_short",
"as",
"SHORT",
",",
"c_ushort",
"as",
"WORD",
",",
"c_ulong",
"as",
"DWORD",
")",
"console_handle",
"=",
"windll",
".",
"kernel32",
".",
"GetStdHandle",
"(",
"STD_OUTPUT_HANDLE",
")",
"# CONSOLE_SCREEN_BUFFER_INFO Structure",
"class",
"COORD",
"(",
"Structure",
")",
":",
"_fields_",
"=",
"[",
"(",
"\"X\"",
",",
"SHORT",
")",
",",
"(",
"\"Y\"",
",",
"SHORT",
")",
"]",
"class",
"SMALL_RECT",
"(",
"Structure",
")",
":",
"_fields_",
"=",
"[",
"(",
"\"Left\"",
",",
"SHORT",
")",
",",
"(",
"\"Top\"",
",",
"SHORT",
")",
",",
"(",
"\"Right\"",
",",
"SHORT",
")",
",",
"(",
"\"Bottom\"",
",",
"SHORT",
")",
"]",
"class",
"CONSOLE_SCREEN_BUFFER_INFO",
"(",
"Structure",
")",
":",
"_fields_",
"=",
"[",
"(",
"\"dwSize\"",
",",
"COORD",
")",
",",
"(",
"\"dwCursorPosition\"",
",",
"COORD",
")",
",",
"(",
"\"wAttributes\"",
",",
"WORD",
")",
",",
"(",
"\"srWindow\"",
",",
"SMALL_RECT",
")",
",",
"(",
"\"dwMaximumWindowSize\"",
",",
"DWORD",
")",
"]",
"sbi",
"=",
"CONSOLE_SCREEN_BUFFER_INFO",
"(",
")",
"ret",
"=",
"windll",
".",
"kernel32",
".",
"GetConsoleScreenBufferInfo",
"(",
"console_handle",
",",
"byref",
"(",
"sbi",
")",
")",
"if",
"ret",
"==",
"0",
":",
"return",
"0",
"return",
"sbi",
".",
"srWindow",
".",
"Right",
"+",
"1",
"elif",
"os",
".",
"name",
"==",
"'posix'",
":",
"from",
"fcntl",
"import",
"ioctl",
"from",
"termios",
"import",
"TIOCGWINSZ",
"from",
"array",
"import",
"array",
"winsize",
"=",
"array",
"(",
"\"H\"",
",",
"[",
"0",
"]",
"*",
"4",
")",
"try",
":",
"ioctl",
"(",
"sys",
".",
"stdout",
".",
"fileno",
"(",
")",
",",
"TIOCGWINSZ",
",",
"winsize",
")",
"except",
"IOError",
":",
"pass",
"return",
"(",
"winsize",
"[",
"1",
"]",
",",
"winsize",
"[",
"0",
"]",
")",
"[",
"0",
"]",
"return",
"80"
] |
Return width of available window area. Autodetection works for
Windows and POSIX platforms. Returns 80 for others
Code from http://bitbucket.org/techtonik/python-pager
|
[
"Return",
"width",
"of",
"available",
"window",
"area",
".",
"Autodetection",
"works",
"for",
"Windows",
"and",
"POSIX",
"platforms",
".",
"Returns",
"80",
"for",
"others"
] |
train
|
https://github.com/phoemur/wgetter/blob/ac182a72480e150e4a800fbade4fbccc29e72b51/wgetter.py#L69-L125
|
phoemur/wgetter
|
wgetter.py
|
filename_from_url
|
def filename_from_url(url):
""":return: detected filename or None"""
fname = os.path.basename(urlparse.urlparse(url).path)
if len(fname.strip(" \n\t.")) == 0:
return None
return fname
|
python
|
def filename_from_url(url):
""":return: detected filename or None"""
fname = os.path.basename(urlparse.urlparse(url).path)
if len(fname.strip(" \n\t.")) == 0:
return None
return fname
|
[
"def",
"filename_from_url",
"(",
"url",
")",
":",
"fname",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"urlparse",
".",
"urlparse",
"(",
"url",
")",
".",
"path",
")",
"if",
"len",
"(",
"fname",
".",
"strip",
"(",
"\" \\n\\t.\"",
")",
")",
"==",
"0",
":",
"return",
"None",
"return",
"fname"
] |
:return: detected filename or None
|
[
":",
"return",
":",
"detected",
"filename",
"or",
"None"
] |
train
|
https://github.com/phoemur/wgetter/blob/ac182a72480e150e4a800fbade4fbccc29e72b51/wgetter.py#L133-L138
|
phoemur/wgetter
|
wgetter.py
|
filename_from_headers
|
def filename_from_headers(headers):
"""Detect filename from Content-Disposition headers if present.
http://greenbytes.de/tech/tc2231/
:param: headers as dict, list or string
:return: filename from content-disposition header or None
"""
if type(headers) == str:
headers = headers.splitlines()
if type(headers) == list:
headers = dict([x.split(':', 1) for x in headers])
cdisp = headers.get("Content-Disposition")
if not cdisp:
return None
cdtype = cdisp.split(';')
if len(cdtype) == 1:
return None
if cdtype[0].strip().lower() not in ('inline', 'attachment'):
return None
# several filename params is illegal, but just in case
fnames = [x for x in cdtype[1:] if x.strip().startswith('filename=')]
if len(fnames) > 1:
return None
name = fnames[0].split('=')[1].strip(' \t"')
name = os.path.basename(name)
if not name:
return None
return name
|
python
|
def filename_from_headers(headers):
"""Detect filename from Content-Disposition headers if present.
http://greenbytes.de/tech/tc2231/
:param: headers as dict, list or string
:return: filename from content-disposition header or None
"""
if type(headers) == str:
headers = headers.splitlines()
if type(headers) == list:
headers = dict([x.split(':', 1) for x in headers])
cdisp = headers.get("Content-Disposition")
if not cdisp:
return None
cdtype = cdisp.split(';')
if len(cdtype) == 1:
return None
if cdtype[0].strip().lower() not in ('inline', 'attachment'):
return None
# several filename params is illegal, but just in case
fnames = [x for x in cdtype[1:] if x.strip().startswith('filename=')]
if len(fnames) > 1:
return None
name = fnames[0].split('=')[1].strip(' \t"')
name = os.path.basename(name)
if not name:
return None
return name
|
[
"def",
"filename_from_headers",
"(",
"headers",
")",
":",
"if",
"type",
"(",
"headers",
")",
"==",
"str",
":",
"headers",
"=",
"headers",
".",
"splitlines",
"(",
")",
"if",
"type",
"(",
"headers",
")",
"==",
"list",
":",
"headers",
"=",
"dict",
"(",
"[",
"x",
".",
"split",
"(",
"':'",
",",
"1",
")",
"for",
"x",
"in",
"headers",
"]",
")",
"cdisp",
"=",
"headers",
".",
"get",
"(",
"\"Content-Disposition\"",
")",
"if",
"not",
"cdisp",
":",
"return",
"None",
"cdtype",
"=",
"cdisp",
".",
"split",
"(",
"';'",
")",
"if",
"len",
"(",
"cdtype",
")",
"==",
"1",
":",
"return",
"None",
"if",
"cdtype",
"[",
"0",
"]",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"not",
"in",
"(",
"'inline'",
",",
"'attachment'",
")",
":",
"return",
"None",
"# several filename params is illegal, but just in case",
"fnames",
"=",
"[",
"x",
"for",
"x",
"in",
"cdtype",
"[",
"1",
":",
"]",
"if",
"x",
".",
"strip",
"(",
")",
".",
"startswith",
"(",
"'filename='",
")",
"]",
"if",
"len",
"(",
"fnames",
")",
">",
"1",
":",
"return",
"None",
"name",
"=",
"fnames",
"[",
"0",
"]",
".",
"split",
"(",
"'='",
")",
"[",
"1",
"]",
".",
"strip",
"(",
"' \\t\"'",
")",
"name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"name",
")",
"if",
"not",
"name",
":",
"return",
"None",
"return",
"name"
] |
Detect filename from Content-Disposition headers if present.
http://greenbytes.de/tech/tc2231/
:param: headers as dict, list or string
:return: filename from content-disposition header or None
|
[
"Detect",
"filename",
"from",
"Content",
"-",
"Disposition",
"headers",
"if",
"present",
".",
"http",
":",
"//",
"greenbytes",
".",
"de",
"/",
"tech",
"/",
"tc2231",
"/"
] |
train
|
https://github.com/phoemur/wgetter/blob/ac182a72480e150e4a800fbade4fbccc29e72b51/wgetter.py#L141-L168
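A hedged example of calling filename_from_headers with made-up Content-Disposition values; it assumes the wgetter module listed above is importable (e.g. pip install wgetter).

from wgetter import filename_from_headers

# The function accepts the headers as a dict, a list of lines, or a raw string.
print(filename_from_headers({"Content-Disposition": 'attachment; filename="report.pdf"'}))  # report.pdf
print(filename_from_headers("Content-Disposition: inline; filename=data.csv"))              # data.csv
print(filename_from_headers({"Content-Type": "text/html"}))                                 # None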
|
phoemur/wgetter
|
wgetter.py
|
filename_fix_existing
|
def filename_fix_existing(filename, dirname):
"""Expands name portion of filename with numeric ' (x)' suffix to
return filename that doesn't exist already.
"""
name, ext = filename.rsplit('.', 1)
names = [x for x in os.listdir(dirname) if x.startswith(name)]
names = [x.rsplit('.', 1)[0] for x in names]
suffixes = [x.replace(name, '') for x in names]
# filter suffixes that match ' (x)' pattern
suffixes = [x[2:-1] for x in suffixes
if x.startswith(' (') and x.endswith(')')]
indexes = [int(x) for x in suffixes
if set(x) <= set('0123456789')]
idx = 1
if indexes:
idx += sorted(indexes)[-1]
return '{0}({1}).{2}'.format(name, idx, ext)
|
python
|
def filename_fix_existing(filename, dirname):
"""Expands name portion of filename with numeric ' (x)' suffix to
return filename that doesn't exist already.
"""
name, ext = filename.rsplit('.', 1)
names = [x for x in os.listdir(dirname) if x.startswith(name)]
names = [x.rsplit('.', 1)[0] for x in names]
suffixes = [x.replace(name, '') for x in names]
# filter suffixes that match ' (x)' pattern
suffixes = [x[2:-1] for x in suffixes
if x.startswith(' (') and x.endswith(')')]
indexes = [int(x) for x in suffixes
if set(x) <= set('0123456789')]
idx = 1
if indexes:
idx += sorted(indexes)[-1]
return '{0}({1}).{2}'.format(name, idx, ext)
|
[
"def",
"filename_fix_existing",
"(",
"filename",
",",
"dirname",
")",
":",
"name",
",",
"ext",
"=",
"filename",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"names",
"=",
"[",
"x",
"for",
"x",
"in",
"os",
".",
"listdir",
"(",
"dirname",
")",
"if",
"x",
".",
"startswith",
"(",
"name",
")",
"]",
"names",
"=",
"[",
"x",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"[",
"0",
"]",
"for",
"x",
"in",
"names",
"]",
"suffixes",
"=",
"[",
"x",
".",
"replace",
"(",
"name",
",",
"''",
")",
"for",
"x",
"in",
"names",
"]",
"# filter suffixes that match ' (x)' pattern",
"suffixes",
"=",
"[",
"x",
"[",
"2",
":",
"-",
"1",
"]",
"for",
"x",
"in",
"suffixes",
"if",
"x",
".",
"startswith",
"(",
"' ('",
")",
"and",
"x",
".",
"endswith",
"(",
"')'",
")",
"]",
"indexes",
"=",
"[",
"int",
"(",
"x",
")",
"for",
"x",
"in",
"suffixes",
"if",
"set",
"(",
"x",
")",
"<=",
"set",
"(",
"'0123456789'",
")",
"]",
"idx",
"=",
"1",
"if",
"indexes",
":",
"idx",
"+=",
"sorted",
"(",
"indexes",
")",
"[",
"-",
"1",
"]",
"return",
"'{0}({1}).{2}'",
".",
"format",
"(",
"name",
",",
"idx",
",",
"ext",
")"
] |
Expands name portion of filename with numeric ' (x)' suffix to
return filename that doesn't exist already.
|
[
"Expands",
"name",
"portion",
"of",
"filename",
"with",
"numeric",
"(",
"x",
")",
"suffix",
"to",
"return",
"filename",
"that",
"doesn",
"t",
"exist",
"already",
"."
] |
train
|
https://github.com/phoemur/wgetter/blob/ac182a72480e150e4a800fbade4fbccc29e72b51/wgetter.py#L171-L187
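A throwaway-directory demonstration of the suffix logic above, assuming wgetter is importable. Note the function detects existing ' (x)' suffixes with a space but builds the new name from '{0}({1}).{2}' without one.

import os
import tempfile
from wgetter import filename_fix_existing

tmpdir = tempfile.mkdtemp()
for existing in ("data.csv", "data (1).csv"):
    open(os.path.join(tmpdir, existing), "w").close()

# Highest existing ' (x)' index is 1, so the next name gets index 2.
print(filename_fix_existing("data.csv", tmpdir))  # data(2).csv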
|
phoemur/wgetter
|
wgetter.py
|
report_bar
|
def report_bar(bytes_so_far, total_size, speed, eta):
'''
This callback for the download function is used to print the download bar
'''
percent = int(bytes_so_far * 100 / total_size)
current = approximate_size(bytes_so_far).center(9)
total = approximate_size(total_size).center(9)
shaded = int(float(bytes_so_far) / total_size * AVAIL_WIDTH)
sys.stdout.write(
" {0}% [{1}{2}{3}] {4}/{5} {6} eta{7}".format(str(percent).center(4),
'=' * (shaded - 1),
'>',
' ' * (AVAIL_WIDTH - shaded),
current,
total,
(approximate_size(speed) + '/s').center(11),
eta.center(10)))
sys.stdout.write("\r")
sys.stdout.flush()
|
python
|
def report_bar(bytes_so_far, total_size, speed, eta):
'''
This callback for the download function is used to print the download bar
'''
percent = int(bytes_so_far * 100 / total_size)
current = approximate_size(bytes_so_far).center(9)
total = approximate_size(total_size).center(9)
shaded = int(float(bytes_so_far) / total_size * AVAIL_WIDTH)
sys.stdout.write(
" {0}% [{1}{2}{3}] {4}/{5} {6} eta{7}".format(str(percent).center(4),
'=' * (shaded - 1),
'>',
' ' * (AVAIL_WIDTH - shaded),
current,
total,
(approximate_size(speed) + '/s').center(11),
eta.center(10)))
sys.stdout.write("\r")
sys.stdout.flush()
|
[
"def",
"report_bar",
"(",
"bytes_so_far",
",",
"total_size",
",",
"speed",
",",
"eta",
")",
":",
"percent",
"=",
"int",
"(",
"bytes_so_far",
"*",
"100",
"/",
"total_size",
")",
"current",
"=",
"approximate_size",
"(",
"bytes_so_far",
")",
".",
"center",
"(",
"9",
")",
"total",
"=",
"approximate_size",
"(",
"total_size",
")",
".",
"center",
"(",
"9",
")",
"shaded",
"=",
"int",
"(",
"float",
"(",
"bytes_so_far",
")",
"/",
"total_size",
"*",
"AVAIL_WIDTH",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"\" {0}% [{1}{2}{3}] {4}/{5} {6} eta{7}\"",
".",
"format",
"(",
"str",
"(",
"percent",
")",
".",
"center",
"(",
"4",
")",
",",
"'='",
"*",
"(",
"shaded",
"-",
"1",
")",
",",
"'>'",
",",
"' '",
"*",
"(",
"AVAIL_WIDTH",
"-",
"shaded",
")",
",",
"current",
",",
"total",
",",
"(",
"approximate_size",
"(",
"speed",
")",
"+",
"'/s'",
")",
".",
"center",
"(",
"11",
")",
",",
"eta",
".",
"center",
"(",
"10",
")",
")",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"\\r\"",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")"
] |
This callback for the download function is used to print the download bar
|
[
"This",
"callback",
"for",
"the",
"download",
"function",
"is",
"used",
"to",
"print",
"the",
"download",
"bar"
] |
train
|
https://github.com/phoemur/wgetter/blob/ac182a72480e150e4a800fbade4fbccc29e72b51/wgetter.py#L190-L208
|
phoemur/wgetter
|
wgetter.py
|
report_unknown
|
def report_unknown(bytes_so_far, total_size, speed, eta):
'''
This callback for the download function is used
when the total size is unknown
'''
sys.stdout.write(
"Downloading: {0} / Unknown - {1}/s ".format(approximate_size(bytes_so_far),
approximate_size(speed)))
sys.stdout.write("\r")
sys.stdout.flush()
|
python
|
def report_unknown(bytes_so_far, total_size, speed, eta):
'''
This callback for the download function is used
when the total size is unknown
'''
sys.stdout.write(
"Downloading: {0} / Unknown - {1}/s ".format(approximate_size(bytes_so_far),
approximate_size(speed)))
sys.stdout.write("\r")
sys.stdout.flush()
|
[
"def",
"report_unknown",
"(",
"bytes_so_far",
",",
"total_size",
",",
"speed",
",",
"eta",
")",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"Downloading: {0} / Unknown - {1}/s \"",
".",
"format",
"(",
"approximate_size",
"(",
"bytes_so_far",
")",
",",
"approximate_size",
"(",
"speed",
")",
")",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"\\r\"",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")"
] |
This callback for the download function is used
when the total size is unknown
|
[
"This",
"callback",
"for",
"the",
"download",
"function",
"is",
"used",
"when",
"the",
"total",
"size",
"is",
"unknown"
] |
train
|
https://github.com/phoemur/wgetter/blob/ac182a72480e150e4a800fbade4fbccc29e72b51/wgetter.py#L211-L221
|
phoemur/wgetter
|
wgetter.py
|
report_onlysize
|
def report_onlysize(bytes_so_far, total_size, speed, eta):
'''
This callback for the download function is used when console width
is not enough to print the bar.
It prints only the sizes
'''
percent = int(bytes_so_far * 100 / total_size)
current = approximate_size(bytes_so_far).center(10)
total = approximate_size(total_size).center(10)
sys.stdout.write('D: {0}% -{1}/{2}'.format(percent, current, total) + "eta {0}".format(eta))
sys.stdout.write("\r")
sys.stdout.flush()
|
python
|
def report_onlysize(bytes_so_far, total_size, speed, eta):
'''
This callback for the download function is used when console width
is not enough to print the bar.
It prints only the sizes
'''
percent = int(bytes_so_far * 100 / total_size)
current = approximate_size(bytes_so_far).center(10)
total = approximate_size(total_size).center(10)
sys.stdout.write('D: {0}% -{1}/{2}'.format(percent, current, total) + "eta {0}".format(eta))
sys.stdout.write("\r")
sys.stdout.flush()
|
[
"def",
"report_onlysize",
"(",
"bytes_so_far",
",",
"total_size",
",",
"speed",
",",
"eta",
")",
":",
"percent",
"=",
"int",
"(",
"bytes_so_far",
"*",
"100",
"/",
"total_size",
")",
"current",
"=",
"approximate_size",
"(",
"bytes_so_far",
")",
".",
"center",
"(",
"10",
")",
"total",
"=",
"approximate_size",
"(",
"total_size",
")",
".",
"center",
"(",
"10",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"'D: {0}% -{1}/{2}'",
".",
"format",
"(",
"percent",
",",
"current",
",",
"total",
")",
"+",
"\"eta {0}\"",
".",
"format",
"(",
"eta",
")",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"\\r\"",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")"
] |
This callback for the download function is used when console width
is not enough to print the bar.
It prints only the sizes
|
[
"This",
"callback",
"for",
"the",
"download",
"function",
"is",
"used",
"when",
"console",
"width",
"is",
"not",
"enough",
"to",
"print",
"the",
"bar",
".",
"It",
"prints",
"only",
"the",
"sizes"
] |
train
|
https://github.com/phoemur/wgetter/blob/ac182a72480e150e4a800fbade4fbccc29e72b51/wgetter.py#L224-L235
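All three reporthook callbacks above share the signature (bytes_so_far, total_size, speed, eta). Below is a hedged sketch that drives report_bar by hand with synthetic numbers, assuming wgetter is importable, to show how download() invokes the hook.

import time
from wgetter import report_bar

total = 10 * 1024 * 1024  # pretend a 10 MiB download
for done in range(0, total + 1, total // 20):
    report_bar(done, total, 512 * 1024, '0:00:05')  # synthetic speed (bytes/s) and eta string
    time.sleep(0.05)
print()  # move past the carriage-returned bar line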
|
phoemur/wgetter
|
wgetter.py
|
download
|
def download(link, outdir='.', chunk_size=4096):
'''
This is the Main function, which downloads a given link
and saves on outdir (default = current directory)
'''
url = None
fh = None
eta = 'unknown '
bytes_so_far = 0
filename = filename_from_url(link) or "."
cj = cjar.CookieJar()
# get filename for temp file in current directory
(fd_tmp, tmpfile) = tempfile.mkstemp(
".tmp", prefix=filename + ".", dir=outdir)
os.close(fd_tmp)
os.unlink(tmpfile)
try:
opener = ulib.build_opener(ulib.HTTPCookieProcessor(cj))
url = opener.open(link)
fh = open(tmpfile, mode='wb')
headers = url.info()
try:
total_size = int(headers['Content-Length'])
except (ValueError, KeyError, TypeError):
total_size = 'unknown'
try:
md5_header = headers['Content-MD5']
except (ValueError, KeyError, TypeError):
md5_header = None
# Define which callback we're gonna use
if total_size != 'unknown':
if CONSOLE_WIDTH > 57:
reporthook = report_bar
else:
reporthook = report_onlysize
else:
reporthook = report_unknown
# Below are the registers to calculate network transfer rate
time_register = time()
speed = 0.0
speed_list = []
bytes_register = 0.0
eta = 'unknown '
# Loop that reads in chunks, calculates speed and does the callback to
# print the progress
while True:
chunk = url.read(chunk_size)
# Update Download Speed every 1 second
if time() - time_register > 0.5:
speed = (bytes_so_far - bytes_register) / \
(time() - time_register)
speed_list.append(speed)
# Set register properly for future use
time_register = time()
bytes_register = bytes_so_far
# Estimative of remaining download time
if total_size != 'unknown' and len(speed_list) == 3:
speed_mean = sum(speed_list) / 3
eta_sec = int((total_size - bytes_so_far) / speed_mean)
eta = str(datetime.timedelta(seconds=eta_sec))
speed_list = []
bytes_so_far += len(chunk)
if not chunk:
sys.stdout.write('\n')
break
fh.write(chunk)
reporthook(bytes_so_far, total_size, speed, eta)
except KeyboardInterrupt:
print('\n\nCtrl + C: Download aborted by user')
print('Partial downloaded file:\n{0}'.format(os.path.abspath(tmpfile)))
sys.exit(1)
finally:
if url:
url.close()
if fh:
fh.close()
filenamealt = filename_from_headers(headers)
if filenamealt:
filename = filenamealt
# add numeric '(x)' suffix if filename already exists
if os.path.exists(os.path.join(outdir, filename)):
filename = filename_fix_existing(filename, outdir)
filename = os.path.join(outdir, filename)
shutil.move(tmpfile, filename)
# Check if sizes matches
if total_size != 'unknown' and total_size != bytes_so_far:
print(
'\n\nWARNING!! Downloaded file size mismatches... Probably corrupted...')
# Check md5 if it was in html header
if md5_header:
print('\nValidating MD5 checksum...')
if md5_header == md5sum(filename):
print('MD5 checksum passed!')
else:
print('MD5 checksum do NOT passed!!!')
return filename
|
python
|
def download(link, outdir='.', chunk_size=4096):
'''
This is the Main function, which downloads a given link
and saves on outdir (default = current directory)
'''
url = None
fh = None
eta = 'unknown '
bytes_so_far = 0
filename = filename_from_url(link) or "."
cj = cjar.CookieJar()
# get filename for temp file in current directory
(fd_tmp, tmpfile) = tempfile.mkstemp(
".tmp", prefix=filename + ".", dir=outdir)
os.close(fd_tmp)
os.unlink(tmpfile)
try:
opener = ulib.build_opener(ulib.HTTPCookieProcessor(cj))
url = opener.open(link)
fh = open(tmpfile, mode='wb')
headers = url.info()
try:
total_size = int(headers['Content-Length'])
except (ValueError, KeyError, TypeError):
total_size = 'unknown'
try:
md5_header = headers['Content-MD5']
except (ValueError, KeyError, TypeError):
md5_header = None
# Define which callback we're gonna use
if total_size != 'unknown':
if CONSOLE_WIDTH > 57:
reporthook = report_bar
else:
reporthook = report_onlysize
else:
reporthook = report_unknown
# Below are the registers to calculate network transfer rate
time_register = time()
speed = 0.0
speed_list = []
bytes_register = 0.0
eta = 'unknown '
# Loop that reads in chunks, calculates speed and does the callback to
# print the progress
while True:
chunk = url.read(chunk_size)
# Update Download Speed every 1 second
if time() - time_register > 0.5:
speed = (bytes_so_far - bytes_register) / \
(time() - time_register)
speed_list.append(speed)
# Set register properly for future use
time_register = time()
bytes_register = bytes_so_far
# Estimative of remaining download time
if total_size != 'unknown' and len(speed_list) == 3:
speed_mean = sum(speed_list) / 3
eta_sec = int((total_size - bytes_so_far) / speed_mean)
eta = str(datetime.timedelta(seconds=eta_sec))
speed_list = []
bytes_so_far += len(chunk)
if not chunk:
sys.stdout.write('\n')
break
fh.write(chunk)
reporthook(bytes_so_far, total_size, speed, eta)
except KeyboardInterrupt:
print('\n\nCtrl + C: Download aborted by user')
print('Partial downloaded file:\n{0}'.format(os.path.abspath(tmpfile)))
sys.exit(1)
finally:
if url:
url.close()
if fh:
fh.close()
filenamealt = filename_from_headers(headers)
if filenamealt:
filename = filenamealt
# add numeric '(x)' suffix if filename already exists
if os.path.exists(os.path.join(outdir, filename)):
filename = filename_fix_existing(filename, outdir)
filename = os.path.join(outdir, filename)
shutil.move(tmpfile, filename)
# Check if sizes matches
if total_size != 'unknown' and total_size != bytes_so_far:
print(
'\n\nWARNING!! Downloaded file size mismatches... Probably corrupted...')
# Check md5 if it was in html header
if md5_header:
print('\nValidating MD5 checksum...')
if md5_header == md5sum(filename):
print('MD5 checksum passed!')
else:
print('MD5 checksum do NOT passed!!!')
return filename
|
[
"def",
"download",
"(",
"link",
",",
"outdir",
"=",
"'.'",
",",
"chunk_size",
"=",
"4096",
")",
":",
"url",
"=",
"None",
"fh",
"=",
"None",
"eta",
"=",
"'unknown '",
"bytes_so_far",
"=",
"0",
"filename",
"=",
"filename_from_url",
"(",
"link",
")",
"or",
"\".\"",
"cj",
"=",
"cjar",
".",
"CookieJar",
"(",
")",
"# get filename for temp file in current directory",
"(",
"fd_tmp",
",",
"tmpfile",
")",
"=",
"tempfile",
".",
"mkstemp",
"(",
"\".tmp\"",
",",
"prefix",
"=",
"filename",
"+",
"\".\"",
",",
"dir",
"=",
"outdir",
")",
"os",
".",
"close",
"(",
"fd_tmp",
")",
"os",
".",
"unlink",
"(",
"tmpfile",
")",
"try",
":",
"opener",
"=",
"ulib",
".",
"build_opener",
"(",
"ulib",
".",
"HTTPCookieProcessor",
"(",
"cj",
")",
")",
"url",
"=",
"opener",
".",
"open",
"(",
"link",
")",
"fh",
"=",
"open",
"(",
"tmpfile",
",",
"mode",
"=",
"'wb'",
")",
"headers",
"=",
"url",
".",
"info",
"(",
")",
"try",
":",
"total_size",
"=",
"int",
"(",
"headers",
"[",
"'Content-Length'",
"]",
")",
"except",
"(",
"ValueError",
",",
"KeyError",
",",
"TypeError",
")",
":",
"total_size",
"=",
"'unknown'",
"try",
":",
"md5_header",
"=",
"headers",
"[",
"'Content-MD5'",
"]",
"except",
"(",
"ValueError",
",",
"KeyError",
",",
"TypeError",
")",
":",
"md5_header",
"=",
"None",
"# Define which callback we're gonna use",
"if",
"total_size",
"!=",
"'unknown'",
":",
"if",
"CONSOLE_WIDTH",
">",
"57",
":",
"reporthook",
"=",
"report_bar",
"else",
":",
"reporthook",
"=",
"report_onlysize",
"else",
":",
"reporthook",
"=",
"report_unknown",
"# Below are the registers to calculate network transfer rate",
"time_register",
"=",
"time",
"(",
")",
"speed",
"=",
"0.0",
"speed_list",
"=",
"[",
"]",
"bytes_register",
"=",
"0.0",
"eta",
"=",
"'unknown '",
"# Loop that reads in chunks, calculates speed and does the callback to",
"# print the progress",
"while",
"True",
":",
"chunk",
"=",
"url",
".",
"read",
"(",
"chunk_size",
")",
"# Update Download Speed every 1 second",
"if",
"time",
"(",
")",
"-",
"time_register",
">",
"0.5",
":",
"speed",
"=",
"(",
"bytes_so_far",
"-",
"bytes_register",
")",
"/",
"(",
"time",
"(",
")",
"-",
"time_register",
")",
"speed_list",
".",
"append",
"(",
"speed",
")",
"# Set register properly for future use",
"time_register",
"=",
"time",
"(",
")",
"bytes_register",
"=",
"bytes_so_far",
"# Estimative of remaining download time",
"if",
"total_size",
"!=",
"'unknown'",
"and",
"len",
"(",
"speed_list",
")",
"==",
"3",
":",
"speed_mean",
"=",
"sum",
"(",
"speed_list",
")",
"/",
"3",
"eta_sec",
"=",
"int",
"(",
"(",
"total_size",
"-",
"bytes_so_far",
")",
"/",
"speed_mean",
")",
"eta",
"=",
"str",
"(",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"eta_sec",
")",
")",
"speed_list",
"=",
"[",
"]",
"bytes_so_far",
"+=",
"len",
"(",
"chunk",
")",
"if",
"not",
"chunk",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"'\\n'",
")",
"break",
"fh",
".",
"write",
"(",
"chunk",
")",
"reporthook",
"(",
"bytes_so_far",
",",
"total_size",
",",
"speed",
",",
"eta",
")",
"except",
"KeyboardInterrupt",
":",
"print",
"(",
"'\\n\\nCtrl + C: Download aborted by user'",
")",
"print",
"(",
"'Partial downloaded file:\\n{0}'",
".",
"format",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"tmpfile",
")",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"finally",
":",
"if",
"url",
":",
"url",
".",
"close",
"(",
")",
"if",
"fh",
":",
"fh",
".",
"close",
"(",
")",
"filenamealt",
"=",
"filename_from_headers",
"(",
"headers",
")",
"if",
"filenamealt",
":",
"filename",
"=",
"filenamealt",
"# add numeric '(x)' suffix if filename already exists",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"outdir",
",",
"filename",
")",
")",
":",
"filename",
"=",
"filename_fix_existing",
"(",
"filename",
",",
"outdir",
")",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"outdir",
",",
"filename",
")",
"shutil",
".",
"move",
"(",
"tmpfile",
",",
"filename",
")",
"# Check if sizes matches",
"if",
"total_size",
"!=",
"'unknown'",
"and",
"total_size",
"!=",
"bytes_so_far",
":",
"print",
"(",
"'\\n\\nWARNING!! Downloaded file size mismatches... Probably corrupted...'",
")",
"# Check md5 if it was in html header",
"if",
"md5_header",
":",
"print",
"(",
"'\\nValidating MD5 checksum...'",
")",
"if",
"md5_header",
"==",
"md5sum",
"(",
"filename",
")",
":",
"print",
"(",
"'MD5 checksum passed!'",
")",
"else",
":",
"print",
"(",
"'MD5 checksum do NOT passed!!!'",
")",
"return",
"filename"
] |
This is the Main function, which downloads a given link
and saves on outdir (default = current directory)
|
[
"This",
"is",
"the",
"Main",
"function",
"which",
"downloads",
"a",
"given",
"link",
"and",
"saves",
"on",
"outdir",
"(",
"default",
"=",
"current",
"directory",
")"
] |
train
|
https://github.com/phoemur/wgetter/blob/ac182a72480e150e4a800fbade4fbccc29e72b51/wgetter.py#L252-L365
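The typical entry point is download() above, exposed at module level by wgetter. A hedged usage sketch follows; the URL is a placeholder and the call needs network access plus an installed wgetter package.

import wgetter

# Saves into the given directory and returns the final (possibly suffixed) file path.
saved = wgetter.download('https://example.com/files/archive.tar.gz', outdir='.')
print('saved to', saved)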
|
Clinical-Genomics/trailblazer
|
trailblazer/cli/ls.py
|
ls_cmd
|
def ls_cmd(context, before, status):
"""Display recent logs for analyses."""
runs = context.obj['store'].analyses(
status=status,
deleted=False,
before=parse_date(before) if before else None,
).limit(30)
for run_obj in runs:
if run_obj.status == 'pending':
message = f"{run_obj.id} | {run_obj.family} [{run_obj.status.upper()}]"
else:
message = (f"{run_obj.id} | {run_obj.family} {run_obj.started_at.date()} "
f"[{run_obj.type.upper()}/{run_obj.status.upper()}]")
if run_obj.status == 'running':
message = click.style(f"{message} - {run_obj.progress * 100}/100", fg='blue')
elif run_obj.status == 'completed':
message = click.style(f"{message} - {run_obj.completed_at}", fg='green')
elif run_obj.status == 'failed':
message = click.style(message, fg='red')
print(message)
|
python
|
def ls_cmd(context, before, status):
"""Display recent logs for analyses."""
runs = context.obj['store'].analyses(
status=status,
deleted=False,
before=parse_date(before) if before else None,
).limit(30)
for run_obj in runs:
if run_obj.status == 'pending':
message = f"{run_obj.id} | {run_obj.family} [{run_obj.status.upper()}]"
else:
message = (f"{run_obj.id} | {run_obj.family} {run_obj.started_at.date()} "
f"[{run_obj.type.upper()}/{run_obj.status.upper()}]")
if run_obj.status == 'running':
message = click.style(f"{message} - {run_obj.progress * 100}/100", fg='blue')
elif run_obj.status == 'completed':
message = click.style(f"{message} - {run_obj.completed_at}", fg='green')
elif run_obj.status == 'failed':
message = click.style(message, fg='red')
print(message)
|
[
"def",
"ls_cmd",
"(",
"context",
",",
"before",
",",
"status",
")",
":",
"runs",
"=",
"context",
".",
"obj",
"[",
"'store'",
"]",
".",
"analyses",
"(",
"status",
"=",
"status",
",",
"deleted",
"=",
"False",
",",
"before",
"=",
"parse_date",
"(",
"before",
")",
"if",
"before",
"else",
"None",
",",
")",
".",
"limit",
"(",
"30",
")",
"for",
"run_obj",
"in",
"runs",
":",
"if",
"run_obj",
".",
"status",
"==",
"'pending'",
":",
"message",
"=",
"f\"{run_obj.id} | {run_obj.family} [{run_obj.status.upper()}]\"",
"else",
":",
"message",
"=",
"(",
"f\"{run_obj.id} | {run_obj.family} {run_obj.started_at.date()} \"",
"f\"[{run_obj.type.upper()}/{run_obj.status.upper()}]\"",
")",
"if",
"run_obj",
".",
"status",
"==",
"'running'",
":",
"message",
"=",
"click",
".",
"style",
"(",
"f\"{message} - {run_obj.progress * 100}/100\"",
",",
"fg",
"=",
"'blue'",
")",
"elif",
"run_obj",
".",
"status",
"==",
"'completed'",
":",
"message",
"=",
"click",
".",
"style",
"(",
"f\"{message} - {run_obj.completed_at}\"",
",",
"fg",
"=",
"'green'",
")",
"elif",
"run_obj",
".",
"status",
"==",
"'failed'",
":",
"message",
"=",
"click",
".",
"style",
"(",
"message",
",",
"fg",
"=",
"'red'",
")",
"print",
"(",
"message",
")"
] |
Display recent logs for analyses.
|
[
"Display",
"recent",
"logs",
"for",
"analyses",
"."
] |
train
|
https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/cli/ls.py#L11-L30
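ls_cmd is a click command that expects a Trailblazer store on the context object, so a full invocation is not reproduced here; the sketch below only illustrates the status-to-colour styling it applies, using click.style directly and a made-up run object in place of the ORM model.

import click
from types import SimpleNamespace

run = SimpleNamespace(id=42, family='fam1', status='running', progress=0.35)
message = f"{run.id} | {run.family} [{run.status.upper()}]"
if run.status == 'running':
    message = click.style(f"{message} - {run.progress * 100}/100", fg='blue')
print(message)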
|
ozgurgunes/django-manifest
|
manifest/core/templatetags/analytics.py
|
analytics
|
def analytics(account=None, *args, **kwargs):
"""
Simple Google Analytics integration.
First looks for an ``account`` parameter. If not supplied, uses
Django ``GOOGLE_ANALYTICS_ACCOUNT`` setting. If account not set,
raises ``TemplateSyntaxError``.
:param account:
Google Analytics account id to be used.
"""
if not account:
try:
account = settings.GOOGLE_ANALYTICS_ACCOUNT
except:
raise template.TemplateSyntaxError(
"Analytics account could not found either "
"in tag parameters or settings")
return {'account': account, 'params':kwargs }
|
python
|
def analytics(account=None, *args, **kwargs):
"""
Simple Google Analytics integration.
First looks for an ``account`` parameter. If not supplied, uses
Django ``GOOGLE_ANALYTICS_ACCOUNT`` setting. If account not set,
raises ``TemplateSyntaxError``.
:param account:
Google Analytics account id to be used.
"""
if not account:
try:
account = settings.GOOGLE_ANALYTICS_ACCOUNT
except:
raise template.TemplateSyntaxError(
"Analytics account could not found either "
"in tag parameters or settings")
return {'account': account, 'params':kwargs }
|
[
"def",
"analytics",
"(",
"account",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"account",
":",
"try",
":",
"account",
"=",
"settings",
".",
"GOOGLE_ANALYTICS_ACCOUNT",
"except",
":",
"raise",
"template",
".",
"TemplateSyntaxError",
"(",
"\"Analytics account could not found either \"",
"\"in tag parameters or settings\"",
")",
"return",
"{",
"'account'",
":",
"account",
",",
"'params'",
":",
"kwargs",
"}"
] |
Simple Google Analytics integration.
First looks for an ``account`` parameter. If not supplied, uses
Django ``GOOGLE_ANALYTICS_ACCOUNT`` setting. If account not set,
raises ``TemplateSyntaxError``.
:param account:
Google Analytics account id to be used.
|
[
"Simple",
"Google",
"Analytics",
"integration",
"."
] |
train
|
https://github.com/ozgurgunes/django-manifest/blob/9873bbf2a475b76284ad7e36b2b26c92131e72dd/manifest/core/templatetags/analytics.py#L8-L27
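The analytics tag above is registered as a Django template tag; the simplified stand-in below only shows the context dict returned when an account id is passed explicitly (so no Django settings are consulted). 'UA-12345-6' and the extra keyword are placeholders.

def analytics_context(account=None, **kwargs):
    # Simplified stand-in for the tag above: the real code falls back to
    # settings.GOOGLE_ANALYTICS_ACCOUNT and raises TemplateSyntaxError when unset.
    if not account:
        raise ValueError("Analytics account could not be found")
    return {'account': account, 'params': kwargs}

print(analytics_context('UA-12345-6', anonymize_ip=True))
# {'account': 'UA-12345-6', 'params': {'anonymize_ip': True}}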
|