| repository_name (string, 7-55) | func_path_in_repository (string, 4-223) | func_name (string, 1-134) | whole_func_string (string, 75-104k) | language (1 class: python) | split_name (1 class: train) | func_code_url (string, 87-315) |
|---|---|---|---|---|---|---|
fudge-py/fudge | fudge/__init__.py | Fake.without_args | def without_args(self, *args, **kwargs):
"""Set the last call to expect that certain arguments will not exist.
This is the opposite of :func:`fudge.Fake.with_matching_args`. It will
fail if any of the arguments are passed.
.. doctest::
>>> import fudge
>>> query = fudge.Fake('query').expects_call().without_args(
... 'http://example.com', name="Steve"
... )
>>> query('http://python.org', name="Joe")
>>> query('http://example.com')
Traceback (most recent call last):
...
AssertionError: fake:query() was called unexpectedly with arg http://example.com
>>> query("Joe", "Frank", "Bartholomew", "Steve")
>>> query(name='Steve')
Traceback (most recent call last):
...
AssertionError: fake:query() was called unexpectedly with kwarg name=Steve
>>> query('http://python.org', name='Steve')
Traceback (most recent call last):
...
AssertionError: fake:query() was called unexpectedly with kwarg name=Steve
>>> query(city='Chicago', name='Steve')
Traceback (most recent call last):
...
AssertionError: fake:query() was called unexpectedly with kwarg name=Steve
>>> query.expects_call().without_args('http://example2.com')
fake:query
>>> query('foobar')
>>> query('foobar', 'http://example2.com')
Traceback (most recent call last):
...
AssertionError: fake:query() was called unexpectedly with arg http://example2.com
>>> query.expects_call().without_args(name="Hieronymus")
fake:query
>>> query("Gottfried", "Hieronymus")
>>> query(name="Wexter", other_name="Hieronymus")
>>> query('asdf', name="Hieronymus")
Traceback (most recent call last):
...
AssertionError: fake:query() was called unexpectedly with kwarg name=Hieronymus
>>> query(name="Hieronymus")
Traceback (most recent call last):
...
AssertionError: fake:query() was called unexpectedly with kwarg name=Hieronymus
>>> query = fudge.Fake('query').expects_call().without_args(
... 'http://example.com', name="Steve"
... ).with_args('dog')
>>> query('dog')
>>> query('dog', 'http://example.com')
Traceback (most recent call last):
...
AssertionError: fake:query('dog') was called unexpectedly with args ('dog', 'http://example.com')
>>> query()
Traceback (most recent call last):
...
AssertionError: fake:query('dog') was called unexpectedly with args ()
"""
exp = self._get_current_call()
if args:
exp.unexpected_args = args
if kwargs:
exp.unexpected_kwargs = kwargs
return self | python | train | https://github.com/fudge-py/fudge/blob/b283fbc1a41900f3f5845b10b8c2ef9136a67ebc/fudge/__init__.py#L1285-L1357 |
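As a standalone sketch of the behavior documented in the doctest above (fudge API only, nothing assumed beyond it), the forbidden-argument check fires at call time:

```python
# Minimal sketch: calling the fake with a forbidden positional or keyword
# argument raises AssertionError immediately, as in the doctest above.
import fudge

query = fudge.Fake('query').expects_call().without_args(
    'http://example.com', name="Steve")

query('http://python.org', name="Joe")  # fine: no forbidden arguments
try:
    query('http://example.com')  # forbidden positional argument
except AssertionError as err:
    print(err)  # fake:query() was called unexpectedly with arg http://example.com
```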
fudge-py/fudge | fudge/__init__.py | Fake.with_arg_count | def with_arg_count(self, count):
"""Set the last call to expect an exact argument count.
E.g.::
>>> auth = Fake('auth').provides('login').with_arg_count(2)
>>> auth.login('joe_user') # forgot password
Traceback (most recent call last):
...
AssertionError: fake:auth.login() was called with 1 arg(s) but expected 2
"""
exp = self._get_current_call()
exp.expected_arg_count = count
return self | python | train | https://github.com/fudge-py/fudge/blob/b283fbc1a41900f3f5845b10b8c2ef9136a67ebc/fudge/__init__.py#L1359-L1373 |
fudge-py/fudge | fudge/__init__.py | Fake.with_kwarg_count | def with_kwarg_count(self, count):
"""Set the last call to expect an exact count of keyword arguments.
E.g.::
>>> auth = Fake('auth').provides('login').with_kwarg_count(2)
>>> auth.login(username='joe') # forgot password=
Traceback (most recent call last):
...
AssertionError: fake:auth.login() was called with 1 keyword arg(s) but expected 2
"""
exp = self._get_current_call()
exp.expected_kwarg_count = count
return self | python | def with_kwarg_count(self, count):
"""Set the last call to expect an exact count of keyword arguments.
I.E.::
>>> auth = Fake('auth').provides('login').with_kwarg_count(2)
>>> auth.login(username='joe') # forgot password=
Traceback (most recent call last):
...
AssertionError: fake:auth.login() was called with 1 keyword arg(s) but expected 2
"""
exp = self._get_current_call()
exp.expected_kwarg_count = count
return self | [
"def",
"with_kwarg_count",
"(",
"self",
",",
"count",
")",
":",
"exp",
"=",
"self",
".",
"_get_current_call",
"(",
")",
"exp",
".",
"expected_kwarg_count",
"=",
"count",
"return",
"self"
] | Set the last call to expect an exact count of keyword arguments.
I.E.::
>>> auth = Fake('auth').provides('login').with_kwarg_count(2)
>>> auth.login(username='joe') # forgot password=
Traceback (most recent call last):
...
AssertionError: fake:auth.login() was called with 1 keyword arg(s) but expected 2 | [
"Set",
"the",
"last",
"call",
"to",
"expect",
"an",
"exact",
"count",
"of",
"keyword",
"arguments",
"."
] | train | https://github.com/fudge-py/fudge/blob/b283fbc1a41900f3f5845b10b8c2ef9136a67ebc/fudge/__init__.py#L1375-L1389 |
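A short sketch combining the two counting expectations above; the failing call is taken from the docstrings, while the two-keyword call is assumed to satisfy `with_kwarg_count(2)`:

```python
from fudge import Fake

auth = Fake('auth').provides('login').with_arg_count(2)
try:
    auth.login('joe_user')  # one positional arg where two are expected
except AssertionError as err:
    print(err)  # fake:auth.login() was called with 1 arg(s) but expected 2

auth2 = Fake('auth').provides('login').with_kwarg_count(2)
auth2.login(username='joe', password='s3cret')  # exactly two keyword args
```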
thespacedoctor/sherlock | sherlock/database.py | database.connect | def connect(self):
"""connect to the various databases, the credientals and settings of which are found in the sherlock settings file
**Return:**
- ``transientsDbConn`` -- the database hosting the transient source data
- ``cataloguesDbConn`` -- connection to the database hosting the contextual catalogues the transients are to be crossmatched against
- ``pmDbConn`` -- connection to the PESSTO Marshall database
See the class docstring for usage
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``connect`` method')
transientSettings = self.settings[
"database settings"]["transients"]
catalogueSettings = self.settings[
"database settings"]["static catalogues"]
if "pessto marshall" in self.settings[
"database settings"]:
marshallSettings = self.settings[
"database settings"]["pessto marshall"]
else:
marshallSettings = False
dbConns = []
for dbSettings in [transientSettings, catalogueSettings, marshallSettings]:
port = False
if dbSettings and dbSettings["tunnel"]:
port = self._setup_tunnel(
tunnelParameters=dbSettings["tunnel"]
)
if dbSettings:
# SETUP A DATABASE CONNECTION FOR THE STATIC CATALOGUES
host = dbSettings["host"]
user = dbSettings["user"]
passwd = dbSettings["password"]
dbName = dbSettings["db"]
thisConn = ms.connect(
host=host,
user=user,
passwd=passwd,
db=dbName,
port=port,
use_unicode=True,
charset='utf8',
client_flag=ms.constants.CLIENT.MULTI_STATEMENTS,
connect_timeout=3600
)
thisConn.autocommit(True)
dbConns.append(thisConn)
else:
dbConns.append(None)
# CREATE A DICTIONARY OF DATABASES
dbConns = {
"transients": dbConns[0],
"catalogues": dbConns[1],
"marshall": dbConns[2]
}
dbVersions = {}
for k, v in dbConns.iteritems():
if v:
sqlQuery = u"""
SELECT VERSION() as v;
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=v,
quiet=False
)
version = rows[0]['v']
dbVersions[k] = version
else:
dbVersions[k] = None
self.log.debug('completed the ``connect`` method')
return dbConns, dbVersions | python | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/database.py#L83-L171 |
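The settings structure `connect` expects can be read off the keys it accesses; a hypothetical minimal settings dict (placeholder hosts and credentials, not real values) looks like:

```python
# Hypothetical settings matching the keys connect() reads above; the
# "pessto marshall" block is optional and defaults to False when absent.
settings = {
    "database settings": {
        "transients": {
            "host": "127.0.0.1", "user": "sherlock", "password": "***",
            "db": "transients", "tunnel": False,
        },
        "static catalogues": {
            "host": "127.0.0.1", "user": "sherlock", "password": "***",
            "db": "catalogues",
            "tunnel": {  # route this connection through a local ssh tunnel
                "port": 9006, "remote user": "sherlock",
                "remote ip": "203.0.113.7",
                "remote datbase host": "localhost",  # key spelled as in the source
            },
        },
    }
}
```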
thespacedoctor/sherlock | sherlock/database.py | database._setup_tunnel | def _setup_tunnel(
self,
tunnelParameters):
"""
*set up an ssh tunnel for a database connection to port through*
**Key Arguments:**
- ``tunnelParameters`` -- the tunnel parameters found associated with the database settings
**Return:**
- ``sshPort`` -- the port the ssh tunnel is connected via
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``_setup_tunnel`` method')
# TEST TUNNEL DOES NOT ALREADY EXIST
sshPort = tunnelParameters["port"]
connected = self._checkServer(
"127.0.0.1", sshPort)
if connected:
self.log.debug('ssh tunnel already exists - moving on')
else:
# GRAB TUNNEL SETTINGS FROM SETTINGS FILE
ru = tunnelParameters["remote user"]
rip = tunnelParameters["remote ip"]
rh = tunnelParameters["remote datbase host"]
cmd = "ssh -fnN %(ru)s@%(rip)s -L %(sshPort)s:%(rh)s:3306" % locals()
p = Popen(cmd, shell=True, close_fds=True)
output = p.communicate()[0]
self.log.debug('output: %(output)s' % locals())
# TEST CONNECTION - QUIT AFTER SO MANY TRIES
connected = False
count = 0
while not connected:
connected = self._checkServer(
"127.0.0.1", sshPort)
time.sleep(1)
count += 1
if count == 5:
self.log.error(
'could not set up tunnel to remote database')
sys.exit(0)
return sshPort | python | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/database.py#L173-L226 |
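Expanding the format string `_setup_tunnel` builds with sample tunnel values (hypothetical host and user) shows the actual command it shells out to; port 3306 is hard-coded for MySQL:

```python
ru, rip, rh, sshPort = "sherlock", "203.0.113.7", "localhost", 9006
cmd = "ssh -fnN %(ru)s@%(rip)s -L %(sshPort)s:%(rh)s:3306" % locals()
print(cmd)  # ssh -fnN sherlock@203.0.113.7 -L 9006:localhost:3306
```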
thespacedoctor/sherlock | sherlock/database.py | database._checkServer | def _checkServer(self, address, port):
"""Check that the TCP Port we've decided to use for tunnelling is available
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``_checkServer`` method')
# CREATE A TCP SOCKET
import socket
s = socket.socket()
self.log.debug(
"""Attempting to connect to `%(address)s` on port `%(port)s`""" % locals())
try:
s.connect((address, port))
self.log.debug(
"""Connected to `%(address)s` on port `%(port)s`""" % locals())
return True
except socket.error as e:
self.log.warning(
"""Connection to `%(address)s` on port `%(port)s` failed - try again: %(e)s""" % locals())
return False
return None | python | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/database.py#L228-L258 |
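Stripped of the class and logging, the probe is a few lines; a sketch (the original never closes the socket, so this version adds a close):

```python
import socket

def check_server(address, port):
    # Same TCP probe as _checkServer above, minus logging.
    s = socket.socket()
    try:
        s.connect((address, port))
        return True
    except socket.error:
        return False
    finally:
        s.close()

print(check_server("127.0.0.1", 9006))
```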
ozgurgunes/django-manifest | manifest/accounts/decorators.py | secure_required | def secure_required(view_func):
"""
Decorator to switch an url from http to https.
If a view is accessed through http and this decorator is applied
to that view, then it will return a permanent redirect to the
secure (https) version of the same view.
The decorator also must check that ``ACCOUNTS_USE_HTTPS`` is enabled.
If disabled, it should not redirect to https because the project
doesn't support it.
"""
def _wrapped_view(request, *args, **kwargs):
if not request.is_secure():
if defaults.ACCOUNTS_USE_HTTPS:
request_url = request.build_absolute_uri(
request.get_full_path())
secure_url = request_url.replace('http://', 'https://')
return HttpResponsePermanentRedirect(secure_url)
return view_func(request, *args, **kwargs)
return wraps(view_func,
assigned=available_attrs(view_func))(_wrapped_view) | python | train | https://github.com/ozgurgunes/django-manifest/blob/9873bbf2a475b76284ad7e36b2b26c92131e72dd/manifest/accounts/decorators.py#L10-L32 |
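Applied to a Django view, the decorator works like any other; a hypothetical view for illustration (name and body are not from django-manifest):

```python
from django.http import HttpResponse

@secure_required
def login_view(request):
    # Reached over https when ACCOUNTS_USE_HTTPS is on; plain-http
    # requests get a permanent (301) redirect to the https URL first.
    return HttpResponse("secure")
```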
ozgurgunes/django-manifest | manifest/accounts/forms.py | identification_field_factory | def identification_field_factory(label, error_required):
"""
A simple identification field factory which enables you to set the label.
:param label:
String containing the label for this field.
:param error_required:
String containing the error message if the field is left empty.
"""
return forms.CharField(label=_(u"%(label)s") % {'label': label},
widget=forms.TextInput(attrs=attrs_dict),
max_length=75,
error_messages={'required': _(u"%(error)s") %
{'error': error_required}}) | python | train | https://github.com/ozgurgunes/django-manifest/blob/9873bbf2a475b76284ad7e36b2b26c92131e72dd/manifest/accounts/forms.py#L133-L148 |
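A hypothetical call to the factory; the label and error text here are illustrative, not the strings django-manifest itself passes:

```python
identification = identification_field_factory(
    "Email or username",
    "Either supply us with your email or username.")
# yields a CharField(max_length=75) with the given label and
# 'required' error message, ready to drop into a form.
```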
ozgurgunes/django-manifest | manifest/accounts/forms.py | RegistrationForm.clean_username | def clean_username(self):
"""
Validate that the username is unique and not listed
in ``defaults.ACCOUNTS_FORBIDDEN_USERNAMES`` list.
"""
try:
user = get_user_model().objects.get(username=self.cleaned_data["username"])
except get_user_model().DoesNotExist:
pass
else:
raise forms.ValidationError(
self.error_messages['duplicate_username'])
if self.cleaned_data['username'].lower() \
in defaults.ACCOUNTS_FORBIDDEN_USERNAMES:
raise forms.ValidationError(_(u'This username is not allowed.'))
return self.cleaned_data['username'] | python | train | https://github.com/ozgurgunes/django-manifest/blob/9873bbf2a475b76284ad7e36b2b26c92131e72dd/manifest/accounts/forms.py#L48-L65 |
ozgurgunes/django-manifest | manifest/accounts/forms.py | RegistrationForm.clean_email | def clean_email(self):
"""
Validate that the email address is unique.
"""
if get_user_model().objects.filter(
Q(email__iexact=self.cleaned_data['email']) |
Q(email_unconfirmed__iexact=self.cleaned_data['email'])):
raise forms.ValidationError(_(u'This email address is already '
'in use. Please supply a different email.'))
return self.cleaned_data['email'] | python | train | https://github.com/ozgurgunes/django-manifest/blob/9873bbf2a475b76284ad7e36b2b26c92131e72dd/manifest/accounts/forms.py#L67-L77 |
ozgurgunes/django-manifest | manifest/accounts/forms.py | RegistrationForm.save | def save(self):
"""
Creates a new user and account. Returns the newly created user.
"""
username, email, password = (self.cleaned_data['username'],
self.cleaned_data['email'],
self.cleaned_data['password1'])
user = get_user_model().objects.create_user(username, email, password,
not defaults.ACCOUNTS_ACTIVATION_REQUIRED,
defaults.ACCOUNTS_ACTIVATION_REQUIRED)
return user | python | train | https://github.com/ozgurgunes/django-manifest/blob/9873bbf2a475b76284ad7e36b2b26c92131e72dd/manifest/accounts/forms.py#L79-L91 |
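Taken together, the three `RegistrationForm` methods above validate and create a user; a hypothetical flow (`password2` is assumed to come from the base form class, which is not shown here):

```python
form = RegistrationForm(data={
    'username': 'joe', 'email': 'joe@example.com',
    'password1': 's3cret', 'password2': 's3cret',
})
if form.is_valid():          # runs clean_username() and clean_email()
    user = form.save()       # inactive if ACCOUNTS_ACTIVATION_REQUIRED is set
```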
ozgurgunes/django-manifest | manifest/accounts/forms.py | AuthenticationForm.clean | def clean(self):
"""
Checks for the identification and password.
If the combination can't be found will raise an invalid sign in error.
"""
identification = self.cleaned_data.get('identification')
password = self.cleaned_data.get('password')
if identification and password:
self.user_cache = authenticate(identification=identification,
password=password)
if self.user_cache is None:
raise forms.ValidationError(_(u"Please enter a correct "
"username or email address and password. "
"Note that both fields are case-sensitive."))
return self.cleaned_data | python | train | https://github.com/ozgurgunes/django-manifest/blob/9873bbf2a475b76284ad7e36b2b26c92131e72dd/manifest/accounts/forms.py#L181-L198 |
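Because the backend is looked up via `authenticate(identification=...)`, the same field accepts a username or an email; a hypothetical sign-in flow:

```python
form = AuthenticationForm(data={'identification': 'joe@example.com',
                                'password': 's3cret'})
if form.is_valid():
    user = form.user_cache  # set by clean() when authentication succeeds
```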
ozgurgunes/django-manifest | manifest/accounts/forms.py | EmailForm.clean_email | def clean_email(self):
"""
Validate that the email is not already registered with another user.
"""
if self.cleaned_data['email'].lower() == self.user.email:
raise forms.ValidationError(_(u"You're already known under "
"this email address."))
if get_user_model().objects.filter(
email__iexact=self.cleaned_data['email']).exclude(
email__iexact=self.user.email):
raise forms.ValidationError(_(u'This email address is already '
'in use. Please supply a different email address.'))
return self.cleaned_data['email'] | python | train | https://github.com/ozgurgunes/django-manifest/blob/9873bbf2a475b76284ad7e36b2b26c92131e72dd/manifest/accounts/forms.py#L221-L234 |
ozgurgunes/django-manifest | manifest/accounts/forms.py | ProfileForm.clean_picture | def clean_picture(self):
"""
Validates format and file size of uploaded profile picture.
"""
if self.cleaned_data.get('picture'):
picture_data = self.cleaned_data['picture']
if 'error' in picture_data:
raise forms.ValidationError(_(u'Upload a valid image. '
'The file you uploaded was either not an image '
'or a corrupted image.'))
content_type = picture_data.content_type
if content_type:
main, sub = content_type.split('/')
if not (main == 'image'
and sub in defaults.ACCOUNTS_PICTURE_FORMATS):
raise forms.ValidationError(_(u'%s only.' %
defaults.ACCOUNTS_PICTURE_FORMATS))
if picture_data.size > int(defaults.ACCOUNTS_PICTURE_MAX_FILE):
raise forms.ValidationError(_(u'Image size is too big.'))
return self.cleaned_data['picture'] | python | train | https://github.com/ozgurgunes/django-manifest/blob/9873bbf2a475b76284ad7e36b2b26c92131e72dd/manifest/accounts/forms.py#L280-L302 |
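In isolation, the MIME and size check reduces to a few lines; a sketch with hypothetical settings values:

```python
ACCOUNTS_PICTURE_FORMATS = ('jpeg', 'png', 'gif')   # hypothetical allowed list
ACCOUNTS_PICTURE_MAX_FILE = 1024 * 1024             # hypothetical 1 MiB cap

def picture_ok(content_type, size):
    # Mirrors the checks in clean_picture(): image/* with an allowed
    # subtype, and within the configured size limit.
    main, sub = content_type.split('/')
    return main == 'image' and sub in ACCOUNTS_PICTURE_FORMATS \
        and size <= ACCOUNTS_PICTURE_MAX_FILE

print(picture_ok('image/png', 20000))   # True
print(picture_ok('image/tiff', 20000))  # False: subtype not allowed
```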
OpenEnergyPlatform/oedialect | oedialect/compiler.py | OECompiler.visit_grouping | def visit_grouping(self, grouping, asfrom=False, **kwargs):
""""
TODO:
"""
return {
'type': 'grouping',
'grouping': grouping.element._compiler_dispatch(self, **kwargs)
} | python | train | https://github.com/OpenEnergyPlatform/oedialect/blob/40a8d9e9b272ea4674d2c40dd6b3e6cc15f91c1e/oedialect/compiler.py#L275-L282 |
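Since this dialect compiles to a JSON-style dict rather than SQL text, a grouped element comes out as a nested payload; the inner value below is a hypothetical child result, not a fixed schema:

```python
compiled = {
    'type': 'grouping',
    'grouping': {'type': 'column', 'column': 'id'},  # whatever the child compiled to
}
```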
OpenEnergyPlatform/oedialect | oedialect/compiler.py | OECompiler.visit_select | def visit_select(self, select, asfrom=False, parens=True,
fromhints=None,
compound_index=0,
nested_join_translation=False,
select_wraps_for=None,
**kwargs):
jsn = {'command': 'advanced/search', 'type': 'select'}
needs_nested_translation = \
select.use_labels and \
not nested_join_translation and \
not self.stack and \
not self.dialect.supports_right_nested_joins
if needs_nested_translation:
transformed_select = self._transform_select_for_nested_joins(
select)
text = self.visit_select(
transformed_select, asfrom=asfrom, parens=parens,
fromhints=fromhints,
compound_index=compound_index,
nested_join_translation=True, **kwargs
)
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
populate_result_map = toplevel or \
(
compound_index == 0 and entry.get(
'need_result_map_for_compound', False)
) or entry.get('need_result_map_for_nested',
False)
# this was first proposed as part of #3372; however, it is not
# reached in current tests and could possibly be an assertion
# instead.
if not populate_result_map and 'add_to_result_map' in kwargs:
del kwargs['add_to_result_map']
if needs_nested_translation:
if populate_result_map:
self._transform_result_map_for_nested_joins(
select, transformed_select)
return jsn
froms = self._setup_select_stack(select, entry, asfrom)
column_clause_args = kwargs.copy()
column_clause_args.update({
'within_label_clause': False,
'within_columns_clause': False
})
if select._hints:
hint_text, byfrom = self._setup_select_hints(select)
if hint_text:
text += hint_text + " "
else:
byfrom = None
"""
if select._prefixes:
text += self._generate_prefixes(
select, select._prefixes, **kwargs)
"""
if select._distinct:
jsn['distinct'] = True
# the actual list of columns to print in the SELECT column list.
inner_columns = [
c for c in [
self._label_select_column(
select,
column,
populate_result_map, asfrom,
column_clause_args,
name=name)
for name, column in select._columns_plus_names
]
if c is not None
]
if populate_result_map and select_wraps_for is not None:
# if this select is a compiler-generated wrapper,
# rewrite the targeted columns in the result map
wrapped_inner_columns = set(select_wraps_for.inner_columns)
translate = dict(
(outer, inner.pop()) for outer, inner in [
(
outer,
outer.proxy_set.intersection(wrapped_inner_columns))
for outer in select.inner_columns
] if inner
)
self._result_columns = [
(key, name, tuple(translate.get(o, o) for o in obj), type_)
for key, name, obj, type_ in self._result_columns
]
jsn = self._compose_select_body(
jsn, select, inner_columns, froms, byfrom, kwargs)
if select._statement_hints:
per_dialect = [
ht for (dialect_name, ht)
in select._statement_hints
if dialect_name in ('*', self.dialect.name)
]
if per_dialect:
text += " " + self.get_statement_hint_text(per_dialect)
if self.ctes and self._is_toplevel_select(select):
text = self._render_cte_clause() + text
if select._suffixes:
text += " " + self._generate_prefixes(
select, select._suffixes, **kwargs)
self.stack.pop(-1)
if asfrom and parens:
return jsn # "(" + text + ")"
else:
return jsn | python | train | https://github.com/OpenEnergyPlatform/oedialect/blob/40a8d9e9b272ea4674d2c40dd6b3e6cc15f91c1e/oedialect/compiler.py#L517-L641 |
OpenEnergyPlatform/oedialect | oedialect/compiler.py | OECompiler._label_select_column | def _label_select_column(self, select, column,
populate_result_map,
asfrom, column_clause_args,
name=None,
within_columns_clause=True):
"""produce labeled columns present in a select()."""
if column.type._has_column_expression and \
populate_result_map:
col_expr = column.type.column_expression(column)
add_to_result_map = lambda keyname, name, objects, type_: \
self._add_to_result_map(
keyname, name,
(column,) + objects, type_)
else:
col_expr = column
if populate_result_map:
add_to_result_map = self._add_to_result_map
else:
add_to_result_map = None
if not within_columns_clause:
result_expr = col_expr
elif isinstance(column, elements.Label):
if col_expr is not column:
result_expr = compiler._CompileLabel(
col_expr,
column.name,
alt_names=(column.element,)
)
else:
result_expr = col_expr
elif select is not None and name:
result_expr = compiler._CompileLabel(
col_expr,
name,
alt_names=(column._key_label,)
)
elif \
asfrom and \
isinstance(column,
elements.ColumnClause) and \
not column.is_literal and \
column.table is not None and \
not isinstance(column.table, selectable.Select):
result_expr = compiler._CompileLabel(col_expr,
elements._as_truncated(
column.name),
alt_names=(column.key,))
elif (
not isinstance(column, elements.TextClause) and
(
not isinstance(column,
elements.UnaryExpression) or
column.wraps_column_expression
) and
(
not hasattr(column, 'name') or
isinstance(column, functions.Function)
)
):
result_expr = compiler._CompileLabel(col_expr, column.anon_label)
elif col_expr is not column:
# TODO: are we sure "column" has a .name and .key here ?
# assert isinstance(column, elements.ColumnClause)
result_expr = compiler._CompileLabel(col_expr,
elements._as_truncated(
column.name),
alt_names=(column.key,))
else:
result_expr = col_expr
column_clause_args.update(
within_columns_clause=within_columns_clause,
add_to_result_map=add_to_result_map
)
return result_expr._compiler_dispatch(
self,
**column_clause_args
) | python | def _label_select_column(self, select, column,
populate_result_map,
asfrom, column_clause_args,
name=None,
within_columns_clause=True):
"""produce labeled columns present in a select()."""
if column.type._has_column_expression and \
populate_result_map:
col_expr = column.type.column_expression(column)
add_to_result_map = lambda keyname, name, objects, type_: \
self._add_to_result_map(
keyname, name,
(column,) + objects, type_)
else:
col_expr = column
if populate_result_map:
add_to_result_map = self._add_to_result_map
else:
add_to_result_map = None
if not within_columns_clause:
result_expr = col_expr
elif isinstance(column, elements.Label):
if col_expr is not column:
result_expr = compiler._CompileLabel(
col_expr,
column.name,
alt_names=(column.element,)
)
else:
result_expr = col_expr
elif select is not None and name:
result_expr = compiler._CompileLabel(
col_expr,
name,
alt_names=(column._key_label,)
)
elif \
asfrom and \
isinstance(column,
elements.ColumnClause) and \
not column.is_literal and \
column.table is not None and \
not isinstance(column.table, selectable.Select):
result_expr = compiler._CompileLabel(col_expr,
elements._as_truncated(
column.name),
alt_names=(column.key,))
elif (
not isinstance(column, elements.TextClause) and
(
not isinstance(column,
elements.UnaryExpression) or
column.wraps_column_expression
) and
(
not hasattr(column, 'name') or
isinstance(column, functions.Function)
)
):
result_expr = compiler._CompileLabel(col_expr, column.anon_label)
elif col_expr is not column:
# TODO: are we sure "column" has a .name and .key here ?
# assert isinstance(column, elements.ColumnClause)
result_expr = compiler._CompileLabel(col_expr,
elements._as_truncated(
column.name),
alt_names=(column.key,))
else:
result_expr = col_expr
column_clause_args.update(
within_columns_clause=within_columns_clause,
add_to_result_map=add_to_result_map
)
return result_expr._compiler_dispatch(
self,
**column_clause_args
) | [
"def",
"_label_select_column",
"(",
"self",
",",
"select",
",",
"column",
",",
"populate_result_map",
",",
"asfrom",
",",
"column_clause_args",
",",
"name",
"=",
"None",
",",
"within_columns_clause",
"=",
"True",
")",
":",
"if",
"column",
".",
"type",
".",
"_has_column_expression",
"and",
"populate_result_map",
":",
"col_expr",
"=",
"column",
".",
"type",
".",
"column_expression",
"(",
"column",
")",
"add_to_result_map",
"=",
"lambda",
"keyname",
",",
"name",
",",
"objects",
",",
"type_",
":",
"self",
".",
"_add_to_result_map",
"(",
"keyname",
",",
"name",
",",
"(",
"column",
",",
")",
"+",
"objects",
",",
"type_",
")",
"else",
":",
"col_expr",
"=",
"column",
"if",
"populate_result_map",
":",
"add_to_result_map",
"=",
"self",
".",
"_add_to_result_map",
"else",
":",
"add_to_result_map",
"=",
"None",
"if",
"not",
"within_columns_clause",
":",
"result_expr",
"=",
"col_expr",
"elif",
"isinstance",
"(",
"column",
",",
"elements",
".",
"Label",
")",
":",
"if",
"col_expr",
"is",
"not",
"column",
":",
"result_expr",
"=",
"compiler",
".",
"_CompileLabel",
"(",
"col_expr",
",",
"column",
".",
"name",
",",
"alt_names",
"=",
"(",
"column",
".",
"element",
",",
")",
")",
"else",
":",
"result_expr",
"=",
"col_expr",
"elif",
"select",
"is",
"not",
"None",
"and",
"name",
":",
"result_expr",
"=",
"compiler",
".",
"_CompileLabel",
"(",
"col_expr",
",",
"name",
",",
"alt_names",
"=",
"(",
"column",
".",
"_key_label",
",",
")",
")",
"elif",
"asfrom",
"and",
"isinstance",
"(",
"column",
",",
"elements",
".",
"ColumnClause",
")",
"and",
"not",
"column",
".",
"is_literal",
"and",
"column",
".",
"table",
"is",
"not",
"None",
"and",
"not",
"isinstance",
"(",
"column",
".",
"table",
",",
"selectable",
".",
"Select",
")",
":",
"result_expr",
"=",
"compiler",
".",
"_CompileLabel",
"(",
"col_expr",
",",
"elements",
".",
"_as_truncated",
"(",
"column",
".",
"name",
")",
",",
"alt_names",
"=",
"(",
"column",
".",
"key",
",",
")",
")",
"elif",
"(",
"not",
"isinstance",
"(",
"column",
",",
"elements",
".",
"TextClause",
")",
"and",
"(",
"not",
"isinstance",
"(",
"column",
",",
"elements",
".",
"UnaryExpression",
")",
"or",
"column",
".",
"wraps_column_expression",
")",
"and",
"(",
"not",
"hasattr",
"(",
"column",
",",
"'name'",
")",
"or",
"isinstance",
"(",
"column",
",",
"functions",
".",
"Function",
")",
")",
")",
":",
"result_expr",
"=",
"compiler",
".",
"_CompileLabel",
"(",
"col_expr",
",",
"column",
".",
"anon_label",
")",
"elif",
"col_expr",
"is",
"not",
"column",
":",
"# TODO: are we sure \"column\" has a .name and .key here ?",
"# assert isinstance(column, elements.ColumnClause)",
"result_expr",
"=",
"compiler",
".",
"_CompileLabel",
"(",
"col_expr",
",",
"elements",
".",
"_as_truncated",
"(",
"column",
".",
"name",
")",
",",
"alt_names",
"=",
"(",
"column",
".",
"key",
",",
")",
")",
"else",
":",
"result_expr",
"=",
"col_expr",
"column_clause_args",
".",
"update",
"(",
"within_columns_clause",
"=",
"within_columns_clause",
",",
"add_to_result_map",
"=",
"add_to_result_map",
")",
"return",
"result_expr",
".",
"_compiler_dispatch",
"(",
"self",
",",
"*",
"*",
"column_clause_args",
")"
] | produce labeled columns present in a select(). | [
"produce",
"labeled",
"columns",
"present",
"in",
"a",
"select",
"()",
"."
] | train | https://github.com/OpenEnergyPlatform/oedialect/blob/40a8d9e9b272ea4674d2c40dd6b3e6cc15f91c1e/oedialect/compiler.py#L891-L972 |
pytroll/posttroll | posttroll/logger.py | run | def run():
"""Main function
"""
import argparse
global LOGGER
parser = argparse.ArgumentParser()
parser.add_argument("-r", "--rotated", help="Time rotated log file")
parser.add_argument("-v", "--verbose", help="print debug messages too",
action="store_true")
parser.add_argument("-s", "--server", help="server to listen to",
default="localhost")
parser.add_argument("-p", "--port", help="port to listen to",
default=16543,
type=int)
opts = parser.parse_args()
if opts.verbose:
loglevel = logging.DEBUG
else:
loglevel = logging.INFO
if opts.rotated:
handler = logging.handlers.TimedRotatingFileHandler(opts.rotated,
when="midnight",
backupCount=7)
else:
handler = logging.StreamHandler()
LOGGER = logging.getLogger("pytroll")
LOGGER.setLevel(loglevel)
handler.setLevel(loglevel)
formatter = ColoredFormatter("[%(asctime)s %(levelname)-19s] %(message)s")
handler.setFormatter(formatter)
LOGGER.addHandler(handler)
import time
try:
tlogger = Logger(opts.server, opts.port)
# logger = Logger("safe", 16543)
tlogger.start()
while True:
time.sleep(1)
except KeyboardInterrupt:
tlogger.stop()
print("Thanks for using pytroll/logger. "
"See you soon on www.pytroll.org!") | python | def run():
"""Main function
"""
import argparse
global LOGGER
parser = argparse.ArgumentParser()
parser.add_argument("-r", "--rotated", help="Time rotated log file")
parser.add_argument("-v", "--verbose", help="print debug messages too",
action="store_true")
parser.add_argument("-s", "--server", help="server to listen to",
default="localhost")
parser.add_argument("-p", "--port", help="port to listen to",
default=16543,
type=int)
opts = parser.parse_args()
if opts.verbose:
loglevel = logging.DEBUG
else:
loglevel = logging.INFO
if opts.rotated:
handler = logging.handlers.TimedRotatingFileHandler(opts.rotated,
when="midnight",
backupCount=7)
else:
handler = logging.StreamHandler()
LOGGER = logging.getLogger("pytroll")
LOGGER.setLevel(loglevel)
handler.setLevel(loglevel)
formatter = ColoredFormatter("[%(asctime)s %(levelname)-19s] %(message)s")
handler.setFormatter(formatter)
LOGGER.addHandler(handler)
import time
try:
tlogger = Logger(opts.server, opts.port)
# logger = Logger("safe", 16543)
tlogger.start()
while True:
time.sleep(1)
except KeyboardInterrupt:
tlogger.stop()
print("Thanks for using pytroll/logger. "
"See you soon on www.pytroll.org!") | [
"def",
"run",
"(",
")",
":",
"import",
"argparse",
"global",
"LOGGER",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"parser",
".",
"add_argument",
"(",
"\"-r\"",
",",
"\"--rotated\"",
",",
"help",
"=",
"\"Time rotated log file\"",
")",
"parser",
".",
"add_argument",
"(",
"\"-v\"",
",",
"\"--verbose\"",
",",
"help",
"=",
"\"print debug messages too\"",
",",
"action",
"=",
"\"store_true\"",
")",
"parser",
".",
"add_argument",
"(",
"\"-s\"",
",",
"\"--server\"",
",",
"help",
"=",
"\"server to listen to\"",
",",
"default",
"=",
"\"localhost\"",
")",
"parser",
".",
"add_argument",
"(",
"\"-p\"",
",",
"\"--port\"",
",",
"help",
"=",
"\"port to listen to\"",
",",
"default",
"=",
"16543",
",",
"type",
"=",
"int",
")",
"opts",
"=",
"parser",
".",
"parse_args",
"(",
")",
"if",
"opts",
".",
"verbose",
":",
"loglevel",
"=",
"logging",
".",
"DEBUG",
"else",
":",
"loglevel",
"=",
"logging",
".",
"INFO",
"if",
"opts",
".",
"rotated",
":",
"handler",
"=",
"logging",
".",
"handlers",
".",
"TimedRotatingFileHandler",
"(",
"opts",
".",
"rotated",
",",
"when",
"=",
"\"midnight\"",
",",
"backupCount",
"=",
"7",
")",
"else",
":",
"handler",
"=",
"logging",
".",
"StreamHandler",
"(",
")",
"LOGGER",
"=",
"logging",
".",
"getLogger",
"(",
"\"pytroll\"",
")",
"LOGGER",
".",
"setLevel",
"(",
"loglevel",
")",
"handler",
".",
"setLevel",
"(",
"loglevel",
")",
"formatter",
"=",
"ColoredFormatter",
"(",
"\"[%(asctime)s %(levelname)-19s] %(message)s\"",
")",
"handler",
".",
"setFormatter",
"(",
"formatter",
")",
"LOGGER",
".",
"addHandler",
"(",
"handler",
")",
"import",
"time",
"try",
":",
"tlogger",
"=",
"Logger",
"(",
"opts",
".",
"server",
",",
"opts",
".",
"port",
")",
"# logger = Logger(\"safe\", 16543)",
"tlogger",
".",
"start",
"(",
")",
"while",
"True",
":",
"time",
".",
"sleep",
"(",
"1",
")",
"except",
"KeyboardInterrupt",
":",
"tlogger",
".",
"stop",
"(",
")",
"print",
"(",
"\"Thanks for using pytroll/logger. \"",
"\"See you soon on www.pytroll.org!\"",
")"
] | Main function | [
"Main",
"function"
] | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/logger.py#L164-L213 |
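A minimal usage sketch for run(), assuming the import path given by the record above; the argv values are illustrative, not part of the record.

import sys
from posttroll.logger import run   # module path taken from the record

sys.argv = ['pytroll-logger', '--verbose', '--port', '16543']
run()   # installs the ColoredFormatter handler, then blocks until Ctrl-C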
pytroll/posttroll | posttroll/logger.py | Logger.log | def log(self):
"""Log stuff.
"""
with Subscribe(services=[""], addr_listener=True) as sub:
for msg in sub.recv(1):
if msg:
if msg.type in ["log.debug", "log.info",
"log.warning", "log.error",
"log.critical"]:
getattr(LOGGER, msg.type[4:])(msg.subject + " " +
msg.sender + " " +
str(msg.data) + " " +
str(msg.time))
elif msg.binary:
LOGGER.debug("%s %s %s [binary] %s", msg.subject,
msg.sender,
msg.type,
str(msg.time))
else:
LOGGER.debug("%s %s %s %s %s", msg.subject,
msg.sender,
msg.type,
str(msg.data),
str(msg.time))
if not self.loop:
LOGGER.info("Stop logging")
break | python | def log(self):
"""Log stuff.
"""
with Subscribe(services=[""], addr_listener=True) as sub:
for msg in sub.recv(1):
if msg:
if msg.type in ["log.debug", "log.info",
"log.warning", "log.error",
"log.critical"]:
getattr(LOGGER, msg.type[4:])(msg.subject + " " +
msg.sender + " " +
str(msg.data) + " " +
str(msg.time))
elif msg.binary:
LOGGER.debug("%s %s %s [binary] %s", msg.subject,
msg.sender,
msg.type,
str(msg.time))
else:
LOGGER.debug("%s %s %s %s %s", msg.subject,
msg.sender,
msg.type,
str(msg.data),
str(msg.time))
if not self.loop:
LOGGER.info("Stop logging")
break | [
"def",
"log",
"(",
"self",
")",
":",
"with",
"Subscribe",
"(",
"services",
"=",
"[",
"\"\"",
"]",
",",
"addr_listener",
"=",
"True",
")",
"as",
"sub",
":",
"for",
"msg",
"in",
"sub",
".",
"recv",
"(",
"1",
")",
":",
"if",
"msg",
":",
"if",
"msg",
".",
"type",
"in",
"[",
"\"log.debug\"",
",",
"\"log.info\"",
",",
"\"log.warning\"",
",",
"\"log.error\"",
",",
"\"log.critical\"",
"]",
":",
"getattr",
"(",
"LOGGER",
",",
"msg",
".",
"type",
"[",
"4",
":",
"]",
")",
"(",
"msg",
".",
"subject",
"+",
"\" \"",
"+",
"msg",
".",
"sender",
"+",
"\" \"",
"+",
"str",
"(",
"msg",
".",
"data",
")",
"+",
"\" \"",
"+",
"str",
"(",
"msg",
".",
"time",
")",
")",
"elif",
"msg",
".",
"binary",
":",
"LOGGER",
".",
"debug",
"(",
"\"%s %s %s [binary] %s\"",
",",
"msg",
".",
"subject",
",",
"msg",
".",
"sender",
",",
"msg",
".",
"type",
",",
"str",
"(",
"msg",
".",
"time",
")",
")",
"else",
":",
"LOGGER",
".",
"debug",
"(",
"\"%s %s %s %s %s\"",
",",
"msg",
".",
"subject",
",",
"msg",
".",
"sender",
",",
"msg",
".",
"type",
",",
"str",
"(",
"msg",
".",
"data",
")",
",",
"str",
"(",
"msg",
".",
"time",
")",
")",
"if",
"not",
"self",
".",
"loop",
":",
"LOGGER",
".",
"info",
"(",
"\"Stop logging\"",
")",
"break"
] | Log stuff. | [
"Log",
"stuff",
"."
] | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/logger.py#L129-L156 |
emichael/PyREM | pyrem/task.py | cleanup | def cleanup():
"""Stop all started tasks on system exit.
Note: This only handles signals caught by the atexit module by default.
SIGKILL, for instance, will not be caught, so cleanup is not guaranteed in
all cases.
"""
to_stop = STARTED_TASKS.copy()
if to_stop:
print "Cleaning up..."
for task in to_stop:
try:
task.stop()
except: # pylint: disable=W0702
etype, value, trace = sys.exc_info()
# Disregard no such process exceptions, print out the rest
if not (isinstance(value, OSError) and value.errno == 3):
print ''.join(format_exception(etype, value, trace, None))
continue | python | def cleanup():
"""Stop all started tasks on system exit.
Note: This only handles signals caught by the atexit module by default.
SIGKILL, for instance, will not be caught, so cleanup is not guaranteed in
all cases.
"""
to_stop = STARTED_TASKS.copy()
if to_stop:
print "Cleaning up..."
for task in to_stop:
try:
task.stop()
except: # pylint: disable=W0702
etype, value, trace = sys.exc_info()
# Disregard no such process exceptions, print out the rest
if not (isinstance(value, OSError) and value.errno == 3):
print ''.join(format_exception(etype, value, trace, None))
continue | [
"def",
"cleanup",
"(",
")",
":",
"to_stop",
"=",
"STARTED_TASKS",
".",
"copy",
"(",
")",
"if",
"to_stop",
":",
"print",
"\"Cleaning up...\"",
"for",
"task",
"in",
"to_stop",
":",
"try",
":",
"task",
".",
"stop",
"(",
")",
"except",
":",
"# pylint: disable=W0702",
"etype",
",",
"value",
",",
"trace",
"=",
"sys",
".",
"exc_info",
"(",
")",
"# Disregard no such process exceptions, print out the rest",
"if",
"not",
"(",
"isinstance",
"(",
"value",
",",
"OSError",
")",
"and",
"value",
".",
"errno",
"==",
"3",
")",
":",
"print",
"''",
".",
"join",
"(",
"format_exception",
"(",
"etype",
",",
"value",
",",
"trace",
",",
"None",
")",
")",
"continue"
] | Stop all started tasks on system exit.
Note: This only handles signals caught by the atexit module by default.
SIGKILL, for instance, will not be caught, so cleanup is not guaranteed in
all cases. | [
"Stop",
"all",
"started",
"tasks",
"on",
"system",
"exit",
"."
] | train | https://github.com/emichael/PyREM/blob/2609249ead197cd9496d164f4998ca9985503579/pyrem/task.py#L30-L48 |
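A minimal sketch of the atexit wiring the docstring above relies on; registering cleanup() explicitly is illustrative, as the module may already do this on import.

import atexit
from pyrem.task import cleanup   # module path taken from the record

atexit.register(cleanup)   # stops anything left in STARTED_TASKS at interpreter exit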
emichael/PyREM | pyrem/task.py | Task.start | def start(self, wait=False):
"""Start a task.
This function depends on the underlying implementation of _start, which
any subclass of ``Task`` should implement.
Args:
wait (bool): Whether or not to wait on the task to finish before
returning from this function. Default `False`.
Raises:
RuntimeError: If the task has already been started without a
subsequent call to ``reset()``.
"""
if self._status is not TaskStatus.IDLE:
raise RuntimeError("Cannot start %s in state %s" %
(self, self._status))
self._status = TaskStatus.STARTED
STARTED_TASKS.add(self)
self._start()
if wait:
self.wait()
return self.return_values | python | def start(self, wait=False):
"""Start a task.
This function depends on the underlying implementation of _start, which
any subclass of ``Task`` should implement.
Args:
wait (bool): Whether or not to wait on the task to finish before
returning from this function. Default `False`.
Raises:
RuntimeError: If the task has already been started without a
subsequent call to ``reset()``.
"""
if self._status is not TaskStatus.IDLE:
raise RuntimeError("Cannot start %s in state %s" %
(self, self._status))
self._status = TaskStatus.STARTED
STARTED_TASKS.add(self)
self._start()
if wait:
self.wait()
return self.return_values | [
"def",
"start",
"(",
"self",
",",
"wait",
"=",
"False",
")",
":",
"if",
"self",
".",
"_status",
"is",
"not",
"TaskStatus",
".",
"IDLE",
":",
"raise",
"RuntimeError",
"(",
"\"Cannot start %s in state %s\"",
"%",
"(",
"self",
",",
"self",
".",
"_status",
")",
")",
"self",
".",
"_status",
"=",
"TaskStatus",
".",
"STARTED",
"STARTED_TASKS",
".",
"add",
"(",
"self",
")",
"self",
".",
"_start",
"(",
")",
"if",
"wait",
":",
"self",
".",
"wait",
"(",
")",
"return",
"self",
".",
"return_values"
] | Start a task.
This function depends on the underlying implementation of _start, which
any subclass of ``Task`` should implement.
Args:
wait (bool): Whether or not to wait on the task to finish before
returning from this function. Default `False`.
Raises:
RuntimeError: If the task has already been started without a
subsequent call to ``reset()``. | [
"Start",
"a",
"task",
"."
] | train | https://github.com/emichael/PyREM/blob/2609249ead197cd9496d164f4998ca9985503579/pyrem/task.py#L82-L106 |
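A hedged sketch of the start()/return_values contract; NoopTask is a hypothetical subclass used only for illustration, and Task is assumed to need no constructor arguments.

from pyrem.task import Task

class NoopTask(Task):               # hypothetical subclass, for illustration
    def _start(self):
        self.return_values['started'] = True
    def _wait(self):
        pass
    def _stop(self):
        pass
    def _reset(self):
        pass

task = NoopTask()
result = task.start(wait=True)      # wait=True blocks via wait(), then stop()
assert result is task.return_values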
emichael/PyREM | pyrem/task.py | Task.wait | def wait(self):
"""Wait on a task to finish and stop it when it has finished.
Raises:
RuntimeError: If the task hasn't been started or has already been
stopped.
Returns:
The ``return_values`` of the task.
"""
if self._status is not TaskStatus.STARTED:
raise RuntimeError("Cannot wait on %s in state %s" %
(self, self._status))
self._wait()
self.stop()
return self.return_values | python | def wait(self):
"""Wait on a task to finish and stop it when it has finished.
Raises:
RuntimeError: If the task hasn't been started or has already been
stopped.
Returns:
The ``return_values`` of the task.
"""
if self._status is not TaskStatus.STARTED:
raise RuntimeError("Cannot wait on %s in state %s" %
(self, self._status))
self._wait()
self.stop()
return self.return_values | [
"def",
"wait",
"(",
"self",
")",
":",
"if",
"self",
".",
"_status",
"is",
"not",
"TaskStatus",
".",
"STARTED",
":",
"raise",
"RuntimeError",
"(",
"\"Cannot wait on %s in state %s\"",
"%",
"(",
"self",
",",
"self",
".",
"_status",
")",
")",
"self",
".",
"_wait",
"(",
")",
"self",
".",
"stop",
"(",
")",
"return",
"self",
".",
"return_values"
] | Wait on a task to finish and stop it when it has finished.
Raises:
RuntimeError: If the task hasn't been started or has already been
stopped.
Returns:
The ``return_values`` of the task. | [
"Wait",
"on",
"a",
"task",
"to",
"finish",
"and",
"stop",
"it",
"when",
"it",
"has",
"finished",
"."
] | train | https://github.com/emichael/PyREM/blob/2609249ead197cd9496d164f4998ca9985503579/pyrem/task.py#L112-L127 |
emichael/PyREM | pyrem/task.py | Task.stop | def stop(self):
"""Stop a task immediately.
Raises:
RuntimeError: If the task hasn't been started or has already been
stopped.
"""
if self._status is TaskStatus.STOPPED:
return
if self._status is not TaskStatus.STARTED:
raise RuntimeError("Cannot stop %s in state %s" %
(self, self._status))
self._stop()
STARTED_TASKS.remove(self)
self._status = TaskStatus.STOPPED | python | def stop(self):
"""Stop a task immediately.
Raises:
RuntimeError: If the task hasn't been started or has already been
stopped.
"""
if self._status is TaskStatus.STOPPED:
return
if self._status is not TaskStatus.STARTED:
raise RuntimeError("Cannot stop %s in state %s" %
(self, self._status))
self._stop()
STARTED_TASKS.remove(self)
self._status = TaskStatus.STOPPED | [
"def",
"stop",
"(",
"self",
")",
":",
"if",
"self",
".",
"_status",
"is",
"TaskStatus",
".",
"STOPPED",
":",
"return",
"if",
"self",
".",
"_status",
"is",
"not",
"TaskStatus",
".",
"STARTED",
":",
"raise",
"RuntimeError",
"(",
"\"Cannot stop %s in state %s\"",
"%",
"(",
"self",
",",
"self",
".",
"_status",
")",
")",
"self",
".",
"_stop",
"(",
")",
"STARTED_TASKS",
".",
"remove",
"(",
"self",
")",
"self",
".",
"_status",
"=",
"TaskStatus",
".",
"STOPPED"
] | Stop a task immediately.
Raises:
RuntimeError: If the task hasn't been started or has already been
stopped. | [
"Stop",
"a",
"task",
"immediately",
"."
] | train | https://github.com/emichael/PyREM/blob/2609249ead197cd9496d164f4998ca9985503579/pyrem/task.py#L133-L149 |
emichael/PyREM | pyrem/task.py | Task.reset | def reset(self):
"""Reset a task.
Allows a task to be started again, clears the ``return_values``.
Raises:
RuntimeError: If the task has not been stopped.
"""
if self._status is not TaskStatus.STOPPED:
raise RuntimeError("Cannot reset %s in state %s" %
(self, self._status))
self._reset()
self.return_values = {}
self._status = TaskStatus.IDLE | python | def reset(self):
"""Reset a task.
Allows a task to be started again, clears the ``return_values``.
Raises:
RuntimeError: If the task has not been stopped.
"""
if self._status is not TaskStatus.STOPPED:
raise RuntimeError("Cannot reset %s in state %s" %
(self, self._status))
self._reset()
self.return_values = {}
self._status = TaskStatus.IDLE | [
"def",
"reset",
"(",
"self",
")",
":",
"if",
"self",
".",
"_status",
"is",
"not",
"TaskStatus",
".",
"STOPPED",
":",
"raise",
"RuntimeError",
"(",
"\"Cannot reset %s in state %s\"",
"%",
"(",
"self",
",",
"self",
".",
"_status",
")",
")",
"self",
".",
"_reset",
"(",
")",
"self",
".",
"return_values",
"=",
"{",
"}",
"self",
".",
"_status",
"=",
"TaskStatus",
".",
"IDLE"
] | Reset a task.
Allows a task to be started again, clears the ``return_values``.
Raises:
RuntimeError: If the task has not been stopped. | [
"Reset",
"a",
"task",
"."
] | train | https://github.com/emichael/PyREM/blob/2609249ead197cd9496d164f4998ca9985503579/pyrem/task.py#L155-L168 |
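Read together, start/wait/stop/reset give the lifecycle IDLE -> STARTED -> STOPPED -> IDLE; a hedged end-to-end sketch reusing the hypothetical NoopTask from the earlier example.

task = NoopTask()
task.start()             # IDLE -> STARTED (also added to STARTED_TASKS)
task.wait()              # blocks in _wait(), then stop(): STARTED -> STOPPED
task.reset()             # STOPPED -> IDLE, return_values cleared
task.start(wait=True)    # the same instance can run again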
emichael/PyREM | pyrem/task.py | Parallel._aggregate | def _aggregate(self):
"""Helper method to aggregate RemoteTasks into single ssh session."""
# pylint: disable=W0212
nonremote = [t for t in self._tasks if not isinstance(t, RemoteTask)]
remote = [t for t in self._tasks if isinstance(t, RemoteTask)]
host_dict = defaultdict(list)
for task in remote:
host_dict[task.host].append(task)
aggregated = []
for task_group in host_dict.values():
# Build up combined command
combined_cmd = []
for task in task_group:
if combined_cmd:
combined_cmd.append('&')
combined_cmd.append(' '.join(task._remote_command))
# Now, generated aggregate task
t0 = task_group[0] # pylint: disable=C0103
task = RemoteTask(
t0.host, combined_cmd, t0._quiet, t0._return_output,
t0._kill_remote, t0._identity_file)
aggregated.append(task)
self._tasks = nonremote + aggregated | python | def _aggregate(self):
"""Helper method to aggregate RemoteTasks into single ssh session."""
# pylint: disable=W0212
nonremote = [t for t in self._tasks if not isinstance(t, RemoteTask)]
remote = [t for t in self._tasks if isinstance(t, RemoteTask)]
host_dict = defaultdict(list)
for task in remote:
host_dict[task.host].append(task)
aggregated = []
for task_group in host_dict.values():
# Build up combined command
combined_cmd = []
for task in task_group:
if combined_cmd:
combined_cmd.append('&')
combined_cmd.append(' '.join(task._remote_command))
# Now, generated aggregate task
t0 = task_group[0] # pylint: disable=C0103
task = RemoteTask(
t0.host, combined_cmd, t0._quiet, t0._return_output,
t0._kill_remote, t0._identity_file)
aggregated.append(task)
self._tasks = nonremote + aggregated | [
"def",
"_aggregate",
"(",
"self",
")",
":",
"# pylint: disable=W0212",
"nonremote",
"=",
"[",
"t",
"for",
"t",
"in",
"self",
".",
"_tasks",
"if",
"not",
"isinstance",
"(",
"t",
",",
"RemoteTask",
")",
"]",
"remote",
"=",
"[",
"t",
"for",
"t",
"in",
"self",
".",
"_tasks",
"if",
"isinstance",
"(",
"t",
",",
"RemoteTask",
")",
"]",
"host_dict",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"task",
"in",
"remote",
":",
"host_dict",
"[",
"task",
".",
"host",
"]",
".",
"append",
"(",
"task",
")",
"aggregated",
"=",
"[",
"]",
"for",
"task_group",
"in",
"host_dict",
".",
"values",
"(",
")",
":",
"# Build up combined command",
"combined_cmd",
"=",
"[",
"]",
"for",
"task",
"in",
"task_group",
":",
"if",
"combined_cmd",
":",
"combined_cmd",
".",
"append",
"(",
"'&'",
")",
"combined_cmd",
".",
"append",
"(",
"' '",
".",
"join",
"(",
"task",
".",
"_remote_command",
")",
")",
"# Now, generated aggregate task",
"t0",
"=",
"task_group",
"[",
"0",
"]",
"# pylint: disable=C0103",
"task",
"=",
"RemoteTask",
"(",
"t0",
".",
"host",
",",
"combined_cmd",
",",
"t0",
".",
"_quiet",
",",
"t0",
".",
"_return_output",
",",
"t0",
".",
"_kill_remote",
",",
"t0",
".",
"_identity_file",
")",
"aggregated",
".",
"append",
"(",
"task",
")",
"self",
".",
"_tasks",
"=",
"nonremote",
"+",
"aggregated"
] | Helper method to aggregate RemoteTasks into single ssh session. | [
"Helper",
"method",
"to",
"aggregate",
"RemoteTasks",
"into",
"single",
"ssh",
"session",
"."
] | train | https://github.com/emichael/PyREM/blob/2609249ead197cd9496d164f4998ca9985503579/pyrem/task.py#L385-L412 |
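The aggregation idea (group same-host commands and join them with '&' so a single ssh session carries them all) can be sketched without the RemoteTask machinery; hosts and commands here are made up.

from collections import defaultdict

# Hypothetical (host, command tokens) pairs standing in for RemoteTasks.
tasks = [('node1', ['sleep', '5']), ('node1', ['echo', 'hi']), ('node2', ['uptime'])]

by_host = defaultdict(list)
for host, cmd in tasks:
    by_host[host].append(' '.join(cmd))

combined = dict((host, ' & '.join(cmds)) for host, cmds in by_host.items())
# combined == {'node1': 'sleep 5 & echo hi', 'node2': 'uptime'}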
ozgurgunes/django-manifest | manifest/accounts/models.py | upload_to_picture | def upload_to_picture(instance, filename):
"""
Uploads a picture for a user to the ``ACCOUNTS_PICTURE_PATH`` and
saves it under a unique hash for the image. This is for privacy
reasons so others can't just browse through the picture directory.
"""
extension = filename.split('.')[-1].lower()
salt, hash = generate_sha1(instance.id)
return '%(path)s/%(hash)s.%(extension)s' % {
'path': getattr(defaults,
'ACCOUNTS_PICTURE_PATH','%s/%s' % (
str(instance._meta.app_label),
str(instance._meta.model_name))),
'hash': hash[:10],
'extension': extension} | python | def upload_to_picture(instance, filename):
"""
Uploads a picture for a user to the ``ACCOUNTS_PICTURE_PATH`` and
saves it under a unique hash for the image. This is for privacy
reasons so others can't just browse through the picture directory.
"""
extension = filename.split('.')[-1].lower()
salt, hash = generate_sha1(instance.id)
return '%(path)s/%(hash)s.%(extension)s' % {
'path': getattr(defaults,
'ACCOUNTS_PICTURE_PATH','%s/%s' % (
str(instance._meta.app_label),
str(instance._meta.model_name))),
'hash': hash[:10],
'extension': extension} | [
"def",
"upload_to_picture",
"(",
"instance",
",",
"filename",
")",
":",
"extension",
"=",
"filename",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
".",
"lower",
"(",
")",
"salt",
",",
"hash",
"=",
"generate_sha1",
"(",
"instance",
".",
"id",
")",
"return",
"'%(path)s/%(hash)s.%(extension)s'",
"%",
"{",
"'path'",
":",
"getattr",
"(",
"defaults",
",",
"'ACCOUNTS_PICTURE_PATH'",
",",
"'%s/%s'",
"%",
"(",
"str",
"(",
"instance",
".",
"_meta",
".",
"app_label",
")",
",",
"str",
"(",
"instance",
".",
"_meta",
".",
"model_name",
")",
")",
")",
",",
"'hash'",
":",
"hash",
"[",
":",
"10",
"]",
",",
"'extension'",
":",
"extension",
"}"
] | Uploads a picture for a user to the ``ACCOUNTS_PICTURE_PATH`` and
saves it under a unique hash for the image. This is for privacy
reasons so others can't just browse through the picture directory. | [
"Uploads",
"a",
"picture",
"for",
"a",
"user",
"to",
"the",
"ACCOUNTS_PICTURE_PATH",
"and",
"saving",
"it",
"under",
"unique",
"hash",
"for",
"the",
"image",
".",
"This",
"is",
"for",
"privacy",
"reasons",
"so",
"others",
"can",
"t",
"just",
"browse",
"through",
"the",
"picture",
"directory",
"."
] | train | https://github.com/ozgurgunes/django-manifest/blob/9873bbf2a475b76284ad7e36b2b26c92131e72dd/manifest/accounts/models.py#L173-L188 |
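A hedged sketch of the resulting upload path; the SHA-1 helper below only approximates generate_sha1, whose exact salting is not shown in the record, and the instance id and extension are made up.

import hashlib
import random

def generate_sha1_stub(value):      # stand-in for manifest's generate_sha1
    salt = hashlib.sha1(str(random.random()).encode('utf-8')).hexdigest()[:5]
    return salt, hashlib.sha1((salt + str(value)).encode('utf-8')).hexdigest()

salt, digest = generate_sha1_stub(42)               # 42 plays the role of instance.id
path = '%s/%s.%s' % ('accounts/user', digest[:10], 'jpg')
# e.g. 'accounts/user/3a91bc04de.jpg' -- a non-guessable name, for privacy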
ozgurgunes/django-manifest | manifest/accounts/models.py | AccountActivationMixin.activation_key_expired | def activation_key_expired(self):
"""
Checks if activation key is expired.
Returns ``True`` when the ``activation_key`` of the user is expired
and ``False`` if the key is still valid.
The key is expired when it's set to the value defined in
``ACCOUNTS_ACTIVATED`` or ``activation_key_created`` is beyond the
amount of days defined in ``ACCOUNTS_ACTIVATION_DAYS``.
"""
expiration_days = datetime.timedelta(
days=defaults.ACCOUNTS_ACTIVATION_DAYS)
expiration_date = self.date_joined + expiration_days
if self.activation_key == defaults.ACCOUNTS_ACTIVATED:
return True
if get_datetime_now() >= expiration_date:
return True
return False | python | def activation_key_expired(self):
"""
Checks if activation key is expired.
Returns ``True`` when the ``activation_key`` of the user is expired
and ``False`` if the key is still valid.
The key is expired when it's set to the value defined in
``ACCOUNTS_ACTIVATED`` or ``activation_key_created`` is beyond the
amount of days defined in ``ACCOUNTS_ACTIVATION_DAYS``.
"""
expiration_days = datetime.timedelta(
days=defaults.ACCOUNTS_ACTIVATION_DAYS)
expiration_date = self.date_joined + expiration_days
if self.activation_key == defaults.ACCOUNTS_ACTIVATED:
return True
if get_datetime_now() >= expiration_date:
return True
return False | [
"def",
"activation_key_expired",
"(",
"self",
")",
":",
"expiration_days",
"=",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"defaults",
".",
"ACCOUNTS_ACTIVATION_DAYS",
")",
"expiration_date",
"=",
"self",
".",
"date_joined",
"+",
"expiration_days",
"if",
"self",
".",
"activation_key",
"==",
"defaults",
".",
"ACCOUNTS_ACTIVATED",
":",
"return",
"True",
"if",
"get_datetime_now",
"(",
")",
">=",
"expiration_date",
":",
"return",
"True",
"return",
"False"
] | Checks if activation key is expired.
Returns ``True`` when the ``activation_key`` of the user is expired
and ``False`` if the key is still valid.
The key is expired when it's set to the value defined in
``ACCOUNTS_ACTIVATED`` or ``activation_key_created`` is beyond the
amount of days defined in ``ACCOUNTS_ACTIVATION_DAYS``. | [
"Checks",
"if",
"activation",
"key",
"is",
"expired",
"."
] | train | https://github.com/ozgurgunes/django-manifest/blob/9873bbf2a475b76284ad7e36b2b26c92131e72dd/manifest/accounts/models.py#L31-L50 |
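The expiry check reduces to a timedelta comparison against date_joined; a minimal sketch with made-up dates, where 7 stands in for ACCOUNTS_ACTIVATION_DAYS.

import datetime

activation_days = 7                                  # assumed setting value
date_joined = datetime.datetime(2024, 1, 1)
expiration_date = date_joined + datetime.timedelta(days=activation_days)

now = datetime.datetime(2024, 1, 9)
expired = now >= expiration_date                     # True: the window closed on Jan 8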
ozgurgunes/django-manifest | manifest/accounts/models.py | AccountActivationMixin.send_activation_email | def send_activation_email(self):
"""
Sends an activation email to the user.
This email is sent when the user wants to activate their
newly created account.
"""
context= {'user': self,
'protocol': get_protocol(),
'activation_days': defaults.ACCOUNTS_ACTIVATION_DAYS,
'activation_key': self.activation_key,
'site': Site.objects.get_current()}
subject = ''.join(render_to_string(
'accounts/emails/activation_email_subject.txt',
context).splitlines())
message = render_to_string(
'accounts/emails/activation_email_message.txt',
context)
send_mail(subject,
message,
settings.DEFAULT_FROM_EMAIL,
[self.email,]) | python | def send_activation_email(self):
"""
Sends an activation email to the user.
This email is sent when the user wants to activate their
newly created account.
"""
context= {'user': self,
'protocol': get_protocol(),
'activation_days': defaults.ACCOUNTS_ACTIVATION_DAYS,
'activation_key': self.activation_key,
'site': Site.objects.get_current()}
subject = ''.join(render_to_string(
'accounts/emails/activation_email_subject.txt',
context).splitlines())
message = render_to_string(
'accounts/emails/activation_email_message.txt',
context)
send_mail(subject,
message,
settings.DEFAULT_FROM_EMAIL,
[self.email,]) | [
"def",
"send_activation_email",
"(",
"self",
")",
":",
"context",
"=",
"{",
"'user'",
":",
"self",
",",
"'protocol'",
":",
"get_protocol",
"(",
")",
",",
"'activation_days'",
":",
"defaults",
".",
"ACCOUNTS_ACTIVATION_DAYS",
",",
"'activation_key'",
":",
"self",
".",
"activation_key",
",",
"'site'",
":",
"Site",
".",
"objects",
".",
"get_current",
"(",
")",
"}",
"subject",
"=",
"''",
".",
"join",
"(",
"render_to_string",
"(",
"'accounts/emails/activation_email_subject.txt'",
",",
"context",
")",
".",
"splitlines",
"(",
")",
")",
"message",
"=",
"render_to_string",
"(",
"'accounts/emails/activation_email_message.txt'",
",",
"context",
")",
"send_mail",
"(",
"subject",
",",
"message",
",",
"settings",
".",
"DEFAULT_FROM_EMAIL",
",",
"[",
"self",
".",
"email",
",",
"]",
")"
] | Sends an activation email to the user.
This email is sent when the user wants to activate their
newly created account. | [
"Sends",
"a",
"activation",
"email",
"to",
"the",
"user",
"."
] | train | https://github.com/ozgurgunes/django-manifest/blob/9873bbf2a475b76284ad7e36b2b26c92131e72dd/manifest/accounts/models.py#L52-L76 |
ozgurgunes/django-manifest | manifest/accounts/models.py | EmailConfirmationMixin.change_email | def change_email(self, email):
"""
Changes the email address for a user.
A user needs to verify this new email address before it becomes
active. By storing the new email address in a temporary field
-- ``email_unconfirmed`` -- we are able to set this email address
after the user has verified it by clicking on the verification URI
in the email. This email gets sent out by ``send_confirmation_email``.
:param email:
The new email address that the user wants to use.
"""
self.email_unconfirmed = email
salt, hash = generate_sha1(self.username)
self.email_confirmation_key = hash
self.email_confirmation_key_created = get_datetime_now()
self.save()
# Send email for activation
self.send_confirmation_email()
return self | python | def change_email(self, email):
"""
Changes the email address for a user.
A user needs to verify this new email address before it becomes
active. By storing the new email address in a temporary field
-- ``email_unconfirmed`` -- we are able to set this email address
after the user has verified it by clicking on the verification URI
in the email. This email gets sent out by ``send_confirmation_email``.
:param email:
The new email address that the user wants to use.
"""
self.email_unconfirmed = email
salt, hash = generate_sha1(self.username)
self.email_confirmation_key = hash
self.email_confirmation_key_created = get_datetime_now()
self.save()
# Send email for activation
self.send_confirmation_email()
return self | [
"def",
"change_email",
"(",
"self",
",",
"email",
")",
":",
"self",
".",
"email_unconfirmed",
"=",
"email",
"salt",
",",
"hash",
"=",
"generate_sha1",
"(",
"self",
".",
"username",
")",
"self",
".",
"email_confirmation_key",
"=",
"hash",
"self",
".",
"email_confirmation_key_created",
"=",
"get_datetime_now",
"(",
")",
"self",
".",
"save",
"(",
")",
"# Send email for activation",
"self",
".",
"send_confirmation_email",
"(",
")",
"return",
"self"
] | Changes the email address for a user.
A user needs to verify this new email address before it becomes
active. By storing the new email address in a temporary field
-- ``email_unconfirmed`` -- we are able to set this email address
after the user has verified it by clicking on the verification URI
in the email. This email gets sent out by ``send_confirmation_email``.
:param email:
The new email address that the user wants to use. | [
"Changes",
"the",
"email",
"address",
"for",
"a",
"user",
"."
] | train | https://github.com/ozgurgunes/django-manifest/blob/9873bbf2a475b76284ad7e36b2b26c92131e72dd/manifest/accounts/models.py#L99-L123 |
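A hedged usage sketch: given a saved user instance, change the address, then confirm it later with the emailed key; confirm_email is the manager method the docstring points at, and its exact signature is assumed.

user.change_email('new@example.com')    # stored in email_unconfirmed, key emailed
key = user.email_confirmation_key       # normally read back from the emailed link

# later, e.g. in a confirmation view (signature assumed):
User.objects.confirm_email(key)         # would promote email_unconfirmed to email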
ozgurgunes/django-manifest | manifest/accounts/models.py | EmailConfirmationMixin.send_confirmation_email | def send_confirmation_email(self):
"""
Sends an email to confirm the new email address.
This method sends out two emails. One to the new email address that
contains the ``email_confirmation_key`` which is used to verify
this email address with :func:`User.objects.confirm_email`.
The other email is to the old email address to let the user know that
a request is made to change this email address.
"""
context= {'user': self,
'new_email': self.email_unconfirmed,
'protocol': get_protocol(),
'confirmation_key': self.email_confirmation_key,
'site': Site.objects.get_current()}
# Email to the old address
subject_old = ''.join(render_to_string(
'accounts/emails/confirmation_email_subject_old.txt',
context).splitlines())
message_old = render_to_string(
'accounts/emails/confirmation_email_message_old.txt',
context)
send_mail(subject_old,
message_old,
settings.DEFAULT_FROM_EMAIL,
[self.email])
# Email to the new address
subject_new = ''.join(render_to_string(
'accounts/emails/confirmation_email_subject_new.txt',
context).splitlines())
message_new = render_to_string(
'accounts/emails/confirmation_email_message_new.txt',
context)
send_mail(subject_new,
message_new,
settings.DEFAULT_FROM_EMAIL,
[self.email_unconfirmed,]) | python | def send_confirmation_email(self):
"""
Sends an email to confirm the new email address.
This method sends out two emails. One to the new email address that
contains the ``email_confirmation_key`` which is used to verify
this email address with :func:`User.objects.confirm_email`.
The other email is to the old email address to let the user know that
a request is made to change this email address.
"""
context= {'user': self,
'new_email': self.email_unconfirmed,
'protocol': get_protocol(),
'confirmation_key': self.email_confirmation_key,
'site': Site.objects.get_current()}
# Email to the old address
subject_old = ''.join(render_to_string(
'accounts/emails/confirmation_email_subject_old.txt',
context).splitlines())
message_old = render_to_string(
'accounts/emails/confirmation_email_message_old.txt',
context)
send_mail(subject_old,
message_old,
settings.DEFAULT_FROM_EMAIL,
[self.email])
# Email to the new address
subject_new = ''.join(render_to_string(
'accounts/emails/confirmation_email_subject_new.txt',
context).splitlines())
message_new = render_to_string(
'accounts/emails/confirmation_email_message_new.txt',
context)
send_mail(subject_new,
message_new,
settings.DEFAULT_FROM_EMAIL,
[self.email_unconfirmed,]) | [
"def",
"send_confirmation_email",
"(",
"self",
")",
":",
"context",
"=",
"{",
"'user'",
":",
"self",
",",
"'new_email'",
":",
"self",
".",
"email_unconfirmed",
",",
"'protocol'",
":",
"get_protocol",
"(",
")",
",",
"'confirmation_key'",
":",
"self",
".",
"email_confirmation_key",
",",
"'site'",
":",
"Site",
".",
"objects",
".",
"get_current",
"(",
")",
"}",
"# Email to the old address",
"subject_old",
"=",
"''",
".",
"join",
"(",
"render_to_string",
"(",
"'accounts/emails/confirmation_email_subject_old.txt'",
",",
"context",
")",
".",
"splitlines",
"(",
")",
")",
"message_old",
"=",
"render_to_string",
"(",
"'accounts/emails/confirmation_email_message_old.txt'",
",",
"context",
")",
"send_mail",
"(",
"subject_old",
",",
"message_old",
",",
"settings",
".",
"DEFAULT_FROM_EMAIL",
",",
"[",
"self",
".",
"email",
"]",
")",
"# Email to the new address",
"subject_new",
"=",
"''",
".",
"join",
"(",
"render_to_string",
"(",
"'accounts/emails/confirmation_email_subject_new.txt'",
",",
"context",
")",
".",
"splitlines",
"(",
")",
")",
"message_new",
"=",
"render_to_string",
"(",
"'accounts/emails/confirmation_email_message_new.txt'",
",",
"context",
")",
"send_mail",
"(",
"subject_new",
",",
"message_new",
",",
"settings",
".",
"DEFAULT_FROM_EMAIL",
",",
"[",
"self",
".",
"email_unconfirmed",
",",
"]",
")"
] | Sends an email to confirm the new email address.
This method sends out two emails. One to the new email address that
contains the ``email_confirmation_key`` which is used to verify
this email address with :func:`User.objects.confirm_email`.
The other email is to the old email address to let the user know that
a request is made to change this email address. | [
"Sends",
"an",
"email",
"to",
"confirm",
"the",
"new",
"email",
"address",
"."
] | train | https://github.com/ozgurgunes/django-manifest/blob/9873bbf2a475b76284ad7e36b2b26c92131e72dd/manifest/accounts/models.py#L125-L170 |
ozgurgunes/django-manifest | manifest/accounts/models.py | UserProfileMixin.get_picture_url | def get_picture_url(self):
"""
Returns the image containing the picture for the user.
The picture can be an uploaded image or a Gravatar.
Gravatar functionality will only be used when
``ACCOUNTS_GRAVATAR_PICTURE`` is set to ``True``.
:return:
``None`` when Gravatar is not used and no default image is
supplied by ``ACCOUNTS_GRAVATAR_DEFAULT``.
"""
# First check for a picture and if any return that.
if self.picture:
return self.picture.url
# Use Gravatar if the user wants to.
if defaults.ACCOUNTS_GRAVATAR_PICTURE:
return get_gravatar(self.email,
defaults.ACCOUNTS_GRAVATAR_SIZE,
defaults.ACCOUNTS_GRAVATAR_DEFAULT)
# Gravatar not used, check for a default image.
else:
if defaults.ACCOUNTS_GRAVATAR_DEFAULT not in ['404', 'mm',
'identicon',
'monsterid',
'wavatar']:
return defaults.ACCOUNTS_GRAVATAR_DEFAULT
else: return None | python | def get_picture_url(self):
"""
Returns the image containing the picture for the user.
The picture can be an uploaded image or a Gravatar.
Gravatar functionality will only be used when
``ACCOUNTS_GRAVATAR_PICTURE`` is set to ``True``.
:return:
``None`` when Gravatar is not used and no default image is
supplied by ``ACCOUNTS_GRAVATAR_DEFAULT``.
"""
# First check for a picture and if any return that.
if self.picture:
return self.picture.url
# Use Gravatar if the user wants to.
if defaults.ACCOUNTS_GRAVATAR_PICTURE:
return get_gravatar(self.email,
defaults.ACCOUNTS_GRAVATAR_SIZE,
defaults.ACCOUNTS_GRAVATAR_DEFAULT)
# Gravatar not used, check for a default image.
else:
if defaults.ACCOUNTS_GRAVATAR_DEFAULT not in ['404', 'mm',
'identicon',
'monsterid',
'wavatar']:
return defaults.ACCOUNTS_GRAVATAR_DEFAULT
else: return None | [
"def",
"get_picture_url",
"(",
"self",
")",
":",
"# First check for a picture and if any return that.",
"if",
"self",
".",
"picture",
":",
"return",
"self",
".",
"picture",
".",
"url",
"# Use Gravatar if the user wants to.",
"if",
"defaults",
".",
"ACCOUNTS_GRAVATAR_PICTURE",
":",
"return",
"get_gravatar",
"(",
"self",
".",
"email",
",",
"defaults",
".",
"ACCOUNTS_GRAVATAR_SIZE",
",",
"defaults",
".",
"ACCOUNTS_GRAVATAR_DEFAULT",
")",
"# Gravatar not used, check for a default image.",
"else",
":",
"if",
"defaults",
".",
"ACCOUNTS_GRAVATAR_DEFAULT",
"not",
"in",
"[",
"'404'",
",",
"'mm'",
",",
"'identicon'",
",",
"'monsterid'",
",",
"'wavatar'",
"]",
":",
"return",
"defaults",
".",
"ACCOUNTS_GRAVATAR_DEFAULT",
"else",
":",
"return",
"None"
] | Returns the image containing the picture for the user.
The picture can be an uploaded image or a Gravatar.
Gravatar functionality will only be used when
``ACCOUNTS_GRAVATAR_PICTURE`` is set to ``True``.
:return:
``None`` when Gravatar is not used and no default image is
supplied by ``ACCOUNTS_GRAVATAR_DEFAULT``. | [
"Returns",
"the",
"image",
"containing",
"the",
"picture",
"for",
"the",
"user",
"."
] | train | https://github.com/ozgurgunes/django-manifest/blob/9873bbf2a475b76284ad7e36b2b26c92131e72dd/manifest/accounts/models.py#L240-L271 |
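The fallback order above is: uploaded picture, then Gravatar when enabled, then an explicit default URL, otherwise None; a compressed, dependency-free sketch of that decision.

def picture_url(uploaded_url, use_gravatar, gravatar_default):
    if uploaded_url:                        # 1. an uploaded picture always wins
        return uploaded_url
    if use_gravatar:                        # 2. delegate to get_gravatar()
        return 'gravatar://placeholder'     # stands in for the computed URL
    builtin = ('404', 'mm', 'identicon', 'monsterid', 'wavatar')
    if gravatar_default not in builtin:     # 3. treat the setting as a plain URL
        return gravatar_default
    return None

picture_url(None, False, 'https://example.com/default.png')   # -> the default URL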
ozgurgunes/django-manifest | manifest/accounts/models.py | UserProfileMixin.get_full_name_or_username | def get_full_name_or_username(self):
"""
Returns the full name of the user, or if none is supplied will return
the username.
Also looks at the ``ACCOUNTS_WITHOUT_USERNAMES`` setting to determine whether it
should return the username or email address when the full name is not
supplied.
:return:
``String`` containing the full name of the user. If no name is
supplied it will return the username or email address depending on
the ``ACCOUNTS_WITHOUT_USERNAMES`` setting.
"""
if self.first_name or self.last_name:
# We will return this as translated string. Maybe there are some
# countries that first display the last name.
name = _(u"%(first_name)s %(last_name)s") % \
{'first_name': self.first_name,
'last_name': self.last_name}
else:
# Fallback to the username if usernames are used
if not defaults.ACCOUNTS_WITHOUT_USERNAMES:
name = "%(username)s" % {'username': self.username}
else:
name = "%(email)s" % {'email': self.email}
return name.strip() | python | def get_full_name_or_username(self):
"""
Returns the full name of the user, or if none is supplied will return
the username.
Also looks at the ``ACCOUNTS_WITHOUT_USERNAMES`` setting to determine whether it
should return the username or email address when the full name is not
supplied.
:return:
``String`` containing the full name of the user. If no name is
supplied it will return the username or email address depending on
the ``ACCOUNTS_WITHOUT_USERNAMES`` setting.
"""
if self.first_name or self.last_name:
# We will return this as translated string. Maybe there are some
# countries that first display the last name.
name = _(u"%(first_name)s %(last_name)s") % \
{'first_name': self.first_name,
'last_name': self.last_name}
else:
# Fallback to the username if usernames are used
if not defaults.ACCOUNTS_WITHOUT_USERNAMES:
name = "%(username)s" % {'username': self.username}
else:
name = "%(email)s" % {'email': self.email}
return name.strip() | [
"def",
"get_full_name_or_username",
"(",
"self",
")",
":",
"if",
"self",
".",
"first_name",
"or",
"self",
".",
"last_name",
":",
"# We will return this as translated string. Maybe there are some",
"# countries that first display the last name.",
"name",
"=",
"_",
"(",
"u\"%(first_name)s %(last_name)s\"",
")",
"%",
"{",
"'first_name'",
":",
"self",
".",
"first_name",
",",
"'last_name'",
":",
"self",
".",
"last_name",
"}",
"else",
":",
"# Fallback to the username if usernames are used",
"if",
"not",
"defaults",
".",
"ACCOUNTS_WITHOUT_USERNAMES",
":",
"name",
"=",
"\"%(username)s\"",
"%",
"{",
"'username'",
":",
"self",
".",
"username",
"}",
"else",
":",
"name",
"=",
"\"%(email)s\"",
"%",
"{",
"'email'",
":",
"self",
".",
"email",
"}",
"return",
"name",
".",
"strip",
"(",
")"
] | Returns the full name of the user, or if none is supplied will return
the username.
Also looks at the ``ACCOUNTS_WITHOUT_USERNAMES`` setting to determine whether it
should return the username or email address when the full name is not
supplied.
:return:
``String`` containing the full name of the user. If no name is
supplied it will return the username or email address depending on
the ``ACCOUNTS_WITHOUT_USERNAMES`` setting. | [
"Returns",
"the",
"full",
"name",
"of",
"the",
"user",
"or",
"if",
"none",
"is",
"supplied",
"will",
"return",
"the",
"username",
"."
] | train | https://github.com/ozgurgunes/django-manifest/blob/9873bbf2a475b76284ad7e36b2b26c92131e72dd/manifest/accounts/models.py#L273-L300 |
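The same three-way name fallback, reduced to plain arguments; inputs are illustrative.

def display_name(first_name, last_name, username, email, without_usernames=False):
    if first_name or last_name:
        return ('%s %s' % (first_name, last_name)).strip()
    return email if without_usernames else username

display_name('Jane', 'Doe', 'jdoe', 'jane@example.com')          # -> 'Jane Doe'
display_name('', '', 'jdoe', 'jane@example.com')                 # -> 'jdoe'
display_name('', '', 'jdoe', 'jane@example.com', True)           # -> 'jane@example.com'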
contentful-labs/contentful.py | contentful/cda/client.py | Client.validate_config | def validate_config(config):
"""Verify sanity for a :class:`.Config` instance.
This will raise an exception in case conditions are not met, otherwise
will complete silently.
:param config: (:class:`.Config`) Configuration container.
"""
non_null_params = ['space_id', 'access_token']
for param in non_null_params:
if getattr(config, param) is None:
raise Exception('Configuration for \"{0}\" must not be empty.'.format(param))
for clazz in config.custom_entries:
if not issubclass(clazz, Entry):
raise Exception(
'Provided class \"{0}\" must be a subclass of Entry.'.format(clazz.__name__))
elif clazz is Entry:
raise Exception('Cannot register "Entry" as a custom entry class.') | python | def validate_config(config):
"""Verify sanity for a :class:`.Config` instance.
This will raise an exception in case conditions are not met, otherwise
will complete silently.
:param config: (:class:`.Config`) Configuration container.
"""
non_null_params = ['space_id', 'access_token']
for param in non_null_params:
if getattr(config, param) is None:
raise Exception('Configuration for \"{0}\" must not be empty.'.format(param))
for clazz in config.custom_entries:
if not issubclass(clazz, Entry):
raise Exception(
'Provided class \"{0}\" must be a subclass of Entry.'.format(clazz.__name__))
elif clazz is Entry:
raise Exception('Cannot register "Entry" as a custom entry class.') | [
"def",
"validate_config",
"(",
"config",
")",
":",
"non_null_params",
"=",
"[",
"'space_id'",
",",
"'access_token'",
"]",
"for",
"param",
"in",
"non_null_params",
":",
"if",
"getattr",
"(",
"config",
",",
"param",
")",
"is",
"None",
":",
"raise",
"Exception",
"(",
"'Configuration for \\\"{0}\\\" must not be empty.'",
".",
"format",
"(",
"param",
")",
")",
"for",
"clazz",
"in",
"config",
".",
"custom_entries",
":",
"if",
"not",
"issubclass",
"(",
"clazz",
",",
"Entry",
")",
":",
"raise",
"Exception",
"(",
"'Provided class \\\"{0}\\\" must be a subclass of Entry.'",
".",
"format",
"(",
"clazz",
".",
"__name__",
")",
")",
"elif",
"clazz",
"is",
"Entry",
":",
"raise",
"Exception",
"(",
"'Cannot register \"Entry\" as a custom entry class.'",
")"
] | Verify sanity for a :class:`.Config` instance.
This will raise an exception in case conditions are not met, otherwise
will complete silently.
:param config: (:class:`.Config`) Configuration container. | [
"Verify",
"sanity",
"for",
"a",
":",
"class",
":",
".",
"Config",
"instance",
"."
] | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/client.py#L52-L70 |
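A hedged sketch of tripping the first validation check; Config's constructor keywords are inferred from the attributes the record reads, and its import location is an assumption.

from contentful.cda.client import Client, Config   # Config location assumed

config = Config(space_id='cfexampleapi', access_token=None, custom_entries=[])
try:
    Client.validate_config(config)   # single-parameter signature suggests a staticmethod
except Exception as err:
    message = str(err)   # 'Configuration for "access_token" must not be empty.'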
contentful-labs/contentful.py | contentful/cda/client.py | Client.fetch | def fetch(self, resource_class):
"""Construct a :class:`.Request` for the given resource type.
Provided an :class:`.Entry` subclass, the Content Type ID will be inferred and requested explicitly.
Examples::
client.fetch(Asset)
client.fetch(Entry)
client.fetch(ContentType)
client.fetch(CustomEntryClass)
:param resource_class: The type of resource to be fetched.
:return: :class:`.Request` instance.
"""
if issubclass(resource_class, Entry):
params = None
content_type = getattr(resource_class, '__content_type__', None)
if content_type is not None:
params = {'content_type': resource_class.__content_type__}
return RequestArray(self.dispatcher, utils.path_for_class(resource_class), self.config.resolve_links,
params=params)
else:
remote_path = utils.path_for_class(resource_class)
if remote_path is None:
raise Exception('Invalid resource type \"{0}\".'.format(resource_class))
return RequestArray(self.dispatcher, remote_path, self.config.resolve_links) | python | def fetch(self, resource_class):
"""Construct a :class:`.Request` for the given resource type.
Provided an :class:`.Entry` subclass, the Content Type ID will be inferred and requested explicitly.
Examples::
client.fetch(Asset)
client.fetch(Entry)
client.fetch(ContentType)
client.fetch(CustomEntryClass)
:param resource_class: The type of resource to be fetched.
:return: :class:`.Request` instance.
"""
if issubclass(resource_class, Entry):
params = None
content_type = getattr(resource_class, '__content_type__', None)
if content_type is not None:
params = {'content_type': resource_class.__content_type__}
return RequestArray(self.dispatcher, utils.path_for_class(resource_class), self.config.resolve_links,
params=params)
else:
remote_path = utils.path_for_class(resource_class)
if remote_path is None:
raise Exception('Invalid resource type \"{0}\".'.format(resource_class))
return RequestArray(self.dispatcher, remote_path, self.config.resolve_links) | [
"def",
"fetch",
"(",
"self",
",",
"resource_class",
")",
":",
"if",
"issubclass",
"(",
"resource_class",
",",
"Entry",
")",
":",
"params",
"=",
"None",
"content_type",
"=",
"getattr",
"(",
"resource_class",
",",
"'__content_type__'",
",",
"None",
")",
"if",
"content_type",
"is",
"not",
"None",
":",
"params",
"=",
"{",
"'content_type'",
":",
"resource_class",
".",
"__content_type__",
"}",
"return",
"RequestArray",
"(",
"self",
".",
"dispatcher",
",",
"utils",
".",
"path_for_class",
"(",
"resource_class",
")",
",",
"self",
".",
"config",
".",
"resolve_links",
",",
"params",
"=",
"params",
")",
"else",
":",
"remote_path",
"=",
"utils",
".",
"path_for_class",
"(",
"resource_class",
")",
"if",
"remote_path",
"is",
"None",
":",
"raise",
"Exception",
"(",
"'Invalid resource type \\\"{0}\\\".'",
".",
"format",
"(",
"resource_class",
")",
")",
"return",
"RequestArray",
"(",
"self",
".",
"dispatcher",
",",
"remote_path",
",",
"self",
".",
"config",
".",
"resolve_links",
")"
] | Construct a :class:`.Request` for the given resource type.
Provided an :class:`.Entry` subclass, the Content Type ID will be inferred and requested explicitly.
Examples::
client.fetch(Asset)
client.fetch(Entry)
client.fetch(ContentType)
client.fetch(CustomEntryClass)
:param resource_class: The type of resource to be fetched.
:return: :class:`.Request` instance. | [
"Construct",
"a",
":",
"class",
":",
".",
"Request",
"for",
"the",
"given",
"resource",
"type",
"."
] | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/client.py#L72-L100 |
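fetch() returns a request builder; combining it with the .where({...}).first() chaining that resolve() uses elsewhere in these records gives a hedged end-to-end sketch, where Entry's import path and the .all() terminal are assumptions.

from contentful.cda.resources import Entry   # import path assumed

# client is a configured Client instance for the target space.
nyancat = client.fetch(Entry).where({'sys.id': 'nyancat'}).first()
all_cats = client.fetch(Entry).where({'content_type': 'cat'}).all()   # .all() assumed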
contentful-labs/contentful.py | contentful/cda/client.py | Client.resolve | def resolve(self, link_resource_type, resource_id, array=None):
"""Resolve a link to a CDA resource.
Provided an `array` argument, attempt to retrieve the resource from the `items_mapped`
section of that array (containing both included and regular resources), in case the
resource cannot be found in the array (or if no `array` was provided) - attempt to fetch
the resource from the API by issuing a network request.
:param link_resource_type: (str) Resource type as str.
:param resource_id: (str) Remote ID of the linked resource.
:param array: (:class:`.Array`) Optional array resource.
:return: :class:`.Resource` subclass, `None` if it cannot be retrieved.
"""
result = None
if array is not None:
container = array.items_mapped.get(link_resource_type)
result = container.get(resource_id)
if result is None:
clz = utils.class_for_type(link_resource_type)
result = self.fetch(clz).where({'sys.id': resource_id}).first()
return result | python | def resolve(self, link_resource_type, resource_id, array=None):
"""Resolve a link to a CDA resource.
Provided an `array` argument, attempt to retrieve the resource from the `items_mapped`
section of that array (containing both included and regular resources), in case the
resource cannot be found in the array (or if no `array` was provided) - attempt to fetch
the resource from the API by issuing a network request.
:param link_resource_type: (str) Resource type as str.
:param resource_id: (str) Remote ID of the linked resource.
:param array: (:class:`.Array`) Optional array resource.
:return: :class:`.Resource` subclass, `None` if it cannot be retrieved.
"""
result = None
if array is not None:
container = array.items_mapped.get(link_resource_type)
result = container.get(resource_id)
if result is None:
clz = utils.class_for_type(link_resource_type)
result = self.fetch(clz).where({'sys.id': resource_id}).first()
return result | [
"def",
"resolve",
"(",
"self",
",",
"link_resource_type",
",",
"resource_id",
",",
"array",
"=",
"None",
")",
":",
"result",
"=",
"None",
"if",
"array",
"is",
"not",
"None",
":",
"container",
"=",
"array",
".",
"items_mapped",
".",
"get",
"(",
"link_resource_type",
")",
"result",
"=",
"container",
".",
"get",
"(",
"resource_id",
")",
"if",
"result",
"is",
"None",
":",
"clz",
"=",
"utils",
".",
"class_for_type",
"(",
"link_resource_type",
")",
"result",
"=",
"self",
".",
"fetch",
"(",
"clz",
")",
".",
"where",
"(",
"{",
"'sys.id'",
":",
"resource_id",
"}",
")",
".",
"first",
"(",
")",
"return",
"result"
] | Resolve a link to a CDA resource.
Provided an `array` argument, attempt to retrieve the resource from the `items_mapped`
section of that array (containing both included and regular resources). If the
resource cannot be found in the array (or if no `array` was provided), attempt to fetch
the resource from the API by issuing a network request.
:param link_resource_type: (str) Resource type as str.
:param resource_id: (str) Remote ID of the linked resource.
:param array: (:class:`.Array`) Optional array resource.
:return: :class:`.Resource` subclass, `None` if it cannot be retrieved. | [
"Resolve",
"a",
"link",
"to",
"a",
"CDA",
"resource",
"."
] | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/client.py#L109-L132 |
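A hedged sketch of `resolve`; the resource IDs are invented, and `result` stands for an `Array` previously fetched through this client:

entry = client.resolve('Entry', 'nyancat', array=result)  # checks result.items_mapped first
asset = client.resolve('Asset', 'happycat-pic')           # no array: falls back to a network request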
contentful-labs/contentful.py | contentful/cda/client.py | Client.resolve_resource_link | def resolve_resource_link(self, resource_link, array=None):
"""Convenience method for resolving links given a :class:`.resources.ResourceLink` object.
Extract link values and pass to the :func:`.resolve` method of this class.
:param resource_link: (:class:`.ResourceLink`) instance.
:param array: (:class:`.Array`) Optional array resource.
:return: :class:`.Resource` subclass, `None` if it cannot be retrieved.
"""
return self.resolve(resource_link.link_type, resource_link.resource_id, array) | python | def resolve_resource_link(self, resource_link, array=None):
"""Convenience method for resolving links given a :class:`.resources.ResourceLink` object.
Extract link values and pass to the :func:`.resolve` method of this class.
:param resource_link: (:class:`.ResourceLink`) instance.
:param array: (:class:`.Array`) Optional array resource.
:return: :class:`.Resource` subclass, `None` if it cannot be retrieved.
"""
return self.resolve(resource_link.link_type, resource_link.resource_id, array) | [
"def",
"resolve_resource_link",
"(",
"self",
",",
"resource_link",
",",
"array",
"=",
"None",
")",
":",
"return",
"self",
".",
"resolve",
"(",
"resource_link",
".",
"link_type",
",",
"resource_link",
".",
"resource_id",
",",
"array",
")"
] | Convenience method for resolving links given a :class:`.resources.ResourceLink` object.
Extract link values and pass to the :func:`.resolve` method of this class.
:param resource_link: (:class:`.ResourceLink`) instance.
:param array: (:class:`.Array`) Optional array resource.
:return: :class:`.Resource` subclass, `None` if it cannot be retrieved. | [
"Convenience",
"method",
"for",
"resolving",
"links",
"given",
"a",
":",
"class",
":",
".",
"resources",
".",
"ResourceLink",
"object",
"."
] | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/client.py#L134-L143 |
contentful-labs/contentful.py | contentful/cda/client.py | Client.resolve_dict_link | def resolve_dict_link(self, dct, array=None):
"""Convenience method for resolving links given a dict object.
Extract link values and pass to the :func:`.resolve` method of this class.
:param dct: (dict) Dictionary with the link data.
:param array: (:class:`.Array`) Optional array resource.
:return: :class:`.Resource` subclass, `None` if it cannot be retrieved.
"""
sys = dct.get('sys')
return self.resolve(sys['linkType'], sys['id'], array) if sys is not None else None | python | def resolve_dict_link(self, dct, array=None):
"""Convenience method for resolving links given a dict object.
Extract link values and pass to the :func:`.resolve` method of this class.
:param dct: (dict) Dictionary with the link data.
:param array: (:class:`.Array`) Optional array resource.
:return: :class:`.Resource` subclass, `None` if it cannot be retrieved.
"""
sys = dct.get('sys')
return self.resolve(sys['linkType'], sys['id'], array) if sys is not None else None | [
"def",
"resolve_dict_link",
"(",
"self",
",",
"dct",
",",
"array",
"=",
"None",
")",
":",
"sys",
"=",
"dct",
".",
"get",
"(",
"'sys'",
")",
"return",
"self",
".",
"resolve",
"(",
"sys",
"[",
"'linkType'",
"]",
",",
"sys",
"[",
"'id'",
"]",
",",
"array",
")",
"if",
"sys",
"is",
"not",
"None",
"else",
"None"
] | Convenience method for resolving links given a dict object.
Extract link values and pass to the :func:`.resolve` method of this class.
:param dct: (dict) Dictionary with the link data.
:param array: (:class:`.Array`) Optional array resource.
:return: :class:`.Resource` subclass, `None` if it cannot be retrieved. | [
"Convenience",
"method",
"for",
"resolving",
"links",
"given",
"a",
"dict",
"object",
"."
] | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/client.py#L145-L155 |
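A short illustration of both convenience wrappers; the link dict follows the CDA Link shape and the ID is made up:

link = {'sys': {'type': 'Link', 'linkType': 'Entry', 'id': 'nyancat'}}
entry = client.resolve_dict_link(link)    # returns None if the dict lacks a 'sys' key
entry = client.resolve_resource_link(rl)  # rl: a ResourceLink carrying the same data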
contentful-labs/contentful.py | contentful/cda/client.py | Dispatcher.invoke | def invoke(self, request):
"""Invoke the given :class:`.Request` instance using the associated :class:`.Dispatcher`.
:param request: :class:`.Request` instance to invoke.
:return: :class:`.Resource` subclass.
"""
url = '{0}/{1}'.format(self.base_url, request.remote_path)
r = self.httpclient.get(url, params=request.params, headers=self.get_headers())
if 200 <= r.status_code < 300:
return self.resource_factory.from_json(r.json())
else:
if r.status_code in ErrorMapping.mapping:
raise ErrorMapping.mapping[r.status_code](r)
else:
raise ApiError(r) | python | def invoke(self, request):
"""Invoke the given :class:`.Request` instance using the associated :class:`.Dispatcher`.
:param request: :class:`.Request` instance to invoke.
:return: :class:`.Resource` subclass.
"""
url = '{0}/{1}'.format(self.base_url, request.remote_path)
r = self.httpclient.get(url, params=request.params, headers=self.get_headers())
if 200 <= r.status_code < 300:
return self.resource_factory.from_json(r.json())
else:
if r.status_code in ErrorMapping.mapping:
raise ErrorMapping.mapping[r.status_code](r)
else:
raise ApiError(r) | [
"def",
"invoke",
"(",
"self",
",",
"request",
")",
":",
"url",
"=",
"'{0}/{1}'",
".",
"format",
"(",
"self",
".",
"base_url",
",",
"request",
".",
"remote_path",
")",
"r",
"=",
"self",
".",
"httpclient",
".",
"get",
"(",
"url",
",",
"params",
"=",
"request",
".",
"params",
",",
"headers",
"=",
"self",
".",
"get_headers",
"(",
")",
")",
"if",
"200",
"<=",
"r",
".",
"status_code",
"<",
"300",
":",
"return",
"self",
".",
"resource_factory",
".",
"from_json",
"(",
"r",
".",
"json",
"(",
")",
")",
"else",
":",
"if",
"r",
".",
"status_code",
"in",
"ErrorMapping",
".",
"mapping",
":",
"raise",
"ErrorMapping",
".",
"mapping",
"[",
"r",
".",
"status_code",
"]",
"(",
"r",
")",
"else",
":",
"raise",
"ApiError",
"(",
"r",
")"
] | Invoke the given :class:`.Request` instance using the associated :class:`.Dispatcher`.
:param request: :class:`.Request` instance to invoke.
:return: :class:`.Resource` subclass. | [
"Invoke",
"the",
"given",
":",
"class",
":",
".",
"Request",
"instance",
"using",
"the",
"associated",
":",
"class",
":",
".",
"Dispatcher",
"."
] | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/client.py#L209-L223 |
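A rough sketch of the error-handling contract, assuming the error classes live in an `errors` module alongside this client (the import path is a guess):

from contentful.cda.errors import ApiError  # assumed module layout

try:
    result = client.fetch(Entry).all()  # 2xx: resources built by the ResourceFactory
except ApiError as exc:
    print('CDA request failed:', exc)   # 4xx/5xx: mapped subclass or generic ApiError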
contentful-labs/contentful.py | contentful/cda/client.py | RequestArray.all | def all(self):
"""Attempt to retrieve all available resources matching this request.
:return: Result instance as returned by the :class:`.Dispatcher`.
"""
result = self.invoke()
if self.resolve_links:
result.resolve_links()
return result | python | def all(self):
"""Attempt to retrieve all available resources matching this request.
:return: Result instance as returned by the :class:`.Dispatcher`.
"""
result = self.invoke()
if self.resolve_links:
result.resolve_links()
return result | [
"def",
"all",
"(",
"self",
")",
":",
"result",
"=",
"self",
".",
"invoke",
"(",
")",
"if",
"self",
".",
"resolve_links",
":",
"result",
".",
"resolve_links",
"(",
")",
"return",
"result"
] | Attempt to retrieve all available resources matching this request.
:return: Result instance as returned by the :class:`.Dispatcher`. | [
"Attempt",
"to",
"retrieve",
"all",
"available",
"resources",
"matching",
"this",
"request",
"."
] | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/client.py#L263-L272 |
contentful-labs/contentful.py | contentful/cda/client.py | RequestArray.first | def first(self):
"""Attempt to retrieve only the first resource matching this request.
:return: Result instance, or `None` if there are no matching resources.
"""
self.params['limit'] = 1
result = self.all()
return result.items[0] if result.total > 0 else None | python | def first(self):
"""Attempt to retrieve only the first resource matching this request.
:return: Result instance, or `None` if there are no matching resources.
"""
self.params['limit'] = 1
result = self.all()
return result.items[0] if result.total > 0 else None | [
"def",
"first",
"(",
"self",
")",
":",
"self",
".",
"params",
"[",
"'limit'",
"]",
"=",
"1",
"result",
"=",
"self",
".",
"all",
"(",
")",
"return",
"result",
".",
"items",
"[",
"0",
"]",
"if",
"result",
".",
"total",
">",
"0",
"else",
"None"
] | Attempt to retrieve only the first resource matching this request.
:return: Result instance, or `None` if there are no matching resources. | [
"Attempt",
"to",
"retrieve",
"only",
"the",
"first",
"resource",
"matching",
"this",
"request",
"."
] | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/client.py#L274-L281 |
contentful-labs/contentful.py | contentful/cda/client.py | RequestArray.where | def where(self, params):
"""Set a dict of parameters to be passed to the API when invoking this request.
:param params: (dict) query parameters.
:return: this :class:`.RequestArray` instance for convenience.
"""
self.params = dict(self.params, **params) # params overrides self.params
return self | python | def where(self, params):
"""Set a dict of parameters to be passed to the API when invoking this request.
:param params: (dict) query parameters.
:return: this :class:`.RequestArray` instance for convenience.
"""
self.params = dict(self.params, **params) # params overrides self.params
return self | [
"def",
"where",
"(",
"self",
",",
"params",
")",
":",
"self",
".",
"params",
"=",
"dict",
"(",
"self",
".",
"params",
",",
"*",
"*",
"params",
")",
"# params overrides self.params",
"return",
"self"
] | Set a dict of parameters to be passed to the API when invoking this request.
:param params: (dict) query parameters.
:return: this :class:`.RequestArray` instance for convenience. | [
"Set",
"a",
"dict",
"of",
"parameters",
"to",
"be",
"passed",
"to",
"the",
"API",
"when",
"invoking",
"this",
"request",
"."
] | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/client.py#L283-L290 |
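A hedged example chaining the three `RequestArray` methods; the query keys follow the standard CDA query syntax:

request = client.fetch(Entry).where({'content_type': 'cat', 'limit': 10})
cats = request.all()                                             # Array of matching entries
nyan = client.fetch(Entry).where({'sys.id': 'nyancat'}).first()  # None when nothing matches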
dusktreader/py-buzz | examples/require_condition.py | complex_require_condition | def complex_require_condition():
"""
This function demonstrates a more complex usage of the require_condition
function. It shows argument interpolation and handling a more complex
boolean expression
"""
print("Demonstrating complex require_condition example")
val = 64
Buzz.require_condition(is_even(val), 'This condition should pass')
val = 81
Buzz.require_condition(is_even(val), 'Value {val} is not even', val=val) | python | def complex_require_condition():
"""
This function demonstrates a more complex usage of the require_condition
function. It shows argument interpolation and handling a more complex
boolean expression
"""
print("Demonstrating complex require_condition example")
val = 64
Buzz.require_condition(is_even(val), 'This condition should pass')
val = 81
Buzz.require_condition(is_even(val), 'Value {val} is not even', val=val) | [
"def",
"complex_require_condition",
"(",
")",
":",
"print",
"(",
"\"Demonstrating complex require_condition example\"",
")",
"val",
"=",
"64",
"Buzz",
".",
"require_condition",
"(",
"is_even",
"(",
"val",
")",
",",
"'This condition should pass'",
")",
"val",
"=",
"81",
"Buzz",
".",
"require_condition",
"(",
"is_even",
"(",
"val",
")",
",",
"'Value {val} is not even'",
",",
"val",
"=",
"val",
")"
] | This function demonstrates a more complex usage of the require_condition
function. It shows argument interpolation and handling a more complex
boolean expression | [
"This",
"function",
"demonstrates",
"a",
"more",
"complex",
"usage",
"of",
"the",
"require_condition",
"function",
".",
"It",
"shows",
"argument",
"interpolation",
"and",
"handling",
"a",
"more",
"complex",
"boolean",
"expression"
] | train | https://github.com/dusktreader/py-buzz/blob/f2fd97abe158a1688188647992a5be6531058ec3/examples/require_condition.py#L27-L38 |
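A self-contained sketch of the calls above; the import path is an assumption about the package layout:

from buzz import Buzz

def is_even(value):
    return value % 2 == 0

Buzz.require_condition(is_even(64), 'This condition should pass')
Buzz.require_condition(is_even(81), 'Value {val} is not even', val=81)  # raises Buzz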
ozgurgunes/django-manifest | manifest/core/templatetags/navigation.py | active_url | def active_url(context, urls, css=None):
"""
Highlight menu item based on url tag.
Returns a css class if ``request.path`` matches one of the given ``urls``.
:param urls:
Space-separated Django url names to be reversed.
:param css:
Css class to be returned for highlighting. Returns ``active`` if none is set.
"""
request = context['request']
if request.get_full_path() in (reverse(url) for url in urls.split()):
return css if css else 'active'
return '' | python | def active_url(context, urls, css=None):
"""
Highlight menu item based on url tag.
Returns a css class if ``request.path`` matches one of the given ``urls``.
:param urls:
Space-separated Django url names to be reversed.
:param css:
Css class to be returned for highlighting. Returns ``active`` if none is set.
"""
request = context['request']
if request.get_full_path() in (reverse(url) for url in urls.split()):
return css if css else 'active'
return '' | [
"def",
"active_url",
"(",
"context",
",",
"urls",
",",
"css",
"=",
"None",
")",
":",
"request",
"=",
"context",
"[",
"'request'",
"]",
"if",
"request",
".",
"get_full_path",
"in",
"(",
"reverse",
"(",
"url",
")",
"for",
"url",
"in",
"urls",
".",
"split",
"(",
")",
")",
":",
"return",
"css",
"if",
"css",
"else",
"'active'",
"return",
"''"
] | Highlight menu item based on url tag.
Returns a css class if ``request.path`` matches one of the given ``urls``.
:param urls:
Space-separated Django url names to be reversed.
:param css:
Css class to be returned for highlighting. Returns ``active`` if none is set. | [
"Highlight",
"menu",
"item",
"based",
"on",
"url",
"tag",
".",
"Returns",
"a",
"css",
"class",
"if",
"request",
".",
"path",
"is",
"in",
"given",
"url",
".",
":",
"param",
"url",
":",
"Django",
"url",
"to",
"be",
"reversed",
"."
] | train | https://github.com/ozgurgunes/django-manifest/blob/9873bbf2a475b76284ad7e36b2b26c92131e72dd/manifest/core/templatetags/navigation.py#L61-L77 |
ozgurgunes/django-manifest | manifest/core/templatetags/navigation.py | active_path | def active_path(context, pattern, css=None):
"""
Highlight menu item based on path.
Returns a css class if ``request.path`` matches the given regex ``pattern``.
:param pattern:
Regex url pattern.
:param css:
Css class to be returned for highlighting. Returns ``active`` if none is set.
"""
request = context['request']
#pattern = "^" + pattern + "$"
if re.search(pattern, request.path):
return css if css else 'active'
return '' | python | def active_path(context, pattern, css=None):
"""
Highlight menu item based on path.
Returns a css class if ``request.path`` matches the given regex ``pattern``.
:param pattern:
Regex url pattern.
:param css:
Css class to be returned for highlighting. Returns ``active`` if none is set.
"""
request = context['request']
#pattern = "^" + pattern + "$"
if re.search(pattern, request.path):
return css if css else 'active'
return '' | [
"def",
"active_path",
"(",
"context",
",",
"pattern",
",",
"css",
"=",
"None",
")",
":",
"request",
"=",
"context",
"[",
"'request'",
"]",
"#pattern = \"^\" + pattern + \"$\"",
"if",
"re",
".",
"search",
"(",
"pattern",
",",
"request",
".",
"path",
")",
":",
"return",
"css",
"if",
"css",
"else",
"'active'",
"return",
"''"
] | Highlight menu item based on path.
Returns a css class if ``request.path`` matches the given regex ``pattern``.
:param pattern:
Regex url pattern.
:param css:
Css class to be returned for highlighting. Returns ``active`` if none is set. | [
"Highlight",
"menu",
"item",
"based",
"on",
"path",
".",
"Returns",
"a",
"css",
"class",
"if",
"request",
".",
"path",
"is",
"in",
"given",
"pattern",
".",
":",
"param",
"pattern",
":",
"Regex",
"url",
"pattern",
"."
] | train | https://github.com/ozgurgunes/django-manifest/blob/9873bbf2a475b76284ad7e36b2b26c92131e72dd/manifest/core/templatetags/navigation.py#L80-L97 |
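Both tags can be exercised as plain functions in a test; the context dict below stands in for Django's template context, and the request is assumed to come from something like `RequestFactory`:

context = {'request': request}
active_url(context, 'home dashboard')            # 'active' when the path reverses to a match
active_path(context, r'^/blog/', css='current')  # 'current' when the regex matches the path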
zeroSteiner/smoke-zephyr | smoke_zephyr/configuration.py | MemoryConfiguration.get | def get(self, item_name):
"""
Retrieve the value of an option.
:param str item_name: The name of the option to retrieve.
:return: The value of *item_name* in the configuration.
"""
if self.prefix:
item_name = self.prefix + self.seperator + item_name
item_names = item_name.split(self.seperator)
node = self._storage
for item_name in item_names:
node = node[item_name]
return node | python | def get(self, item_name):
"""
Retrieve the value of an option.
:param str item_name: The name of the option to retrieve.
:return: The value of *item_name* in the configuration.
"""
if self.prefix:
item_name = self.prefix + self.seperator + item_name
item_names = item_name.split(self.seperator)
node = self._storage
for item_name in item_names:
node = node[item_name]
return node | [
"def",
"get",
"(",
"self",
",",
"item_name",
")",
":",
"if",
"self",
".",
"prefix",
":",
"item_name",
"=",
"self",
".",
"prefix",
"+",
"self",
".",
"seperator",
"+",
"item_name",
"item_names",
"=",
"item_name",
".",
"split",
"(",
"self",
".",
"seperator",
")",
"node",
"=",
"self",
".",
"_storage",
"for",
"item_name",
"in",
"item_names",
":",
"node",
"=",
"node",
"[",
"item_name",
"]",
"return",
"node"
] | Retrieve the value of an option.
:param str item_name: The name of the option to retrieve.
:return: The value of *item_name* in the configuration. | [
"Retrieve",
"the",
"value",
"of",
"an",
"option",
"."
] | train | https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/configuration.py#L80-L93 |
zeroSteiner/smoke-zephyr | smoke_zephyr/configuration.py | MemoryConfiguration.get_if_exists | def get_if_exists(self, item_name, default_value=None):
"""
Retrieve the value of an option if it exists, otherwise
return *default_value* instead of raising an error:
:param str item_name: The name of the option to retrieve.
:param default_value: The value to return if *item_name* does not exist.
:return: The value of *item_name* in the configuration.
"""
if self.has_option(item_name):
return self.get(item_name)
return default_value | python | def get_if_exists(self, item_name, default_value=None):
"""
Retrieve the value of an option if it exists, otherwise
return *default_value* instead of raising an error:
:param str item_name: The name of the option to retrieve.
:param default_value: The value to return if *item_name* does not exist.
:return: The value of *item_name* in the configuration.
"""
if self.has_option(item_name):
return self.get(item_name)
return default_value | [
"def",
"get_if_exists",
"(",
"self",
",",
"item_name",
",",
"default_value",
"=",
"None",
")",
":",
"if",
"self",
".",
"has_option",
"(",
"item_name",
")",
":",
"return",
"self",
".",
"get",
"(",
"item_name",
")",
"return",
"default_value"
] | Retrieve the value of an option if it exists, otherwise
return *default_value* instead of raising an error:
:param str item_name: The name of the option to retrieve.
:param default_value: The value to return if *item_name* does not exist.
:return: The value of *item_name* in the configuration. | [
"Retrieve",
"the",
"value",
"of",
"an",
"option",
"if",
"it",
"exists",
"otherwise",
"return",
"*",
"default_value",
"*",
"instead",
"of",
"raising",
"an",
"error",
":"
] | train | https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/configuration.py#L95-L106 |
zeroSteiner/smoke-zephyr | smoke_zephyr/configuration.py | MemoryConfiguration.has_option | def has_option(self, option_name):
"""
Check that an option exists.
:param str option_name: The name of the option to check.
:return: True if the option exists in the configuration.
:rtype: bool
"""
if self.prefix:
option_name = self.prefix + self.seperator + option_name
item_names = option_name.split(self.seperator)
node = self._storage
for item_name in item_names:
if node is None:
return False
if not item_name in node:
return False
node = node[item_name]
return True | python | def has_option(self, option_name):
"""
Check that an option exists.
:param str option_name: The name of the option to check.
:return: True if the option exists in the configuration.
:rtype: bool
"""
if self.prefix:
option_name = self.prefix + self.seperator + option_name
item_names = option_name.split(self.seperator)
node = self._storage
for item_name in item_names:
if node is None:
return False
if not item_name in node:
return False
node = node[item_name]
return True | [
"def",
"has_option",
"(",
"self",
",",
"option_name",
")",
":",
"if",
"self",
".",
"prefix",
":",
"option_name",
"=",
"self",
".",
"prefix",
"+",
"self",
".",
"seperator",
"+",
"option_name",
"item_names",
"=",
"option_name",
".",
"split",
"(",
"self",
".",
"seperator",
")",
"node",
"=",
"self",
".",
"_storage",
"for",
"item_name",
"in",
"item_names",
":",
"if",
"node",
"is",
"None",
":",
"return",
"False",
"if",
"not",
"item_name",
"in",
"node",
":",
"return",
"False",
"node",
"=",
"node",
"[",
"item_name",
"]",
"return",
"True"
] | Check that an option exists.
:param str option_name: The name of the option to check.
:return: True if the option exists in the configuration.
:rtype: bool | [
"Check",
"that",
"an",
"option",
"exists",
"."
] | train | https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/configuration.py#L118-L136 |
zeroSteiner/smoke-zephyr | smoke_zephyr/configuration.py | MemoryConfiguration.has_section | def has_section(self, section_name):
"""
Checks that an option exists and that it contains sub options.
:param str section_name: The name of the section to check.
:return: True if the section exists.
:rtype: bool
"""
if not self.has_option(section_name):
return False
return isinstance(self.get(section_name), dict) | python | def has_section(self, section_name):
"""
Checks that an option exists and that it contains sub options.
:param str section_name: The name of the section to check.
:return: True if the section exists.
:rtype: bool
"""
if not self.has_option(section_name):
return False
return isinstance(self.get(section_name), dict) | [
"def",
"has_section",
"(",
"self",
",",
"section_name",
")",
":",
"if",
"not",
"self",
".",
"has_option",
"(",
"section_name",
")",
":",
"return",
"False",
"return",
"isinstance",
"(",
"self",
".",
"get",
"(",
"section_name",
")",
",",
"dict",
")"
] | Checks that an option exists and that it contains sub options.
:param str section_name: The name of the section to check.
:return: True if the section exists.
:rtype: bool | [
"Checks",
"that",
"an",
"option",
"exists",
"and",
"that",
"it",
"contains",
"sub",
"options",
"."
] | train | https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/configuration.py#L138-L148 |
zeroSteiner/smoke-zephyr | smoke_zephyr/configuration.py | MemoryConfiguration.set | def set(self, item_name, item_value):
"""
Sets the value of an option in the configuration.
:param str item_name: The name of the option to set.
:param item_value: The value of the option to set.
"""
if self.prefix:
item_name = self.prefix + self.seperator + item_name
item_names = item_name.split(self.seperator)
item_last = item_names.pop()
node = self._storage
for item_name in item_names:
if not item_name in node:
node[item_name] = {}
node = node[item_name]
node[item_last] = item_value
return | python | def set(self, item_name, item_value):
"""
Sets the value of an option in the configuration.
:param str item_name: The name of the option to set.
:param item_value: The value of the option to set.
"""
if self.prefix:
item_name = self.prefix + self.seperator + item_name
item_names = item_name.split(self.seperator)
item_last = item_names.pop()
node = self._storage
for item_name in item_names:
if not item_name in node:
node[item_name] = {}
node = node[item_name]
node[item_last] = item_value
return | [
"def",
"set",
"(",
"self",
",",
"item_name",
",",
"item_value",
")",
":",
"if",
"self",
".",
"prefix",
":",
"item_name",
"=",
"self",
".",
"prefix",
"+",
"self",
".",
"seperator",
"+",
"item_name",
"item_names",
"=",
"item_name",
".",
"split",
"(",
"self",
".",
"seperator",
")",
"item_last",
"=",
"item_names",
".",
"pop",
"(",
")",
"node",
"=",
"self",
".",
"_storage",
"for",
"item_name",
"in",
"item_names",
":",
"if",
"not",
"item_name",
"in",
"node",
":",
"node",
"[",
"item_name",
"]",
"=",
"{",
"}",
"node",
"=",
"node",
"[",
"item_name",
"]",
"node",
"[",
"item_last",
"]",
"=",
"item_value",
"return"
] | Sets the value of an option in the configuration.
:param str item_name: The name of the option to set.
:param item_value: The value of the option to set. | [
"Sets",
"the",
"value",
"of",
"an",
"option",
"in",
"the",
"configuration",
"."
] | train | https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/configuration.py#L150-L167 |
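A combined sketch of the accessors above; the constructor signature (initial storage plus a default '.' separator) is an assumption:

conf = MemoryConfiguration({'server': {'host': 'localhost'}})
conf.set('server.port', 8080)            # intermediate nodes are created on demand
conf.get('server.port')                  # -> 8080
conf.has_option('server.tls')            # -> False
conf.get_if_exists('server.tls', False)  # -> False instead of raising a KeyError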
zeroSteiner/smoke-zephyr | smoke_zephyr/configuration.py | Configuration.get_missing | def get_missing(self, verify_file):
"""
Use a verification configuration which has a list of required options
and their respective types. This information is used to identify missing
and incompatible options in the loaded configuration.
:param str verify_file: The file to load for verification data.
:return: A dictionary of missing and incompatible settings.
:rtype: dict
"""
vconf = Configuration(verify_file)
missing = {}
for setting, setting_type in vconf.get('settings').items():
if not self.has_option(setting):
missing['missing'] = missing.get('missing', [])
missing['missing'].append(setting)
elif not type(self.get(setting)).__name__ == setting_type:
missing['incompatible'] = missing.get('incompatible', [])
missing['incompatible'].append((setting, setting_type))
return missing | python | def get_missing(self, verify_file):
"""
Use a verification configuration which has a list of required options
and their respective types. This information is used to identify missing
and incompatible options in the loaded configuration.
:param str verify_file: The file to load for verification data.
:return: A dictionary of missing and incompatible settings.
:rtype: dict
"""
vconf = Configuration(verify_file)
missing = {}
for setting, setting_type in vconf.get('settings').items():
if not self.has_option(setting):
missing['missing'] = missing.get('missing', [])
missing['missing'].append(setting)
elif not type(self.get(setting)).__name__ == setting_type:
missing['incompatible'] = missing.get('incompatible', [])
missing['incompatible'].append((setting, setting_type))
return missing | [
"def",
"get_missing",
"(",
"self",
",",
"verify_file",
")",
":",
"vconf",
"=",
"Configuration",
"(",
"verify_file",
")",
"missing",
"=",
"{",
"}",
"for",
"setting",
",",
"setting_type",
"in",
"vconf",
".",
"get",
"(",
"'settings'",
")",
".",
"items",
"(",
")",
":",
"if",
"not",
"self",
".",
"has_option",
"(",
"setting",
")",
":",
"missing",
"[",
"'missing'",
"]",
"=",
"missing",
".",
"get",
"(",
"'settings'",
",",
"[",
"]",
")",
"missing",
"[",
"'missing'",
"]",
".",
"append",
"(",
"setting",
")",
"elif",
"not",
"type",
"(",
"self",
".",
"get",
"(",
"setting",
")",
")",
".",
"__name__",
"==",
"setting_type",
":",
"missing",
"[",
"'incompatible'",
"]",
"=",
"missing",
".",
"get",
"(",
"'incompatible'",
",",
"[",
"]",
")",
"missing",
"[",
"'incompatible'",
"]",
".",
"append",
"(",
"(",
"setting",
",",
"setting_type",
")",
")",
"return",
"missing"
] | Use a verification configuration which has a list of required options
and their respective types. This information is used to identify missing
and incompatible options in the loaded configuration.
:param str verify_file: The file to load for verification data.
:return: A dictionary of missing and incompatible settings.
:rtype: dict | [
"Use",
"a",
"verification",
"configuration",
"which",
"has",
"a",
"list",
"of",
"required",
"options",
"and",
"their",
"respective",
"types",
".",
"This",
"information",
"is",
"used",
"to",
"identify",
"missing",
"and",
"incompatible",
"options",
"in",
"the",
"loaded",
"configuration",
"."
] | train | https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/configuration.py#L197-L216 |
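A hedged sketch of the verification data `get_missing` expects: a mapping of option names to Python type names under a top-level 'settings' key, serialized in whatever format `Configuration` loads (the file name is invented):

# verify.json: {"settings": {"server.host": "str", "server.port": "int"}}
problems = conf.get_missing('verify.json')
# e.g. {'missing': ['server.host'], 'incompatible': [('server.port', 'int')]}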
zeroSteiner/smoke-zephyr | smoke_zephyr/configuration.py | Configuration.save | def save(self):
"""
Save the current configuration to disk.
"""
with open(self.configuration_file, 'w') as file_h:
file_h.write(self._serializer('dumps', self._storage)) | python | def save(self):
"""
Save the current configuration to disk.
"""
with open(self.configuration_file, 'w') as file_h:
file_h.write(self._serializer('dumps', self._storage)) | [
"def",
"save",
"(",
"self",
")",
":",
"with",
"open",
"(",
"self",
".",
"configuration_file",
",",
"'w'",
")",
"as",
"file_h",
":",
"file_h",
".",
"write",
"(",
"self",
".",
"_serializer",
"(",
"'dumps'",
",",
"self",
".",
"_storage",
")",
")"
] | Save the current configuration to disk. | [
"Save",
"the",
"current",
"configuration",
"to",
"disk",
"."
] | train | https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/configuration.py#L218-L223 |
ARMmbed/autoversion | src/auto_version/auto_version_tool.py | replace_lines | def replace_lines(regexer, handler, lines):
"""Uses replacement handler to perform replacements on lines of text
First we strip off all whitespace
We run the replacement on a clean 'content' string
Finally we replace the original content with the replaced version
This ensures that we retain the correct whitespace from the original line
"""
result = []
for line in lines:
content = line.strip()
replaced = regexer.sub(handler, content)
result.append(line.replace(content, replaced, 1))
return result | python | def replace_lines(regexer, handler, lines):
"""Uses replacement handler to perform replacements on lines of text
First we strip off all whitespace
We run the replacement on a clean 'content' string
Finally we replace the original content with the replaced version
This ensures that we retain the correct whitespace from the original line
"""
result = []
for line in lines:
content = line.strip()
replaced = regexer.sub(handler, content)
result.append(line.replace(content, replaced, 1))
return result | [
"def",
"replace_lines",
"(",
"regexer",
",",
"handler",
",",
"lines",
")",
":",
"result",
"=",
"[",
"]",
"for",
"line",
"in",
"lines",
":",
"content",
"=",
"line",
".",
"strip",
"(",
")",
"replaced",
"=",
"regexer",
".",
"sub",
"(",
"handler",
",",
"content",
")",
"result",
".",
"append",
"(",
"line",
".",
"replace",
"(",
"content",
",",
"replaced",
",",
"1",
")",
")",
"return",
"result"
] | Uses replacement handler to perform replacements on lines of text
First we strip off all whitespace
We run the replacement on a clean 'content' string
Finally we replace the original content with the replaced version
This ensures that we retain the correct whitespace from the original line | [
"Uses",
"replacement",
"handler",
"to",
"perform",
"replacements",
"on",
"lines",
"of",
"text"
] | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/auto_version_tool.py#L37-L50 |
ARMmbed/autoversion | src/auto_version/auto_version_tool.py | write_targets | def write_targets(targets, **params):
"""Writes version info into version file"""
handler = ReplacementHandler(**params)
for target, regexer in regexer_for_targets(targets):
with open(target) as fh:
lines = fh.readlines()
lines = replace_lines(regexer, handler, lines)
with open(target, "w") as fh:
fh.writelines(lines)
if handler.missing:
raise Exception(
"Failed to complete all expected replacements: %r" % handler.missing
) | python | def write_targets(targets, **params):
"""Writes version info into version file"""
handler = ReplacementHandler(**params)
for target, regexer in regexer_for_targets(targets):
with open(target) as fh:
lines = fh.readlines()
lines = replace_lines(regexer, handler, lines)
with open(target, "w") as fh:
fh.writelines(lines)
if handler.missing:
raise Exception(
"Failed to complete all expected replacements: %r" % handler.missing
) | [
"def",
"write_targets",
"(",
"targets",
",",
"*",
"*",
"params",
")",
":",
"handler",
"=",
"ReplacementHandler",
"(",
"*",
"*",
"params",
")",
"for",
"target",
",",
"regexer",
"in",
"regexer_for_targets",
"(",
"targets",
")",
":",
"with",
"open",
"(",
"target",
")",
"as",
"fh",
":",
"lines",
"=",
"fh",
".",
"readlines",
"(",
")",
"lines",
"=",
"replace_lines",
"(",
"regexer",
",",
"handler",
",",
"lines",
")",
"with",
"open",
"(",
"target",
",",
"\"w\"",
")",
"as",
"fh",
":",
"fh",
".",
"writelines",
"(",
"lines",
")",
"if",
"handler",
".",
"missing",
":",
"raise",
"Exception",
"(",
"\"Failed to complete all expected replacements: %r\"",
"%",
"handler",
".",
"missing",
")"
] | Writes version info into version file | [
"Writes",
"version",
"info",
"into",
"version",
"file"
] | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/auto_version_tool.py#L53-L65 |
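A minimal write sketch; the target file name is invented, and the keyword must match a key captured by the configured regex, otherwise `ReplacementHandler` records it as missing and an exception is raised:

write_targets(['src/_version.py'], __version__='1.2.4')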
ARMmbed/autoversion | src/auto_version/auto_version_tool.py | regexer_for_targets | def regexer_for_targets(targets):
"""Pairs up target files with their correct regex"""
for target in targets:
path, file_ext = os.path.splitext(target)
regexer = config.regexers[file_ext]
yield target, regexer | python | def regexer_for_targets(targets):
"""Pairs up target files with their correct regex"""
for target in targets:
path, file_ext = os.path.splitext(target)
regexer = config.regexers[file_ext]
yield target, regexer | [
"def",
"regexer_for_targets",
"(",
"targets",
")",
":",
"for",
"target",
"in",
"targets",
":",
"path",
",",
"file_ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"target",
")",
"regexer",
"=",
"config",
".",
"regexers",
"[",
"file_ext",
"]",
"yield",
"target",
",",
"regexer"
] | Pairs up target files with their correct regex | [
"Pairs",
"up",
"target",
"files",
"with",
"their",
"correct",
"regex"
] | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/auto_version_tool.py#L68-L73 |
ARMmbed/autoversion | src/auto_version/auto_version_tool.py | extract_keypairs | def extract_keypairs(lines, regexer):
"""Given some lines of text, extract key-value pairs from them"""
updates = {}
for line in lines:
# for consistency we must match the replacer and strip whitespace / newlines
match = regexer.match(line.strip())
if not match:
continue
k_v = match.groupdict()
updates[k_v[Constants.KEY_GROUP]] = k_v[Constants.VALUE_GROUP]
return updates | python | def extract_keypairs(lines, regexer):
"""Given some lines of text, extract key-value pairs from them"""
updates = {}
for line in lines:
# for consistency we must match the replacer and strip whitespace / newlines
match = regexer.match(line.strip())
if not match:
continue
k_v = match.groupdict()
updates[k_v[Constants.KEY_GROUP]] = k_v[Constants.VALUE_GROUP]
return updates | [
"def",
"extract_keypairs",
"(",
"lines",
",",
"regexer",
")",
":",
"updates",
"=",
"{",
"}",
"for",
"line",
"in",
"lines",
":",
"# for consistency we must match the replacer and strip whitespace / newlines",
"match",
"=",
"regexer",
".",
"match",
"(",
"line",
".",
"strip",
"(",
")",
")",
"if",
"not",
"match",
":",
"continue",
"k_v",
"=",
"match",
".",
"groupdict",
"(",
")",
"updates",
"[",
"k_v",
"[",
"Constants",
".",
"KEY_GROUP",
"]",
"]",
"=",
"k_v",
"[",
"Constants",
".",
"VALUE_GROUP",
"]",
"return",
"updates"
] | Given some lines of text, extract key-value pairs from them | [
"Given",
"some",
"lines",
"of",
"text",
"extract",
"key",
"-",
"value",
"pairs",
"from",
"them"
] | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/auto_version_tool.py#L76-L86 |
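A self-contained sketch, assuming `Constants.KEY_GROUP` and `Constants.VALUE_GROUP` are the group names 'key' and 'value':

import re

regexer = re.compile(r"^(?P<key>\w+)\s*=\s*['\"](?P<value>[^'\"]*)['\"]")
extract_keypairs(['__version__ = "1.2.3"\n'], regexer)  # -> {'__version__': '1.2.3'}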
ARMmbed/autoversion | src/auto_version/auto_version_tool.py | read_targets | def read_targets(targets):
"""Reads generic key-value pairs from input files"""
results = {}
for target, regexer in regexer_for_targets(targets):
with open(target) as fh:
results.update(extract_keypairs(fh.readlines(), regexer))
return results | python | def read_targets(targets):
"""Reads generic key-value pairs from input files"""
results = {}
for target, regexer in regexer_for_targets(targets):
with open(target) as fh:
results.update(extract_keypairs(fh.readlines(), regexer))
return results | [
"def",
"read_targets",
"(",
"targets",
")",
":",
"results",
"=",
"{",
"}",
"for",
"target",
",",
"regexer",
"in",
"regexer_for_targets",
"(",
"targets",
")",
":",
"with",
"open",
"(",
"target",
")",
"as",
"fh",
":",
"results",
".",
"update",
"(",
"extract_keypairs",
"(",
"fh",
".",
"readlines",
"(",
")",
",",
"regexer",
")",
")",
"return",
"results"
] | Reads generic key-value pairs from input files | [
"Reads",
"generic",
"key",
"-",
"value",
"pairs",
"from",
"input",
"files"
] | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/auto_version_tool.py#L89-L95 |
ARMmbed/autoversion | src/auto_version/auto_version_tool.py | detect_file_triggers | def detect_file_triggers(trigger_patterns):
"""The existence of files matching configured globs will trigger a version bump"""
triggers = set()
for trigger, pattern in trigger_patterns.items():
matches = glob.glob(pattern)
if matches:
_LOG.debug("trigger: %s bump from %r\n\t%s", trigger, pattern, matches)
triggers.add(trigger)
else:
_LOG.debug("trigger: no match on %r", pattern)
return triggers | python | def detect_file_triggers(trigger_patterns):
"""The existence of files matching configured globs will trigger a version bump"""
triggers = set()
for trigger, pattern in trigger_patterns.items():
matches = glob.glob(pattern)
if matches:
_LOG.debug("trigger: %s bump from %r\n\t%s", trigger, pattern, matches)
triggers.add(trigger)
else:
_LOG.debug("trigger: no match on %r", pattern)
return triggers | [
"def",
"detect_file_triggers",
"(",
"trigger_patterns",
")",
":",
"triggers",
"=",
"set",
"(",
")",
"for",
"trigger",
",",
"pattern",
"in",
"trigger_patterns",
".",
"items",
"(",
")",
":",
"matches",
"=",
"glob",
".",
"glob",
"(",
"pattern",
")",
"if",
"matches",
":",
"_LOG",
".",
"debug",
"(",
"\"trigger: %s bump from %r\\n\\t%s\"",
",",
"trigger",
",",
"pattern",
",",
"matches",
")",
"triggers",
".",
"add",
"(",
"trigger",
")",
"else",
":",
"_LOG",
".",
"debug",
"(",
"\"trigger: no match on %r\"",
",",
"pattern",
")",
"return",
"triggers"
] | The existence of files matching configured globs will trigger a version bump | [
"The",
"existence",
"of",
"files",
"matching",
"configured",
"globs",
"will",
"trigger",
"a",
"version",
"bump"
] | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/auto_version_tool.py#L98-L108 |
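A hedged sketch of the trigger configuration; the glob patterns and the news-fragment convention are invented for illustration:

trigger_patterns = {'minor': 'news/*.feature', 'patch': 'news/*.bugfix'}
bumps = detect_file_triggers(trigger_patterns)  # e.g. {'patch'} if only bugfix notes exist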
ARMmbed/autoversion | src/auto_version/auto_version_tool.py | get_all_triggers | def get_all_triggers(bump, file_triggers):
"""Aggregated set of significant figures to bump"""
triggers = set()
if file_triggers:
triggers = triggers.union(detect_file_triggers(config.trigger_patterns))
if bump:
_LOG.debug("trigger: %s bump requested", bump)
triggers.add(bump)
return triggers | python | def get_all_triggers(bump, file_triggers):
"""Aggregated set of significant figures to bump"""
triggers = set()
if file_triggers:
triggers = triggers.union(detect_file_triggers(config.trigger_patterns))
if bump:
_LOG.debug("trigger: %s bump requested", bump)
triggers.add(bump)
return triggers | [
"def",
"get_all_triggers",
"(",
"bump",
",",
"file_triggers",
")",
":",
"triggers",
"=",
"set",
"(",
")",
"if",
"file_triggers",
":",
"triggers",
"=",
"triggers",
".",
"union",
"(",
"detect_file_triggers",
"(",
"config",
".",
"trigger_patterns",
")",
")",
"if",
"bump",
":",
"_LOG",
".",
"debug",
"(",
"\"trigger: %s bump requested\"",
",",
"bump",
")",
"triggers",
".",
"add",
"(",
"bump",
")",
"return",
"triggers"
] | Aggregated set of significant figures to bump | [
"Aggregated",
"set",
"of",
"significant",
"figures",
"to",
"bump"
] | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/auto_version_tool.py#L111-L119 |
ARMmbed/autoversion | src/auto_version/auto_version_tool.py | get_lock_behaviour | def get_lock_behaviour(triggers, all_data, lock):
"""Binary state lock protects from version increments if set"""
updates = {}
lock_key = config._forward_aliases.get(Constants.VERSION_LOCK_FIELD)
# if we are explicitly setting or locking the version, then set the lock field True anyway
if lock:
updates[Constants.VERSION_LOCK_FIELD] = config.VERSION_LOCK_VALUE
elif (
triggers
and lock_key
and str(all_data.get(lock_key)) == str(config.VERSION_LOCK_VALUE)
):
triggers.clear()
updates[Constants.VERSION_LOCK_FIELD] = config.VERSION_UNLOCK_VALUE
return updates | python | def get_lock_behaviour(triggers, all_data, lock):
"""Binary state lock protects from version increments if set"""
updates = {}
lock_key = config._forward_aliases.get(Constants.VERSION_LOCK_FIELD)
# if we are explicitly setting or locking the version, then set the lock field True anyway
if lock:
updates[Constants.VERSION_LOCK_FIELD] = config.VERSION_LOCK_VALUE
elif (
triggers
and lock_key
and str(all_data.get(lock_key)) == str(config.VERSION_LOCK_VALUE)
):
triggers.clear()
updates[Constants.VERSION_LOCK_FIELD] = config.VERSION_UNLOCK_VALUE
return updates | [
"def",
"get_lock_behaviour",
"(",
"triggers",
",",
"all_data",
",",
"lock",
")",
":",
"updates",
"=",
"{",
"}",
"lock_key",
"=",
"config",
".",
"_forward_aliases",
".",
"get",
"(",
"Constants",
".",
"VERSION_LOCK_FIELD",
")",
"# if we are explicitly setting or locking the version, then set the lock field True anyway",
"if",
"lock",
":",
"updates",
"[",
"Constants",
".",
"VERSION_LOCK_FIELD",
"]",
"=",
"config",
".",
"VERSION_LOCK_VALUE",
"elif",
"(",
"triggers",
"and",
"lock_key",
"and",
"str",
"(",
"all_data",
".",
"get",
"(",
"lock_key",
")",
")",
"==",
"str",
"(",
"config",
".",
"VERSION_LOCK_VALUE",
")",
")",
":",
"triggers",
".",
"clear",
"(",
")",
"updates",
"[",
"Constants",
".",
"VERSION_LOCK_FIELD",
"]",
"=",
"config",
".",
"VERSION_UNLOCK_VALUE",
"return",
"updates"
] | Binary state lock protects from version increments if set | [
"Binary",
"state",
"lock",
"protects",
"from",
"version",
"increments",
"if",
"set"
] | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/auto_version_tool.py#L122-L136 |
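A rough illustration of the unlock path, assuming the configured alias for the lock field is the literal key 'VERSION_LOCK' and the lock value is 'True':

triggers = {'patch'}
updates = get_lock_behaviour(triggers, {'VERSION_LOCK': 'True'}, lock=False)
# triggers is now empty, and updates requests the unlock value be written back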
ARMmbed/autoversion | src/auto_version/auto_version_tool.py | get_final_version_string | def get_final_version_string(release_mode, semver, commit_count=0):
"""Generates update dictionary entries for the version string"""
version_string = ".".join(semver)
maybe_dev_version_string = version_string
updates = {}
if release_mode:
# in production, we have something like `1.2.3`, as well as a flag e.g. PRODUCTION=True
updates[Constants.RELEASE_FIELD] = config.RELEASED_VALUE
else:
# in dev mode, we have a dev marker e.g. `1.2.3.dev678`
maybe_dev_version_string = config.DEVMODE_TEMPLATE.format(
version=version_string, count=commit_count
)
# make available all components of the semantic version including the full string
updates[Constants.VERSION_FIELD] = maybe_dev_version_string
updates[Constants.VERSION_STRICT_FIELD] = version_string
return updates | python | def get_final_version_string(release_mode, semver, commit_count=0):
"""Generates update dictionary entries for the version string"""
version_string = ".".join(semver)
maybe_dev_version_string = version_string
updates = {}
if release_mode:
# in production, we have something like `1.2.3`, as well as a flag e.g. PRODUCTION=True
updates[Constants.RELEASE_FIELD] = config.RELEASED_VALUE
else:
# in dev mode, we have a dev marker e.g. `1.2.3.dev678`
maybe_dev_version_string = config.DEVMODE_TEMPLATE.format(
version=version_string, count=commit_count
)
# make available all components of the semantic version including the full string
updates[Constants.VERSION_FIELD] = maybe_dev_version_string
updates[Constants.VERSION_STRICT_FIELD] = version_string
return updates | [
"def",
"get_final_version_string",
"(",
"release_mode",
",",
"semver",
",",
"commit_count",
"=",
"0",
")",
":",
"version_string",
"=",
"\".\"",
".",
"join",
"(",
"semver",
")",
"maybe_dev_version_string",
"=",
"version_string",
"updates",
"=",
"{",
"}",
"if",
"release_mode",
":",
"# in production, we have something like `1.2.3`, as well as a flag e.g. PRODUCTION=True",
"updates",
"[",
"Constants",
".",
"RELEASE_FIELD",
"]",
"=",
"config",
".",
"RELEASED_VALUE",
"else",
":",
"# in dev mode, we have a dev marker e.g. `1.2.3.dev678`",
"maybe_dev_version_string",
"=",
"config",
".",
"DEVMODE_TEMPLATE",
".",
"format",
"(",
"version",
"=",
"version_string",
",",
"count",
"=",
"commit_count",
")",
"# make available all components of the semantic version including the full string",
"updates",
"[",
"Constants",
".",
"VERSION_FIELD",
"]",
"=",
"maybe_dev_version_string",
"updates",
"[",
"Constants",
".",
"VERSION_STRICT_FIELD",
"]",
"=",
"version_string",
"return",
"updates"
] | Generates update dictionary entries for the version string | [
"Generates",
"update",
"dictionary",
"entries",
"for",
"the",
"version",
"string"
] | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/auto_version_tool.py#L139-L156 |
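A hedged example of both modes, assuming `config.DEVMODE_TEMPLATE` looks like '{version}.dev{count}':

get_final_version_string(release_mode=False, semver=('1', '2', '3'), commit_count=678)
# -> version '1.2.3.dev678', strict version '1.2.3'
get_final_version_string(release_mode=True, semver=('1', '2', '3'))
# -> version '1.2.3', plus the configured release flag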
ARMmbed/autoversion | src/auto_version/auto_version_tool.py | get_dvcs_info | def get_dvcs_info():
"""Gets current repository info from git"""
cmd = "git rev-list --count HEAD"
commit_count = str(
int(subprocess.check_output(shlex.split(cmd)).decode("utf8").strip())
)
cmd = "git rev-parse HEAD"
commit = str(subprocess.check_output(shlex.split(cmd)).decode("utf8").strip())
return {Constants.COMMIT_FIELD: commit, Constants.COMMIT_COUNT_FIELD: commit_count} | python | def get_dvcs_info():
"""Gets current repository info from git"""
cmd = "git rev-list --count HEAD"
commit_count = str(
int(subprocess.check_output(shlex.split(cmd)).decode("utf8").strip())
)
cmd = "git rev-parse HEAD"
commit = str(subprocess.check_output(shlex.split(cmd)).decode("utf8").strip())
return {Constants.COMMIT_FIELD: commit, Constants.COMMIT_COUNT_FIELD: commit_count} | [
"def",
"get_dvcs_info",
"(",
")",
":",
"cmd",
"=",
"\"git rev-list --count HEAD\"",
"commit_count",
"=",
"str",
"(",
"int",
"(",
"subprocess",
".",
"check_output",
"(",
"shlex",
".",
"split",
"(",
"cmd",
")",
")",
".",
"decode",
"(",
"\"utf8\"",
")",
".",
"strip",
"(",
")",
")",
")",
"cmd",
"=",
"\"git rev-parse HEAD\"",
"commit",
"=",
"str",
"(",
"subprocess",
".",
"check_output",
"(",
"shlex",
".",
"split",
"(",
"cmd",
")",
")",
".",
"decode",
"(",
"\"utf8\"",
")",
".",
"strip",
"(",
")",
")",
"return",
"{",
"Constants",
".",
"COMMIT_FIELD",
":",
"commit",
",",
"Constants",
".",
"COMMIT_COUNT_FIELD",
":",
"commit_count",
"}"
] | Gets current repository info from git | [
"Gets",
"current",
"repository",
"info",
"from",
"git"
] | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/auto_version_tool.py#L159-L167 |
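The function simply shells out to git, so calling it at a repository root is equivalent to running the two commands it wraps:

info = get_dvcs_info()
# info[Constants.COMMIT_FIELD]       -> output of `git rev-parse HEAD`
# info[Constants.COMMIT_COUNT_FIELD] -> output of `git rev-list --count HEAD`, as a string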
ARMmbed/autoversion | src/auto_version/auto_version_tool.py | main | def main(
set_to=None,
set_patch_count=None,
release=None,
bump=None,
lock=None,
file_triggers=None,
config_path=None,
**extra_updates
):
"""Main workflow.
Load config from cli and file
Detect "bump triggers" - things that cause a version increment
Find the current version
Create a new version
Write out new version and any other requested variables
:param set_to: explicitly set semver to this version string
:param set_patch_count: sets the patch number to the commit count
:param release: marks with a production flag
just sets a single flag as per config
:param bump: string indicating major/minor/patch
more significant bumps will zero the less significant ones
:param lock: locks the version string for the next call to autoversion
lock only removed if a version bump would have occurred
:param file_triggers: whether to enable bumping based on file triggers
bumping occurs once if any file(s) exist that match the config
:param config_path: path to config file
:param extra_updates:
:return:
"""
updates = {}
if config_path:
get_or_create_config(config_path, config)
for k, v in config.regexers.items():
config.regexers[k] = re.compile(v)
# a forward-mapping of the configured aliases
# giving <our config param> : <the configured value>
# if a value occurs multiple times, we take the last set value
for k, v in config.key_aliases.items():
config._forward_aliases[v] = k
all_data = read_targets(config.targets)
current_semver = semver.get_current_semver(all_data)
triggers = get_all_triggers(bump, file_triggers)
updates.update(get_lock_behaviour(triggers, all_data, lock))
updates.update(get_dvcs_info())
if set_to:
_LOG.debug("setting version directly: %s", set_to)
new_semver = auto_version.definitions.SemVer(*set_to.split("."))
if not lock:
warnings.warn(
"After setting version manually, does it need locking for a CI flow?",
UserWarning,
)
elif set_patch_count:
_LOG.debug(
"auto-incrementing version, using commit count for patch: %s",
updates[Constants.COMMIT_COUNT_FIELD],
)
new_semver = semver.make_new_semver(
current_semver, triggers, patch=updates[Constants.COMMIT_COUNT_FIELD]
)
else:
_LOG.debug("auto-incrementing version")
new_semver = semver.make_new_semver(current_semver, triggers)
updates.update(
get_final_version_string(
release_mode=release,
semver=new_semver,
commit_count=updates.get(Constants.COMMIT_COUNT_FIELD, 0),
)
)
for part in semver.SemVerSigFig:
updates[part] = getattr(new_semver, part)
# only rewrite a field that the user has specified in the configuration
native_updates = {
native: updates[key]
for native, key in config.key_aliases.items()
if key in updates
}
# finally, add in commandline overrides
native_updates.update(extra_updates)
write_targets(config.targets, **native_updates)
return current_semver, new_semver, native_updates | python | def main(
set_to=None,
set_patch_count=None,
release=None,
bump=None,
lock=None,
file_triggers=None,
config_path=None,
**extra_updates
):
"""Main workflow.
Load config from cli and file
Detect "bump triggers" - things that cause a version increment
Find the current version
Create a new version
Write out new version and any other requested variables
:param set_to: explicitly set semver to this version string
:param set_patch_count: sets the patch number to the commit count
:param release: marks with a production flag
just sets a single flag as per config
:param bump: string indicating major/minor/patch
more significant bumps will zero the less significant ones
:param lock: locks the version string for the next call to autoversion
lock only removed if a version bump would have occurred
:param file_triggers: whether to enable bumping based on file triggers
bumping occurs once if any file(s) exist that match the config
:param config_path: path to config file
:param extra_updates:
:return:
"""
updates = {}
if config_path:
get_or_create_config(config_path, config)
for k, v in config.regexers.items():
config.regexers[k] = re.compile(v)
# a forward-mapping of the configured aliases
# giving <our config param> : <the configured value>
# if a value occurs multiple times, we take the last set value
for k, v in config.key_aliases.items():
config._forward_aliases[v] = k
all_data = read_targets(config.targets)
current_semver = semver.get_current_semver(all_data)
triggers = get_all_triggers(bump, file_triggers)
updates.update(get_lock_behaviour(triggers, all_data, lock))
updates.update(get_dvcs_info())
if set_to:
_LOG.debug("setting version directly: %s", set_to)
new_semver = auto_version.definitions.SemVer(*set_to.split("."))
if not lock:
warnings.warn(
"After setting version manually, does it need locking for a CI flow?",
UserWarning,
)
elif set_patch_count:
_LOG.debug(
"auto-incrementing version, using commit count for patch: %s",
updates[Constants.COMMIT_COUNT_FIELD],
)
new_semver = semver.make_new_semver(
current_semver, triggers, patch=updates[Constants.COMMIT_COUNT_FIELD]
)
else:
_LOG.debug("auto-incrementing version")
new_semver = semver.make_new_semver(current_semver, triggers)
updates.update(
get_final_version_string(
release_mode=release,
semver=new_semver,
commit_count=updates.get(Constants.COMMIT_COUNT_FIELD, 0),
)
)
for part in semver.SemVerSigFig:
updates[part] = getattr(new_semver, part)
# only rewrite a field that the user has specified in the configuration
native_updates = {
native: updates[key]
for native, key in config.key_aliases.items()
if key in updates
}
# finally, add in commandline overrides
native_updates.update(extra_updates)
write_targets(config.targets, **native_updates)
return current_semver, new_semver, native_updates | [
"def",
"main",
"(",
"set_to",
"=",
"None",
",",
"set_patch_count",
"=",
"None",
",",
"release",
"=",
"None",
",",
"bump",
"=",
"None",
",",
"lock",
"=",
"None",
",",
"file_triggers",
"=",
"None",
",",
"config_path",
"=",
"None",
",",
"*",
"*",
"extra_updates",
")",
":",
"updates",
"=",
"{",
"}",
"if",
"config_path",
":",
"get_or_create_config",
"(",
"config_path",
",",
"config",
")",
"for",
"k",
",",
"v",
"in",
"config",
".",
"regexers",
".",
"items",
"(",
")",
":",
"config",
".",
"regexers",
"[",
"k",
"]",
"=",
"re",
".",
"compile",
"(",
"v",
")",
"# a forward-mapping of the configured aliases",
"# giving <our config param> : <the configured value>",
"# if a value occurs multiple times, we take the last set value",
"for",
"k",
",",
"v",
"in",
"config",
".",
"key_aliases",
".",
"items",
"(",
")",
":",
"config",
".",
"_forward_aliases",
"[",
"v",
"]",
"=",
"k",
"all_data",
"=",
"read_targets",
"(",
"config",
".",
"targets",
")",
"current_semver",
"=",
"semver",
".",
"get_current_semver",
"(",
"all_data",
")",
"triggers",
"=",
"get_all_triggers",
"(",
"bump",
",",
"file_triggers",
")",
"updates",
".",
"update",
"(",
"get_lock_behaviour",
"(",
"triggers",
",",
"all_data",
",",
"lock",
")",
")",
"updates",
".",
"update",
"(",
"get_dvcs_info",
"(",
")",
")",
"if",
"set_to",
":",
"_LOG",
".",
"debug",
"(",
"\"setting version directly: %s\"",
",",
"set_to",
")",
"new_semver",
"=",
"auto_version",
".",
"definitions",
".",
"SemVer",
"(",
"*",
"set_to",
".",
"split",
"(",
"\".\"",
")",
")",
"if",
"not",
"lock",
":",
"warnings",
".",
"warn",
"(",
"\"After setting version manually, does it need locking for a CI flow?\"",
",",
"UserWarning",
",",
")",
"elif",
"set_patch_count",
":",
"_LOG",
".",
"debug",
"(",
"\"auto-incrementing version, using commit count for patch: %s\"",
",",
"updates",
"[",
"Constants",
".",
"COMMIT_COUNT_FIELD",
"]",
",",
")",
"new_semver",
"=",
"semver",
".",
"make_new_semver",
"(",
"current_semver",
",",
"triggers",
",",
"patch",
"=",
"updates",
"[",
"Constants",
".",
"COMMIT_COUNT_FIELD",
"]",
")",
"else",
":",
"_LOG",
".",
"debug",
"(",
"\"auto-incrementing version\"",
")",
"new_semver",
"=",
"semver",
".",
"make_new_semver",
"(",
"current_semver",
",",
"triggers",
")",
"updates",
".",
"update",
"(",
"get_final_version_string",
"(",
"release_mode",
"=",
"release",
",",
"semver",
"=",
"new_semver",
",",
"commit_count",
"=",
"updates",
".",
"get",
"(",
"Constants",
".",
"COMMIT_COUNT_FIELD",
",",
"0",
")",
",",
")",
")",
"for",
"part",
"in",
"semver",
".",
"SemVerSigFig",
":",
"updates",
"[",
"part",
"]",
"=",
"getattr",
"(",
"new_semver",
",",
"part",
")",
"# only rewrite a field that the user has specified in the configuration",
"native_updates",
"=",
"{",
"native",
":",
"updates",
"[",
"key",
"]",
"for",
"native",
",",
"key",
"in",
"config",
".",
"key_aliases",
".",
"items",
"(",
")",
"if",
"key",
"in",
"updates",
"}",
"# finally, add in commandline overrides",
"native_updates",
".",
"update",
"(",
"extra_updates",
")",
"write_targets",
"(",
"config",
".",
"targets",
",",
"*",
"*",
"native_updates",
")",
"return",
"current_semver",
",",
"new_semver",
",",
"native_updates"
] | Main workflow.
Load config from cli and file
Detect "bump triggers" - things that cause a version increment
Find the current version
Create a new version
Write out new version and any other requested variables
:param set_to: explicitly set semver to this version string
:param set_patch_count: sets the patch number to the commit count
:param release: marks with a production flag
just sets a single flag as per config
:param bump: string indicating major/minor/patch
more significant bumps will zero the less significant ones
:param lock: locks the version string for the next call to autoversion
lock only removed if a version bump would have occurred
:param file_triggers: whether to enable bumping based on file triggers
bumping occurs once if any file(s) exist that match the config
:param config_path: path to config file
:param extra_updates: extra key=value fields passed through as commandline overrides for the write targets
:return: tuple of (current_semver, new_semver, native_updates) | [
"Main",
"workflow",
"."
] | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/auto_version_tool.py#L170-L266 |
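A minimal standalone sketch of the alias-filtering step near the end of main() above: only fields the user named in key_aliases are rewritten, and commandline overrides are applied last. The sample config and update values are hypothetical.

# Hypothetical values standing in for config.key_aliases / updates / extra_updates.
key_aliases = {"__version__": "VERSION", "PATCH": "patch"}
updates = {"VERSION": "1.2.3", "patch": 3, "COMMIT": "abc123"}
extra_updates = {"PATCH": 4}  # commandline override

native_updates = {
    native: updates[key]
    for native, key in key_aliases.items()
    if key in updates
}
native_updates.update(extra_updates)
print(native_updates)  # {'__version__': '1.2.3', 'PATCH': 4}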
ARMmbed/autoversion | src/auto_version/auto_version_tool.py | main_from_cli | def main_from_cli():
"""Main workflow.
Load config from cli and file
Detect "bump triggers" - things that cause a version increment
Find the current version
Create a new version
Write out new version and any other requested variables
"""
args, others = get_cli()
if args.version:
print(__version__)
exit(0)
log_level = logging.WARNING - 10 * args.verbosity
logging.basicConfig(level=log_level, format="%(module)s %(levelname)8s %(message)s")
command_line_updates = parse_other_args(others)
old, new, updates = main(
set_to=args.set,
set_patch_count=args.set_patch_count,
lock=args.lock,
release=args.release,
bump=args.bump,
file_triggers=args.file_triggers,
config_path=args.config,
**command_line_updates
)
_LOG.info("previously: %s", old)
_LOG.info("currently: %s", new)
_LOG.debug("updates:\n%s", pprint.pformat(updates))
print(
updates.get(config._forward_aliases.get(Constants.VERSION_FIELD))
or updates.get(config._forward_aliases.get(Constants.VERSION_STRICT_FIELD))
) | python | def main_from_cli():
"""Main workflow.
Load config from cli and file
Detect "bump triggers" - things that cause a version increment
Find the current version
Create a new version
Write out new version and any other requested variables
"""
args, others = get_cli()
if args.version:
print(__version__)
exit(0)
log_level = logging.WARNING - 10 * args.verbosity
logging.basicConfig(level=log_level, format="%(module)s %(levelname)8s %(message)s")
command_line_updates = parse_other_args(others)
old, new, updates = main(
set_to=args.set,
set_patch_count=args.set_patch_count,
lock=args.lock,
release=args.release,
bump=args.bump,
file_triggers=args.file_triggers,
config_path=args.config,
**command_line_updates
)
_LOG.info("previously: %s", old)
_LOG.info("currently: %s", new)
_LOG.debug("updates:\n%s", pprint.pformat(updates))
print(
updates.get(config._forward_aliases.get(Constants.VERSION_FIELD))
or updates.get(config._forward_aliases.get(Constants.VERSION_STRICT_FIELD))
) | [
"def",
"main_from_cli",
"(",
")",
":",
"args",
",",
"others",
"=",
"get_cli",
"(",
")",
"if",
"args",
".",
"version",
":",
"print",
"(",
"__version__",
")",
"exit",
"(",
"0",
")",
"log_level",
"=",
"logging",
".",
"WARNING",
"-",
"10",
"*",
"args",
".",
"verbosity",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"log_level",
",",
"format",
"=",
"\"%(module)s %(levelname)8s %(message)s\"",
")",
"command_line_updates",
"=",
"parse_other_args",
"(",
"others",
")",
"old",
",",
"new",
",",
"updates",
"=",
"main",
"(",
"set_to",
"=",
"args",
".",
"set",
",",
"set_patch_count",
"=",
"args",
".",
"set_patch_count",
",",
"lock",
"=",
"args",
".",
"lock",
",",
"release",
"=",
"args",
".",
"release",
",",
"bump",
"=",
"args",
".",
"bump",
",",
"file_triggers",
"=",
"args",
".",
"file_triggers",
",",
"config_path",
"=",
"args",
".",
"config",
",",
"*",
"*",
"command_line_updates",
")",
"_LOG",
".",
"info",
"(",
"\"previously: %s\"",
",",
"old",
")",
"_LOG",
".",
"info",
"(",
"\"currently: %s\"",
",",
"new",
")",
"_LOG",
".",
"debug",
"(",
"\"updates:\\n%s\"",
",",
"pprint",
".",
"pformat",
"(",
"updates",
")",
")",
"print",
"(",
"updates",
".",
"get",
"(",
"config",
".",
"_forward_aliases",
".",
"get",
"(",
"Constants",
".",
"VERSION_FIELD",
")",
")",
"or",
"updates",
".",
"get",
"(",
"config",
".",
"_forward_aliases",
".",
"get",
"(",
"Constants",
".",
"VERSION_STRICT_FIELD",
")",
")",
")"
] | Main workflow.
Load config from cli and file
Detect "bump triggers" - things that cause a version increment
Find the current version
Create a new version
Write out new version and any other requested variables | [
"Main",
"workflow",
"."
] | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/auto_version_tool.py#L284-L320 |
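The verbosity handling in main_from_cli() maps each -v flag to one 10-point step below logging.WARNING; a self-contained sketch of that mapping:

import logging

for verbosity in (0, 1, 2):
    log_level = logging.WARNING - 10 * verbosity
    print(verbosity, logging.getLevelName(log_level))
# prints: 0 WARNING, 1 INFO, 2 DEBUG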
Clinical-Genomics/trailblazer | trailblazer/cli/core.py | log_cmd | def log_cmd(context, sampleinfo, sacct, quiet, config):
"""Log an analysis.
CONFIG: MIP config file for an analysis
"""
log_analysis = LogAnalysis(context.obj['store'])
try:
new_run = log_analysis(config, sampleinfo=sampleinfo, sacct=sacct)
except MissingFileError as error:
click.echo(click.style(f"Skipping, missing Sacct file: {error.message}", fg='yellow'))
return
except KeyError as error:
print(click.style(f"unexpected output, missing key: {error.args[0]}", fg='yellow'))
return
if new_run is None:
if not quiet:
click.echo(click.style('Analysis already logged', fg='yellow'))
else:
message = f"New log added: {new_run.family} ({new_run.id}) - {new_run.status}"
click.echo(click.style(message, fg='green')) | python | def log_cmd(context, sampleinfo, sacct, quiet, config):
"""Log an analysis.
CONFIG: MIP config file for an analysis
"""
log_analysis = LogAnalysis(context.obj['store'])
try:
new_run = log_analysis(config, sampleinfo=sampleinfo, sacct=sacct)
except MissingFileError as error:
click.echo(click.style(f"Skipping, missing Sacct file: {error.message}", fg='yellow'))
return
except KeyError as error:
print(click.style(f"unexpected output, missing key: {error.args[0]}", fg='yellow'))
return
if new_run is None:
if not quiet:
click.echo(click.style('Analysis already logged', fg='yellow'))
else:
message = f"New log added: {new_run.family} ({new_run.id}) - {new_run.status}"
click.echo(click.style(message, fg='green')) | [
"def",
"log_cmd",
"(",
"context",
",",
"sampleinfo",
",",
"sacct",
",",
"quiet",
",",
"config",
")",
":",
"log_analysis",
"=",
"LogAnalysis",
"(",
"context",
".",
"obj",
"[",
"'store'",
"]",
")",
"try",
":",
"new_run",
"=",
"log_analysis",
"(",
"config",
",",
"sampleinfo",
"=",
"sampleinfo",
",",
"sacct",
"=",
"sacct",
")",
"except",
"MissingFileError",
"as",
"error",
":",
"click",
".",
"echo",
"(",
"click",
".",
"style",
"(",
"f\"Skipping, missing Sacct file: {error.message}\"",
",",
"fg",
"=",
"'yellow'",
")",
")",
"return",
"except",
"KeyError",
"as",
"error",
":",
"print",
"(",
"click",
".",
"style",
"(",
"f\"unexpected output, missing key: {error.args[0]}\"",
",",
"fg",
"=",
"'yellow'",
")",
")",
"return",
"if",
"new_run",
"is",
"None",
":",
"if",
"not",
"quiet",
":",
"click",
".",
"echo",
"(",
"click",
".",
"style",
"(",
"'Analysis already logged'",
",",
"fg",
"=",
"'yellow'",
")",
")",
"else",
":",
"message",
"=",
"f\"New log added: {new_run.family} ({new_run.id}) - {new_run.status}\"",
"click",
".",
"echo",
"(",
"click",
".",
"style",
"(",
"message",
",",
"fg",
"=",
"'green'",
")",
")"
] | Log an analysis.
CONFIG: MIP config file for an analysis | [
"Log",
"an",
"analysis",
"."
] | train | https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/cli/core.py#L49-L68 |
Clinical-Genomics/trailblazer | trailblazer/cli/core.py | start | def start(context, mip_config, email, priority, dryrun, command, start_with, family):
"""Start a new analysis."""
mip_cli = MipCli(context.obj['script'])
mip_config = mip_config or context.obj['mip_config']
email = email or environ_email()
kwargs = dict(config=mip_config, family=family, priority=priority, email=email, dryrun=dryrun, start_with=start_with)
if command:
mip_command = mip_cli.build_command(**kwargs)
click.echo(' '.join(mip_command))
else:
try:
mip_cli(**kwargs)
if not dryrun:
context.obj['store'].add_pending(family, email=email)
except MipStartError as error:
click.echo(click.style(error.message, fg='red')) | python | def start(context, mip_config, email, priority, dryrun, command, start_with, family):
"""Start a new analysis."""
mip_cli = MipCli(context.obj['script'])
mip_config = mip_config or context.obj['mip_config']
email = email or environ_email()
kwargs = dict(config=mip_config, family=family, priority=priority, email=email, dryrun=dryrun, start_with=start_with)
if command:
mip_command = mip_cli.build_command(**kwargs)
click.echo(' '.join(mip_command))
else:
try:
mip_cli(**kwargs)
if not dryrun:
context.obj['store'].add_pending(family, email=email)
except MipStartError as error:
click.echo(click.style(error.message, fg='red')) | [
"def",
"start",
"(",
"context",
",",
"mip_config",
",",
"email",
",",
"priority",
",",
"dryrun",
",",
"command",
",",
"start_with",
",",
"family",
")",
":",
"mip_cli",
"=",
"MipCli",
"(",
"context",
".",
"obj",
"[",
"'script'",
"]",
")",
"mip_config",
"=",
"mip_config",
"or",
"context",
".",
"obj",
"[",
"'mip_config'",
"]",
"email",
"=",
"email",
"or",
"environ_email",
"(",
")",
"kwargs",
"=",
"dict",
"(",
"config",
"=",
"mip_config",
",",
"family",
"=",
"family",
",",
"priority",
"=",
"priority",
",",
"email",
"=",
"email",
",",
"dryrun",
"=",
"dryrun",
",",
"start_with",
"=",
"start_with",
")",
"if",
"command",
":",
"mip_command",
"=",
"mip_cli",
".",
"build_command",
"(",
"*",
"*",
"kwargs",
")",
"click",
".",
"echo",
"(",
"' '",
".",
"join",
"(",
"mip_command",
")",
")",
"else",
":",
"try",
":",
"mip_cli",
"(",
"*",
"*",
"kwargs",
")",
"if",
"not",
"dryrun",
":",
"context",
".",
"obj",
"[",
"'store'",
"]",
".",
"add_pending",
"(",
"family",
",",
"email",
"=",
"email",
")",
"except",
"MipStartError",
"as",
"error",
":",
"click",
".",
"echo",
"(",
"click",
".",
"style",
"(",
"error",
".",
"message",
",",
"fg",
"=",
"'red'",
")",
")"
] | Start a new analysis. | [
"Start",
"a",
"new",
"analysis",
"."
] | train | https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/cli/core.py#L80-L95 |
Clinical-Genomics/trailblazer | trailblazer/cli/core.py | init | def init(context, reset, force):
"""Setup the database."""
existing_tables = context.obj['store'].engine.table_names()
if force or reset:
if existing_tables and not force:
message = f"Delete existing tables? [{', '.join(existing_tables)}]"
click.confirm(click.style(message, fg='yellow'), abort=True)
context.obj['store'].drop_all()
elif existing_tables:
click.echo(click.style("Database already exists, use '--reset'", fg='red'))
context.abort()
context.obj['store'].setup()
message = f"Success! New tables: {', '.join(context.obj['store'].engine.table_names())}"
click.echo(click.style(message, fg='green')) | python | def init(context, reset, force):
"""Setup the database."""
existing_tables = context.obj['store'].engine.table_names()
if force or reset:
if existing_tables and not force:
message = f"Delete existing tables? [{', '.join(existing_tables)}]"
click.confirm(click.style(message, fg='yellow'), abort=True)
context.obj['store'].drop_all()
elif existing_tables:
click.echo(click.style("Database already exists, use '--reset'", fg='red'))
context.abort()
context.obj['store'].setup()
message = f"Success! New tables: {', '.join(context.obj['store'].engine.table_names())}"
click.echo(click.style(message, fg='green')) | [
"def",
"init",
"(",
"context",
",",
"reset",
",",
"force",
")",
":",
"existing_tables",
"=",
"context",
".",
"obj",
"[",
"'store'",
"]",
".",
"engine",
".",
"table_names",
"(",
")",
"if",
"force",
"or",
"reset",
":",
"if",
"existing_tables",
"and",
"not",
"force",
":",
"message",
"=",
"f\"Delete existing tables? [{', '.join(existing_tables)}]\"",
"click",
".",
"confirm",
"(",
"click",
".",
"style",
"(",
"message",
",",
"fg",
"=",
"'yellow'",
")",
",",
"abort",
"=",
"True",
")",
"context",
".",
"obj",
"[",
"'store'",
"]",
".",
"drop_all",
"(",
")",
"elif",
"existing_tables",
":",
"click",
".",
"echo",
"(",
"click",
".",
"style",
"(",
"\"Database already exists, use '--reset'\"",
",",
"fg",
"=",
"'red'",
")",
")",
"context",
".",
"abort",
"(",
")",
"context",
".",
"obj",
"[",
"'store'",
"]",
".",
"setup",
"(",
")",
"message",
"=",
"f\"Success! New tables: {', '.join(context.obj['store'].engine.table_names())}\"",
"click",
".",
"echo",
"(",
"click",
".",
"style",
"(",
"message",
",",
"fg",
"=",
"'green'",
")",
")"
] | Setup the database. | [
"Setup",
"the",
"database",
"."
] | train | https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/cli/core.py#L102-L116 |
Clinical-Genomics/trailblazer | trailblazer/cli/core.py | scan | def scan(context, root_dir):
"""Scan a directory for analyses."""
root_dir = root_dir or context.obj['root']
config_files = Path(root_dir).glob('*/analysis/*_config.yaml')
for config_file in config_files:
LOG.debug("found analysis config: %s", config_file)
with config_file.open() as stream:
context.invoke(log_cmd, config=stream, quiet=True)
context.obj['store'].track_update() | python | def scan(context, root_dir):
"""Scan a directory for analyses."""
root_dir = root_dir or context.obj['root']
config_files = Path(root_dir).glob('*/analysis/*_config.yaml')
for config_file in config_files:
LOG.debug("found analysis config: %s", config_file)
with config_file.open() as stream:
context.invoke(log_cmd, config=stream, quiet=True)
context.obj['store'].track_update() | [
"def",
"scan",
"(",
"context",
",",
"root_dir",
")",
":",
"root_dir",
"=",
"root_dir",
"or",
"context",
".",
"obj",
"[",
"'root'",
"]",
"config_files",
"=",
"Path",
"(",
"root_dir",
")",
".",
"glob",
"(",
"'*/analysis/*_config.yaml'",
")",
"for",
"config_file",
"in",
"config_files",
":",
"LOG",
".",
"debug",
"(",
"\"found analysis config: %s\"",
",",
"config_file",
")",
"with",
"config_file",
".",
"open",
"(",
")",
"as",
"stream",
":",
"context",
".",
"invoke",
"(",
"log_cmd",
",",
"config",
"=",
"stream",
",",
"quiet",
"=",
"True",
")",
"context",
".",
"obj",
"[",
"'store'",
"]",
".",
"track_update",
"(",
")"
] | Scan a directory for analyses. | [
"Scan",
"a",
"directory",
"for",
"analyses",
"."
] | train | https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/cli/core.py#L122-L131 |
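scan() above discovers analyses purely through a pathlib glob; a runnable sketch of that discovery pattern against a throwaway directory (the family name and file layout are assumptions for illustration):

import tempfile
from pathlib import Path

root_dir = Path(tempfile.mkdtemp(prefix="demo_root_"))
analysis_dir = root_dir / "family1" / "analysis"
analysis_dir.mkdir(parents=True)
(analysis_dir / "family1_config.yaml").write_text("dummy: 1\n")

for config_file in Path(root_dir).glob("*/analysis/*_config.yaml"):
    print(config_file.name)  # family1_config.yaml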
Clinical-Genomics/trailblazer | trailblazer/cli/core.py | user | def user(context, name, email):
"""Add a new or display information about an existing user."""
existing_user = context.obj['store'].user(email)
if existing_user:
click.echo(existing_user.to_dict())
elif name:
new_user = context.obj['store'].add_user(name, email)
click.echo(click.style(f"New user added: {email} ({new_user.id})", fg='green'))
else:
click.echo(click.style('User not found', fg='yellow')) | python | def user(context, name, email):
"""Add a new or display information about an existing user."""
existing_user = context.obj['store'].user(email)
if existing_user:
click.echo(existing_user.to_dict())
elif name:
new_user = context.obj['store'].add_user(name, email)
click.echo(click.style(f"New user added: {email} ({new_user.id})", fg='green'))
else:
click.echo(click.style('User not found', fg='yellow')) | [
"def",
"user",
"(",
"context",
",",
"name",
",",
"email",
")",
":",
"existing_user",
"=",
"context",
".",
"obj",
"[",
"'store'",
"]",
".",
"user",
"(",
"email",
")",
"if",
"existing_user",
":",
"click",
".",
"echo",
"(",
"existing_user",
".",
"to_dict",
"(",
")",
")",
"elif",
"name",
":",
"new_user",
"=",
"context",
".",
"obj",
"[",
"'store'",
"]",
".",
"add_user",
"(",
"name",
",",
"email",
")",
"click",
".",
"echo",
"(",
"click",
".",
"style",
"(",
"f\"New user added: {email} ({new_user.id})\"",
",",
"fg",
"=",
"'green'",
")",
")",
"else",
":",
"click",
".",
"echo",
"(",
"click",
".",
"style",
"(",
"'User not found'",
",",
"fg",
"=",
"'yellow'",
")",
")"
] | Add a new user or display information about an existing user. | [
"Add",
"a",
"new",
"or",
"display",
"information",
"about",
"an",
"existing",
"user",
"."
] | train | https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/cli/core.py#L138-L147 |
Clinical-Genomics/trailblazer | trailblazer/cli/core.py | cancel | def cancel(context, jobs, analysis_id):
"""Cancel all jobs in a run."""
analysis_obj = context.obj['store'].analysis(analysis_id)
if analysis_obj is None:
click.echo('analysis not found')
context.abort()
elif analysis_obj.status != 'running':
click.echo(f"analysis not running: {analysis_obj.status}")
context.abort()
config_path = Path(analysis_obj.config_path)
with config_path.open() as config_stream:
config_raw = ruamel.yaml.safe_load(config_stream)
config_data = parse_config(config_raw)
log_path = Path(f"{config_data['log_path']}")
if not log_path.exists():
click.echo(f"missing MIP log file: {log_path}")
context.abort()
with log_path.open() as log_stream:
all_jobs = job_ids(log_stream)
if jobs:
for job_id in all_jobs:
click.echo(job_id)
else:
for job_id in all_jobs:
LOG.debug(f"cancelling job: {job_id}")
process = subprocess.Popen(['scancel', job_id])
process.wait()
analysis_obj.status = 'canceled'
context.obj['store'].commit()
click.echo('cancelled analysis successfully!') | python | def cancel(context, jobs, analysis_id):
"""Cancel all jobs in a run."""
analysis_obj = context.obj['store'].analysis(analysis_id)
if analysis_obj is None:
click.echo('analysis not found')
context.abort()
elif analysis_obj.status != 'running':
click.echo(f"analysis not running: {analysis_obj.status}")
context.abort()
config_path = Path(analysis_obj.config_path)
with config_path.open() as config_stream:
config_raw = ruamel.yaml.safe_load(config_stream)
config_data = parse_config(config_raw)
log_path = Path(f"{config_data['log_path']}")
if not log_path.exists():
click.echo(f"missing MIP log file: {log_path}")
context.abort()
with log_path.open() as log_stream:
all_jobs = job_ids(log_stream)
if jobs:
for job_id in all_jobs:
click.echo(job_id)
else:
for job_id in all_jobs:
LOG.debug(f"cancelling job: {job_id}")
process = subprocess.Popen(['scancel', job_id])
process.wait()
analysis_obj.status = 'canceled'
context.obj['store'].commit()
click.echo('cancelled analysis successfully!') | [
"def",
"cancel",
"(",
"context",
",",
"jobs",
",",
"analysis_id",
")",
":",
"analysis_obj",
"=",
"context",
".",
"obj",
"[",
"'store'",
"]",
".",
"analysis",
"(",
"analysis_id",
")",
"if",
"analysis_obj",
"is",
"None",
":",
"click",
".",
"echo",
"(",
"'analysis not found'",
")",
"context",
".",
"abort",
"(",
")",
"elif",
"analysis_obj",
".",
"status",
"!=",
"'running'",
":",
"click",
".",
"echo",
"(",
"f\"analysis not running: {analysis_obj.status}\"",
")",
"context",
".",
"abort",
"(",
")",
"config_path",
"=",
"Path",
"(",
"analysis_obj",
".",
"config_path",
")",
"with",
"config_path",
".",
"open",
"(",
")",
"as",
"config_stream",
":",
"config_raw",
"=",
"ruamel",
".",
"yaml",
".",
"safe_load",
"(",
"config_stream",
")",
"config_data",
"=",
"parse_config",
"(",
"config_raw",
")",
"log_path",
"=",
"Path",
"(",
"f\"{config_data['log_path']}\"",
")",
"if",
"not",
"log_path",
".",
"exists",
"(",
")",
":",
"click",
".",
"echo",
"(",
"f\"missing MIP log file: {log_path}\"",
")",
"context",
".",
"abort",
"(",
")",
"with",
"log_path",
".",
"open",
"(",
")",
"as",
"log_stream",
":",
"all_jobs",
"=",
"job_ids",
"(",
"log_stream",
")",
"if",
"jobs",
":",
"for",
"job_id",
"in",
"all_jobs",
":",
"click",
".",
"echo",
"(",
"job_id",
")",
"else",
":",
"for",
"job_id",
"in",
"all_jobs",
":",
"LOG",
".",
"debug",
"(",
"f\"cancelling job: {job_id}\"",
")",
"process",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"'scancel'",
",",
"job_id",
"]",
")",
"process",
".",
"wait",
"(",
")",
"analysis_obj",
".",
"status",
"=",
"'canceled'",
"context",
".",
"obj",
"[",
"'store'",
"]",
".",
"commit",
"(",
")",
"click",
".",
"echo",
"(",
"'cancelled analysis successfully!'",
")"
] | Cancel all jobs in a run. | [
"Cancel",
"all",
"jobs",
"in",
"a",
"run",
"."
] | train | https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/cli/core.py#L154-L188 |
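The job-cancellation loop at the heart of cancel() is one subprocess per job id, waited on in sequence; a harmless stand-in that swaps scancel for echo (POSIX shells only):

import subprocess

all_jobs = ["1001", "1002"]
for job_id in all_jobs:
    process = subprocess.Popen(["echo", job_id])  # 'scancel' in the real code
    process.wait()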
pytroll/posttroll | posttroll/ns.py | get_pub_addresses | def get_pub_addresses(names=None, timeout=10, nameserver="localhost"):
"""Get the address of the publisher for a given list of publisher *names*
from the nameserver on *nameserver* (localhost by default).
"""
addrs = []
if names is None:
names = ["", ]
for name in names:
then = datetime.now() + timedelta(seconds=timeout)
while datetime.now() < then:
addrs += get_pub_address(name, nameserver=nameserver)
if addrs:
break
time.sleep(.5)
return addrs | python | def get_pub_addresses(names=None, timeout=10, nameserver="localhost"):
"""Get the address of the publisher for a given list of publisher *names*
from the nameserver on *nameserver* (localhost by default).
"""
addrs = []
if names is None:
names = ["", ]
for name in names:
then = datetime.now() + timedelta(seconds=timeout)
while datetime.now() < then:
addrs += get_pub_address(name, nameserver=nameserver)
if addrs:
break
time.sleep(.5)
return addrs | [
"def",
"get_pub_addresses",
"(",
"names",
"=",
"None",
",",
"timeout",
"=",
"10",
",",
"nameserver",
"=",
"\"localhost\"",
")",
":",
"addrs",
"=",
"[",
"]",
"if",
"names",
"is",
"None",
":",
"names",
"=",
"[",
"\"\"",
",",
"]",
"for",
"name",
"in",
"names",
":",
"then",
"=",
"datetime",
".",
"now",
"(",
")",
"+",
"timedelta",
"(",
"seconds",
"=",
"timeout",
")",
"while",
"datetime",
".",
"now",
"(",
")",
"<",
"then",
":",
"addrs",
"+=",
"get_pub_address",
"(",
"name",
",",
"nameserver",
"=",
"nameserver",
")",
"if",
"addrs",
":",
"break",
"time",
".",
"sleep",
"(",
".5",
")",
"return",
"addrs"
] | Get the address of the publisher for a given list of publisher *names*
from the nameserver on *nameserver* (localhost by default). | [
"Get",
"the",
"address",
"of",
"the",
"publisher",
"for",
"a",
"given",
"list",
"of",
"publisher",
"*",
"names",
"*",
"from",
"the",
"nameserver",
"on",
"*",
"nameserver",
"*",
"(",
"localhost",
"by",
"default",
")",
"."
] | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/ns.py#L59-L73 |
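get_pub_addresses() is a poll-until-deadline loop around get_pub_address(); here is the same shape in isolation, with a stand-in lookup that succeeds on its third attempt:

import time
from datetime import datetime, timedelta

attempts = []

def check_once():
    # stand-in for get_pub_address(name, nameserver=...)
    attempts.append(1)
    return ["tcp://127.0.0.1:9000"] if len(attempts) >= 3 else []

addrs = []
then = datetime.now() + timedelta(seconds=5)
while datetime.now() < then:
    addrs += check_once()
    if addrs:
        break
    time.sleep(.5)
print(addrs)  # ['tcp://127.0.0.1:9000'] after roughly one second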
pytroll/posttroll | posttroll/ns.py | get_pub_address | def get_pub_address(name, timeout=10, nameserver="localhost"):
"""Get the address of the publisher for a given publisher *name* from the
nameserver on *nameserver* (localhost by default).
"""
# Socket to talk to server
socket = get_context().socket(REQ)
try:
socket.setsockopt(LINGER, timeout * 1000)
socket.connect("tcp://" + nameserver + ":" + str(PORT))
logger.debug('Connecting to %s',
"tcp://" + nameserver + ":" + str(PORT))
poller = Poller()
poller.register(socket, POLLIN)
message = Message("/oper/ns", "request", {"service": name})
socket.send_string(six.text_type(message))
# Get the reply.
sock = poller.poll(timeout=timeout * 1000)
if sock:
if sock[0][0] == socket:
message = Message.decode(socket.recv_string(NOBLOCK))
return message.data
else:
raise TimeoutError("Didn't get an address after %d seconds."
% timeout)
finally:
socket.close() | python | def get_pub_address(name, timeout=10, nameserver="localhost"):
"""Get the address of the publisher for a given publisher *name* from the
nameserver on *nameserver* (localhost by default).
"""
# Socket to talk to server
socket = get_context().socket(REQ)
try:
socket.setsockopt(LINGER, timeout * 1000)
socket.connect("tcp://" + nameserver + ":" + str(PORT))
logger.debug('Connecting to %s',
"tcp://" + nameserver + ":" + str(PORT))
poller = Poller()
poller.register(socket, POLLIN)
message = Message("/oper/ns", "request", {"service": name})
socket.send_string(six.text_type(message))
# Get the reply.
sock = poller.poll(timeout=timeout * 1000)
if sock:
if sock[0][0] == socket:
message = Message.decode(socket.recv_string(NOBLOCK))
return message.data
else:
raise TimeoutError("Didn't get an address after %d seconds."
% timeout)
finally:
socket.close() | [
"def",
"get_pub_address",
"(",
"name",
",",
"timeout",
"=",
"10",
",",
"nameserver",
"=",
"\"localhost\"",
")",
":",
"# Socket to talk to server",
"socket",
"=",
"get_context",
"(",
")",
".",
"socket",
"(",
"REQ",
")",
"try",
":",
"socket",
".",
"setsockopt",
"(",
"LINGER",
",",
"timeout",
"*",
"1000",
")",
"socket",
".",
"connect",
"(",
"\"tcp://\"",
"+",
"nameserver",
"+",
"\":\"",
"+",
"str",
"(",
"PORT",
")",
")",
"logger",
".",
"debug",
"(",
"'Connecting to %s'",
",",
"\"tcp://\"",
"+",
"nameserver",
"+",
"\":\"",
"+",
"str",
"(",
"PORT",
")",
")",
"poller",
"=",
"Poller",
"(",
")",
"poller",
".",
"register",
"(",
"socket",
",",
"POLLIN",
")",
"message",
"=",
"Message",
"(",
"\"/oper/ns\"",
",",
"\"request\"",
",",
"{",
"\"service\"",
":",
"name",
"}",
")",
"socket",
".",
"send_string",
"(",
"six",
".",
"text_type",
"(",
"message",
")",
")",
"# Get the reply.",
"sock",
"=",
"poller",
".",
"poll",
"(",
"timeout",
"=",
"timeout",
"*",
"1000",
")",
"if",
"sock",
":",
"if",
"sock",
"[",
"0",
"]",
"[",
"0",
"]",
"==",
"socket",
":",
"message",
"=",
"Message",
".",
"decode",
"(",
"socket",
".",
"recv_string",
"(",
"NOBLOCK",
")",
")",
"return",
"message",
".",
"data",
"else",
":",
"raise",
"TimeoutError",
"(",
"\"Didn't get an address after %d seconds.\"",
"%",
"timeout",
")",
"finally",
":",
"socket",
".",
"close",
"(",
")"
] | Get the address of the publisher for a given publisher *name* from the
nameserver on *nameserver* (localhost by default). | [
"Get",
"the",
"address",
"of",
"the",
"publisher",
"for",
"a",
"given",
"publisher",
"*",
"name",
"*",
"from",
"the",
"nameserver",
"on",
"*",
"nameserver",
"*",
"(",
"localhost",
"by",
"default",
")",
"."
] | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/ns.py#L76-L104 |
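A minimal pyzmq sketch of the request/poll/timeout shape used by get_pub_address(); the endpoint and payload are placeholders, and it assumes some REP socket is listening on port 5555, otherwise the timeout branch fires:

import zmq

timeout = 2  # seconds
ctx = zmq.Context.instance()
sock = ctx.socket(zmq.REQ)
try:
    sock.setsockopt(zmq.LINGER, timeout * 1000)
    sock.connect("tcp://localhost:5555")  # placeholder endpoint
    poller = zmq.Poller()
    poller.register(sock, zmq.POLLIN)
    sock.send_string('{"service": "demo"}')
    events = dict(poller.poll(timeout * 1000))
    if events.get(sock) == zmq.POLLIN:
        print(sock.recv_string(zmq.NOBLOCK))
    else:
        raise TimeoutError("Didn't get an address after %d seconds." % timeout)
finally:
    sock.close()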
pytroll/posttroll | posttroll/ns.py | get_active_address | def get_active_address(name, arec):
"""Get the addresses of the active modules for a given publisher *name*.
"""
addrs = arec.get(name)
if addrs:
return Message("/oper/ns", "info", addrs)
else:
return Message("/oper/ns", "info", "") | python | def get_active_address(name, arec):
"""Get the addresses of the active modules for a given publisher *name*.
"""
addrs = arec.get(name)
if addrs:
return Message("/oper/ns", "info", addrs)
else:
return Message("/oper/ns", "info", "") | [
"def",
"get_active_address",
"(",
"name",
",",
"arec",
")",
":",
"addrs",
"=",
"arec",
".",
"get",
"(",
"name",
")",
"if",
"addrs",
":",
"return",
"Message",
"(",
"\"/oper/ns\"",
",",
"\"info\"",
",",
"addrs",
")",
"else",
":",
"return",
"Message",
"(",
"\"/oper/ns\"",
",",
"\"info\"",
",",
"\"\"",
")"
] | Get the addresses of the active modules for a given publisher *name*. | [
"Get",
"the",
"addresses",
"of",
"the",
"active",
"modules",
"for",
"a",
"given",
"publisher",
"*",
"name",
"*",
"."
] | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/ns.py#L109-L116 |
pytroll/posttroll | posttroll/ns.py | NameServer.run | def run(self, *args):
"""Run the listener and answer to requests.
"""
del args
arec = AddressReceiver(max_age=self._max_age,
multicast_enabled=self._multicast_enabled)
arec.start()
port = PORT
try:
with nslock:
self.listener = get_context().socket(REP)
self.listener.bind("tcp://*:" + str(port))
logger.debug('Listening on port %s', str(port))
poller = Poller()
poller.register(self.listener, POLLIN)
while self.loop:
with nslock:
socks = dict(poller.poll(1000))
if socks:
if socks.get(self.listener) == POLLIN:
msg = self.listener.recv_string()
else:
continue
logger.debug("Replying to request: " + str(msg))
msg = Message.decode(msg)
self.listener.send_unicode(six.text_type(get_active_address(
msg.data["service"], arec)))
except KeyboardInterrupt:
# Needed to stop the nameserver.
pass
finally:
arec.stop()
self.stop() | python | def run(self, *args):
"""Run the listener and answer to requests.
"""
del args
arec = AddressReceiver(max_age=self._max_age,
multicast_enabled=self._multicast_enabled)
arec.start()
port = PORT
try:
with nslock:
self.listener = get_context().socket(REP)
self.listener.bind("tcp://*:" + str(port))
logger.debug('Listening on port %s', str(port))
poller = Poller()
poller.register(self.listener, POLLIN)
while self.loop:
with nslock:
socks = dict(poller.poll(1000))
if socks:
if socks.get(self.listener) == POLLIN:
msg = self.listener.recv_string()
else:
continue
logger.debug("Replying to request: " + str(msg))
msg = Message.decode(msg)
self.listener.send_unicode(six.text_type(get_active_address(
msg.data["service"], arec)))
except KeyboardInterrupt:
# Needed to stop the nameserver.
pass
finally:
arec.stop()
self.stop() | [
"def",
"run",
"(",
"self",
",",
"*",
"args",
")",
":",
"del",
"args",
"arec",
"=",
"AddressReceiver",
"(",
"max_age",
"=",
"self",
".",
"_max_age",
",",
"multicast_enabled",
"=",
"self",
".",
"_multicast_enabled",
")",
"arec",
".",
"start",
"(",
")",
"port",
"=",
"PORT",
"try",
":",
"with",
"nslock",
":",
"self",
".",
"listener",
"=",
"get_context",
"(",
")",
".",
"socket",
"(",
"REP",
")",
"self",
".",
"listener",
".",
"bind",
"(",
"\"tcp://*:\"",
"+",
"str",
"(",
"port",
")",
")",
"logger",
".",
"debug",
"(",
"'Listening on port %s'",
",",
"str",
"(",
"port",
")",
")",
"poller",
"=",
"Poller",
"(",
")",
"poller",
".",
"register",
"(",
"self",
".",
"listener",
",",
"POLLIN",
")",
"while",
"self",
".",
"loop",
":",
"with",
"nslock",
":",
"socks",
"=",
"dict",
"(",
"poller",
".",
"poll",
"(",
"1000",
")",
")",
"if",
"socks",
":",
"if",
"socks",
".",
"get",
"(",
"self",
".",
"listener",
")",
"==",
"POLLIN",
":",
"msg",
"=",
"self",
".",
"listener",
".",
"recv_string",
"(",
")",
"else",
":",
"continue",
"logger",
".",
"debug",
"(",
"\"Replying to request: \"",
"+",
"str",
"(",
"msg",
")",
")",
"msg",
"=",
"Message",
".",
"decode",
"(",
"msg",
")",
"self",
".",
"listener",
".",
"send_unicode",
"(",
"six",
".",
"text_type",
"(",
"get_active_address",
"(",
"msg",
".",
"data",
"[",
"\"service\"",
"]",
",",
"arec",
")",
")",
")",
"except",
"KeyboardInterrupt",
":",
"# Needed to stop the nameserver.",
"pass",
"finally",
":",
"arec",
".",
"stop",
"(",
")",
"self",
".",
"stop",
"(",
")"
] | Run the listener and answer requests. | [
"Run",
"the",
"listener",
"and",
"answer",
"to",
"requests",
"."
] | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/ns.py#L130-L164 |
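The server side in NameServer.run() is a REP socket polled with a one-second timeout so the loop flag stays responsive; a bare echo variant of that loop (port 5555 is arbitrary, and the reply is a stand-in for the address lookup):

import zmq

ctx = zmq.Context.instance()
listener = ctx.socket(zmq.REP)
listener.bind("tcp://*:5555")
poller = zmq.Poller()
poller.register(listener, zmq.POLLIN)
loop = True
try:
    while loop:
        socks = dict(poller.poll(1000))
        if socks.get(listener) == zmq.POLLIN:
            msg = listener.recv_string()
            listener.send_string("echo: " + msg)
except KeyboardInterrupt:
    pass
finally:
    listener.setsockopt(zmq.LINGER, 1)
    listener.close()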
pytroll/posttroll | posttroll/ns.py | NameServer.stop | def stop(self):
"""Stop the name server.
"""
self.listener.setsockopt(LINGER, 1)
self.loop = False
with nslock:
self.listener.close() | python | def stop(self):
"""Stop the name server.
"""
self.listener.setsockopt(LINGER, 1)
self.loop = False
with nslock:
self.listener.close() | [
"def",
"stop",
"(",
"self",
")",
":",
"self",
".",
"listener",
".",
"setsockopt",
"(",
"LINGER",
",",
"1",
")",
"self",
".",
"loop",
"=",
"False",
"with",
"nslock",
":",
"self",
".",
"listener",
".",
"close",
"(",
")"
] | Stop the name server. | [
"Stop",
"the",
"name",
"server",
"."
] | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/ns.py#L166-L172 |
zeroSteiner/smoke-zephyr | smoke_zephyr/requirements.py | check_requirements | def check_requirements(requirements, ignore=None):
"""
Parse requirements for package information to determine if all requirements
are met. The *requirements* argument can be a string path to a requirements file,
a file like object to be read, or a list of strings representing the package
requirements.
:param requirements: The file to parse.
:type requirements: file obj, list, str, tuple
:param ignore: A sequence of packages to ignore.
:type ignore: list, tuple
:return: A list of missing or incompatible packages.
:rtype: list
"""
ignore = (ignore or [])
not_satisfied = []
working_set = pkg_resources.working_set
installed_packages = dict((p.project_name, p) for p in working_set) # pylint: disable=E1133
if isinstance(requirements, str):
with open(requirements, 'r') as file_h:
requirements = file_h.readlines()
elif hasattr(requirements, 'readlines'):
requirements = requirements.readlines()
elif not isinstance(requirements, (list, tuple)):
raise TypeError('invalid type for argument requirements')
for req_line in requirements:
req_line = req_line.strip()
parts = re.match(r'^([\w\-]+)(([<>=]=)(\d+(\.\d+)*))?$', req_line)
if not parts:
raise ValueError("requirement '{0}' is in an invalid format".format(req_line))
req_pkg = parts.group(1)
if req_pkg in ignore:
continue
if req_pkg not in installed_packages:
try:
find_result = working_set.find(pkg_resources.Requirement.parse(req_line))
except pkg_resources.ResolutionError:
find_result = False
if not find_result:
not_satisfied.append(req_pkg)
continue
if not parts.group(2):
continue
req_version = distutils.version.StrictVersion(parts.group(4))
installed_pkg = installed_packages[req_pkg]
installed_version = re.match(r'^((\d+\.)*\d+)', installed_pkg.version)
if not installed_version:
not_satisfied.append(req_pkg)
continue
installed_version = distutils.version.StrictVersion(installed_version.group(0))
if parts.group(3) == '==' and installed_version != req_version:
not_satisfied.append(req_pkg)
elif parts.group(3) == '>=' and installed_version < req_version:
not_satisfied.append(req_pkg)
elif parts.group(3) == '<=' and installed_version > req_version:
not_satisfied.append(req_pkg)
return not_satisfied | python | def check_requirements(requirements, ignore=None):
"""
Parse requirements for package information to determine if all requirements
are met. The *requirements* argument can be a string path to a requirements file,
a file like object to be read, or a list of strings representing the package
requirements.
:param requirements: The file to parse.
:type requirements: file obj, list, str, tuple
:param ignore: A sequence of packages to ignore.
:type ignore: list, tuple
:return: A list of missing or incompatible packages.
:rtype: list
"""
ignore = (ignore or [])
not_satisfied = []
working_set = pkg_resources.working_set
installed_packages = dict((p.project_name, p) for p in working_set) # pylint: disable=E1133
if isinstance(requirements, str):
with open(requirements, 'r') as file_h:
requirements = file_h.readlines()
elif hasattr(requirements, 'readlines'):
requirements = requirements.readlines()
elif not isinstance(requirements, (list, tuple)):
raise TypeError('invalid type for argument requirements')
for req_line in requirements:
req_line = req_line.strip()
parts = re.match(r'^([\w\-]+)(([<>=]=)(\d+(\.\d+)*))?$', req_line)
if not parts:
raise ValueError("requirement '{0}' is in an invalid format".format(req_line))
req_pkg = parts.group(1)
if req_pkg in ignore:
continue
if req_pkg not in installed_packages:
try:
find_result = working_set.find(pkg_resources.Requirement.parse(req_line))
except pkg_resources.ResolutionError:
find_result = False
if not find_result:
not_satisfied.append(req_pkg)
continue
if not parts.group(2):
continue
req_version = distutils.version.StrictVersion(parts.group(4))
installed_pkg = installed_packages[req_pkg]
installed_version = re.match(r'^((\d+\.)*\d+)', installed_pkg.version)
if not installed_version:
not_satisfied.append(req_pkg)
continue
installed_version = distutils.version.StrictVersion(installed_version.group(0))
if parts.group(3) == '==' and installed_version != req_version:
not_satisfied.append(req_pkg)
elif parts.group(3) == '>=' and installed_version < req_version:
not_satisfied.append(req_pkg)
elif parts.group(3) == '<=' and installed_version > req_version:
not_satisfied.append(req_pkg)
return not_satisfied | [
"def",
"check_requirements",
"(",
"requirements",
",",
"ignore",
"=",
"None",
")",
":",
"ignore",
"=",
"(",
"ignore",
"or",
"[",
"]",
")",
"not_satisfied",
"=",
"[",
"]",
"working_set",
"=",
"pkg_resources",
".",
"working_set",
"installed_packages",
"=",
"dict",
"(",
"(",
"p",
".",
"project_name",
",",
"p",
")",
"for",
"p",
"in",
"working_set",
")",
"# pylint: disable=E1133",
"if",
"isinstance",
"(",
"requirements",
",",
"str",
")",
":",
"with",
"open",
"(",
"requirements",
",",
"'r'",
")",
"as",
"file_h",
":",
"requirements",
"=",
"file_h",
".",
"readlines",
"(",
")",
"elif",
"hasattr",
"(",
"requirements",
",",
"'readlines'",
")",
":",
"requirements",
"=",
"requirements",
".",
"readlines",
"(",
")",
"elif",
"not",
"isinstance",
"(",
"requirements",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"raise",
"TypeError",
"(",
"'invalid type for argument requirements'",
")",
"for",
"req_line",
"in",
"requirements",
":",
"req_line",
"=",
"req_line",
".",
"strip",
"(",
")",
"parts",
"=",
"re",
".",
"match",
"(",
"r'^([\\w\\-]+)(([<>=]=)(\\d+(\\.\\d+)*))?$'",
",",
"req_line",
")",
"if",
"not",
"parts",
":",
"raise",
"ValueError",
"(",
"\"requirement '{0}' is in an invalid format\"",
".",
"format",
"(",
"req_line",
")",
")",
"req_pkg",
"=",
"parts",
".",
"group",
"(",
"1",
")",
"if",
"req_pkg",
"in",
"ignore",
":",
"continue",
"if",
"req_pkg",
"not",
"in",
"installed_packages",
":",
"try",
":",
"find_result",
"=",
"working_set",
".",
"find",
"(",
"pkg_resources",
".",
"Requirement",
".",
"parse",
"(",
"req_line",
")",
")",
"except",
"pkg_resources",
".",
"ResolutionError",
":",
"find_result",
"=",
"False",
"if",
"not",
"find_result",
":",
"not_satisfied",
".",
"append",
"(",
"req_pkg",
")",
"continue",
"if",
"not",
"parts",
".",
"group",
"(",
"2",
")",
":",
"continue",
"req_version",
"=",
"distutils",
".",
"version",
".",
"StrictVersion",
"(",
"parts",
".",
"group",
"(",
"4",
")",
")",
"installed_pkg",
"=",
"installed_packages",
"[",
"req_pkg",
"]",
"installed_version",
"=",
"re",
".",
"match",
"(",
"r'^((\\d+\\.)*\\d+)'",
",",
"installed_pkg",
".",
"version",
")",
"if",
"not",
"installed_version",
":",
"not_satisfied",
".",
"append",
"(",
"req_pkg",
")",
"continue",
"installed_version",
"=",
"distutils",
".",
"version",
".",
"StrictVersion",
"(",
"installed_version",
".",
"group",
"(",
"0",
")",
")",
"if",
"parts",
".",
"group",
"(",
"3",
")",
"==",
"'=='",
"and",
"installed_version",
"!=",
"req_version",
":",
"not_satisfied",
".",
"append",
"(",
"req_pkg",
")",
"elif",
"parts",
".",
"group",
"(",
"3",
")",
"==",
"'>='",
"and",
"installed_version",
"<",
"req_version",
":",
"not_satisfied",
".",
"append",
"(",
"req_pkg",
")",
"elif",
"parts",
".",
"group",
"(",
"3",
")",
"==",
"'<='",
"and",
"installed_version",
">",
"req_version",
":",
"not_satisfied",
".",
"append",
"(",
"req_pkg",
")",
"return",
"not_satisfied"
] | Parse requirements for package information to determine if all requirements
are met. The *requirements* argument can be a string path to a requirements file,
a file like object to be read, or a list of strings representing the package
requirements.
:param requirements: The file to parse.
:type requirements: file obj, list, str, tuple
:param ignore: A sequence of packages to ignore.
:type ignore: list, tuple
:return: A list of missing or incompatible packages.
:rtype: list | [
"Parse",
"requirements",
"for",
"package",
"information",
"to",
"determine",
"if",
"all",
"requirements",
"are",
"met",
".",
"The",
"*",
"requirements",
"*",
"argument",
"can",
"be",
"a",
"string",
"to",
"a",
"requirements",
"file",
"a",
"file",
"like",
"object",
"to",
"be",
"read",
"or",
"a",
"list",
"of",
"strings",
"representing",
"the",
"package",
"requirements",
"."
] | train | https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/requirements.py#L38-L96 |
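The requirement-line grammar that check_requirements() accepts, a package name with an optional ==, >= or <= version pin, can be exercised on its own:

import re

pattern = re.compile(r'^([\w\-]+)(([<>=]=)(\d+(\.\d+)*))?$')
for line in ("six", "requests>=2.20", "foo-bar==1.0.3", "bad spec >1"):
    parts = pattern.match(line)
    if parts:
        print(line, "->", parts.group(1), parts.group(3), parts.group(4))
    else:
        print(line, "-> invalid format")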
andrewda/frc-livescore | livescore/simpleocr_utils/segmentation.py | segments_from_numpy | def segments_from_numpy(segments):
"""reverses segments_to_numpy"""
segments = segments if SEGMENTS_DIRECTION == 0 else segments.transpose()
segments = [map(int, s) for s in segments]
return segments | python | def segments_from_numpy(segments):
"""reverses segments_to_numpy"""
segments = segments if SEGMENTS_DIRECTION == 0 else segments.transpose()
segments = [map(int, s) for s in segments]
return segments | [
"def",
"segments_from_numpy",
"(",
"segments",
")",
":",
"segments",
"=",
"segments",
"if",
"SEGMENTS_DIRECTION",
"==",
"0",
"else",
"segments",
".",
"tranpose",
"(",
")",
"segments",
"=",
"[",
"map",
"(",
"int",
",",
"s",
")",
"for",
"s",
"in",
"segments",
"]",
"return",
"segments"
] | reverses segments_to_numpy | [
"reverses",
"segments_to_numpy"
] | train | https://github.com/andrewda/frc-livescore/blob/71594cd6d2c8b6c5feb3889bb05552d09b8128b1/livescore/simpleocr_utils/segmentation.py#L13-L17 |
andrewda/frc-livescore | livescore/simpleocr_utils/segmentation.py | segments_to_numpy | def segments_to_numpy(segments):
"""given a list of 4-element tuples, transforms it into a numpy array"""
segments = numpy.array(segments, dtype=SEGMENT_DATATYPE, ndmin=2) # each segment in a row
segments = segments if SEGMENTS_DIRECTION == 0 else numpy.transpose(segments)
return segments | python | def segments_to_numpy(segments):
"""given a list of 4-element tuples, transforms it into a numpy array"""
segments = numpy.array(segments, dtype=SEGMENT_DATATYPE, ndmin=2) # each segment in a row
segments = segments if SEGMENTS_DIRECTION == 0 else numpy.transpose(segments)
return segments | [
"def",
"segments_to_numpy",
"(",
"segments",
")",
":",
"segments",
"=",
"numpy",
".",
"array",
"(",
"segments",
",",
"dtype",
"=",
"SEGMENT_DATATYPE",
",",
"ndmin",
"=",
"2",
")",
"# each segment in a row",
"segments",
"=",
"segments",
"if",
"SEGMENTS_DIRECTION",
"==",
"0",
"else",
"numpy",
".",
"transpose",
"(",
"segments",
")",
"return",
"segments"
] | given a list of 4-element tuples, transforms it into a numpy array | [
"given",
"a",
"list",
"of",
"4",
"-",
"element",
"tuples",
"transforms",
"it",
"into",
"a",
"numpy",
"array"
] | train | https://github.com/andrewda/frc-livescore/blob/71594cd6d2c8b6c5feb3889bb05552d09b8128b1/livescore/simpleocr_utils/segmentation.py#L20-L24 |
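segments_to_numpy() with SEGMENTS_DIRECTION assumed to be 0 reduces to the numpy call below; uint16 stands in for SEGMENT_DATATYPE, which is not shown in this excerpt:

import numpy

segments = [(0, 0, 10, 20), (12, 0, 8, 20)]
arr = numpy.array(segments, dtype=numpy.uint16, ndmin=2)  # one segment per row
print(arr.shape)                         # (2, 4)
print([list(map(int, s)) for s in arr])  # back to plain int lists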
andrewda/frc-livescore | livescore/simpleocr_utils/segmentation.py | region_from_segment | def region_from_segment(image, segment):
"""given a segment (rectangle) and an image, returns it's corresponding subimage"""
x, y, w, h = segment
return image[y:y + h, x:x + w] | python | def region_from_segment(image, segment):
"""given a segment (rectangle) and an image, returns it's corresponding subimage"""
x, y, w, h = segment
return image[y:y + h, x:x + w] | [
"def",
"region_from_segment",
"(",
"image",
",",
"segment",
")",
":",
"x",
",",
"y",
",",
"w",
",",
"h",
"=",
"segment",
"return",
"image",
"[",
"y",
":",
"y",
"+",
"h",
",",
"x",
":",
"x",
"+",
"w",
"]"
] | given a segment (rectangle) and an image, returns its corresponding subimage | [
"given",
"a",
"segment",
"(",
"rectangle",
")",
"and",
"an",
"image",
"returns",
"it",
"s",
"corresponding",
"subimage"
] | train | https://github.com/andrewda/frc-livescore/blob/71594cd6d2c8b6c5feb3889bb05552d09b8128b1/livescore/simpleocr_utils/segmentation.py#L27-L30 |
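region_from_segment() is plain numpy slicing with an (x, y, w, h) rectangle; a 5x6 ramp image makes the convention visible:

import numpy

image = numpy.arange(30).reshape(5, 6)
x, y, w, h = 1, 2, 3, 2
region = image[y:y + h, x:x + w]
print(region.shape)  # (2, 3)
print(region)        # [[13 14 15]
                     #  [19 20 21]]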
thespacedoctor/sherlock | sherlock/imports/marshall.py | marshall.ingest | def ingest(self):
"""*Ingest the ePESSTO Marshall transient stream into the catalogues database*
The method first creates the tables for the various marshall feeder surveys in the sherlock-catalogues database (if they do not yet exist). Then the marshall database is queried for each transient survey and the results are imported into the sherlock-catalogues tables.
See the class docstring for usage
.. todo ::
- convert the directory_script_runner to 'load in file'
"""
self.log.debug('starting the ``get`` method')
# A YAML DICTIONARY OF sherlock-catalogues TABLE NAME AND THE SELECT
# QUERY TO LIFT THE DATA FROM THE MARSHALL
yamlFilePath = '/'.join(string.split(__file__, '/')
[:-1]) + "/resources/pessto_marshall_table_selects.yaml"
stream = file(yamlFilePath, 'r')
marshallQueries = yaml.load(stream)
stream.close()
self.primaryIdColumnName = "primaryId"
self.raColName = "raDeg"
self.declColName = "decDeg"
# CREATE THE MARSHALL IMPORT CATALOGUE TABLES (IF THEY DO NOT EXIST)
directory_script_runner(
log=self.log,
pathToScriptDirectory='/'.join(string.split(__file__,
'/')[:-1]) + "/resources",
databaseName=self.settings["database settings"][
"static catalogues"]["db"],
loginPath=self.settings["database settings"][
"static catalogues"]["loginPath"],
successRule="delete",
failureRule="failed"
)
for k, v in marshallQueries["pessto queries"].iteritems():
self.dbTableName = k
self.databaseInsertbatchSize = 500
dictList = self._create_dictionary_of_marshall(
marshallQuery=v["query"],
marshallTable=v["marshallTable"]
)
tableName = self.dbTableName
self.add_data_to_database_table(
dictList=dictList
)
self.log.debug('completed the ``get`` method')
return None | python | def ingest(self):
"""*Ingest the ePESSTO Marshall transient stream into the catalogues database*
The method first creates the tables for the various marshall feeder surveys in the sherlock-catalogues database (if they do not yet exist). Then the marshall database is queried for each transient survey and the results are imported into the sherlock-catalogues tables.
See the class docstring for usage
.. todo ::
- convert the directory_script_runner to 'load in file'
"""
self.log.debug('starting the ``get`` method')
# A YAML DICTIONARY OF sherlock-catalogues TABLE NAME AND THE SELECT
# QUERY TO LIFT THE DATA FROM THE MARSHALL
yamlFilePath = '/'.join(string.split(__file__, '/')
[:-1]) + "/resources/pessto_marshall_table_selects.yaml"
stream = file(yamlFilePath, 'r')
marshallQueries = yaml.load(stream)
stream.close()
self.primaryIdColumnName = "primaryId"
self.raColName = "raDeg"
self.declColName = "decDeg"
# CREATE THE MARSHALL IMPORT CATALOGUE TABLES (IF THEY DO NOT EXIST)
directory_script_runner(
log=self.log,
pathToScriptDirectory='/'.join(string.split(__file__,
'/')[:-1]) + "/resources",
databaseName=self.settings["database settings"][
"static catalogues"]["db"],
loginPath=self.settings["database settings"][
"static catalogues"]["loginPath"],
successRule="delete",
failureRule="failed"
)
for k, v in marshallQueries["pessto queries"].iteritems():
self.dbTableName = k
self.databaseInsertbatchSize = 500
dictList = self._create_dictionary_of_marshall(
marshallQuery=v["query"],
marshallTable=v["marshallTable"]
)
tableName = self.dbTableName
self.add_data_to_database_table(
dictList=dictList
)
self.log.debug('completed the ``get`` method')
return None | [
"def",
"ingest",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``get`` method'",
")",
"# A YAML DICTIONARY OF sherlock-catalogues TABLE NAME AND THE SELECT",
"# QUERY TO LIFT THE DATA FROM THE MARSHALL",
"yamlFilePath",
"=",
"'/'",
".",
"join",
"(",
"string",
".",
"split",
"(",
"__file__",
",",
"'/'",
")",
"[",
":",
"-",
"1",
"]",
")",
"+",
"\"/resources/pessto_marshall_table_selects.yaml\"",
"stream",
"=",
"file",
"(",
"yamlFilePath",
",",
"'r'",
")",
"marshallQueries",
"=",
"yaml",
".",
"load",
"(",
"stream",
")",
"stream",
".",
"close",
"(",
")",
"self",
".",
"primaryIdColumnName",
"=",
"\"primaryId\"",
"self",
".",
"raColName",
"=",
"\"raDeg\"",
"self",
".",
"declColName",
"=",
"\"decDeg\"",
"# CREATE THE MARSHALL IMPORT CATALOGUE TABLES (IF THEY DO NOT EXIST)",
"directory_script_runner",
"(",
"log",
"=",
"self",
".",
"log",
",",
"pathToScriptDirectory",
"=",
"'/'",
".",
"join",
"(",
"string",
".",
"split",
"(",
"__file__",
",",
"'/'",
")",
"[",
":",
"-",
"1",
"]",
")",
"+",
"\"/resources\"",
",",
"databaseName",
"=",
"self",
".",
"settings",
"[",
"\"database settings\"",
"]",
"[",
"\"static catalogues\"",
"]",
"[",
"\"db\"",
"]",
",",
"loginPath",
"=",
"self",
".",
"settings",
"[",
"\"database settings\"",
"]",
"[",
"\"static catalogues\"",
"]",
"[",
"\"loginPath\"",
"]",
",",
"successRule",
"=",
"\"delete\"",
",",
"failureRule",
"=",
"\"failed\"",
")",
"for",
"k",
",",
"v",
"in",
"marshallQueries",
"[",
"\"pessto queries\"",
"]",
".",
"iteritems",
"(",
")",
":",
"self",
".",
"dbTableName",
"=",
"k",
"self",
".",
"databaseInsertbatchSize",
"=",
"500",
"dictList",
"=",
"self",
".",
"_create_dictionary_of_marshall",
"(",
"marshallQuery",
"=",
"v",
"[",
"\"query\"",
"]",
",",
"marshallTable",
"=",
"v",
"[",
"\"marshallTable\"",
"]",
")",
"tableName",
"=",
"self",
".",
"dbTableName",
"self",
".",
"add_data_to_database_table",
"(",
"dictList",
"=",
"dictList",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``get`` method'",
")",
"return",
"None"
] | *Ingest the ePESSTO Marshall transient stream into the catalogues database*
The method first creates the tables for the various marshall feeder surveys in the sherlock-catalogues database (if they do not yet exist). Then the marshall database is queried for each transient survey and the results are imported into the sherlock-catalogues tables.
See the class docstring for usage
.. todo ::
- convert the directory_script_runner to 'load in file' | [
"*",
"Ingest",
"the",
"ePESSTO",
"Marshall",
"transient",
"stream",
"into",
"the",
"catalogues",
"database",
"*"
] | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/imports/marshall.py#L57-L108 |
thespacedoctor/sherlock | sherlock/imports/marshall.py | marshall._create_dictionary_of_marshall | def _create_dictionary_of_marshall(
self,
marshallQuery,
marshallTable):
"""create a list of dictionaries containing all the rows in the marshall stream
**Key Arguments:**
- ``marshallQuery`` -- the query used to lift the required data from the marshall database.
- ``marshallTable`` -- the name of the marshall table we are lifting the data from.
**Return:**
- ``dictList`` - a list of dictionaries containing all the rows in the marshall stream
"""
self.log.debug(
'starting the ``_create_dictionary_of_marshall`` method')
dictList = []
tableName = self.dbTableName
rows = readquery(
log=self.log,
sqlQuery=marshallQuery,
dbConn=self.pmDbConn,
quiet=False
)
totalCount = len(rows)
count = 0
for row in rows:
if "dateCreated" in row:
del row["dateCreated"]
count += 1
if count > 1:
# Cursor up one line and clear line
sys.stdout.write("\x1b[1A\x1b[2K")
print "%(count)s / %(totalCount)s `%(tableName)s` data added to memory" % locals()
dictList.append(dict(row))
self.log.debug(
'completed the ``_create_dictionary_of_marshall`` method')
return dictList | python | def _create_dictionary_of_marshall(
self,
marshallQuery,
marshallTable):
"""create a list of dictionaries containing all the rows in the marshall stream
**Key Arguments:**
- ``marshallQuery`` -- the query used to lift the required data from the marshall database.
- ``marshallTable`` -- the name of the marshall table we are lifting the data from.
**Return:**
- ``dictList`` - a list of dictionaries containing all the rows in the marshall stream
"""
self.log.debug(
'starting the ``_create_dictionary_of_marshall`` method')
dictList = []
tableName = self.dbTableName
rows = readquery(
log=self.log,
sqlQuery=marshallQuery,
dbConn=self.pmDbConn,
quiet=False
)
totalCount = len(rows)
count = 0
for row in rows:
if "dateCreated" in row:
del row["dateCreated"]
count += 1
if count > 1:
# Cursor up one line and clear line
sys.stdout.write("\x1b[1A\x1b[2K")
print "%(count)s / %(totalCount)s `%(tableName)s` data added to memory" % locals()
dictList.append(dict(row))
self.log.debug(
'completed the ``_create_dictionary_of_marshall`` method')
return dictList | [
"def",
"_create_dictionary_of_marshall",
"(",
"self",
",",
"marshallQuery",
",",
"marshallTable",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_create_dictionary_of_marshall`` method'",
")",
"dictList",
"=",
"[",
"]",
"tableName",
"=",
"self",
".",
"dbTableName",
"rows",
"=",
"readquery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"marshallQuery",
",",
"dbConn",
"=",
"self",
".",
"pmDbConn",
",",
"quiet",
"=",
"False",
")",
"totalCount",
"=",
"len",
"(",
"rows",
")",
"count",
"=",
"0",
"for",
"row",
"in",
"rows",
":",
"if",
"\"dateCreated\"",
"in",
"row",
":",
"del",
"row",
"[",
"\"dateCreated\"",
"]",
"count",
"+=",
"1",
"if",
"count",
">",
"1",
":",
"# Cursor up one line and clear line",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"\\x1b[1A\\x1b[2K\"",
")",
"print",
"\"%(count)s / %(totalCount)s `%(tableName)s` data added to memory\"",
"%",
"locals",
"(",
")",
"dictList",
".",
"append",
"(",
"dict",
"(",
"row",
")",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_create_dictionary_of_marshall`` method'",
")",
"return",
"dictList"
] | create a list of dictionaries containing all the rows in the marshall stream
**Key Arguments:**
- ``marshallQuery`` -- the query used to lift the required data from the marshall database.
- ``marshallTable`` -- the name of the marshall table we are lifting the data from.
**Return:**
- ``dictList`` - a list of dictionaries containing all the rows in the marshall stream | [
"create",
"a",
"list",
"of",
"dictionaries",
"containing",
"all",
"the",
"rows",
"in",
"the",
"marshall",
"stream"
] | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/imports/marshall.py#L110-L151 |
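The in-place progress counter inside _create_dictionary_of_marshall() relies on the ANSI escapes for "cursor up one line" and "erase line"; a Python 3 rendition of the same trick for an ANSI-capable terminal:

import sys
import time

total_count = 5
for count in range(1, total_count + 1):
    if count > 1:
        sys.stdout.write("\x1b[1A\x1b[2K")  # cursor up one line, then clear it
    print("%s / %s rows added to memory" % (count, total_count))
    time.sleep(.2)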
thespacedoctor/sherlock | sherlock/imports/ned_d.py | ned_d.ingest | def ingest(self):
"""Import the ned_d catalogue into the catalogues database
The method first generates a list of python dictionaries from the ned_d datafile, imports this list of dictionaries into a database table and then generates the HTMIDs for that table.
**Usage:**
See class docstring for usage
.. todo ::
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``get`` method')
dictList = self._create_dictionary_of_ned_d()
self.primaryIdColumnName = "primaryId"
self.raColName = "raDeg"
self.declColName = "decDeg"
tableName = self.dbTableName
createStatement = u"""
CREATE TABLE `%(tableName)s` (
`primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
`Method` varchar(150) DEFAULT NULL,
`dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
`dateLastModified` datetime DEFAULT CURRENT_TIMESTAMP,
`updated` varchar(45) DEFAULT '0',
`dist_derived_from_sn` varchar(150) DEFAULT NULL,
`dist_in_ned_flag` varchar(10) DEFAULT NULL,
`dist_index_id` mediumint(9) DEFAULT NULL,
`dist_mod` double DEFAULT NULL,
`dist_mod_err` double DEFAULT NULL,
`dist_mpc` double DEFAULT NULL,
`galaxy_index_id` mediumint(9) DEFAULT NULL,
`hubble_const` double DEFAULT NULL,
`lmc_mod` double DEFAULT NULL,
`notes` varchar(500) DEFAULT NULL,
`primary_ned_id` varchar(150) DEFAULT NULL,
`redshift` double DEFAULT NULL,
`ref` varchar(150) DEFAULT NULL,
`ref_date` int(11) DEFAULT NULL,
`master_row` tinyint(4) DEFAULT '0',
`major_diameter_arcmin` double DEFAULT NULL,
`ned_notes` varchar(700) DEFAULT NULL,
`object_type` varchar(100) DEFAULT NULL,
`redshift_err` double DEFAULT NULL,
`redshift_quality` varchar(100) DEFAULT NULL,
`magnitude_filter` varchar(10) DEFAULT NULL,
`minor_diameter_arcmin` double DEFAULT NULL,
`morphology` varchar(50) DEFAULT NULL,
`hierarchy` varchar(50) DEFAULT NULL,
`galaxy_morphology` varchar(50) DEFAULT NULL,
`radio_morphology` varchar(50) DEFAULT NULL,
`activity_type` varchar(50) DEFAULT NULL,
`in_ned` tinyint(4) DEFAULT NULL,
`raDeg` double DEFAULT NULL,
`decDeg` double DEFAULT NULL,
`eb_v` double DEFAULT NULL,
`sdss_coverage` TINYINT DEFAULT NULL,
PRIMARY KEY (`primaryId`),
UNIQUE KEY `galaxy_index_id_dist_index_id` (`galaxy_index_id`,`dist_index_id`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
DROP VIEW IF EXISTS `view_%(tableName)s_master_recorders`;
CREATE
VIEW `view_%(tableName)s_master_recorders` AS
(SELECT
`%(tableName)s`.`primary_ned_id` AS `primary_ned_id`,
`%(tableName)s`.`object_type` AS `object_type`,
`%(tableName)s`.`raDeg` AS `raDeg`,
`%(tableName)s`.`decDeg` AS `decDeg`,
`%(tableName)s`.`dist_mpc` AS `dist_mpc`,
`%(tableName)s`.`dist_mod` AS `dist_mod`,
`%(tableName)s`.`dist_mod_err` AS `dist_mod_err`,
`%(tableName)s`.`Method` AS `dist_measurement_method`,
`%(tableName)s`.`redshift` AS `redshift`,
`%(tableName)s`.`redshift_err` AS `redshift_err`,
`%(tableName)s`.`redshift_quality` AS `redshift_quality`,
`%(tableName)s`.`major_diameter_arcmin` AS `major_diameter_arcmin`,
`%(tableName)s`.`minor_diameter_arcmin` AS `minor_diameter_arcmin`,
`%(tableName)s`.`magnitude_filter` AS `magnitude_filter`,
`%(tableName)s`.`eb_v` AS `gal_eb_v`,
`%(tableName)s`.`hierarchy` AS `hierarchy`,
`%(tableName)s`.`morphology` AS `morphology`,
`%(tableName)s`.`radio_morphology` AS `radio_morphology`,
`%(tableName)s`.`activity_type` AS `activity_type`,
`%(tableName)s`.`ned_notes` AS `ned_notes`,
`%(tableName)s`.`in_ned` AS `in_ned`,
`%(tableName)s`.`primaryId` AS `primaryId`
FROM
`%(tableName)s`
WHERE
(`%(tableName)s`.`master_row` = 1));
""" % locals()
self.add_data_to_database_table(
dictList=dictList,
createStatement=createStatement
)
self._clean_up_columns()
self._get_metadata_for_galaxies()
self._update_sdss_coverage()
self.log.debug('completed the ``get`` method')
return None | python | def ingest(self):
"""Import the ned_d catalogue into the catalogues database
The method first generates a list of Python dictionaries from the ned_d datafile, imports this list of dictionaries into a database table, and then generates the HTMIDs for that table.
**Usage:**
See class docstring for usage
.. todo ::
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``get`` method')
dictList = self._create_dictionary_of_ned_d()
self.primaryIdColumnName = "primaryId"
self.raColName = "raDeg"
self.declColName = "decDeg"
tableName = self.dbTableName
createStatement = u"""
CREATE TABLE `%(tableName)s` (
`primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
`Method` varchar(150) DEFAULT NULL,
`dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
`dateLastModified` datetime DEFAULT CURRENT_TIMESTAMP,
`updated` varchar(45) DEFAULT '0',
`dist_derived_from_sn` varchar(150) DEFAULT NULL,
`dist_in_ned_flag` varchar(10) DEFAULT NULL,
`dist_index_id` mediumint(9) DEFAULT NULL,
`dist_mod` double DEFAULT NULL,
`dist_mod_err` double DEFAULT NULL,
`dist_mpc` double DEFAULT NULL,
`galaxy_index_id` mediumint(9) DEFAULT NULL,
`hubble_const` double DEFAULT NULL,
`lmc_mod` double DEFAULT NULL,
`notes` varchar(500) DEFAULT NULL,
`primary_ned_id` varchar(150) DEFAULT NULL,
`redshift` double DEFAULT NULL,
`ref` varchar(150) DEFAULT NULL,
`ref_date` int(11) DEFAULT NULL,
`master_row` tinyint(4) DEFAULT '0',
`major_diameter_arcmin` double DEFAULT NULL,
`ned_notes` varchar(700) DEFAULT NULL,
`object_type` varchar(100) DEFAULT NULL,
`redshift_err` double DEFAULT NULL,
`redshift_quality` varchar(100) DEFAULT NULL,
`magnitude_filter` varchar(10) DEFAULT NULL,
`minor_diameter_arcmin` double DEFAULT NULL,
`morphology` varchar(50) DEFAULT NULL,
`hierarchy` varchar(50) DEFAULT NULL,
`galaxy_morphology` varchar(50) DEFAULT NULL,
`radio_morphology` varchar(50) DEFAULT NULL,
`activity_type` varchar(50) DEFAULT NULL,
`in_ned` tinyint(4) DEFAULT NULL,
`raDeg` double DEFAULT NULL,
`decDeg` double DEFAULT NULL,
`eb_v` double DEFAULT NULL,
`sdss_coverage` TINYINT DEFAULT NULL,
PRIMARY KEY (`primaryId`),
UNIQUE KEY `galaxy_index_id_dist_index_id` (`galaxy_index_id`,`dist_index_id`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
DROP VIEW IF EXISTS `view_%(tableName)s_master_recorders`;
CREATE
VIEW `view_%(tableName)s_master_recorders` AS
(SELECT
`%(tableName)s`.`primary_ned_id` AS `primary_ned_id`,
`%(tableName)s`.`object_type` AS `object_type`,
`%(tableName)s`.`raDeg` AS `raDeg`,
`%(tableName)s`.`decDeg` AS `decDeg`,
`%(tableName)s`.`dist_mpc` AS `dist_mpc`,
`%(tableName)s`.`dist_mod` AS `dist_mod`,
`%(tableName)s`.`dist_mod_err` AS `dist_mod_err`,
`%(tableName)s`.`Method` AS `dist_measurement_method`,
`%(tableName)s`.`redshift` AS `redshift`,
`%(tableName)s`.`redshift_err` AS `redshift_err`,
`%(tableName)s`.`redshift_quality` AS `redshift_quality`,
`%(tableName)s`.`major_diameter_arcmin` AS `major_diameter_arcmin`,
`%(tableName)s`.`minor_diameter_arcmin` AS `minor_diameter_arcmin`,
`%(tableName)s`.`magnitude_filter` AS `magnitude_filter`,
`%(tableName)s`.`eb_v` AS `gal_eb_v`,
`%(tableName)s`.`hierarchy` AS `hierarchy`,
`%(tableName)s`.`morphology` AS `morphology`,
`%(tableName)s`.`radio_morphology` AS `radio_morphology`,
`%(tableName)s`.`activity_type` AS `activity_type`,
`%(tableName)s`.`ned_notes` AS `ned_notes`,
`%(tableName)s`.`in_ned` AS `in_ned`,
`%(tableName)s`.`primaryId` AS `primaryId`
FROM
`%(tableName)s`
WHERE
(`%(tableName)s`.`master_row` = 1));
""" % locals()
self.add_data_to_database_table(
dictList=dictList,
createStatement=createStatement
)
self._clean_up_columns()
self._get_metadata_for_galaxies()
self._update_sdss_coverage()
self.log.debug('completed the ``get`` method')
return None | [
"def",
"ingest",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``get`` method'",
")",
"dictList",
"=",
"self",
".",
"_create_dictionary_of_ned_d",
"(",
")",
"self",
".",
"primaryIdColumnName",
"=",
"\"primaryId\"",
"self",
".",
"raColName",
"=",
"\"raDeg\"",
"self",
".",
"declColName",
"=",
"\"decDeg\"",
"tableName",
"=",
"self",
".",
"dbTableName",
"createStatement",
"=",
"u\"\"\"\n CREATE TABLE `%(tableName)s` (\n `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',\n `Method` varchar(150) DEFAULT NULL,\n `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,\n `dateLastModified` datetime DEFAULT CURRENT_TIMESTAMP,\n `updated` varchar(45) DEFAULT '0',\n `dist_derived_from_sn` varchar(150) DEFAULT NULL,\n `dist_in_ned_flag` varchar(10) DEFAULT NULL,\n `dist_index_id` mediumint(9) DEFAULT NULL,\n `dist_mod` double DEFAULT NULL,\n `dist_mod_err` double DEFAULT NULL,\n `dist_mpc` double DEFAULT NULL,\n `galaxy_index_id` mediumint(9) DEFAULT NULL,\n `hubble_const` double DEFAULT NULL,\n `lmc_mod` double DEFAULT NULL,\n `notes` varchar(500) DEFAULT NULL,\n `primary_ned_id` varchar(150) DEFAULT NULL,\n `redshift` double DEFAULT NULL,\n `ref` varchar(150) DEFAULT NULL,\n `ref_date` int(11) DEFAULT NULL,\n `master_row` tinyint(4) DEFAULT '0',\n `major_diameter_arcmin` double DEFAULT NULL,\n `ned_notes` varchar(700) DEFAULT NULL,\n `object_type` varchar(100) DEFAULT NULL,\n `redshift_err` double DEFAULT NULL,\n `redshift_quality` varchar(100) DEFAULT NULL,\n `magnitude_filter` varchar(10) DEFAULT NULL,\n `minor_diameter_arcmin` double DEFAULT NULL,\n `morphology` varchar(50) DEFAULT NULL,\n `hierarchy` varchar(50) DEFAULT NULL,\n `galaxy_morphology` varchar(50) DEFAULT NULL,\n `radio_morphology` varchar(50) DEFAULT NULL,\n `activity_type` varchar(50) DEFAULT NULL,\n `in_ned` tinyint(4) DEFAULT NULL,\n `raDeg` double DEFAULT NULL,\n `decDeg` double DEFAULT NULL,\n `eb_v` double DEFAULT NULL,\n `sdss_coverage` TINYINT DEFAULT NULL,\n PRIMARY KEY (`primaryId`),\n UNIQUE KEY `galaxy_index_id_dist_index_id` (`galaxy_index_id`,`dist_index_id`)\n ) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;\n DROP VIEW IF EXISTS `view_%(tableName)s_master_recorders`;\n CREATE\n VIEW `view_%(tableName)s_master_recorders` AS\n (SELECT \n `%(tableName)s`.`primary_ned_id` AS `primary_ned_id`,\n `%(tableName)s`.`object_type` AS `object_type`,\n `%(tableName)s`.`raDeg` AS `raDeg`,\n `%(tableName)s`.`decDeg` AS `decDeg`,\n `%(tableName)s`.`dist_mpc` AS `dist_mpc`,\n `%(tableName)s`.`dist_mod` AS `dist_mod`,\n `%(tableName)s`.`dist_mod_err` AS `dist_mod_err`,\n `%(tableName)s`.`Method` AS `dist_measurement_method`,\n `%(tableName)s`.`redshift` AS `redshift`,\n `%(tableName)s`.`redshift_err` AS `redshift_err`,\n `%(tableName)s`.`redshift_quality` AS `redshift_quality`,\n `%(tableName)s`.`major_diameter_arcmin` AS `major_diameter_arcmin`,\n `%(tableName)s`.`minor_diameter_arcmin` AS `minor_diameter_arcmin`,\n `%(tableName)s`.`magnitude_filter` AS `magnitude_filter`,\n `%(tableName)s`.`eb_v` AS `gal_eb_v`,\n `%(tableName)s`.`hierarchy` AS `hierarchy`,\n `%(tableName)s`.`morphology` AS `morphology`,\n `%(tableName)s`.`radio_morphology` AS `radio_morphology`,\n `%(tableName)s`.`activity_type` AS `activity_type`,\n `%(tableName)s`.`ned_notes` AS `ned_notes`,\n `%(tableName)s`.`in_ned` AS `in_ned`,\n `%(tableName)s`.`primaryId` AS `primaryId`\n FROM\n `%(tableName)s`\n WHERE\n (`%(tableName)s`.`master_row` = 1));\n \"\"\"",
"%",
"locals",
"(",
")",
"self",
".",
"add_data_to_database_table",
"(",
"dictList",
"=",
"dictList",
",",
"createStatement",
"=",
"createStatement",
")",
"self",
".",
"_clean_up_columns",
"(",
")",
"self",
".",
"_get_metadata_for_galaxies",
"(",
")",
"self",
".",
"_update_sdss_coverage",
"(",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``get`` method'",
")",
"return",
"None"
] | Import the ned_d catalogue into the catalogues database
The method first generates a list of Python dictionaries from the ned_d datafile, imports this list of dictionaries into a database table, and then generates the HTMIDs for that table.
**Usage:**
See class docstring for usage
.. todo ::
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring | [
"Import",
"the",
"ned_d",
"catalogue",
"into",
"the",
"catalogues",
"database"
] | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/imports/ned_d.py#L66-L175 |
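A minimal usage sketch for the importer above (Python 3; the import path and constructor signature are assumptions inferred from the repository layout and the attributes that ingest() reads, not confirmed by this record):

import logging
from sherlock.imports.ned_d import ned_d  # import path assumed

log = logging.getLogger(__name__)
settings = {}  # placeholder; normally the parsed sherlock settings file

importer = ned_d(
    log=log,                      # constructor kwarg assumed
    settings=settings,            # constructor kwarg assumed
    pathToDataFile="ned_d.csv",   # local NED-D export (name illustrative)
    version="1.0",                # catalogue version tag (assumed)
    catalogueName="ned_d"         # destination table stem (assumed)
)
importer.ingest()  # creates the table and master-row view, then adds HTMIDs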
thespacedoctor/sherlock | sherlock/imports/ned_d.py | ned_d._create_dictionary_of_ned_d | def _create_dictionary_of_ned_d(
self):
"""create a list of dictionaries containing all the rows in the ned_d catalogue
**Return:**
- ``dictList`` - a list of dictionaries containing all the rows in the ned_d catalogue
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug(
'starting the ``_create_dictionary_of_ned_d`` method')
count = 0
with open(self.pathToDataFile, 'rb') as csvFile:
csvReader = csv.reader(
csvFile, dialect='excel', delimiter=',', quotechar='"')
totalRows = sum(1 for row in csvReader)
csvFile.close()
totalCount = totalRows
with open(self.pathToDataFile, 'rb') as csvFile:
csvReader = csv.reader(
csvFile, dialect='excel', delimiter=',', quotechar='"')
theseKeys = []
dictList = []
for row in csvReader:
if len(theseKeys) == 0:
totalRows -= 1
if "Exclusion Code" in row and "Hubble const." in row:
for i in row:
if i == "redshift (z)":
theseKeys.append("redshift")
elif i == "Hubble const.":
theseKeys.append("hubble_const")
elif i == "G":
theseKeys.append("galaxy_index_id")
elif i == "err":
theseKeys.append("dist_mod_err")
elif i == "D (Mpc)":
theseKeys.append("dist_mpc")
elif i == "Date (Yr. - 1980)":
theseKeys.append("ref_date")
elif i == "REFCODE":
theseKeys.append("ref")
elif i == "Exclusion Code":
theseKeys.append("dist_in_ned_flag")
elif i == "Adopted LMC modulus":
theseKeys.append("lmc_mod")
elif i == "m-M":
theseKeys.append("dist_mod")
elif i == "Notes":
theseKeys.append("notes")
elif i == "SN ID":
theseKeys.append("dist_derived_from_sn")
elif i == "method":
theseKeys.append("dist_method")
elif i == "Galaxy ID":
theseKeys.append("primary_ned_id")
elif i == "D":
theseKeys.append("dist_index_id")
else:
theseKeys.append(i)
continue
if len(theseKeys):
count += 1
if count > 1:
# Cursor up one line and clear line
sys.stdout.write("\x1b[1A\x1b[2K")
if count > totalCount:
count = totalCount
percent = (float(count) / float(totalCount)) * 100.
print "%(count)s / %(totalCount)s (%(percent)1.1f%%) rows added to memory" % locals()
rowDict = {}
for t, r in zip(theseKeys, row):
rowDict[t] = r
if t == "ref_date":
try:
rowDict[t] = int(r) + 1980
except:
rowDict[t] = None
if rowDict["dist_index_id"] != "999999":
dictList.append(rowDict)
csvFile.close()
self.log.debug(
'completed the ``_create_dictionary_of_ned_d`` method')
return dictList | python | def _create_dictionary_of_ned_d(
self):
"""create a list of dictionaries containing all the rows in the ned_d catalogue
**Return:**
- ``dictList`` - a list of dictionaries containing all the rows in the ned_d catalogue
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug(
'starting the ``_create_dictionary_of_ned_d`` method')
count = 0
with open(self.pathToDataFile, 'rb') as csvFile:
csvReader = csv.reader(
csvFile, dialect='excel', delimiter=',', quotechar='"')
totalRows = sum(1 for row in csvReader)
csvFile.close()
totalCount = totalRows
with open(self.pathToDataFile, 'rb') as csvFile:
csvReader = csv.reader(
csvFile, dialect='excel', delimiter=',', quotechar='"')
theseKeys = []
dictList = []
for row in csvReader:
if len(theseKeys) == 0:
totalRows -= 1
if "Exclusion Code" in row and "Hubble const." in row:
for i in row:
if i == "redshift (z)":
theseKeys.append("redshift")
elif i == "Hubble const.":
theseKeys.append("hubble_const")
elif i == "G":
theseKeys.append("galaxy_index_id")
elif i == "err":
theseKeys.append("dist_mod_err")
elif i == "D (Mpc)":
theseKeys.append("dist_mpc")
elif i == "Date (Yr. - 1980)":
theseKeys.append("ref_date")
elif i == "REFCODE":
theseKeys.append("ref")
elif i == "Exclusion Code":
theseKeys.append("dist_in_ned_flag")
elif i == "Adopted LMC modulus":
theseKeys.append("lmc_mod")
elif i == "m-M":
theseKeys.append("dist_mod")
elif i == "Notes":
theseKeys.append("notes")
elif i == "SN ID":
theseKeys.append("dist_derived_from_sn")
elif i == "method":
theseKeys.append("dist_method")
elif i == "Galaxy ID":
theseKeys.append("primary_ned_id")
elif i == "D":
theseKeys.append("dist_index_id")
else:
theseKeys.append(i)
continue
if len(theseKeys):
count += 1
if count > 1:
# Cursor up one line and clear line
sys.stdout.write("\x1b[1A\x1b[2K")
if count > totalCount:
count = totalCount
percent = (float(count) / float(totalCount)) * 100.
print "%(count)s / %(totalCount)s (%(percent)1.1f%%) rows added to memory" % locals()
rowDict = {}
for t, r in zip(theseKeys, row):
rowDict[t] = r
if t == "ref_date":
try:
rowDict[t] = int(r) + 1980
except:
rowDict[t] = None
if rowDict["dist_index_id"] != "999999":
dictList.append(rowDict)
csvFile.close()
self.log.debug(
'completed the ``_create_dictionary_of_ned_d`` method')
return dictList | [
"def",
"_create_dictionary_of_ned_d",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_create_dictionary_of_ned_d`` method'",
")",
"count",
"=",
"0",
"with",
"open",
"(",
"self",
".",
"pathToDataFile",
",",
"'rb'",
")",
"as",
"csvFile",
":",
"csvReader",
"=",
"csv",
".",
"reader",
"(",
"csvFile",
",",
"dialect",
"=",
"'excel'",
",",
"delimiter",
"=",
"','",
",",
"quotechar",
"=",
"'\"'",
")",
"totalRows",
"=",
"sum",
"(",
"1",
"for",
"row",
"in",
"csvReader",
")",
"csvFile",
".",
"close",
"(",
")",
"totalCount",
"=",
"totalRows",
"with",
"open",
"(",
"self",
".",
"pathToDataFile",
",",
"'rb'",
")",
"as",
"csvFile",
":",
"csvReader",
"=",
"csv",
".",
"reader",
"(",
"csvFile",
",",
"dialect",
"=",
"'excel'",
",",
"delimiter",
"=",
"','",
",",
"quotechar",
"=",
"'\"'",
")",
"theseKeys",
"=",
"[",
"]",
"dictList",
"=",
"[",
"]",
"for",
"row",
"in",
"csvReader",
":",
"if",
"len",
"(",
"theseKeys",
")",
"==",
"0",
":",
"totalRows",
"-=",
"1",
"if",
"\"Exclusion Code\"",
"in",
"row",
"and",
"\"Hubble const.\"",
"in",
"row",
":",
"for",
"i",
"in",
"row",
":",
"if",
"i",
"==",
"\"redshift (z)\"",
":",
"theseKeys",
".",
"append",
"(",
"\"redshift\"",
")",
"elif",
"i",
"==",
"\"Hubble const.\"",
":",
"theseKeys",
".",
"append",
"(",
"\"hubble_const\"",
")",
"elif",
"i",
"==",
"\"G\"",
":",
"theseKeys",
".",
"append",
"(",
"\"galaxy_index_id\"",
")",
"elif",
"i",
"==",
"\"err\"",
":",
"theseKeys",
".",
"append",
"(",
"\"dist_mod_err\"",
")",
"elif",
"i",
"==",
"\"D (Mpc)\"",
":",
"theseKeys",
".",
"append",
"(",
"\"dist_mpc\"",
")",
"elif",
"i",
"==",
"\"Date (Yr. - 1980)\"",
":",
"theseKeys",
".",
"append",
"(",
"\"ref_date\"",
")",
"elif",
"i",
"==",
"\"REFCODE\"",
":",
"theseKeys",
".",
"append",
"(",
"\"ref\"",
")",
"elif",
"i",
"==",
"\"Exclusion Code\"",
":",
"theseKeys",
".",
"append",
"(",
"\"dist_in_ned_flag\"",
")",
"elif",
"i",
"==",
"\"Adopted LMC modulus\"",
":",
"theseKeys",
".",
"append",
"(",
"\"lmc_mod\"",
")",
"elif",
"i",
"==",
"\"m-M\"",
":",
"theseKeys",
".",
"append",
"(",
"\"dist_mod\"",
")",
"elif",
"i",
"==",
"\"Notes\"",
":",
"theseKeys",
".",
"append",
"(",
"\"notes\"",
")",
"elif",
"i",
"==",
"\"SN ID\"",
":",
"theseKeys",
".",
"append",
"(",
"\"dist_derived_from_sn\"",
")",
"elif",
"i",
"==",
"\"method\"",
":",
"theseKeys",
".",
"append",
"(",
"\"dist_method\"",
")",
"elif",
"i",
"==",
"\"Galaxy ID\"",
":",
"theseKeys",
".",
"append",
"(",
"\"primary_ned_id\"",
")",
"elif",
"i",
"==",
"\"D\"",
":",
"theseKeys",
".",
"append",
"(",
"\"dist_index_id\"",
")",
"else",
":",
"theseKeys",
".",
"append",
"(",
"i",
")",
"continue",
"if",
"len",
"(",
"theseKeys",
")",
":",
"count",
"+=",
"1",
"if",
"count",
">",
"1",
":",
"# Cursor up one line and clear line",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"\\x1b[1A\\x1b[2K\"",
")",
"if",
"count",
">",
"totalCount",
":",
"count",
"=",
"totalCount",
"percent",
"=",
"(",
"float",
"(",
"count",
")",
"/",
"float",
"(",
"totalCount",
")",
")",
"*",
"100.",
"print",
"\"%(count)s / %(totalCount)s (%(percent)1.1f%%) rows added to memory\"",
"%",
"locals",
"(",
")",
"rowDict",
"=",
"{",
"}",
"for",
"t",
",",
"r",
"in",
"zip",
"(",
"theseKeys",
",",
"row",
")",
":",
"rowDict",
"[",
"t",
"]",
"=",
"r",
"if",
"t",
"==",
"\"ref_date\"",
":",
"try",
":",
"rowDict",
"[",
"t",
"]",
"=",
"int",
"(",
"r",
")",
"+",
"1980",
"except",
":",
"rowDict",
"[",
"t",
"]",
"=",
"None",
"if",
"rowDict",
"[",
"\"dist_index_id\"",
"]",
"!=",
"\"999999\"",
":",
"dictList",
".",
"append",
"(",
"rowDict",
")",
"csvFile",
".",
"close",
"(",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_create_dictionary_of_ned_d`` method'",
")",
"return",
"dictList"
] | create a list of dictionaries containing all the rows in the ned_d catalogue
**Return:**
- ``dictList`` - a list of dictionaries containing all the rows in the ned_d catalogue
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring | [
"create",
"a",
"list",
"of",
"dictionaries",
"containing",
"all",
"the",
"rows",
"in",
"the",
"ned_d",
"catalogue"
] | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/imports/ned_d.py#L177-L274 |
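The header-handling loop above boils down to one reusable pattern: remap raw CSV header names to database column names, then zip every subsequent row against those keys. A self-contained Python 3 sketch of that pattern (the archived code targets Python 2; the mapping entries below are a subset, for illustration only):

import csv

HEADER_MAP = {
    "redshift (z)": "redshift",
    "D (Mpc)": "dist_mpc",
    "REFCODE": "ref",
    "Galaxy ID": "primary_ned_id",
}

def csv_to_dicts(path):
    with open(path, newline="") as csv_file:
        reader = csv.reader(csv_file, delimiter=",", quotechar='"')
        keys = None
        for row in reader:
            if keys is None:
                # remap recognised header names; keep the rest verbatim
                keys = [HEADER_MAP.get(col, col) for col in row]
                continue
            yield dict(zip(keys, row))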
thespacedoctor/sherlock | sherlock/imports/ned_d.py | ned_d._clean_up_columns | def _clean_up_columns(
self):
"""clean up columns of the NED table
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``_clean_up_columns`` method')
tableName = self.dbTableName
print "cleaning up %(tableName)s columns" % locals()
sqlQuery = u"""
set sql_mode="STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION";
""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
sqlQuery = u"""
update %(tableName)s set dist_mod_err = null where dist_mod_err = 0;
update %(tableName)s set dist_in_ned_flag = null where dist_in_ned_flag = "";
update %(tableName)s set notes = null where notes = "";
update %(tableName)s set redshift = null where redshift = 0;
update %(tableName)s set dist_derived_from_sn = null where dist_derived_from_sn = "";
update %(tableName)s set hubble_const = null where hubble_const = 0;
update %(tableName)s set lmc_mod = null where lmc_mod = 0;
update %(tableName)s set master_row = 0;
update %(tableName)s set master_row = 1 where primaryId in (select * from (select distinct primaryId from %(tableName)s group by galaxy_index_id) as alias);
""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
self.log.debug('completed the ``_clean_up_columns`` method')
return None | python | def _clean_up_columns(
self):
"""clean up columns of the NED table
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``_clean_up_columns`` method')
tableName = self.dbTableName
print "cleaning up %(tableName)s columns" % locals()
sqlQuery = u"""
set sql_mode="STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION";
""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
sqlQuery = u"""
update %(tableName)s set dist_mod_err = null where dist_mod_err = 0;
update %(tableName)s set dist_in_ned_flag = null where dist_in_ned_flag = "";
update %(tableName)s set notes = null where notes = "";
update %(tableName)s set redshift = null where redshift = 0;
update %(tableName)s set dist_derived_from_sn = null where dist_derived_from_sn = "";
update %(tableName)s set hubble_const = null where hubble_const = 0;
update %(tableName)s set lmc_mod = null where lmc_mod = 0;
update %(tableName)s set master_row = 0;
update %(tableName)s set master_row = 1 where primaryId in (select * from (select distinct primaryId from %(tableName)s group by galaxy_index_id) as alias);
""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
self.log.debug('completed the ``_clean_up_columns`` method')
return None | [
"def",
"_clean_up_columns",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_clean_up_columns`` method'",
")",
"tableName",
"=",
"self",
".",
"dbTableName",
"print",
"\"cleaning up %(tableName)s columns\"",
"%",
"locals",
"(",
")",
"sqlQuery",
"=",
"u\"\"\"\n set sql_mode=\"STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION\";\n \"\"\"",
"%",
"locals",
"(",
")",
"writequery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
",",
")",
"sqlQuery",
"=",
"u\"\"\"\n update %(tableName)s set dist_mod_err = null where dist_mod_err = 0;\n update %(tableName)s set dist_in_ned_flag = null where dist_in_ned_flag = \"\";\n update %(tableName)s set notes = null where notes = \"\";\n update %(tableName)s set redshift = null where redshift = 0;\n update %(tableName)s set dist_derived_from_sn = null where dist_derived_from_sn = \"\";\n update %(tableName)s set hubble_const = null where hubble_const = 0;\n update %(tableName)s set lmc_mod = null where lmc_mod = 0;\n update %(tableName)s set master_row = 0;\n update %(tableName)s set master_row = 1 where primaryId in (select * from (select distinct primaryId from %(tableName)s group by galaxy_index_id) as alias);\n \"\"\"",
"%",
"locals",
"(",
")",
"writequery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
",",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_clean_up_columns`` method'",
")",
"return",
"None"
] | clean up columns of the NED table
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring | [
"clean",
"up",
"columns",
"of",
"the",
"NED",
"table"
] | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/imports/ned_d.py#L276-L323 |
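Each UPDATE above hand-normalises one sentinel value (0 or the empty string) to NULL. A hedged sketch of generating those statements from a column spec instead (Python 3, illustrative; the column list is abbreviated and not part of the repository):

NULL_IF = {
    "dist_mod_err": "0",
    "redshift": "0",
    "hubble_const": "0",
    "lmc_mod": "0",
    "dist_in_ned_flag": '""',
    "notes": '""',
    "dist_derived_from_sn": '""',
}

def null_cleanup_sql(table_name):
    # one UPDATE per column, mirroring the hand-written block above
    return "\n".join(
        f"update {table_name} set {col} = null where {col} = {bad};"
        for col, bad in NULL_IF.items()
    )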
thespacedoctor/sherlock | sherlock/imports/ned_d.py | ned_d._get_metadata_for_galaxies | def _get_metadata_for_galaxies(
self):
"""get metadata for galaxies
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``_get_metadata_for_galaxies`` method')
total, batches = self._count_galaxies_requiring_metadata()
print "%(total)s galaxies require metadata. Need to send %(batches)s batch requests to NED." % locals()
totalBatches = self.batches
thisCount = 0
# FOR EACH BATCH, GET THE GALAXY IDs, QUERY NED AND UPDATE THE DATABASE
while self.total:
thisCount += 1
self._get_3000_galaxies_needing_metadata()
dictList = self._query_ned_and_add_results_to_database(thisCount)
self.add_data_to_database_table(
dictList=dictList,
createStatement=False
)
self._count_galaxies_requiring_metadata()
self.log.debug('completed the ``_get_metadata_for_galaxies`` method')
return None | python | def _get_metadata_for_galaxies(
self):
"""get metadata for galaxies
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``_get_metadata_for_galaxies`` method')
total, batches = self._count_galaxies_requiring_metadata()
print "%(total)s galaxies require metadata. Need to send %(batches)s batch requests to NED." % locals()
totalBatches = self.batches
thisCount = 0
# FOR EACH BATCH, GET THE GALAXY IDs, QUERY NED AND UPDATE THE DATABASE
while self.total:
thisCount += 1
self._get_3000_galaxies_needing_metadata()
dictList = self._query_ned_and_add_results_to_database(thisCount)
self.add_data_to_database_table(
dictList=dictList,
createStatement=False
)
self._count_galaxies_requiring_metadata()
self.log.debug('completed the ``_get_metadata_for_galaxies`` method')
return None | [
"def",
"_get_metadata_for_galaxies",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_get_metadata_for_galaxies`` method'",
")",
"total",
",",
"batches",
"=",
"self",
".",
"_count_galaxies_requiring_metadata",
"(",
")",
"print",
"\"%(total)s galaxies require metadata. Need to send %(batches)s batch requests to NED.\"",
"%",
"locals",
"(",
")",
"totalBatches",
"=",
"self",
".",
"batches",
"thisCount",
"=",
"0",
"# FOR EACH BATCH, GET THE GALAXY IDs, QUERY NED AND UPDATE THE DATABASE",
"while",
"self",
".",
"total",
":",
"thisCount",
"+=",
"1",
"self",
".",
"_get_3000_galaxies_needing_metadata",
"(",
")",
"dictList",
"=",
"self",
".",
"_query_ned_and_add_results_to_database",
"(",
"thisCount",
")",
"self",
".",
"add_data_to_database_table",
"(",
"dictList",
"=",
"dictList",
",",
"createStatement",
"=",
"False",
")",
"self",
".",
"_count_galaxies_requiring_metadata",
"(",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_get_metadata_for_galaxies`` method'",
")",
"return",
"None"
] | get metadata for galaxies
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring | [
"get",
"metadata",
"for",
"galaxies"
] | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/imports/ned_d.py#L325-L361 |
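Control-flow sketch of the batching above: re-count the remaining rows after each pass and stop when the count reaches zero (illustrative only; the real method fixes the batch size at 3000 via _get_3000_galaxies_needing_metadata):

def drain_in_batches(count_remaining, fetch_batch, process_batch):
    batch_number = 0
    while count_remaining():          # e.g. rows still lacking metadata
        batch_number += 1
        process_batch(fetch_batch(), batch_number)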
thespacedoctor/sherlock | sherlock/imports/ned_d.py | ned_d._get_3000_galaxies_needing_metadata | def _get_3000_galaxies_needing_metadata(
self):
""" get 3000 galaxies needing metadata
**Return:**
- ``len(self.theseIds)`` -- the number of NED IDs returned
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug(
'starting the ``_get_3000_galaxies_needing_metadata`` method')
tableName = self.dbTableName
# SELECT THE DATA FROM NED TABLE
self.theseIds = {}
sqlQuery = u"""
select primaryId, primary_ned_id from %(tableName)s where master_row = 1 and in_ned is null limit 3000;
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
for row in rows:
self.theseIds[row["primary_ned_id"]] = row["primaryId"]
self.log.debug(
'completed the ``_get_3000_galaxies_needing_metadata`` method')
return len(self.theseIds) | python | def _get_3000_galaxies_needing_metadata(
self):
""" get 3000 galaxies needing metadata
**Return:**
- ``len(self.theseIds)`` -- the number of NED IDs returned
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug(
'starting the ``_get_3000_galaxies_needing_metadata`` method')
tableName = self.dbTableName
# SELECT THE DATA FROM NED TABLE
self.theseIds = {}
sqlQuery = u"""
select primaryId, primary_ned_id from %(tableName)s where master_row = 1 and in_ned is null limit 3000;
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
for row in rows:
self.theseIds[row["primary_ned_id"]] = row["primaryId"]
self.log.debug(
'completed the ``_get_3000_galaxies_needing_metadata`` method')
return len(self.theseIds) | [
"def",
"_get_3000_galaxies_needing_metadata",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_get_3000_galaxies_needing_metadata`` method'",
")",
"tableName",
"=",
"self",
".",
"dbTableName",
"# SELECT THE DATA FROM NED TABLE",
"self",
".",
"theseIds",
"=",
"{",
"}",
"sqlQuery",
"=",
"u\"\"\"\n select primaryId, primary_ned_id from %(tableName)s where master_row = 1 and in_ned is null limit 3000;\n \"\"\"",
"%",
"locals",
"(",
")",
"rows",
"=",
"readquery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
",",
"quiet",
"=",
"False",
")",
"for",
"row",
"in",
"rows",
":",
"self",
".",
"theseIds",
"[",
"row",
"[",
"\"primary_ned_id\"",
"]",
"]",
"=",
"row",
"[",
"\"primaryId\"",
"]",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_get_3000_galaxies_needing_metadata`` method'",
")",
"return",
"len",
"(",
"self",
".",
"theseIds",
")"
] | get 3000 galaxies needing metadata
**Return:**
- ``len(self.theseIds)`` -- the number of NED IDs returned
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring | [
"get",
"3000",
"galaxies",
"needing",
"metadata"
] | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/imports/ned_d.py#L404-L443 |
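The method reduces to a select-into-dict step: key each returned row by its NED name so later NED results can be joined back to the database primary IDs. A minimal sketch with illustrative row data:

rows = [
    {"primaryId": 1, "primary_ned_id": "NGC 0224"},
    {"primaryId": 2, "primary_ned_id": "NGC 0598"},
]
these_ids = {row["primary_ned_id"]: row["primaryId"] for row in rows}
# {'NGC 0224': 1, 'NGC 0598': 2}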
thespacedoctor/sherlock | sherlock/imports/ned_d.py | ned_d._query_ned_and_add_results_to_database | def _query_ned_and_add_results_to_database(
self,
batchCount):
""" query ned and add results to database
**Key Arguments:**
- ``batchCount`` - the index number of the batch sent to NED
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check redendering of this docstring
"""
self.log.debug(
'starting the ``_query_ned_and_add_results_to_database`` method')
tableName = self.dbTableName
# ASTROCALC UNIT CONVERTER OBJECT
converter = unit_conversion(
log=self.log
)
# QUERY NED WITH BATCH
totalCount = len(self.theseIds)
print "requesting metadata from NED for %(totalCount)s galaxies (batch %(batchCount)s)" % locals()
search = namesearch(
log=self.log,
names=self.theseIds.keys(),
quiet=True
)
results = search.get()
print "results returned from ned -- starting to add to database" % locals()
# CLEAN THE RETURNED DATA AND UPDATE DATABASE
totalCount = len(results)
count = 0
sqlQuery = ""
dictList = []
colList = ["redshift_quality", "redshift", "hierarchy", "object_type", "major_diameter_arcmin", "morphology", "magnitude_filter",
"ned_notes", "eb_v", "raDeg", "radio_morphology", "activity_type", "minor_diameter_arcmin", "decDeg", "redshift_err", "in_ned"]
if not len(results):
for k, v in self.theseIds.iteritems():
dictList.append({
"in_ned": 0,
"primaryID": v
})
for thisDict in results:
thisDict["tableName"] = tableName
count += 1
for k, v in thisDict.iteritems():
if not v or len(v) == 0:
thisDict[k] = "null"
if k in ["major_diameter_arcmin", "minor_diameter_arcmin"] and (":" in v or "?" in v or "<" in v):
thisDict[k] = v.replace(":", "").replace(
"?", "").replace("<", "")
if isinstance(v, str) and '"' in v:
thisDict[k] = v.replace('"', '\\"')
if "Input name not" not in thisDict["input_note"] and "Same object as" not in thisDict["input_note"]:
if thisDict["ra"] != "null" and thisDict["dec"] != "null":
thisDict["raDeg"] = converter.ra_sexegesimal_to_decimal(
ra=thisDict["ra"]
)
thisDict["decDeg"] = converter.dec_sexegesimal_to_decimal(
dec=thisDict["dec"]
)
else:
thisDict["raDeg"] = None
thisDict["decDeg"] = None
thisDict["in_ned"] = 1
thisDict["eb_v"] = thisDict["eb-v"]
row = {}
row["primary_ned_id"] = thisDict["input_name"]
try:
row["primaryID"] = self.theseIds[thisDict["input_name"]]
for c in colList:
if thisDict[c] == "null":
row[c] = None
else:
row[c] = thisDict[c]
dictList.append(row)
except:
g = thisDict["input_name"]
self.log.error(
"Cannot find database table %(tableName)s primaryID for '%(g)s'\n\n" % locals())
dictList.append({
"in_ned": 0,
"primary_ned_id": thisDict["input_name"]
})
else:
dictList.append({
"primary_ned_id": thisDict["input_name"],
"in_ned": 0,
"primaryID": self.theseIds[thisDict["input_name"]]
})
self.log.debug(
'completed the ``_query_ned_and_add_results_to_database`` method')
return dictList | python | def _query_ned_and_add_results_to_database(
self,
batchCount):
""" query ned and add results to database
**Key Arguments:**
- ``batchCount`` - the index number of the batch sent to NED
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug(
'starting the ``_query_ned_and_add_results_to_database`` method')
tableName = self.dbTableName
# ASTROCALC UNIT CONVERTER OBJECT
converter = unit_conversion(
log=self.log
)
# QUERY NED WITH BATCH
totalCount = len(self.theseIds)
print "requesting metadata from NED for %(totalCount)s galaxies (batch %(batchCount)s)" % locals()
search = namesearch(
log=self.log,
names=self.theseIds.keys(),
quiet=True
)
results = search.get()
print "results returned from ned -- starting to add to database" % locals()
# CLEAN THE RETURNED DATA AND UPDATE DATABASE
totalCount = len(results)
count = 0
sqlQuery = ""
dictList = []
colList = ["redshift_quality", "redshift", "hierarchy", "object_type", "major_diameter_arcmin", "morphology", "magnitude_filter",
"ned_notes", "eb_v", "raDeg", "radio_morphology", "activity_type", "minor_diameter_arcmin", "decDeg", "redshift_err", "in_ned"]
if not len(results):
for k, v in self.theseIds.iteritems():
dictList.append({
"in_ned": 0,
"primaryID": v
})
for thisDict in results:
thisDict["tableName"] = tableName
count += 1
for k, v in thisDict.iteritems():
if not v or len(v) == 0:
thisDict[k] = "null"
if k in ["major_diameter_arcmin", "minor_diameter_arcmin"] and (":" in v or "?" in v or "<" in v):
thisDict[k] = v.replace(":", "").replace(
"?", "").replace("<", "")
if isinstance(v, str) and '"' in v:
thisDict[k] = v.replace('"', '\\"')
if "Input name not" not in thisDict["input_note"] and "Same object as" not in thisDict["input_note"]:
if thisDict["ra"] != "null" and thisDict["dec"] != "null":
thisDict["raDeg"] = converter.ra_sexegesimal_to_decimal(
ra=thisDict["ra"]
)
thisDict["decDeg"] = converter.dec_sexegesimal_to_decimal(
dec=thisDict["dec"]
)
else:
thisDict["raDeg"] = None
thisDict["decDeg"] = None
thisDict["in_ned"] = 1
thisDict["eb_v"] = thisDict["eb-v"]
row = {}
row["primary_ned_id"] = thisDict["input_name"]
try:
row["primaryID"] = self.theseIds[thisDict["input_name"]]
for c in colList:
if thisDict[c] == "null":
row[c] = None
else:
row[c] = thisDict[c]
dictList.append(row)
except:
g = thisDict["input_name"]
self.log.error(
"Cannot find database table %(tableName)s primaryID for '%(g)s'\n\n" % locals())
dictList.append({
"in_ned": 0,
"primary_ned_id": thisDict["input_name"]
})
else:
dictList.append({
"primary_ned_id": thisDict["input_name"],
"in_ned": 0,
"primaryID": self.theseIds[thisDict["input_name"]]
})
self.log.debug(
'completed the ``_query_ned_and_add_results_to_database`` method')
return dictList | [
"def",
"_query_ned_and_add_results_to_database",
"(",
"self",
",",
"batchCount",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_query_ned_and_add_results_to_database`` method'",
")",
"tableName",
"=",
"self",
".",
"dbTableName",
"# ASTROCALC UNIT CONVERTER OBJECT",
"converter",
"=",
"unit_conversion",
"(",
"log",
"=",
"self",
".",
"log",
")",
"# QUERY NED WITH BATCH",
"totalCount",
"=",
"len",
"(",
"self",
".",
"theseIds",
")",
"print",
"\"requesting metadata from NED for %(totalCount)s galaxies (batch %(batchCount)s)\"",
"%",
"locals",
"(",
")",
"search",
"=",
"namesearch",
"(",
"log",
"=",
"self",
".",
"log",
",",
"names",
"=",
"self",
".",
"theseIds",
".",
"keys",
"(",
")",
",",
"quiet",
"=",
"True",
")",
"results",
"=",
"search",
".",
"get",
"(",
")",
"print",
"\"results returned from ned -- starting to add to database\"",
"%",
"locals",
"(",
")",
"# CLEAN THE RETURNED DATA AND UPDATE DATABASE",
"totalCount",
"=",
"len",
"(",
"results",
")",
"count",
"=",
"0",
"sqlQuery",
"=",
"\"\"",
"dictList",
"=",
"[",
"]",
"colList",
"=",
"[",
"\"redshift_quality\"",
",",
"\"redshift\"",
",",
"\"hierarchy\"",
",",
"\"object_type\"",
",",
"\"major_diameter_arcmin\"",
",",
"\"morphology\"",
",",
"\"magnitude_filter\"",
",",
"\"ned_notes\"",
",",
"\"eb_v\"",
",",
"\"raDeg\"",
",",
"\"radio_morphology\"",
",",
"\"activity_type\"",
",",
"\"minor_diameter_arcmin\"",
",",
"\"decDeg\"",
",",
"\"redshift_err\"",
",",
"\"in_ned\"",
"]",
"if",
"not",
"len",
"(",
"results",
")",
":",
"for",
"k",
",",
"v",
"in",
"self",
".",
"theseIds",
".",
"iteritems",
"(",
")",
":",
"dictList",
".",
"append",
"(",
"{",
"\"in_ned\"",
":",
"0",
",",
"\"primaryID\"",
":",
"v",
"}",
")",
"for",
"thisDict",
"in",
"results",
":",
"thisDict",
"[",
"\"tableName\"",
"]",
"=",
"tableName",
"count",
"+=",
"1",
"for",
"k",
",",
"v",
"in",
"thisDict",
".",
"iteritems",
"(",
")",
":",
"if",
"not",
"v",
"or",
"len",
"(",
"v",
")",
"==",
"0",
":",
"thisDict",
"[",
"k",
"]",
"=",
"\"null\"",
"if",
"k",
"in",
"[",
"\"major_diameter_arcmin\"",
",",
"\"minor_diameter_arcmin\"",
"]",
"and",
"(",
"\":\"",
"in",
"v",
"or",
"\"?\"",
"in",
"v",
"or",
"\"<\"",
"in",
"v",
")",
":",
"thisDict",
"[",
"k",
"]",
"=",
"v",
".",
"replace",
"(",
"\":\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\"?\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\"<\"",
",",
"\"\"",
")",
"if",
"isinstance",
"(",
"v",
",",
"str",
")",
"and",
"'\"'",
"in",
"v",
":",
"thisDict",
"[",
"k",
"]",
"=",
"v",
".",
"replace",
"(",
"'\"'",
",",
"'\\\\\"'",
")",
"if",
"\"Input name not\"",
"not",
"in",
"thisDict",
"[",
"\"input_note\"",
"]",
"and",
"\"Same object as\"",
"not",
"in",
"thisDict",
"[",
"\"input_note\"",
"]",
":",
"if",
"thisDict",
"[",
"\"ra\"",
"]",
"!=",
"\"null\"",
"and",
"thisDict",
"[",
"\"dec\"",
"]",
"!=",
"\"null\"",
":",
"thisDict",
"[",
"\"raDeg\"",
"]",
"=",
"converter",
".",
"ra_sexegesimal_to_decimal",
"(",
"ra",
"=",
"thisDict",
"[",
"\"ra\"",
"]",
")",
"thisDict",
"[",
"\"decDeg\"",
"]",
"=",
"converter",
".",
"dec_sexegesimal_to_decimal",
"(",
"dec",
"=",
"thisDict",
"[",
"\"dec\"",
"]",
")",
"else",
":",
"thisDict",
"[",
"\"raDeg\"",
"]",
"=",
"None",
"thisDict",
"[",
"\"decDeg\"",
"]",
"=",
"None",
"thisDict",
"[",
"\"in_ned\"",
"]",
"=",
"1",
"thisDict",
"[",
"\"eb_v\"",
"]",
"=",
"thisDict",
"[",
"\"eb-v\"",
"]",
"row",
"=",
"{",
"}",
"row",
"[",
"\"primary_ned_id\"",
"]",
"=",
"thisDict",
"[",
"\"input_name\"",
"]",
"try",
":",
"row",
"[",
"\"primaryID\"",
"]",
"=",
"self",
".",
"theseIds",
"[",
"thisDict",
"[",
"\"input_name\"",
"]",
"]",
"for",
"c",
"in",
"colList",
":",
"if",
"thisDict",
"[",
"c",
"]",
"==",
"\"null\"",
":",
"row",
"[",
"c",
"]",
"=",
"None",
"else",
":",
"row",
"[",
"c",
"]",
"=",
"thisDict",
"[",
"c",
"]",
"dictList",
".",
"append",
"(",
"row",
")",
"except",
":",
"g",
"=",
"thisDict",
"[",
"\"input_name\"",
"]",
"self",
".",
"log",
".",
"error",
"(",
"\"Cannot find database table %(tableName)s primaryID for '%(g)s'\\n\\n\"",
"%",
"locals",
"(",
")",
")",
"dictList",
".",
"append",
"(",
"{",
"\"in_ned\"",
":",
"0",
",",
"\"primary_ned_id\"",
":",
"thisDict",
"[",
"\"input_name\"",
"]",
"}",
")",
"else",
":",
"dictList",
".",
"append",
"(",
"{",
"\"primary_ned_id\"",
":",
"thisDict",
"[",
"\"input_name\"",
"]",
",",
"\"in_ned\"",
":",
"0",
",",
"\"primaryID\"",
":",
"self",
".",
"theseIds",
"[",
"thisDict",
"[",
"\"input_name\"",
"]",
"]",
"}",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_query_ned_and_add_results_to_database`` method'",
")",
"return",
"dictList"
] | query ned and add results to database
**Key Arguments:**
- ``batchCount`` - the index number of the batch sent to NED
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring | [
"query",
"ned",
"and",
"add",
"results",
"to",
"database"
] | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/imports/ned_d.py#L445-L553 |
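The coordinate cleanup above delegates to astrocalc's converter. For reference, a hedged sketch of the underlying sexagesimal-to-decimal-degree formulas (astrocalc's own implementation may differ in validation and edge-case handling):

def ra_sex_to_deg(ra):
    # "HH:MM:SS.S" -> degrees; one hour of right ascension is 15 degrees
    hours, minutes, seconds = (float(x) for x in ra.split(":"))
    return (hours + minutes / 60.0 + seconds / 3600.0) * 15.0

def dec_sex_to_deg(dec):
    # "+DD:MM:SS.S" -> degrees; read the sign from the string so that
    # declinations like "-00:30:00" keep their sign
    sign = -1.0 if dec.strip().startswith("-") else 1.0
    d, m, s = (abs(float(x)) for x in dec.split(":"))
    return sign * (d + m / 60.0 + s / 3600.0)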
thespacedoctor/sherlock | sherlock/imports/ned_d.py | ned_d._update_sdss_coverage | def _update_sdss_coverage(
self):
""" update sdss coverage
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``_update_sdss_coverage`` method')
tableName = self.dbTableName
# SELECT THE LOCATIONS NEEDING TO BE CHECKED
sqlQuery = u"""
select primary_ned_id, primaryID, raDeg, decDeg, sdss_coverage from %(tableName)s where sdss_coverage is null and master_row = 1 and in_ned = 1 order by dist_mpc;
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
totalCount = len(rows)
count = 0
for row in rows:
count += 1
if count > 1:
# Cursor up three lines and clear
sys.stdout.write("\x1b[1A\x1b[2K")
sys.stdout.write("\x1b[1A\x1b[2K")
sys.stdout.write("\x1b[1A\x1b[2K")
if count > totalCount:
count = totalCount
percent = (float(count) / float(totalCount)) * 100.
primaryID = row["primaryID"]
raDeg = float(row["raDeg"])
decDeg = float(row["decDeg"])
primary_ned_id = row["primary_ned_id"]
# SDSS CAN ONLY ACCEPT 60 QUERIES/MIN
time.sleep(1.1)
print "%(count)s / %(totalCount)s (%(percent)1.1f%%) NED galaxies checked for SDSS coverage" % locals()
print "NED NAME: ", primary_ned_id
# covered = True | False | 999 (i.e. not sure)
sdss_coverage = check_coverage(
log=self.log,
ra=raDeg,
dec=decDeg
).get()
if sdss_coverage == 999:
sdss_coverage_flag = "null"
elif sdss_coverage == True:
sdss_coverage_flag = 1
elif sdss_coverage == False:
sdss_coverage_flag = 0
else:
self.log.error('could not get sdss coverage' % locals())
sys.exit(0)
# UPDATE THE DATABASE FLAG
sqlQuery = u"""
update %(tableName)s set sdss_coverage = %(sdss_coverage_flag)s where primaryID = %(primaryID)s
""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
self.log.debug('completed the ``_update_sdss_coverage`` method')
return None | python | def _update_sdss_coverage(
self):
""" update sdss coverage
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``_update_sdss_coverage`` method')
tableName = self.dbTableName
# SELECT THE LOCATIONS NEEDING TO BE CHECKED
sqlQuery = u"""
select primary_ned_id, primaryID, raDeg, decDeg, sdss_coverage from %(tableName)s where sdss_coverage is null and master_row = 1 and in_ned = 1 order by dist_mpc;
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
totalCount = len(rows)
count = 0
for row in rows:
count += 1
if count > 1:
# Cursor up three lines and clear
sys.stdout.write("\x1b[1A\x1b[2K")
sys.stdout.write("\x1b[1A\x1b[2K")
sys.stdout.write("\x1b[1A\x1b[2K")
if count > totalCount:
count = totalCount
percent = (float(count) / float(totalCount)) * 100.
primaryID = row["primaryID"]
raDeg = float(row["raDeg"])
decDeg = float(row["decDeg"])
primary_ned_id = row["primary_ned_id"]
# SDSS CAN ONLY ACCEPT 60 QUERIES/MIN
time.sleep(1.1)
print "%(count)s / %(totalCount)s (%(percent)1.1f%%) NED galaxies checked for SDSS coverage" % locals()
print "NED NAME: ", primary_ned_id
# covered = True | False | 999 (i.e. not sure)
sdss_coverage = check_coverage(
log=self.log,
ra=raDeg,
dec=decDeg
).get()
if sdss_coverage == 999:
sdss_coverage_flag = "null"
elif sdss_coverage == True:
sdss_coverage_flag = 1
elif sdss_coverage == False:
sdss_coverage_flag = 0
else:
self.log.error('could not get sdss coverage' % locals())
sys.exit(0)
# UPDATE THE DATABASE FLAG
sqlQuery = u"""
update %(tableName)s set sdss_coverage = %(sdss_coverage_flag)s where primaryID = %(primaryID)s
""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
self.log.debug('completed the ``_update_sdss_coverage`` method')
return None | [
"def",
"_update_sdss_coverage",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_update_sdss_coverage`` method'",
")",
"tableName",
"=",
"self",
".",
"dbTableName",
"# SELECT THE LOCATIONS NEEDING TO BE CHECKED",
"sqlQuery",
"=",
"u\"\"\"\n select primary_ned_id, primaryID, raDeg, decDeg, sdss_coverage from %(tableName)s where sdss_coverage is null and master_row = 1 and in_ned = 1 order by dist_mpc;\n \"\"\"",
"%",
"locals",
"(",
")",
"rows",
"=",
"readquery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
",",
"quiet",
"=",
"False",
")",
"totalCount",
"=",
"len",
"(",
"rows",
")",
"count",
"=",
"0",
"for",
"row",
"in",
"rows",
":",
"count",
"+=",
"1",
"if",
"count",
">",
"1",
":",
"# Cursor up three lines and clear",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"\\x1b[1A\\x1b[2K\"",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"\\x1b[1A\\x1b[2K\"",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"\\x1b[1A\\x1b[2K\"",
")",
"if",
"count",
">",
"totalCount",
":",
"count",
"=",
"totalCount",
"percent",
"=",
"(",
"float",
"(",
"count",
")",
"/",
"float",
"(",
"totalCount",
")",
")",
"*",
"100.",
"primaryID",
"=",
"row",
"[",
"\"primaryID\"",
"]",
"raDeg",
"=",
"float",
"(",
"row",
"[",
"\"raDeg\"",
"]",
")",
"decDeg",
"=",
"float",
"(",
"row",
"[",
"\"decDeg\"",
"]",
")",
"primary_ned_id",
"=",
"row",
"[",
"\"primary_ned_id\"",
"]",
"# SDSS CAN ONLY ACCEPT 60 QUERIES/MIN",
"time",
".",
"sleep",
"(",
"1.1",
")",
"print",
"\"%(count)s / %(totalCount)s (%(percent)1.1f%%) NED galaxies checked for SDSS coverage\"",
"%",
"locals",
"(",
")",
"print",
"\"NED NAME: \"",
",",
"primary_ned_id",
"# covered = True | False | 999 (i.e. not sure)",
"sdss_coverage",
"=",
"check_coverage",
"(",
"log",
"=",
"self",
".",
"log",
",",
"ra",
"=",
"raDeg",
",",
"dec",
"=",
"decDeg",
")",
".",
"get",
"(",
")",
"if",
"sdss_coverage",
"==",
"999",
":",
"sdss_coverage_flag",
"=",
"\"null\"",
"elif",
"sdss_coverage",
"==",
"True",
":",
"sdss_coverage_flag",
"=",
"1",
"elif",
"sdss_coverage",
"==",
"False",
":",
"sdss_coverage_flag",
"=",
"0",
"else",
":",
"self",
".",
"log",
".",
"error",
"(",
"'cound not get sdss coverage'",
"%",
"locals",
"(",
")",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"# UPDATE THE DATABASE FLAG",
"sqlQuery",
"=",
"u\"\"\"\n update %(tableName)s set sdss_coverage = %(sdss_coverage_flag)s where primaryID = %(primaryID)s\n \"\"\"",
"%",
"locals",
"(",
")",
"writequery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
",",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_update_sdss_coverage`` method'",
")",
"return",
"None"
] | update sdss coverage
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring | [
"update",
"sdss",
"coverage"
] | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/imports/ned_d.py#L555-L636 |
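The 1.1-second sleep enforces SDSS's 60-queries-per-minute cap by brute force. A hedged alternative sketch that throttles to the same rate while absorbing time already spent in the query itself (illustrative, not part of the repository):

import time

def throttled(items, per_minute=60):
    interval = 60.0 / per_minute
    last = 0.0
    for item in items:
        wait = interval - (time.time() - last)
        if wait > 0:
            time.sleep(wait)   # only sleep off the remainder of the slot
        last = time.time()
        yield item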
emichael/PyREM | pyrem/host.py | RemoteHost.run | def run(self, command, **kwargs):
"""Run a command on the remote host.
This is just a wrapper around ``RemoteTask(self.hostname, ...)``
"""
return RemoteTask(self.hostname, command,
identity_file=self._identity_file, **kwargs) | python | def run(self, command, **kwargs):
"""Run a command on the remote host.
This is just a wrapper around ``RemoteTask(self.hostname, ...)``
"""
return RemoteTask(self.hostname, command,
identity_file=self._identity_file, **kwargs) | [
"def",
"run",
"(",
"self",
",",
"command",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"RemoteTask",
"(",
"self",
".",
"hostname",
",",
"command",
",",
"identity_file",
"=",
"self",
".",
"_identity_file",
",",
"*",
"*",
"kwargs",
")"
] | Run a command on the remote host.
This is just a wrapper around ``RemoteTask(self.hostname, ...)`` | [
"Run",
"a",
"command",
"on",
"the",
"remote",
"host",
"."
] | train | https://github.com/emichael/PyREM/blob/2609249ead197cd9496d164f4998ca9985503579/pyrem/host.py#L53-L59 |
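A hypothetical usage sketch. The identity_file keyword and the start()/wait() calls are assumptions drawn from the _identity_file attribute and PyREM's task module, neither of which this record shows:

from pyrem.host import RemoteHost

host = RemoteHost("node1", identity_file="~/.ssh/id_rsa")  # kwarg assumed
task = host.run(["uname", "-a"])
task.start()
task.wait()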
emichael/PyREM | pyrem/host.py | RemoteHost._rsync_cmd | def _rsync_cmd(self):
"""Helper method to generate base rsync command."""
cmd = ['rsync']
if self._identity_file:
cmd += ['-e', 'ssh -i ' + os.path.expanduser(self._identity_file)]
return cmd | python | def _rsync_cmd(self):
"""Helper method to generate base rsync command."""
cmd = ['rsync']
if self._identity_file:
cmd += ['-e', 'ssh -i ' + os.path.expanduser(self._identity_file)]
return cmd | [
"def",
"_rsync_cmd",
"(",
"self",
")",
":",
"cmd",
"=",
"[",
"'rsync'",
"]",
"if",
"self",
".",
"_identity_file",
":",
"cmd",
"+=",
"[",
"'-e'",
",",
"'ssh -i '",
"+",
"os",
".",
"path",
".",
"expanduser",
"(",
"self",
".",
"_identity_file",
")",
"]",
"return",
"cmd"
] | Helper method to generate base rsync command. | [
"Helper",
"method",
"to",
"generate",
"base",
"rsync",
"command",
"."
] | train | https://github.com/emichael/PyREM/blob/2609249ead197cd9496d164f4998ca9985503579/pyrem/host.py#L61-L66 |
emichael/PyREM | pyrem/host.py | RemoteHost.send_file | def send_file(self, file_name, remote_destination=None, **kwargs):
"""Send a file to a remote host with rsync.
Args:
file_name (str): The relative location of the file on the local
host.
remote_destination (str): The destination for the file on the remote
host. If `None`, will be assumed to be the same as
**file_name**. Default `None`.
**kwargs: Passed to ``SubprocessTask``'s init method.
Return:
``pyrem.task.SubprocessTask``: The resulting task.
"""
if not remote_destination:
remote_destination = file_name
return SubprocessTask(
self._rsync_cmd() +
['-ut', file_name, '%s:%s' % (self.hostname, remote_destination)],
**kwargs) | python | def send_file(self, file_name, remote_destination=None, **kwargs):
"""Send a file to a remote host with rsync.
Args:
file_name (str): The relative location of the file on the local
host.
remote_destination (str): The destination for the file on the remote
host. If `None`, will be assumed to be the same as
**file_name**. Default `None`.
**kwargs: Passed to ``SubprocessTask``'s init method.
Return:
``pyrem.task.SubprocessTask``: The resulting task.
"""
if not remote_destination:
remote_destination = file_name
return SubprocessTask(
self._rsync_cmd() +
['-ut', file_name, '%s:%s' % (self.hostname, remote_destination)],
**kwargs) | [
"def",
"send_file",
"(",
"self",
",",
"file_name",
",",
"remote_destination",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"remote_destination",
":",
"remote_destination",
"=",
"file_name",
"return",
"SubprocessTask",
"(",
"self",
".",
"_rsync_cmd",
"(",
")",
"+",
"[",
"'-ut'",
",",
"file_name",
",",
"'%s:%s'",
"%",
"(",
"self",
".",
"hostname",
",",
"remote_destination",
")",
"]",
",",
"*",
"*",
"kwargs",
")"
] | Send a file to a remote host with rsync.
Args:
file_name (str): The relative location of the file on the local
host.
remote_destination (str): The destination for the file on the remote
host. If `None`, will be assumed to be the same as
**file_name**. Default `None`.
**kwargs: Passed to ``SubprocessTask``'s init method.
Return:
``pyrem.task.SubprocessTask``: The resulting task. | [
"Send",
"a",
"file",
"to",
"a",
"remote",
"host",
"with",
"rsync",
"."
] | train | https://github.com/emichael/PyREM/blob/2609249ead197cd9496d164f4998ca9985503579/pyrem/host.py#L68-L90 |
emichael/PyREM | pyrem/host.py | RemoteHost.get_file | def get_file(self, file_name, local_destination=None, **kwargs):
"""Get a file from a remote host with rsync.
Args:
file_name (str): The relative location of the file on the remote
host.
local_destination (str): The destination for the file on the local
host. If `None`, will be assumed to be the same as
**file_name**. Default `None`.
**kwargs: Passed to ``SubprocessTask``'s init method.
Return:
``pyrem.task.SubprocessTask``: The resulting task.
"""
if not local_destination:
local_destination = file_name
return SubprocessTask(
self._rsync_cmd() +
['-ut', '%s:%s' % (self.hostname, file_name), local_destination],
**kwargs) | python | def get_file(self, file_name, local_destination=None, **kwargs):
"""Get a file from a remote host with rsync.
Args:
file_name (str): The relative location of the file on the remote
host.
local_destination (str): The destination for the file on the local
host. If `None`, will be assumed to be the same as
**file_name**. Default `None`.
**kwargs: Passed to ``SubprocessTask``'s init method.
Return:
``pyrem.task.SubprocessTask``: The resulting task.
"""
if not local_destination:
local_destination = file_name
return SubprocessTask(
self._rsync_cmd() +
['-ut', '%s:%s' % (self.hostname, file_name), local_destination],
**kwargs) | [
"def",
"get_file",
"(",
"self",
",",
"file_name",
",",
"local_destination",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"local_destination",
":",
"local_destination",
"=",
"file_name",
"return",
"SubprocessTask",
"(",
"self",
".",
"_rsync_cmd",
"(",
")",
"+",
"[",
"'-ut'",
",",
"'%s:%s'",
"%",
"(",
"self",
".",
"hostname",
",",
"file_name",
")",
",",
"local_destination",
"]",
",",
"*",
"*",
"kwargs",
")"
] | Get a file from a remote host with rsync.
Args:
file_name (str): The relative location of the file on the remote
host.
local_destination (str): The destination for the file on the local
host. If `None`, will be assumed to be the same as
**file_name**. Default `None`.
**kwargs: Passed to ``SubprocessTask``'s init method.
Return:
``pyrem.task.SubprocessTask``: The resulting task. | [
"Get",
"a",
"file",
"from",
"a",
"remote",
"host",
"with",
"rsync",
"."
] | train | https://github.com/emichael/PyREM/blob/2609249ead197cd9496d164f4998ca9985503579/pyrem/host.py#L92-L114 |
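`get_file` mirrors `send_file`: the remote `host:path` becomes the rsync source and the local path the destination. A one-function sketch, again assuming a plain `['rsync']` base command:

```python
# Pull direction: remote source first, local destination last.
def get_file_argv(hostname, file_name, local_destination=None):
    if not local_destination:
        local_destination = file_name
    return ['rsync', '-ut', '%s:%s' % (hostname, file_name), local_destination]

print(get_file_argv('node1', 'results.log'))
# ['rsync', '-ut', 'node1:results.log', 'results.log']
```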
Clinical-Genomics/trailblazer | trailblazer/mip/config.py | ConfigHandler.make_config | def make_config(self, data: dict):
"""Make a MIP config."""
self.validate_config(data)
config_data = self.prepare_config(data)
return config_data | python | def make_config(self, data: dict):
"""Make a MIP config."""
self.validate_config(data)
config_data = self.prepare_config(data)
return config_data | [
"def",
"make_config",
"(",
"self",
",",
"data",
":",
"dict",
")",
":",
"self",
".",
"validate_config",
"(",
"data",
")",
"config_data",
"=",
"self",
".",
"prepare_config",
"(",
"data",
")",
"return",
"config_data"
] | Make a MIP config. | [
"Make",
"a",
"MIP",
"config",
"."
] | train | https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/mip/config.py#L43-L47 |
Clinical-Genomics/trailblazer | trailblazer/mip/config.py | ConfigHandler.validate_config | def validate_config(data: dict) -> dict:
"""Convert to MIP config format."""
errors = ConfigSchema().validate(data)
if errors:
for field, messages in errors.items():
if isinstance(messages, dict):
for level, sample_errors in messages.items():
sample_id = data['samples'][level]['sample_id']
for sub_field, sub_messages in sample_errors.items():
LOG.error(f"{sample_id} -> {sub_field}: {', '.join(sub_messages)}")
else:
LOG.error(f"{field}: {', '.join(messages)}")
raise ConfigError('invalid config input', errors=errors) | python | def validate_config(data: dict) -> dict:
"""Convert to MIP config format."""
errors = ConfigSchema().validate(data)
if errors:
for field, messages in errors.items():
if isinstance(messages, dict):
for level, sample_errors in messages.items():
sample_id = data['samples'][level]['sample_id']
for sub_field, sub_messages in sample_errors.items():
LOG.error(f"{sample_id} -> {sub_field}: {', '.join(sub_messages)}")
else:
LOG.error(f"{field}: {', '.join(messages)}")
raise ConfigError('invalid config input', errors=errors) | [
"def",
"validate_config",
"(",
"data",
":",
"dict",
")",
"->",
"dict",
":",
"errors",
"=",
"ConfigSchema",
"(",
")",
".",
"validate",
"(",
"data",
")",
"if",
"errors",
":",
"for",
"field",
",",
"messages",
"in",
"errors",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"messages",
",",
"dict",
")",
":",
"for",
"level",
",",
"sample_errors",
"in",
"messages",
".",
"items",
"(",
")",
":",
"sample_id",
"=",
"data",
"[",
"'samples'",
"]",
"[",
"level",
"]",
"[",
"'sample_id'",
"]",
"for",
"sub_field",
",",
"sub_messages",
"in",
"sample_errors",
".",
"items",
"(",
")",
":",
"LOG",
".",
"error",
"(",
"f\"{sample_id} -> {sub_field}: {', '.join(sub_messages)}\"",
")",
"else",
":",
"LOG",
".",
"error",
"(",
"f\"{field}: {', '.join(messages)}\"",
")",
"raise",
"ConfigError",
"(",
"'invalid config input'",
",",
"errors",
"=",
"errors",
")"
] | Validate data against the MIP config schema. | [
"Convert",
"to",
"MIP",
"config",
"format",
"."
] | train | https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/mip/config.py#L50-L62 |
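Note that despite the `-> dict` annotation, `validate_config` never returns the error dict: it either raises `ConfigError` or falls through and returns `None`. The traversal it performs on marshmallow-style errors can be run standalone; the error shape below is inferred from the code, not from trailblazer's documentation:

```python
# Walk a marshmallow-style error dict the way validate_config does:
# top-level fields map to message lists, while per-sample errors map
# list indices to nested {field: [messages]} dicts.
data = {'samples': [{'sample_id': 'sample_1'}]}
errors = {
    'family': ['Missing data for required field.'],
    'samples': {0: {'phenotype': ['Not a valid choice.']}},
}

for field, messages in errors.items():
    if isinstance(messages, dict):
        for level, sample_errors in messages.items():
            sample_id = data['samples'][level]['sample_id']
            for sub_field, sub_messages in sample_errors.items():
                print(f"{sample_id} -> {sub_field}: {', '.join(sub_messages)}")
    else:
        print(f"{field}: {', '.join(messages)}")
# family: Missing data for required field.
# sample_1 -> phenotype: Not a valid choice.
```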
Clinical-Genomics/trailblazer | trailblazer/mip/config.py | ConfigHandler.prepare_config | def prepare_config(data: dict) -> dict:
"""Prepare the config data."""
data_copy = deepcopy(data)
# handle single sample cases with 'unknown' phenotype
if len(data_copy['samples']) == 1:
if data_copy['samples'][0]['phenotype'] == 'unknown':
LOG.info("setting 'unknown' phenotype to 'unaffected'")
data_copy['samples'][0]['phenotype'] = 'unaffected'
# set the mother/father to '0' if they are not set for a sample
for sample_data in data_copy['samples']:
sample_data['mother'] = sample_data.get('mother') or '0'
sample_data['father'] = sample_data.get('father') or '0'
if sample_data['analysis_type'] == 'wgs' and sample_data.get('capture_kit') is None:
sample_data['capture_kit'] = DEFAULT_CAPTURE_KIT
return data_copy | python | def prepare_config(data: dict) -> dict:
"""Prepare the config data."""
data_copy = deepcopy(data)
# handle single sample cases with 'unknown' phenotype
if len(data_copy['samples']) == 1:
if data_copy['samples'][0]['phenotype'] == 'unknown':
LOG.info("setting 'unknown' phenotype to 'unaffected'")
data_copy['samples'][0]['phenotype'] = 'unaffected'
# set the mother/father to '0' if they are not set for a sample
for sample_data in data_copy['samples']:
sample_data['mother'] = sample_data.get('mother') or '0'
sample_data['father'] = sample_data.get('father') or '0'
if sample_data['analysis_type'] == 'wgs' and sample_data.get('capture_kit') is None:
sample_data['capture_kit'] = DEFAULT_CAPTURE_KIT
return data_copy | [
"def",
"prepare_config",
"(",
"data",
":",
"dict",
")",
"->",
"dict",
":",
"data_copy",
"=",
"deepcopy",
"(",
"data",
")",
"# handle single sample cases with 'unknown' phenotype",
"if",
"len",
"(",
"data_copy",
"[",
"'samples'",
"]",
")",
"==",
"1",
":",
"if",
"data_copy",
"[",
"'samples'",
"]",
"[",
"0",
"]",
"[",
"'phenotype'",
"]",
"==",
"'unknown'",
":",
"LOG",
".",
"info",
"(",
"\"setting 'unknown' phenotype to 'unaffected'\"",
")",
"data_copy",
"[",
"'samples'",
"]",
"[",
"0",
"]",
"[",
"'phenotype'",
"]",
"=",
"'unaffected'",
"# set the mother/father to '0' if they are not set for a sample",
"for",
"sample_data",
"in",
"data_copy",
"[",
"'samples'",
"]",
":",
"sample_data",
"[",
"'mother'",
"]",
"=",
"sample_data",
".",
"get",
"(",
"'mother'",
")",
"or",
"'0'",
"sample_data",
"[",
"'father'",
"]",
"=",
"sample_data",
".",
"get",
"(",
"'father'",
")",
"or",
"'0'",
"if",
"sample_data",
"[",
"'analysis_type'",
"]",
"==",
"'wgs'",
"and",
"sample_data",
".",
"get",
"(",
"'capture_kit'",
")",
"is",
"None",
":",
"sample_data",
"[",
"'capture_kit'",
"]",
"=",
"DEFAULT_CAPTURE_KIT",
"return",
"data_copy"
] | Prepare the config data. | [
"Prepare",
"the",
"config",
"data",
"."
] | train | https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/mip/config.py#L65-L79 |
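The normalization in `prepare_config` is easy to see end to end. Below is a trimmed, dependency-free rerun with the `LOG` call dropped; the `DEFAULT_CAPTURE_KIT` value is a placeholder, not trailblazer's actual default:

```python
from copy import deepcopy

DEFAULT_CAPTURE_KIT = 'default_kit.bed'  # placeholder value, assumed

def prepare_config(data):
    data_copy = deepcopy(data)
    # a lone sample with 'unknown' phenotype is downgraded to 'unaffected'
    if len(data_copy['samples']) == 1:
        if data_copy['samples'][0]['phenotype'] == 'unknown':
            data_copy['samples'][0]['phenotype'] = 'unaffected'
    for sample_data in data_copy['samples']:
        # absent parents become the pedigree placeholder '0'
        sample_data['mother'] = sample_data.get('mother') or '0'
        sample_data['father'] = sample_data.get('father') or '0'
        if sample_data['analysis_type'] == 'wgs' and sample_data.get('capture_kit') is None:
            sample_data['capture_kit'] = DEFAULT_CAPTURE_KIT
    return data_copy

raw = {'samples': [{'sample_id': 'ADM1', 'phenotype': 'unknown',
                    'analysis_type': 'wgs'}]}
print(prepare_config(raw)['samples'][0])
# {'sample_id': 'ADM1', 'phenotype': 'unaffected', 'mother': '0',
#  'father': '0', 'capture_kit': 'default_kit.bed'}
```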
Clinical-Genomics/trailblazer | trailblazer/mip/config.py | ConfigHandler.save_config | def save_config(self, data: dict) -> Path:
"""Save a config to the expected location."""
out_dir = Path(self.families_dir) / data['family']
out_dir.mkdir(parents=True, exist_ok=True)
out_path = out_dir / 'pedigree.yaml'
dump = ruamel.yaml.round_trip_dump(data, indent=4, block_seq_indent=2)
out_path.write_text(dump)
return out_path | python | def save_config(self, data: dict) -> Path:
"""Save a config to the expected location."""
out_dir = Path(self.families_dir) / data['family']
out_dir.mkdir(parents=True, exist_ok=True)
out_path = out_dir / 'pedigree.yaml'
dump = ruamel.yaml.round_trip_dump(data, indent=4, block_seq_indent=2)
out_path.write_text(dump)
return out_path | [
"def",
"save_config",
"(",
"self",
",",
"data",
":",
"dict",
")",
"->",
"Path",
":",
"out_dir",
"=",
"Path",
"(",
"self",
".",
"families_dir",
")",
"/",
"data",
"[",
"'family'",
"]",
"out_dir",
".",
"mkdir",
"(",
"parents",
"=",
"True",
",",
"exist_ok",
"=",
"True",
")",
"out_path",
"=",
"out_dir",
"/",
"'pedigree.yaml'",
"dump",
"=",
"ruamel",
".",
"yaml",
".",
"round_trip_dump",
"(",
"data",
",",
"indent",
"=",
"4",
",",
"block_seq_indent",
"=",
"2",
")",
"out_path",
".",
"write_text",
"(",
"dump",
")",
"return",
"out_path"
] | Save a config to the expected location. | [
"Save",
"a",
"config",
"to",
"the",
"expected",
"location",
"."
] | train | https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/mip/config.py#L81-L88 |
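A runnable approximation of `save_config`. It swaps `ruamel.yaml.round_trip_dump` for PyYAML's `safe_dump` to stay dependency-light, which loses ruamel's round-trip formatting but keeps the same path layout (`<families_dir>/<family>/pedigree.yaml`):

```python
from pathlib import Path
import tempfile

import yaml  # assumption: PyYAML; trailblazer itself uses ruamel.yaml

def save_config(families_dir, data):
    out_dir = Path(families_dir) / data['family']
    out_dir.mkdir(parents=True, exist_ok=True)
    out_path = out_dir / 'pedigree.yaml'
    out_path.write_text(yaml.safe_dump(data, indent=4))
    return out_path

with tempfile.TemporaryDirectory() as tmp:
    path = save_config(tmp, {'family': 'fam1', 'samples': []})
    print(path)              # .../fam1/pedigree.yaml
    print(path.read_text())  # family: fam1, samples: []
```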
andrewda/frc-livescore | livescore/simpleocr_utils/processor.py | _broadcast | def _broadcast(src_processor, src_atr_name, dest_processors, dest_atr_name, transform_function):
"""
To be used exclusively by create_broadcast.
A broadcast function gets an attribute on the src_processor and
sets it (possibly under a different name) on dest_processors
"""
value = getattr(src_processor, src_atr_name)
value = transform_function(value)
for d in dest_processors:
setattr(d, dest_atr_name, value) | python | def _broadcast(src_processor, src_atr_name, dest_processors, dest_atr_name, transform_function):
"""
To be used exclusively by create_broadcast.
A broadcast function gets an attribute on the src_processor and
sets it (possibly under a different name) on dest_processors
"""
value = getattr(src_processor, src_atr_name)
value = transform_function(value)
for d in dest_processors:
setattr(d, dest_atr_name, value) | [
"def",
"_broadcast",
"(",
"src_processor",
",",
"src_atr_name",
",",
"dest_processors",
",",
"dest_atr_name",
",",
"transform_function",
")",
":",
"value",
"=",
"getattr",
"(",
"src_processor",
",",
"src_atr_name",
")",
"value",
"=",
"transform_function",
"(",
"value",
")",
"for",
"d",
"in",
"dest_processors",
":",
"setattr",
"(",
"d",
",",
"dest_atr_name",
",",
"value",
")"
] | To be used exclusively by create_broadcast.
A broadcast function gets an attribute on the src_processor and
sets it (possibly under a different name) on dest_processors | [
"To",
"be",
"used",
"exclusively",
"by",
"create_broadcast",
".",
"A",
"broadcast",
"function",
"gets",
"an",
"attribute",
"on",
"the",
"src_processor",
"and",
"sets",
"it",
"(",
"possibly",
"under",
"a",
"different",
"name",
")",
"on",
"dest_processors"
] | train | https://github.com/andrewda/frc-livescore/blob/71594cd6d2c8b6c5feb3889bb05552d09b8128b1/livescore/simpleocr_utils/processor.py#L13-L22 |
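`_broadcast` needs nothing processor-specific, so it can be demonstrated on plain objects:

```python
# Copy one object's attribute onto several others, transforming on the way.
class Box:
    pass

def _broadcast(src_processor, src_atr_name, dest_processors, dest_atr_name,
               transform_function):
    value = transform_function(getattr(src_processor, src_atr_name))
    for d in dest_processors:
        setattr(d, dest_atr_name, value)

src, a, b = Box(), Box(), Box()
src.scale = 2
_broadcast(src, 'scale', [a, b], 'scale_pct', lambda v: v * 100)
print(a.scale_pct, b.scale_pct)  # 200 200
```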
andrewda/frc-livescore | livescore/simpleocr_utils/processor.py | create_broadcast | def create_broadcast(src_atr_name, dest_processors, dest_atr_name=None, transform_function=lambda x: x):
"""
This method creates a function, intended to be called as a
Processor posthook, that copies some of the processor's attributes
to other processors
"""
from functools import partial
    if dest_atr_name is None:
dest_atr_name = src_atr_name
if not hasattr(dest_processors, "__iter__"): # a single processor was given instead
dest_processors = [dest_processors]
return partial(_broadcast, src_atr_name=src_atr_name, dest_processors=dest_processors, dest_atr_name=dest_atr_name,
transform_function=transform_function) | python | def create_broadcast(src_atr_name, dest_processors, dest_atr_name=None, transform_function=lambda x: x):
"""
This method creates a function, intended to be called as a
Processor posthook, that copies some of the processor's attributes
to other processors
"""
from functools import partial
    if dest_atr_name is None:
dest_atr_name = src_atr_name
if not hasattr(dest_processors, "__iter__"): # a single processor was given instead
dest_processors = [dest_processors]
return partial(_broadcast, src_atr_name=src_atr_name, dest_processors=dest_processors, dest_atr_name=dest_atr_name,
transform_function=transform_function) | [
"def",
"create_broadcast",
"(",
"src_atr_name",
",",
"dest_processors",
",",
"dest_atr_name",
"=",
"None",
",",
"transform_function",
"=",
"lambda",
"x",
":",
"x",
")",
":",
"from",
"functools",
"import",
"partial",
"if",
"dest_atr_name",
"==",
"None",
":",
"dest_atr_name",
"=",
"src_atr_name",
"if",
"not",
"hasattr",
"(",
"dest_processors",
",",
"\"__iter__\"",
")",
":",
"# a single processor was given instead",
"dest_processors",
"=",
"[",
"dest_processors",
"]",
"return",
"partial",
"(",
"_broadcast",
",",
"src_atr_name",
"=",
"src_atr_name",
",",
"dest_processors",
"=",
"dest_processors",
",",
"dest_atr_name",
"=",
"dest_atr_name",
",",
"transform_function",
"=",
"transform_function",
")"
] | This method creates a function, intended to be called as a
Processor posthook, that copies some of the processor's attributes
to other processors | [
"This",
"method",
"creates",
"a",
"function",
"intended",
"to",
"be",
"called",
"as",
"a",
"Processor",
"posthook",
"that",
"copies",
"some",
"of",
"the",
"processor",
"s",
"attributes",
"to",
"other",
"processors"
] | train | https://github.com/andrewda/frc-livescore/blob/71594cd6d2c8b6c5feb3889bb05552d09b8128b1/livescore/simpleocr_utils/processor.py#L25-L37 |
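Putting the pair together: `create_broadcast` builds a `functools.partial` with everything bound except the source processor, so the framework can later invoke the result as a posthook with just the finished processor. A self-contained sketch, with plain objects again standing in for processors:

```python
from functools import partial

class Box:
    pass

def _broadcast(src_processor, src_atr_name, dest_processors, dest_atr_name,
               transform_function):
    value = transform_function(getattr(src_processor, src_atr_name))
    for d in dest_processors:
        setattr(d, dest_atr_name, value)

def create_broadcast(src_atr_name, dest_processors, dest_atr_name=None,
                     transform_function=lambda x: x):
    if dest_atr_name is None:
        dest_atr_name = src_atr_name
    if not hasattr(dest_processors, "__iter__"):  # single processor given
        dest_processors = [dest_processors]
    return partial(_broadcast, src_atr_name=src_atr_name,
                   dest_processors=dest_processors,
                   dest_atr_name=dest_atr_name,
                   transform_function=transform_function)

src, downstream = Box(), Box()
src.threshold = 0.5
posthook = create_broadcast('threshold', downstream)
posthook(src)  # the framework would call this after src finishes
print(downstream.threshold)  # 0.5
```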
andrewda/frc-livescore | livescore/simpleocr_utils/processor.py | Processor.get_parameters | def get_parameters(self):
"""returns a dictionary with the processor's stored parameters"""
parameter_names = self.PARAMETERS.keys()
    # look up each stored parameter's current value on this processor
    parameter_values = [getattr(self, n) for n in parameter_names]
return dict(zip(parameter_names, parameter_values)) | python | def get_parameters(self):
"""returns a dictionary with the processor's stored parameters"""
parameter_names = self.PARAMETERS.keys()
    # look up each stored parameter's current value on this processor
    parameter_values = [getattr(self, n) for n in parameter_names]
return dict(zip(parameter_names, parameter_values)) | [
"def",
"get_parameters",
"(",
"self",
")",
":",
"parameter_names",
"=",
"self",
".",
"PARAMETERS",
".",
"keys",
"(",
")",
"# TODO: Unresolved reference for processor",
"parameter_values",
"=",
"[",
"getattr",
"(",
"processor",
",",
"n",
")",
"for",
"n",
"in",
"parameter_names",
"]",
"return",
"dict",
"(",
"zip",
"(",
"parameter_names",
",",
"parameter_values",
")",
")"
] | returns a dictionary with the processor's stored parameters | [
"returns",
"a",
"dictionary",
"with",
"the",
"processor",
"s",
"stored",
"parameters"
] | train | https://github.com/andrewda/frc-livescore/blob/71594cd6d2c8b6c5feb3889bb05552d09b8128b1/livescore/simpleocr_utils/processor.py#L66-L71 |
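`get_parameters` snapshots the current value of every name declared in `PARAMETERS` off the instance itself. On a stand-in class:

```python
# Toy is a hypothetical Processor-like class declaring two parameters.
class Toy:
    PARAMETERS = {'scale': 1.0, 'offset': 0}

    def __init__(self):
        self.scale = 2.0
        self.offset = 5

    def get_parameters(self):
        parameter_names = self.PARAMETERS.keys()
        parameter_values = [getattr(self, n) for n in parameter_names]
        return dict(zip(parameter_names, parameter_values))

print(Toy().get_parameters())  # {'scale': 2.0, 'offset': 5}
```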
andrewda/frc-livescore | livescore/simpleocr_utils/processor.py | Processor.set_parameters | def set_parameters(self, **args):
"""sets the processor stored parameters"""
for k, v in self.PARAMETERS.items():
new_value = args.get(k)
        if new_value is not None:
if not _same_type(new_value, v):
raise Exception(
"On processor {0}, argument {1} takes something like {2}, but {3} was given".format(self, k, v,
new_value))
setattr(self, k, new_value)
not_used = set(args.keys()).difference(set(self.PARAMETERS.keys()))
not_given = set(self.PARAMETERS.keys()).difference(set(args.keys()))
return not_used, not_given | python | def set_parameters(self, **args):
"""sets the processor stored parameters"""
for k, v in self.PARAMETERS.items():
new_value = args.get(k)
        if new_value is not None:
if not _same_type(new_value, v):
raise Exception(
"On processor {0}, argument {1} takes something like {2}, but {3} was given".format(self, k, v,
new_value))
setattr(self, k, new_value)
not_used = set(args.keys()).difference(set(self.PARAMETERS.keys()))
not_given = set(self.PARAMETERS.keys()).difference(set(args.keys()))
return not_used, not_given | [
"def",
"set_parameters",
"(",
"self",
",",
"*",
"*",
"args",
")",
":",
"for",
"k",
",",
"v",
"in",
"self",
".",
"PARAMETERS",
".",
"items",
"(",
")",
":",
"new_value",
"=",
"args",
".",
"get",
"(",
"k",
")",
"if",
"new_value",
"!=",
"None",
":",
"if",
"not",
"_same_type",
"(",
"new_value",
",",
"v",
")",
":",
"raise",
"Exception",
"(",
"\"On processor {0}, argument {1} takes something like {2}, but {3} was given\"",
".",
"format",
"(",
"self",
",",
"k",
",",
"v",
",",
"new_value",
")",
")",
"setattr",
"(",
"self",
",",
"k",
",",
"new_value",
")",
"not_used",
"=",
"set",
"(",
"args",
".",
"keys",
"(",
")",
")",
".",
"difference",
"(",
"set",
"(",
"self",
".",
"PARAMETERS",
".",
"keys",
"(",
")",
")",
")",
"not_given",
"=",
"set",
"(",
"self",
".",
"PARAMETERS",
".",
"keys",
"(",
")",
")",
".",
"difference",
"(",
"set",
"(",
"args",
".",
"keys",
"(",
")",
")",
")",
"return",
"not_used",
",",
"not_given"
] | sets the processor stored parameters | [
"sets",
"the",
"processor",
"stored",
"parameters"
] | train | https://github.com/andrewda/frc-livescore/blob/71594cd6d2c8b6c5feb3889bb05552d09b8128b1/livescore/simpleocr_utils/processor.py#L73-L85 |
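`set_parameters` type-checks each incoming value against the declared default and reports which kwargs were ignored and which parameters were left untouched. `_same_type` is not part of this entry, so a rough isinstance-based stand-in is assumed:

```python
def _same_type(a, b):
    # Assumed stand-in: the real helper is defined elsewhere in simpleocr.
    return isinstance(a, type(b))

class Toy:
    PARAMETERS = {'scale': 1.0}
    scale = 1.0

    def set_parameters(self, **args):
        for k, v in self.PARAMETERS.items():
            new_value = args.get(k)
            if new_value is not None:
                if not _same_type(new_value, v):
                    raise Exception(
                        "On processor {0}, argument {1} takes something like "
                        "{2}, but {3} was given".format(self, k, v, new_value))
                setattr(self, k, new_value)
        not_used = set(args) - set(self.PARAMETERS)
        not_given = set(self.PARAMETERS) - set(args)
        return not_used, not_given

t = Toy()
print(t.set_parameters(scale=3.0, bogus=1))  # ({'bogus'}, set())
print(t.scale)                               # 3.0
```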
andrewda/frc-livescore | livescore/simpleocr_utils/processor.py | ProcessorStack.get_parameters | def get_parameters(self):
"""gets from all wrapped processors"""
d = {}
for p in self.processors:
parameter_names = list(p.PARAMETERS.keys())
parameter_values = [getattr(p, n) for n in parameter_names]
d.update(dict(zip(parameter_names, parameter_values)))
return d | python | def get_parameters(self):
"""gets from all wrapped processors"""
d = {}
for p in self.processors:
parameter_names = list(p.PARAMETERS.keys())
parameter_values = [getattr(p, n) for n in parameter_names]
d.update(dict(zip(parameter_names, parameter_values)))
return d | [
"def",
"get_parameters",
"(",
"self",
")",
":",
"d",
"=",
"{",
"}",
"for",
"p",
"in",
"self",
".",
"processors",
":",
"parameter_names",
"=",
"list",
"(",
"p",
".",
"PARAMETERS",
".",
"keys",
"(",
")",
")",
"parameter_values",
"=",
"[",
"getattr",
"(",
"p",
",",
"n",
")",
"for",
"n",
"in",
"parameter_names",
"]",
"d",
".",
"update",
"(",
"dict",
"(",
"zip",
"(",
"parameter_names",
",",
"parameter_values",
")",
")",
")",
"return",
"d"
] | gets from all wrapped processors | [
"gets",
"from",
"all",
"wrapped",
"processors"
] | train | https://github.com/andrewda/frc-livescore/blob/71594cd6d2c8b6c5feb3889bb05552d09b8128b1/livescore/simpleocr_utils/processor.py#L128-L135 |
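`ProcessorStack.get_parameters` merges each wrapped processor's parameter dict; on a name collision, processors later in the stack win because of `dict.update` ordering. Stand-alone:

```python
# Two hypothetical processors with disjoint parameter sets.
class P1:
    PARAMETERS = {'scale': 1.0}
    scale = 2.0

class P2:
    PARAMETERS = {'offset': 0}
    offset = 7

def stack_parameters(processors):
    d = {}
    for p in processors:
        parameter_names = list(p.PARAMETERS.keys())
        parameter_values = [getattr(p, n) for n in parameter_names]
        d.update(dict(zip(parameter_names, parameter_values)))
    return d

print(stack_parameters([P1(), P2()]))  # {'scale': 2.0, 'offset': 7}
```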