Dataset columns (one row per Python function):

- repository_name: string, length 7 to 55
- func_path_in_repository: string, length 4 to 223
- func_name: string, length 1 to 134
- whole_func_string: string, length 75 to 104k
- language: string, 1 class ("python")
- func_code_string: string, length 75 to 104k (identical to whole_func_string in these rows)
- func_code_tokens: sequence, length 19 to 28.4k
- func_documentation_string: string, length 1 to 46.9k
- func_documentation_tokens: sequence, length 1 to 1.97k
- split_name: string, 1 class ("train")
- func_code_url: string, length 87 to 315

Each row below is shown as: repository_name | func_path_in_repository | func_name | language | split_name | func_code_url, followed by the function source (whole_func_string, which contains func_documentation_string as its docstring).
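The columns above follow the CodeSearchNet-style layout, one row per extracted Python function. A hedged sketch of iterating such a dump once it is saved locally (the file name, and the use of the Hugging Face datasets JSON loader, are assumptions rather than anything stated by this table):

    from datasets import load_dataset  # any JSON/parquet reader would work as well

    ds = load_dataset('json', data_files='functions.train.jsonl', split='train')
    for row in ds:
        print(row['repository_name'], row['func_name'], row['func_code_url'])
        code = row['func_code_string']           # identical to whole_func_string in these rows
        doc = row['func_documentation_string']   # the docstring extracted from the code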
sprockets/sprockets.http | sprockets/http/app.py | CallbackManager.start | python | train | https://github.com/sprockets/sprockets.http/blob/8baa4cdc1fa35a162ee226fd6cc4170a0ca0ecd3/sprockets/http/app.py#L87-L104

def start(self, io_loop):
    """
    Run the ``before_run`` callbacks and queue the ``on_start`` callbacks.

    :param tornado.ioloop.IOLoop io_loop: loop to start the app on.
    """
    for callback in self.before_run_callbacks:
        try:
            callback(self.tornado_application, io_loop)
        except Exception:
            self.logger.error('before_run callback %r cancelled start',
                              callback, exc_info=1)
            self.stop(io_loop)
            raise
    for callback in self.on_start_callbacks:
        io_loop.spawn_callback(callback, self.tornado_application, io_loop)
sprockets/sprockets.http | sprockets/http/app.py | CallbackManager.stop | python | train | https://github.com/sprockets/sprockets.http/blob/8baa4cdc1fa35a162ee226fd6cc4170a0ca0ecd3/sprockets/http/app.py#L106-L136

def stop(self, io_loop):
    """
    Asynchronously stop the application.

    :param tornado.ioloop.IOLoop io_loop: loop to run until all
        callbacks, timeouts, and queued calls are complete

    Call this method to start the application shutdown process.
    The IOLoop will be stopped once the application is completely
    shut down.
    """
    running_async = False
    shutdown = _ShutdownHandler(io_loop)
    for callback in self.on_shutdown_callbacks:
        try:
            maybe_future = callback(self.tornado_application)
            if asyncio.iscoroutine(maybe_future):
                maybe_future = asyncio.create_task(maybe_future)
            if concurrent.is_future(maybe_future):
                shutdown.add_future(maybe_future)
                running_async = True
        except Exception as error:
            self.logger.warning('exception raised from shutdown '
                                'callback %r, ignored: %s',
                                callback, error, exc_info=1)
    if not running_async:
        shutdown.on_shutdown_ready()
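stop() above accepts shutdown callbacks that may be plain callables, coroutines, or future-returning functions, and only reports shutdown complete once every asynchronous one has finished. A minimal standalone sketch of that same pattern against plain asyncio (the function name run_shutdown_callbacks and the sample callbacks are illustrative, not part of sprockets.http):

    import asyncio

    async def run_shutdown_callbacks(app, callbacks, logger=None):
        # Same pattern as CallbackManager.stop: call each callback, turn
        # coroutines into tasks, and wait for every asynchronous result
        # before declaring shutdown complete.
        pending = []
        for callback in callbacks:
            try:
                maybe_future = callback(app)
                if asyncio.iscoroutine(maybe_future):
                    maybe_future = asyncio.ensure_future(maybe_future)
                if asyncio.isfuture(maybe_future):
                    pending.append(maybe_future)
            except Exception as error:
                if logger is not None:
                    logger.warning('shutdown callback %r failed, ignored: %s',
                                   callback, error)
        if pending:
            await asyncio.gather(*pending, return_exceptions=True)

    def sync_cleanup(app):
        print('sync cleanup for', app)

    async def async_cleanup(app):
        await asyncio.sleep(0.1)
        print('async cleanup for', app)

    asyncio.run(run_shutdown_callbacks('my-app', [sync_cleanup, async_cleanup]))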
sunlightlabs/django-mediasync | mediasync/__init__.py | combine_files | python | train | https://github.com/sunlightlabs/django-mediasync/blob/aa8ce4cfff757bbdb488463c64c0863cca6a1932/mediasync/__init__.py#L65-L99

def combine_files(joinfile, sourcefiles, client):
    """
    Given a combo file name (joinfile), combine the sourcefiles into a single
    monolithic file.

    Returns a string containing the combo file, or None if the specified
    file can not be combo'd.
    """
    from mediasync.conf import msettings

    joinfile = joinfile.strip('/')

    if joinfile.endswith('.css'):
        dirname = msettings['CSS_PATH'].strip('/')
        separator = '\n'
    elif joinfile.endswith('.js'):
        dirname = msettings['JS_PATH'].strip('/')
        separator = ';\n'
    else:
        # By-pass this file since we only join CSS and JS.
        return None

    buffer = cStringIO.StringIO()

    for sourcefile in sourcefiles:
        sourcepath = os.path.join(client.media_root, dirname, sourcefile)
        if os.path.isfile(sourcepath):
            f = open(sourcepath)
            buffer.write(f.read())
            f.close()
            buffer.write(separator)

    filedata = buffer.getvalue()
    buffer.close()

    return (filedata, dirname)
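combine_files() only needs an object exposing a media_root attribute plus the mediasync settings to resolve source paths. A hedged sketch of driving it directly (DummyClient and the setting values are illustrative assumptions, not part of django-mediasync):

    from mediasync import combine_files
    from mediasync.conf import msettings

    class DummyClient(object):
        # The only attribute combine_files actually reads.
        media_root = '/path/to/project/media'

    msettings['CSS_PATH'] = 'styles'   # setting names taken from the function body above
    msettings['JS_PATH'] = 'scripts'

    result = combine_files('all.css', ['reset.css', 'layout.css'], DummyClient())
    if result is not None:
        filedata, dirname = result     # combined CSS text and the directory it belongs under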
sunlightlabs/django-mediasync | mediasync/__init__.py | sync | python | train | https://github.com/sunlightlabs/django-mediasync/blob/aa8ce4cfff757bbdb488463c64c0863cca6a1932/mediasync/__init__.py#L101-L173

def sync(client=None, force=False, verbose=True):
    """ Let's face it... pushing this stuff to S3 is messy.
        A lot of different things need to be calculated for each file
        and they have to be in a certain order as some variables rely
        on others.
    """
    from mediasync import backends
    from mediasync.conf import msettings
    from mediasync.signals import pre_sync, post_sync

    # create client connection
    if client is None:
        client = backends.client()

    client.open()
    client.serve_remote = True

    # send pre-sync signal
    pre_sync.send(sender=client)

    #
    # sync joined media
    #

    for joinfile, sourcefiles in msettings['JOINED'].iteritems():

        filedata = combine_files(joinfile, sourcefiles, client)
        if filedata is None:
            # combine_files() is only interested in CSS/JS files.
            continue
        filedata, dirname = filedata

        content_type = mimetypes.guess_type(joinfile)[0] or msettings['DEFAULT_MIMETYPE']

        remote_path = joinfile
        if dirname:
            remote_path = "%s/%s" % (dirname, remote_path)

        if client.process_and_put(filedata, content_type, remote_path, force=force):
            if verbose:
                print "[%s] %s" % (content_type, remote_path)

    #
    # sync static media
    #

    for dirname in os.listdir(client.media_root):

        dirpath = os.path.abspath(os.path.join(client.media_root, dirname))

        if os.path.isdir(dirpath):

            for filename in listdir_recursive(dirpath):

                # calculate local and remote paths
                filepath = os.path.join(dirpath, filename)
                remote_path = "%s/%s" % (dirname, filename)

                content_type = mimetypes.guess_type(filepath)[0] or msettings['DEFAULT_MIMETYPE']

                if not is_syncable_file(os.path.basename(filename)) or not os.path.isfile(filepath):
                    continue  # hidden file or directory, do not upload

                filedata = open(filepath, 'rb').read()

                if client.process_and_put(filedata, content_type, remote_path, force=force):
                    if verbose:
                        print "[%s] %s" % (content_type, remote_path)

    # send post-sync signal while client is still open
    post_sync.send(sender=client)

    client.close()
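sync() feeds combine_files() from a JOINED mapping of combo file name to source files. A hedged illustration of the shape that setting takes (the key name JOINED comes from the function body above; the file names are made up):

    # Assumed shape of msettings['JOINED']: combo file name -> list of source files
    JOINED = {
        'all.css': ['reset.css', 'grid.css', 'layout.css'],
        'all.js': ['jquery.js', 'app.js'],
    }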
VitorRamos/cpufreq | cpufreq/cpufreq.py | cpuFreq.enable_all_cpu | python | train | https://github.com/VitorRamos/cpufreq/blob/1246e35a8ceeb823df804af34730f7b15dc89204/cpufreq/cpufreq.py#L93-L99

def enable_all_cpu(self):
    '''
    Enable all offline cpus
    '''
    for cpu in self.__get_ranges("offline"):
        fpath = path.join("cpu%i"%cpu,"online")
        self.__write_cpu_file(fpath, b"1")
VitorRamos/cpufreq | cpufreq/cpufreq.py | cpuFreq.reset | python | train | https://github.com/VitorRamos/cpufreq/blob/1246e35a8ceeb823df804af34730f7b15dc89204/cpufreq/cpufreq.py#L101-L120

def reset(self, rg=None):
    '''
    Enable all offline cpus, and reset max and min frequencies files

    rg: range or list of threads to reset
    '''
    if type(rg) == int:
        rg = [rg]
    to_reset = rg if rg else self.__get_ranges("present")
    self.enable_cpu(to_reset)

    for cpu in to_reset:
        fpath = path.join("cpu%i"%cpu,"cpufreq","cpuinfo_max_freq")
        max_freq = self.__read_cpu_file(fpath)
        fpath = path.join("cpu%i"%cpu,"cpufreq","cpuinfo_min_freq")
        min_freq = self.__read_cpu_file(fpath)
        fpath = path.join("cpu%i"%cpu,"cpufreq","scaling_max_freq")
        self.__write_cpu_file(fpath, max_freq.encode())
        fpath = path.join("cpu%i"%cpu,"cpufreq","scaling_min_freq")
        self.__write_cpu_file(fpath, min_freq.encode())
VitorRamos/cpufreq | cpufreq/cpufreq.py | cpuFreq.disable_hyperthread | python | train | https://github.com/VitorRamos/cpufreq/blob/1246e35a8ceeb823df804af34730f7b15dc89204/cpufreq/cpufreq.py#L122-L135

def disable_hyperthread(self):
    '''
    Disable all threads attached to the same core
    '''
    to_disable = []
    online_cpus = self.__get_ranges("online")
    for cpu in online_cpus:
        fpath = path.join("cpu%i"%cpu,"topology","thread_siblings_list")
        to_disable += self.__get_ranges(fpath)[1:]
    to_disable = set(to_disable) & set(online_cpus)
    for cpu in to_disable:
        fpath = path.join("cpu%i"%cpu,"online")
        self.__write_cpu_file(fpath, b"0")
VitorRamos/cpufreq | cpufreq/cpufreq.py | cpuFreq.disable_cpu | python | train | https://github.com/VitorRamos/cpufreq/blob/1246e35a8ceeb823df804af34730f7b15dc89204/cpufreq/cpufreq.py#L137-L148

def disable_cpu(self, rg):
    '''
    Disable cpus

    rg: range or list of threads to disable
    '''
    if type(rg) == int:
        rg = [rg]
    to_disable = set(rg) & set(self.__get_ranges("online"))
    for cpu in to_disable:
        fpath = path.join("cpu%i"%cpu,"online")
        self.__write_cpu_file(fpath, b"0")
VitorRamos/cpufreq | cpufreq/cpufreq.py | cpuFreq.enable_cpu | python | train | https://github.com/VitorRamos/cpufreq/blob/1246e35a8ceeb823df804af34730f7b15dc89204/cpufreq/cpufreq.py#L150-L161

def enable_cpu(self, rg):
    '''
    Enable cpus

    rg: range or list of threads to enable
    '''
    if type(rg) == int:
        rg = [rg]
    to_disable = set(rg) & set(self.__get_ranges("offline"))
    for cpu in to_disable:
        fpath = path.join("cpu%i"%cpu,"online")
        self.__write_cpu_file(fpath, b"1")
VitorRamos/cpufreq | cpufreq/cpufreq.py | cpuFreq.set_frequencies | python | train | https://github.com/VitorRamos/cpufreq/blob/1246e35a8ceeb823df804af34730f7b15dc89204/cpufreq/cpufreq.py#L163-L186

def set_frequencies(self, freq, rg=None, setMaxfeq=True, setMinfreq=True, setSpeed=True):
    '''
    Set cores frequencies

    freq: int frequency in KHz
    rg: list of range of cores
    setMaxfeq: set the maximum frequency, default to true
    setMinfreq: set the minimum frequency, default to true
    setSpeed: only set the frequency, default to true
    '''
    to_change = self.__get_ranges("online")
    if type(rg) == int:
        rg = [rg]
    if rg: to_change = set(rg) & set(self.__get_ranges("online"))
    for cpu in to_change:
        if setSpeed:
            fpath = path.join("cpu%i"%cpu,"cpufreq","scaling_setspeed")
            self.__write_cpu_file(fpath, str(freq).encode())
        if setMinfreq:
            fpath = path.join("cpu%i"%cpu,"cpufreq","scaling_min_freq")
            self.__write_cpu_file(fpath, str(freq).encode())
        if setMaxfeq:
            fpath = path.join("cpu%i"%cpu,"cpufreq","scaling_max_freq")
            self.__write_cpu_file(fpath, str(freq).encode())
VitorRamos/cpufreq | cpufreq/cpufreq.py | cpuFreq.set_governors | python | train | https://github.com/VitorRamos/cpufreq/blob/1246e35a8ceeb823df804af34730f7b15dc89204/cpufreq/cpufreq.py#L188-L201

def set_governors(self, gov, rg=None):
    '''
    Set governors

    gov: str name of the governor
    rg: list of range of cores
    '''
    to_change = self.__get_ranges("online")
    if type(rg) == int:
        rg = [rg]
    if rg: to_change = set(rg) & set(self.__get_ranges("online"))
    for cpu in to_change:
        fpath = path.join("cpu%i"%cpu,"cpufreq","scaling_governor")
        self.__write_cpu_file(fpath, gov.encode())
VitorRamos/cpufreq | cpufreq/cpufreq.py | cpuFreq.get_available_frequencies | python | train | https://github.com/VitorRamos/cpufreq/blob/1246e35a8ceeb823df804af34730f7b15dc89204/cpufreq/cpufreq.py#L203-L209

def get_available_frequencies(self):
    '''
    Get all possible frequencies
    '''
    fpath = path.join("cpu0","cpufreq","scaling_available_frequencies")
    data = self.__read_cpu_file(fpath).rstrip("\n").split()
    return data
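All of the cpuFreq methods above read and write the Linux sysfs cpufreq files, so they normally require root privileges. A hedged usage sketch, assuming the class is importable as cpufreq.cpuFreq exactly as defined in cpufreq/cpufreq.py (the governor name and the idea of pinning to the lowest speed are just an example):

    from cpufreq import cpuFreq   # assumed import path for the class shown above

    c = cpuFreq()

    # Pin every online core to the lowest advertised frequency.
    freqs = sorted(int(f) for f in c.get_available_frequencies())
    c.set_governors("userspace")      # a governor that honours explicit setspeed writes
    c.set_frequencies(freqs[0])

    # Later: re-enable any disabled cores and restore the min/max limits.
    c.reset()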
codeforamerica/three | three/core.py | Three.configure | python | train | https://github.com/codeforamerica/three/blob/67b4a4b233a57aa7995d01f6b0f69c2e85aea6c0/three/core.py#L66-L87

def configure(self, endpoint=None, **kwargs):
    """Configure a previously initialized instance of the class."""
    if endpoint:
        kwargs['endpoint'] = endpoint
    keywords = self._keywords.copy()
    keywords.update(kwargs)
    if 'endpoint' in kwargs:
        # Then we need to correctly format the endpoint.
        endpoint = kwargs['endpoint']
        keywords['endpoint'] = self._configure_endpoint(endpoint)
    self.api_key = keywords['api_key'] or self._global_api_key()
    self.endpoint = keywords['endpoint']
    self.format = keywords['format'] or 'json'
    self.jurisdiction = keywords['jurisdiction']
    self.proxy = keywords['proxy']
    self.discovery_url = keywords['discovery'] or None
    # Use a custom requests session and set the correct SSL version if
    # specified.
    self.session = requests.Session()
    if 'ssl_version' in keywords:
        self.session.mount('https://', SSLAdapter(keywords['ssl_version']))
codeforamerica/three | three/core.py | Three._configure_endpoint | python | train | https://github.com/codeforamerica/three/blob/67b4a4b233a57aa7995d01f6b0f69c2e85aea6c0/three/core.py#L89-L95

def _configure_endpoint(self, endpoint):
    """Configure the endpoint with a schema and end slash."""
    if not endpoint.startswith('http'):
        endpoint = 'https://' + endpoint
    if not endpoint.endswith('/'):
        endpoint += '/'
    return endpoint
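_configure_endpoint only normalizes the endpoint string; a quick worked illustration of its behavior (the host name is the illustrative one used in this class's doctests):

    # 'api.city.gov'          -> 'https://api.city.gov/'
    # 'http://api.city.gov'   -> 'http://api.city.gov/'   (an existing scheme is kept)
    # 'https://api.city.gov/' -> returned unchanged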
codeforamerica/three | three/core.py | Three.get | python | train | https://github.com/codeforamerica/three/blob/67b4a4b233a57aa7995d01f6b0f69c2e85aea6c0/three/core.py#L107-L118

def get(self, *args, **kwargs):
    """Perform a get request."""
    if 'convert' in kwargs:
        conversion = kwargs.pop('convert')
    else:
        conversion = True
    kwargs = self._get_keywords(**kwargs)
    url = self._create_path(*args)
    request = self.session.get(url, params=kwargs)
    content = request.content
    self._request = request
    return self.convert(content, conversion)
codeforamerica/three | three/core.py | Three._get_keywords | python | train | https://github.com/codeforamerica/three/blob/67b4a4b233a57aa7995d01f6b0f69c2e85aea6c0/three/core.py#L120-L140

def _get_keywords(self, **kwargs):
    """Format GET request parameters and keywords."""
    if self.jurisdiction and 'jurisdiction_id' not in kwargs:
        kwargs['jurisdiction_id'] = self.jurisdiction
    if 'count' in kwargs:
        kwargs['page_size'] = kwargs.pop('count')
    if 'start' in kwargs:
        start = kwargs.pop('start')
        if 'end' in kwargs:
            end = kwargs.pop('end')
        else:
            end = date.today().strftime('%m-%d-%Y')
        start, end = self._format_dates(start, end)
        kwargs['start_date'] = start
        kwargs['end_date'] = end
    elif 'between' in kwargs:
        start, end = kwargs.pop('between')
        start, end = self._format_dates(start, end)
        kwargs['start_date'] = start
        kwargs['end_date'] = end
    return kwargs
codeforamerica/three | three/core.py | Three._format_dates | python | train | https://github.com/codeforamerica/three/blob/67b4a4b233a57aa7995d01f6b0f69c2e85aea6c0/three/core.py#L142-L146

def _format_dates(self, start, end):
    """Format start and end dates."""
    start = self._split_date(start)
    end = self._split_date(end)
    return start, end
codeforamerica/three | three/core.py | Three._split_date | python | train | https://github.com/codeforamerica/three/blob/67b4a4b233a57aa7995d01f6b0f69c2e85aea6c0/three/core.py#L148-L156

def _split_date(self, time):
    """Split apart a date string."""
    if isinstance(time, str):
        month, day, year = [int(t) for t in re.split(r'-|/', time)]
        if year < 100:
            # Quick hack for dates < 2000.
            year += 2000
        time = date(year, month, day)
    return time.strftime('%Y-%m-%dT%H:%M:%SZ')
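Taken together, _format_dates and _split_date turn human-entered dates into the ISO-style timestamps Open311 expects. A quick worked example (the input value is illustrative):

    # '3-17-12' -> month=3, day=17, year=12 -> year bumped to 2012
    #           -> date(2012, 3, 17).strftime('%Y-%m-%dT%H:%M:%SZ')
    #           -> '2012-03-17T00:00:00Z'
    # A date/datetime object passed in is simply formatted with the same strftime.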
codeforamerica/three | three/core.py | Three.convert | python | train | https://github.com/codeforamerica/three/blob/67b4a4b233a57aa7995d01f6b0f69c2e85aea6c0/three/core.py#L158-L170

def convert(self, content, conversion):
    """Convert content to Python data structures."""
    if not conversion:
        data = content
    elif self.format == 'json':
        data = json.loads(content)
    elif self.format == 'xml':
        content = xml(content)
        first = list(content.keys())[0]
        data = content[first]
    else:
        data = content
    return data
codeforamerica/three | three/core.py | Three.discovery | python | train | https://github.com/codeforamerica/three/blob/67b4a4b233a57aa7995d01f6b0f69c2e85aea6c0/three/core.py#L172-L192

def discovery(self, url=None):
    """
    Retrieve the standard discovery file that provides routing
    information.

    >>> Three().discovery()
    {'discovery': 'data'}
    """
    if url:
        data = self.session.get(url).content
    elif self.discovery_url:
        response = self.session.get(self.discovery_url)
        if self.format == 'xml':
            # Because, SF doesn't follow the spec.
            data = xml(response.text)
        else:
            # Spec calls for discovery always allowing JSON.
            data = response.json()
    else:
        data = self.get('discovery')
    return data
codeforamerica/three | three/core.py | Three.services | python | train | https://github.com/codeforamerica/three/blob/67b4a4b233a57aa7995d01f6b0f69c2e85aea6c0/three/core.py#L194-L205

def services(self, code=None, **kwargs):
    """
    Retrieve information about available services. You can also enter a
    specific service code argument.

    >>> Three().services()
    {'all': {'service_code': 'data'}}

    >>> Three().services('033')
    {'033': {'service_code': 'data'}}
    """
    data = self.get('services', code, **kwargs)
    return data
codeforamerica/three | three/core.py | Three.requests | python | train | https://github.com/codeforamerica/three/blob/67b4a4b233a57aa7995d01f6b0f69c2e85aea6c0/three/core.py#L207-L220

def requests(self, code=None, **kwargs):
    """
    Retrieve open requests. You can also enter a specific service code
    argument.

    >>> Three('api.city.gov').requests()
    {'all': {'requests': 'data'}}

    >>> Three('api.city.gov').requests('123')
    {'123': {'requests': 'data'}}
    """
    if code:
        kwargs['service_code'] = code
    data = self.get('requests', **kwargs)
    return data
codeforamerica/three | three/core.py | Three.request | python | train | https://github.com/codeforamerica/three/blob/67b4a4b233a57aa7995d01f6b0f69c2e85aea6c0/three/core.py#L222-L230

def request(self, id, **kwargs):
    """
    Retrieve a specific request using its service code ID.

    >>> Three('api.city.gov').request('12345')
    {'request': {'service_code': {'12345': 'data'}}}
    """
    data = self.get('requests', id, **kwargs)
    return data
codeforamerica/three | three/core.py | Three.post | python | train | https://github.com/codeforamerica/three/blob/67b4a4b233a57aa7995d01f6b0f69c2e85aea6c0/three/core.py#L232-L257

def post(self, service_code='0', **kwargs):
    """
    Post a new Open311 request.

    >>> t = Three('api.city.gov')
    >>> t.post('123', address='123 Any St', name='Zach Williams',
    ...        phone='555-5555', description='My issue description.',
    ...        media=open('photo.png', 'rb'))
    {'successful': {'request': 'post'}}
    """
    kwargs['service_code'] = service_code
    kwargs = self._post_keywords(**kwargs)
    media = kwargs.pop('media', None)
    if media:
        files = {'media': media}
    else:
        files = None
    url = self._create_path('requests')
    self.post_response = self.session.post(url,
                                           data=kwargs, files=files)
    content = self.post_response.content
    if self.post_response.status_code >= 500:
        conversion = False
    else:
        conversion = True
    return self.convert(content, conversion)
codeforamerica/three | three/core.py | Three._post_keywords | python | train | https://github.com/codeforamerica/three/blob/67b4a4b233a57aa7995d01f6b0f69c2e85aea6c0/three/core.py#L259-L272

def _post_keywords(self, **kwargs):
    """Configure keyword arguments for Open311 POST requests."""
    if self.jurisdiction and 'jurisdiction_id' not in kwargs:
        kwargs['jurisdiction_id'] = self.jurisdiction
    if 'address' in kwargs:
        address = kwargs.pop('address')
        kwargs['address_string'] = address
    if 'name' in kwargs:
        first, last = kwargs.pop('name').split(' ')
        kwargs['first_name'] = first
        kwargs['last_name'] = last
    if 'api_key' not in kwargs:
        kwargs['api_key'] = self.api_key
    return kwargs
codeforamerica/three | three/core.py | Three.token | python | train | https://github.com/codeforamerica/three/blob/67b4a4b233a57aa7995d01f6b0f69c2e85aea6c0/three/core.py#L274-L282

def token(self, id, **kwargs):
    """
    Retrieve a service request ID from a token.

    >>> Three('api.city.gov').token('12345')
    {'service_request_id': {'for': {'token': '12345'}}}
    """
    data = self.get('tokens', id, **kwargs)
    return data
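The public Three methods above already carry doctest-style examples; stitched together they give a small end-to-end sketch of a client session. The endpoint, codes, and field values are the illustrative ones from those doctests (not a live API), the top-level import path and the constructor keywords are assumptions that mirror the keys read in configure():

    from three import Three   # assumed top-level import for the class

    t = Three('api.city.gov', api_key='my-key', format='json')

    t.services()                  # list available service types
    t.requests('123')             # open requests for service code 123
    t.requests(between=['03-01-2012', '03-17-2012'])   # mapped to start_date/end_date
    t.request('12345')            # a single request by ID

    t.post('123', address='123 Any St', name='Zach Williams',
           phone='555-5555', description='My issue description.')
    t.token('12345')              # map a token back to a service request ID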
matllubos/django-is-core | is_core/utils/compatibility.py | CompatibilityWidgetMixin.build_attrs | def build_attrs(self, base_attrs, extra_attrs=None, **kwargs):
"""
Helper function for building an attribute dictionary.
This is combination of the same method from Django<=1.10 and Django1.11+
"""
attrs = dict(base_attrs, **kwargs)
if extra_attrs:
attrs.update(extra_attrs)
return attrs | python | def build_attrs(self, base_attrs, extra_attrs=None, **kwargs):
"""
Helper function for building an attribute dictionary.
This is combination of the same method from Django<=1.10 and Django1.11+
"""
attrs = dict(base_attrs, **kwargs)
if extra_attrs:
attrs.update(extra_attrs)
return attrs | [
"def",
"build_attrs",
"(",
"self",
",",
"base_attrs",
",",
"extra_attrs",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"attrs",
"=",
"dict",
"(",
"base_attrs",
",",
"*",
"*",
"kwargs",
")",
"if",
"extra_attrs",
":",
"attrs",
".",
"update",
"(",
"extra_attrs",
")",
"return",
"attrs"
] | Helper function for building an attribute dictionary.
This is combination of the same method from Django<=1.10 and Django1.11+ | [
"Helper",
"function",
"for",
"building",
"an",
"attribute",
"dictionary",
".",
"This",
"is",
"combination",
"of",
"the",
"same",
"method",
"from",
"Django<",
"=",
"1",
".",
"10",
"and",
"Django1",
".",
"11",
"+"
] | train | https://github.com/matllubos/django-is-core/blob/3f87ec56a814738683c732dce5f07e0328c2300d/is_core/utils/compatibility.py#L74-L83 |
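Because build_attrs is a pure dictionary merge, its behaviour can be checked directly; this sketch repeats the merge order shown above (keyword arguments applied first, then extra_attrs overriding them) with made-up widget attributes.

def build_attrs(base_attrs, extra_attrs=None, **kwargs):
    # Keyword arguments are folded in first, extra_attrs wins on conflicts.
    attrs = dict(base_attrs, **kwargs)
    if extra_attrs:
        attrs.update(extra_attrs)
    return attrs

print(build_attrs({'class': 'input'}, extra_attrs={'id': 'x'}, required='required'))
# {'class': 'input', 'required': 'required', 'id': 'x'}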
xapple/plumbing | plumbing/csv_tables.py | CSVTable.to_dataframe | def to_dataframe(self, **kwargs):
"""Load up the CSV file as a pandas dataframe"""
return pandas.io.parsers.read_csv(self.path, sep=self.d, **kwargs) | python | def to_dataframe(self, **kwargs):
"""Load up the CSV file as a pandas dataframe"""
return pandas.io.parsers.read_csv(self.path, sep=self.d, **kwargs) | [
"def",
"to_dataframe",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"pandas",
".",
"io",
".",
"parsers",
".",
"read_csv",
"(",
"self",
".",
"path",
",",
"sep",
"=",
"self",
".",
"d",
",",
"*",
"*",
"kwargs",
")"
] | Load up the CSV file as a pandas dataframe | [
"Load",
"up",
"the",
"CSV",
"file",
"as",
"a",
"pandas",
"dataframe"
] | train | https://github.com/xapple/plumbing/blob/4a7706c7722f5996d0ca366f191aff9ac145880a/plumbing/csv_tables.py#L61-L63 |
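A hedged usage sketch for the to_dataframe wrapper above: 'counts.csv' and the comma delimiter stand in for whatever self.path and self.d hold on a real CSVTable, and pandas.read_csv is used here as the public entry point for the same reader the method calls.

import pandas

# Write a tiny CSV first so the example is self-contained, then load it
# the way to_dataframe does.
with open('counts.csv', 'w') as handle:
    handle.write('sample,count\nA,10\nB,20\n')

df = pandas.read_csv('counts.csv', sep=',')
print(df)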
moonso/interval_tree | interval_tree/interval_tree.py | IntervalTree.get_elementary_intervals | def get_elementary_intervals(self, features):
"""Generates a sorted list of elementary intervals"""
coords = []
try:
for interval in features:
if len(interval) != 3:
raise SyntaxError('Interval malformed %s. Allways specify start and end position for interval.' % str(interval))
coords.extend([interval[0],interval[1]])
except IndexError:
raise SyntaxError('Interval malformed %s. Allways specify start and end position for interval.' % str(interval))
coords = list(set(coords))
coords.sort()
return coords | python | def get_elementary_intervals(self, features):
"""Generates a sorted list of elementary intervals"""
coords = []
try:
for interval in features:
if len(interval) != 3:
raise SyntaxError('Interval malformed %s. Allways specify start and end position for interval.' % str(interval))
coords.extend([interval[0],interval[1]])
except IndexError:
raise SyntaxError('Interval malformed %s. Allways specify start and end position for interval.' % str(interval))
coords = list(set(coords))
coords.sort()
return coords | [
"def",
"get_elementary_intervals",
"(",
"self",
",",
"features",
")",
":",
"coords",
"=",
"[",
"]",
"try",
":",
"for",
"interval",
"in",
"features",
":",
"if",
"len",
"(",
"interval",
")",
"!=",
"3",
":",
"raise",
"SyntaxError",
"(",
"'Interval malformed %s. Allways specify start and end position for interval.'",
"%",
"str",
"(",
"interval",
")",
")",
"coords",
".",
"extend",
"(",
"[",
"interval",
"[",
"0",
"]",
",",
"interval",
"[",
"1",
"]",
"]",
")",
"except",
"IndexError",
":",
"raise",
"SyntaxError",
"(",
"'Interval malformed %s. Allways specify start and end position for interval.'",
"%",
"str",
"(",
"interval",
")",
")",
"coords",
"=",
"list",
"(",
"set",
"(",
"coords",
")",
")",
"coords",
".",
"sort",
"(",
")",
"return",
"coords"
] | Generates a sorted list of elementary intervals | [
"Generates",
"a",
"sorted",
"list",
"of",
"elementary",
"intervals"
] | train | https://github.com/moonso/interval_tree/blob/c588177f5bd90bd9e2f1447216c78b024353f7a1/interval_tree/interval_tree.py#L50-L62 |
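A tiny worked example of the elementary-interval extraction described above, assuming each feature is a (start, stop, id) triple as the docstrings suggest; the set comprehension reproduces the unique-and-sort step without the class.

# Unique, sorted endpoints of all features, matching get_elementary_intervals.
features = [(10, 20, 'a'), (15, 30, 'b'), (30, 40, 'c')]
coords = sorted({c for interval in features for c in interval[:2]})
print(coords)  # [10, 15, 20, 30, 40]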
moonso/interval_tree | interval_tree/interval_tree.py | IntervalTree.recursive_build_tree | def recursive_build_tree(self, intervals):
"""
recursively builds a BST based on the elementary intervals.
each node is an array: [interval value, left descendent nodes, right descendent nodes, [ids]].
nodes with no descendents have a -1 value in left/right descendent positions.
for example, a node with two empty descendents:
[500, interval value
[-1,-1,-1,['id5','id6']], left descendent
[-1,-1,-1,['id4']], right descendent
['id1',id2',id3']] data values
"""
center = int(round(len(intervals) / 2))
left = intervals[:center]
right = intervals[center + 1:]
node = intervals[center]
if len(left) > 1:
left = self.recursive_build_tree(left)
elif len(left) == 1:
left = [left[0],[-1,-1,-1,[]],[-1,-1,-1,[]],[]]
else:
left = [-1,-1,-1,[]]
if len(right) > 1:
right = self.recursive_build_tree(right)
elif len(right) == 1:
right = [right[0],[-1,-1,-1,[]],[-1,-1,-1,[]],[]]
else:
right = [-1,-1,-1,[]]
return [node, left, right, []] | python | def recursive_build_tree(self, intervals):
"""
recursively builds a BST based on the elementary intervals.
each node is an array: [interval value, left descendent nodes, right descendent nodes, [ids]].
nodes with no descendents have a -1 value in left/right descendent positions.
for example, a node with two empty descendents:
[500, interval value
[-1,-1,-1,['id5','id6']], left descendent
[-1,-1,-1,['id4']], right descendent
['id1',id2',id3']] data values
"""
center = int(round(len(intervals) / 2))
left = intervals[:center]
right = intervals[center + 1:]
node = intervals[center]
if len(left) > 1:
left = self.recursive_build_tree(left)
elif len(left) == 1:
left = [left[0],[-1,-1,-1,[]],[-1,-1,-1,[]],[]]
else:
left = [-1,-1,-1,[]]
if len(right) > 1:
right = self.recursive_build_tree(right)
elif len(right) == 1:
right = [right[0],[-1,-1,-1,[]],[-1,-1,-1,[]],[]]
else:
right = [-1,-1,-1,[]]
return [node, left, right, []] | [
"def",
"recursive_build_tree",
"(",
"self",
",",
"intervals",
")",
":",
"center",
"=",
"int",
"(",
"round",
"(",
"len",
"(",
"intervals",
")",
"/",
"2",
")",
")",
"left",
"=",
"intervals",
"[",
":",
"center",
"]",
"right",
"=",
"intervals",
"[",
"center",
"+",
"1",
":",
"]",
"node",
"=",
"intervals",
"[",
"center",
"]",
"if",
"len",
"(",
"left",
")",
">",
"1",
":",
"left",
"=",
"self",
".",
"recursive_build_tree",
"(",
"left",
")",
"elif",
"len",
"(",
"left",
")",
"==",
"1",
":",
"left",
"=",
"[",
"left",
"[",
"0",
"]",
",",
"[",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
",",
"[",
"]",
"]",
",",
"[",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
",",
"[",
"]",
"]",
",",
"[",
"]",
"]",
"else",
":",
"left",
"=",
"[",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
",",
"[",
"]",
"]",
"if",
"len",
"(",
"right",
")",
">",
"1",
":",
"right",
"=",
"self",
".",
"recursive_build_tree",
"(",
"right",
")",
"elif",
"len",
"(",
"right",
")",
"==",
"1",
":",
"right",
"=",
"[",
"right",
"[",
"0",
"]",
",",
"[",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
",",
"[",
"]",
"]",
",",
"[",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
",",
"[",
"]",
"]",
",",
"[",
"]",
"]",
"else",
":",
"right",
"=",
"[",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
",",
"[",
"]",
"]",
"return",
"[",
"node",
",",
"left",
",",
"right",
",",
"[",
"]",
"]"
] | recursively builds a BST based on the elementary intervals.
each node is an array: [interval value, left descendent nodes, right descendent nodes, [ids]].
nodes with no descendents have a -1 value in left/right descendent positions.
for example, a node with two empty descendents:
[500, interval value
[-1,-1,-1,['id5','id6']], left descendent
[-1,-1,-1,['id4']], right descendent
['id1',id2',id3']] data values | [
"recursively",
"builds",
"a",
"BST",
"based",
"on",
"the",
"elementary",
"intervals",
".",
"each",
"node",
"is",
"an",
"array",
":",
"[",
"interval",
"value",
"left",
"descendent",
"nodes",
"right",
"descendent",
"nodes",
"[",
"ids",
"]]",
".",
"nodes",
"with",
"no",
"descendents",
"have",
"a",
"-",
"1",
"value",
"in",
"left",
"/",
"right",
"descendent",
"positions",
"."
] | train | https://github.com/moonso/interval_tree/blob/c588177f5bd90bd9e2f1447216c78b024353f7a1/interval_tree/interval_tree.py#L64-L97 |
moonso/interval_tree | interval_tree/interval_tree.py | IntervalTree.pt_within | def pt_within(self, pt, subject):
"""Accessory function to check if a point is within a range"""
try:
if pt >= int(subject[0]) and pt <= int(subject[1]):
return True
except ValueError:
raise ValueError('Interval start and stop has to be integers. %s' % str(subject))
return False | python | def pt_within(self, pt, subject):
"""Accessory function to check if a point is within a range"""
try:
if pt >= int(subject[0]) and pt <= int(subject[1]):
return True
except ValueError:
raise ValueError('Interval start and stop has to be integers. %s' % str(subject))
return False | [
"def",
"pt_within",
"(",
"self",
",",
"pt",
",",
"subject",
")",
":",
"try",
":",
"if",
"pt",
">=",
"int",
"(",
"subject",
"[",
"0",
"]",
")",
"and",
"pt",
"<=",
"int",
"(",
"subject",
"[",
"1",
"]",
")",
":",
"return",
"True",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"'Interval start and stop has to be integers. %s'",
"%",
"str",
"(",
"subject",
")",
")",
"return",
"False"
] | Accessory function to check if a point is within a range | [
"Accessory",
"function",
"to",
"check",
"if",
"a",
"point",
"is",
"within",
"a",
"range"
] | train | https://github.com/moonso/interval_tree/blob/c588177f5bd90bd9e2f1447216c78b024353f7a1/interval_tree/interval_tree.py#L99-L107 |
moonso/interval_tree | interval_tree/interval_tree.py | IntervalTree.is_within | def is_within(self, query, subject):
"""Accessory function to check if a range is fully within another range"""
if self.pt_within(query[0], subject) and self.pt_within(query[1], subject):
return True
return False | python | def is_within(self, query, subject):
"""Accessory function to check if a range is fully within another range"""
if self.pt_within(query[0], subject) and self.pt_within(query[1], subject):
return True
return False | [
"def",
"is_within",
"(",
"self",
",",
"query",
",",
"subject",
")",
":",
"if",
"self",
".",
"pt_within",
"(",
"query",
"[",
"0",
"]",
",",
"subject",
")",
"and",
"self",
".",
"pt_within",
"(",
"query",
"[",
"1",
"]",
",",
"subject",
")",
":",
"return",
"True",
"return",
"False"
] | Accessory function to check if a range is fully within another range | [
"Accessory",
"function",
"to",
"check",
"if",
"a",
"range",
"is",
"fully",
"within",
"another",
"range"
] | train | https://github.com/moonso/interval_tree/blob/c588177f5bd90bd9e2f1447216c78b024353f7a1/interval_tree/interval_tree.py#L109-L114 |
moonso/interval_tree | interval_tree/interval_tree.py | IntervalTree.overlap | def overlap(self, query, subject):
"""Accessory function to check if two ranges overlap"""
if (self.pt_within(query[0], subject) or self.pt_within(query[1], subject) or
self.pt_within(subject[0], query) or self.pt_within(subject[1], query)):
return True
return False | python | def overlap(self, query, subject):
"""Accessory function to check if two ranges overlap"""
if (self.pt_within(query[0], subject) or self.pt_within(query[1], subject) or
self.pt_within(subject[0], query) or self.pt_within(subject[1], query)):
return True
return False | [
"def",
"overlap",
"(",
"self",
",",
"query",
",",
"subject",
")",
":",
"if",
"(",
"self",
".",
"pt_within",
"(",
"query",
"[",
"0",
"]",
",",
"subject",
")",
"or",
"self",
".",
"pt_within",
"(",
"query",
"[",
"1",
"]",
",",
"subject",
")",
"or",
"self",
".",
"pt_within",
"(",
"subject",
"[",
"0",
"]",
",",
"query",
")",
"or",
"self",
".",
"pt_within",
"(",
"subject",
"[",
"1",
"]",
",",
"query",
")",
")",
":",
"return",
"True",
"return",
"False"
] | Accessory function to check if two ranges overlap | [
"Accessory",
"function",
"to",
"check",
"if",
"two",
"ranges",
"overlap"
] | train | https://github.com/moonso/interval_tree/blob/c588177f5bd90bd9e2f1447216c78b024353f7a1/interval_tree/interval_tree.py#L116-L122 |
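The three predicates above (pt_within, is_within, overlap) reduce to endpoint comparisons on (start, stop) pairs with inclusive bounds; this self-contained sketch restates the overlap rule as plain functions and checks a few cases by hand.

def pt_within(pt, subject):
    # Inclusive containment of a point in a (start, stop) pair.
    return int(subject[0]) <= pt <= int(subject[1])

def overlap(query, subject):
    # Two ranges overlap if either holds an endpoint of the other.
    return (pt_within(query[0], subject) or pt_within(query[1], subject) or
            pt_within(subject[0], query) or pt_within(subject[1], query))

print(overlap((5, 12), (10, 20)))   # True  - partial overlap
print(overlap((1, 4), (10, 20)))    # False - disjoint
print(overlap((12, 14), (10, 20)))  # True  - query fully inside subject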
moonso/interval_tree | interval_tree/interval_tree.py | IntervalTree.recursive_insert | def recursive_insert(self, node, coord, data, start, end):
"""Recursively inserts id data into nodes"""
if node[0] != -1:
left = (start, node[0])
right = (node[0], end)
#if left is totally within coord
if self.is_within(left, coord):
node[1][-1].append(data)
elif self.overlap(left, coord):
self.recursive_insert(node[1], coord, data, left[0], left[1])
if self.is_within(right, coord):
node[2][-1].append(data)
elif self.overlap(right, coord):
self.recursive_insert(node[2], coord, data, right[0], right[1]) | python | def recursive_insert(self, node, coord, data, start, end):
"""Recursively inserts id data into nodes"""
if node[0] != -1:
left = (start, node[0])
right = (node[0], end)
#if left is totally within coord
if self.is_within(left, coord):
node[1][-1].append(data)
elif self.overlap(left, coord):
self.recursive_insert(node[1], coord, data, left[0], left[1])
if self.is_within(right, coord):
node[2][-1].append(data)
elif self.overlap(right, coord):
self.recursive_insert(node[2], coord, data, right[0], right[1]) | [
"def",
"recursive_insert",
"(",
"self",
",",
"node",
",",
"coord",
",",
"data",
",",
"start",
",",
"end",
")",
":",
"if",
"node",
"[",
"0",
"]",
"!=",
"-",
"1",
":",
"left",
"=",
"(",
"start",
",",
"node",
"[",
"0",
"]",
")",
"right",
"=",
"(",
"node",
"[",
"0",
"]",
",",
"end",
")",
"#if left is totally within coord",
"if",
"self",
".",
"is_within",
"(",
"left",
",",
"coord",
")",
":",
"node",
"[",
"1",
"]",
"[",
"-",
"1",
"]",
".",
"append",
"(",
"data",
")",
"elif",
"self",
".",
"overlap",
"(",
"left",
",",
"coord",
")",
":",
"self",
".",
"recursive_insert",
"(",
"node",
"[",
"1",
"]",
",",
"coord",
",",
"data",
",",
"left",
"[",
"0",
"]",
",",
"left",
"[",
"1",
"]",
")",
"if",
"self",
".",
"is_within",
"(",
"right",
",",
"coord",
")",
":",
"node",
"[",
"2",
"]",
"[",
"-",
"1",
"]",
".",
"append",
"(",
"data",
")",
"elif",
"self",
".",
"overlap",
"(",
"right",
",",
"coord",
")",
":",
"self",
".",
"recursive_insert",
"(",
"node",
"[",
"2",
"]",
",",
"coord",
",",
"data",
",",
"right",
"[",
"0",
"]",
",",
"right",
"[",
"1",
"]",
")"
] | Recursively inserts id data into nodes | [
"Recursively",
"inserts",
"id",
"data",
"into",
"nodes"
] | train | https://github.com/moonso/interval_tree/blob/c588177f5bd90bd9e2f1447216c78b024353f7a1/interval_tree/interval_tree.py#L124-L139 |
moonso/interval_tree | interval_tree/interval_tree.py | IntervalTree.insert_data | def insert_data(self, node, data, start, end):
"""loops through all the data and inserts them into the empty tree"""
for item in data:
self.recursive_insert(node, [item[0], item[1]], item[-1], start, end) | python | def insert_data(self, node, data, start, end):
"""loops through all the data and inserts them into the empty tree"""
for item in data:
self.recursive_insert(node, [item[0], item[1]], item[-1], start, end) | [
"def",
"insert_data",
"(",
"self",
",",
"node",
",",
"data",
",",
"start",
",",
"end",
")",
":",
"for",
"item",
"in",
"data",
":",
"self",
".",
"recursive_insert",
"(",
"node",
",",
"[",
"item",
"[",
"0",
"]",
",",
"item",
"[",
"1",
"]",
"]",
",",
"item",
"[",
"-",
"1",
"]",
",",
"start",
",",
"end",
")"
] | loops through all the data and inserts them into the empty tree | [
"loops",
"through",
"all",
"the",
"data",
"and",
"inserts",
"them",
"into",
"the",
"empty",
"tree"
] | train | https://github.com/moonso/interval_tree/blob/c588177f5bd90bd9e2f1447216c78b024353f7a1/interval_tree/interval_tree.py#L141-L144 |
moonso/interval_tree | interval_tree/interval_tree.py | IntervalTree.trim_tree | def trim_tree(self, node):
"""trims the tree for any empty data nodes"""
data_len = len(node[-1])
if node[1] == -1 and node[2] == -1:
if data_len == 0:
return 1
else:
return 0
else:
if self.trim_tree(node[1]) == 1:
node[1] = -1
if self.trim_tree(node[2]) == 1:
node[2] = -1
if node[1] == -1 and node[2] == -1:
if data_len == 0:
return 1
else:
return 0 | python | def trim_tree(self, node):
"""trims the tree for any empty data nodes"""
data_len = len(node[-1])
if node[1] == -1 and node[2] == -1:
if data_len == 0:
return 1
else:
return 0
else:
if self.trim_tree(node[1]) == 1:
node[1] = -1
if self.trim_tree(node[2]) == 1:
node[2] = -1
if node[1] == -1 and node[2] == -1:
if data_len == 0:
return 1
else:
return 0 | [
"def",
"trim_tree",
"(",
"self",
",",
"node",
")",
":",
"data_len",
"=",
"len",
"(",
"node",
"[",
"-",
"1",
"]",
")",
"if",
"node",
"[",
"1",
"]",
"==",
"-",
"1",
"and",
"node",
"[",
"2",
"]",
"==",
"-",
"1",
":",
"if",
"data_len",
"==",
"0",
":",
"return",
"1",
"else",
":",
"return",
"0",
"else",
":",
"if",
"self",
".",
"trim_tree",
"(",
"node",
"[",
"1",
"]",
")",
"==",
"1",
":",
"node",
"[",
"1",
"]",
"=",
"-",
"1",
"if",
"self",
".",
"trim_tree",
"(",
"node",
"[",
"2",
"]",
")",
"==",
"1",
":",
"node",
"[",
"2",
"]",
"=",
"-",
"1",
"if",
"node",
"[",
"1",
"]",
"==",
"-",
"1",
"and",
"node",
"[",
"2",
"]",
"==",
"-",
"1",
":",
"if",
"data_len",
"==",
"0",
":",
"return",
"1",
"else",
":",
"return",
"0"
] | trims the tree for any empty data nodes | [
"trims",
"the",
"tree",
"for",
"any",
"empty",
"data",
"nodes"
] | train | https://github.com/moonso/interval_tree/blob/c588177f5bd90bd9e2f1447216c78b024353f7a1/interval_tree/interval_tree.py#L146-L166 |
moonso/interval_tree | interval_tree/interval_tree.py | IntervalTree.find | def find(self, node, interval, start, end):
"""recursively finds ids within a range"""
data = []
if len(interval) != 2:
raise SyntaxError('Interval malformed %s. Allways specify start and end position for interval.' % str(interval))
left = (start, node[0])
right = (node[0], end)
if self.overlap(left, interval):
data.extend(node[-1])
if node[1] != -1:
data.extend(self.find(node[1], interval, left[0], left[1]))
if self.overlap(right, interval):
data.extend(node[-1])
if node[2] != -1:
data.extend(self.find(node[2], interval, right[0], right[1]))
return list(set(data)) | python | def find(self, node, interval, start, end):
"""recursively finds ids within a range"""
data = []
if len(interval) != 2:
raise SyntaxError('Interval malformed %s. Allways specify start and end position for interval.' % str(interval))
left = (start, node[0])
right = (node[0], end)
if self.overlap(left, interval):
data.extend(node[-1])
if node[1] != -1:
data.extend(self.find(node[1], interval, left[0], left[1]))
if self.overlap(right, interval):
data.extend(node[-1])
if node[2] != -1:
data.extend(self.find(node[2], interval, right[0], right[1]))
return list(set(data)) | [
"def",
"find",
"(",
"self",
",",
"node",
",",
"interval",
",",
"start",
",",
"end",
")",
":",
"data",
"=",
"[",
"]",
"if",
"len",
"(",
"interval",
")",
"!=",
"2",
":",
"raise",
"SyntaxError",
"(",
"'Interval malformed %s. Allways specify start and end position for interval.'",
"%",
"str",
"(",
"interval",
")",
")",
"left",
"=",
"(",
"start",
",",
"node",
"[",
"0",
"]",
")",
"right",
"=",
"(",
"node",
"[",
"0",
"]",
",",
"end",
")",
"if",
"self",
".",
"overlap",
"(",
"left",
",",
"interval",
")",
":",
"data",
".",
"extend",
"(",
"node",
"[",
"-",
"1",
"]",
")",
"if",
"node",
"[",
"1",
"]",
"!=",
"-",
"1",
":",
"data",
".",
"extend",
"(",
"self",
".",
"find",
"(",
"node",
"[",
"1",
"]",
",",
"interval",
",",
"left",
"[",
"0",
"]",
",",
"left",
"[",
"1",
"]",
")",
")",
"if",
"self",
".",
"overlap",
"(",
"right",
",",
"interval",
")",
":",
"data",
".",
"extend",
"(",
"node",
"[",
"-",
"1",
"]",
")",
"if",
"node",
"[",
"2",
"]",
"!=",
"-",
"1",
":",
"data",
".",
"extend",
"(",
"self",
".",
"find",
"(",
"node",
"[",
"2",
"]",
",",
"interval",
",",
"right",
"[",
"0",
"]",
",",
"right",
"[",
"1",
"]",
")",
")",
"return",
"list",
"(",
"set",
"(",
"data",
")",
")"
] | recursively finds ids within a range | [
"recursively",
"finds",
"ids",
"within",
"a",
"range"
] | train | https://github.com/moonso/interval_tree/blob/c588177f5bd90bd9e2f1447216c78b024353f7a1/interval_tree/interval_tree.py#L168-L188 |
moonso/interval_tree | interval_tree/interval_tree.py | IntervalTree.find_range | def find_range(self, interval):
"""wrapper for find"""
return self.find(self.tree, interval, self.start, self.end) | python | def find_range(self, interval):
"""wrapper for find"""
return self.find(self.tree, interval, self.start, self.end) | [
"def",
"find_range",
"(",
"self",
",",
"interval",
")",
":",
"return",
"self",
".",
"find",
"(",
"self",
".",
"tree",
",",
"interval",
",",
"self",
".",
"start",
",",
"self",
".",
"end",
")"
] | wrapper for find | [
"wrapper",
"for",
"find"
] | train | https://github.com/moonso/interval_tree/blob/c588177f5bd90bd9e2f1447216c78b024353f7a1/interval_tree/interval_tree.py#L190-L192 |
moonso/interval_tree | interval_tree/interval_tree.py | IntervalTree.pprint | def pprint(self, ind):
"""pretty prints the tree with indentation"""
pp = pprint.PrettyPrinter(indent=ind)
pp.pprint(self.tree) | python | def pprint(self, ind):
"""pretty prints the tree with indentation"""
pp = pprint.PrettyPrinter(indent=ind)
pp.pprint(self.tree) | [
"def",
"pprint",
"(",
"self",
",",
"ind",
")",
":",
"pp",
"=",
"pprint",
".",
"PrettyPrinter",
"(",
"indent",
"=",
"ind",
")",
"pp",
".",
"pprint",
"(",
"self",
".",
"tree",
")"
] | pretty prints the tree with indentation | [
"pretty",
"prints",
"the",
"tree",
"with",
"indentation"
] | train | https://github.com/moonso/interval_tree/blob/c588177f5bd90bd9e2f1447216c78b024353f7a1/interval_tree/interval_tree.py#L194-L197 |
matllubos/django-is-core | is_core/site.py | get_model_core | def get_model_core(model):
"""
Return core view of given model or None
"""
model_label = lower('%s.%s' % (model._meta.app_label, model._meta.object_name))
return registered_model_cores.get(model_label) | python | def get_model_core(model):
"""
Return core view of given model or None
"""
model_label = lower('%s.%s' % (model._meta.app_label, model._meta.object_name))
return registered_model_cores.get(model_label) | [
"def",
"get_model_core",
"(",
"model",
")",
":",
"model_label",
"=",
"lower",
"(",
"'%s.%s'",
"%",
"(",
"model",
".",
"_meta",
".",
"app_label",
",",
"model",
".",
"_meta",
".",
"object_name",
")",
")",
"return",
"registered_model_cores",
".",
"get",
"(",
"model_label",
")"
] | Return core view of given model or None | [
"Return",
"core",
"view",
"of",
"given",
"model",
"or",
"None"
] | train | https://github.com/matllubos/django-is-core/blob/3f87ec56a814738683c732dce5f07e0328c2300d/is_core/site.py#L28-L33 |
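A small illustration of the registry key get_model_core builds; the app label and model name are invented, and plain str.lower stands in for the lower helper imported by the module.

# Hypothetical app label / model name; the lookup key is just the
# lower-cased '<app_label>.<object_name>' pair.
model_label = ('%s.%s' % ('issues', 'Issue')).lower()
print(model_label)  # issues.issue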
vcs-python/libvcs | libvcs/shortcuts.py | create_repo | def create_repo(url, vcs, **kwargs):
r"""Return a object representation of a VCS repository.
:returns: instance of a repository object
:rtype: :class:`libvcs.svn.SubversionRepo`, :class:`libvcs.git.GitRepo` or
:class:`libvcs.hg.MercurialRepo`.
Usage Example::
>>> from libvcs.shortcuts import create_repo
>>> r = create_repo(
... url='https://www.github.com/you/myrepo',
... vcs='git',
... repo_dir='/tmp/myrepo')
>>> r.update_repo()
|myrepo| (git) Repo directory for myrepo (git) does not exist @ \
/tmp/myrepo
|myrepo| (git) Cloning.
|myrepo| (git) git clone https://www.github.com/tony/myrepo \
/tmp/myrepo
Cloning into '/tmp/myrepo'...
Checking connectivity... done.
|myrepo| (git) git fetch
|myrepo| (git) git pull
Already up-to-date.
"""
if vcs == 'git':
return GitRepo(url, **kwargs)
elif vcs == 'hg':
return MercurialRepo(url, **kwargs)
elif vcs == 'svn':
return SubversionRepo(url, **kwargs)
else:
raise InvalidVCS('VCS %s is not a valid VCS' % vcs) | python | def create_repo(url, vcs, **kwargs):
r"""Return a object representation of a VCS repository.
:returns: instance of a repository object
:rtype: :class:`libvcs.svn.SubversionRepo`, :class:`libvcs.git.GitRepo` or
:class:`libvcs.hg.MercurialRepo`.
Usage Example::
>>> from libvcs.shortcuts import create_repo
>>> r = create_repo(
... url='https://www.github.com/you/myrepo',
... vcs='git',
... repo_dir='/tmp/myrepo')
>>> r.update_repo()
|myrepo| (git) Repo directory for myrepo (git) does not exist @ \
/tmp/myrepo
|myrepo| (git) Cloning.
|myrepo| (git) git clone https://www.github.com/tony/myrepo \
/tmp/myrepo
Cloning into '/tmp/myrepo'...
Checking connectivity... done.
|myrepo| (git) git fetch
|myrepo| (git) git pull
Already up-to-date.
"""
if vcs == 'git':
return GitRepo(url, **kwargs)
elif vcs == 'hg':
return MercurialRepo(url, **kwargs)
elif vcs == 'svn':
return SubversionRepo(url, **kwargs)
else:
raise InvalidVCS('VCS %s is not a valid VCS' % vcs) | [
"def",
"create_repo",
"(",
"url",
",",
"vcs",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"vcs",
"==",
"'git'",
":",
"return",
"GitRepo",
"(",
"url",
",",
"*",
"*",
"kwargs",
")",
"elif",
"vcs",
"==",
"'hg'",
":",
"return",
"MercurialRepo",
"(",
"url",
",",
"*",
"*",
"kwargs",
")",
"elif",
"vcs",
"==",
"'svn'",
":",
"return",
"SubversionRepo",
"(",
"url",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"raise",
"InvalidVCS",
"(",
"'VCS %s is not a valid VCS'",
"%",
"vcs",
")"
] | r"""Return a object representation of a VCS repository.
:returns: instance of a repository object
:rtype: :class:`libvcs.svn.SubversionRepo`, :class:`libvcs.git.GitRepo` or
:class:`libvcs.hg.MercurialRepo`.
Usage Example::
>>> from libvcs.shortcuts import create_repo
>>> r = create_repo(
... url='https://www.github.com/you/myrepo',
... vcs='git',
... repo_dir='/tmp/myrepo')
>>> r.update_repo()
|myrepo| (git) Repo directory for myrepo (git) does not exist @ \
/tmp/myrepo
|myrepo| (git) Cloning.
|myrepo| (git) git clone https://www.github.com/tony/myrepo \
/tmp/myrepo
Cloning into '/tmp/myrepo'...
Checking connectivity... done.
|myrepo| (git) git fetch
|myrepo| (git) git pull
Already up-to-date. | [
"r",
"Return",
"a",
"object",
"representation",
"of",
"a",
"VCS",
"repository",
"."
] | train | https://github.com/vcs-python/libvcs/blob/f7dc055250199bac6be7439b1d2240583f0bb354/libvcs/shortcuts.py#L8-L43 |
vcs-python/libvcs | libvcs/shortcuts.py | create_repo_from_pip_url | def create_repo_from_pip_url(pip_url, **kwargs):
r"""Return a object representation of a VCS repository via pip-style url.
:returns: instance of a repository object
:rtype: :class:`libvcs.svn.SubversionRepo`, :class:`libvcs.git.GitRepo` or
:class:`libvcs.hg.MercurialRepo`.
Usage Example::
>>> from libvcs.shortcuts import create_repo_from_pip_url
>>> r = create_repo_from_pip_url(
... pip_url='git+https://www.github.com/you/myrepo',
... repo_dir='/tmp/myrepo')
>>> r.update_repo()
|myrepo| (git) Repo directory for myrepo (git) does not exist @ \
/tmp/myrepo
|myrepo| (git) Cloning.
|myrepo| (git) git clone https://www.github.com/tony/myrepo \
/tmp/myrepo
Cloning into '/tmp/myrepo'...
Checking connectivity... done.
|myrepo| (git) git fetch
|myrepo| (git) git pull
Already up-to-date.
"""
if pip_url.startswith('git+'):
return GitRepo.from_pip_url(pip_url, **kwargs)
elif pip_url.startswith('hg+'):
return MercurialRepo.from_pip_url(pip_url, **kwargs)
elif pip_url.startswith('svn+'):
return SubversionRepo.from_pip_url(pip_url, **kwargs)
else:
raise InvalidPipURL(pip_url) | python | def create_repo_from_pip_url(pip_url, **kwargs):
r"""Return a object representation of a VCS repository via pip-style url.
:returns: instance of a repository object
:rtype: :class:`libvcs.svn.SubversionRepo`, :class:`libvcs.git.GitRepo` or
:class:`libvcs.hg.MercurialRepo`.
Usage Example::
>>> from libvcs.shortcuts import create_repo_from_pip_url
>>> r = create_repo_from_pip_url(
... pip_url='git+https://www.github.com/you/myrepo',
... repo_dir='/tmp/myrepo')
>>> r.update_repo()
|myrepo| (git) Repo directory for myrepo (git) does not exist @ \
/tmp/myrepo
|myrepo| (git) Cloning.
|myrepo| (git) git clone https://www.github.com/tony/myrepo \
/tmp/myrepo
Cloning into '/tmp/myrepo'...
Checking connectivity... done.
|myrepo| (git) git fetch
|myrepo| (git) git pull
Already up-to-date.
"""
if pip_url.startswith('git+'):
return GitRepo.from_pip_url(pip_url, **kwargs)
elif pip_url.startswith('hg+'):
return MercurialRepo.from_pip_url(pip_url, **kwargs)
elif pip_url.startswith('svn+'):
return SubversionRepo.from_pip_url(pip_url, **kwargs)
else:
raise InvalidPipURL(pip_url) | [
"def",
"create_repo_from_pip_url",
"(",
"pip_url",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"pip_url",
".",
"startswith",
"(",
"'git+'",
")",
":",
"return",
"GitRepo",
".",
"from_pip_url",
"(",
"pip_url",
",",
"*",
"*",
"kwargs",
")",
"elif",
"pip_url",
".",
"startswith",
"(",
"'hg+'",
")",
":",
"return",
"MercurialRepo",
".",
"from_pip_url",
"(",
"pip_url",
",",
"*",
"*",
"kwargs",
")",
"elif",
"pip_url",
".",
"startswith",
"(",
"'svn+'",
")",
":",
"return",
"SubversionRepo",
".",
"from_pip_url",
"(",
"pip_url",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"raise",
"InvalidPipURL",
"(",
"pip_url",
")"
] | r"""Return a object representation of a VCS repository via pip-style url.
:returns: instance of a repository object
:rtype: :class:`libvcs.svn.SubversionRepo`, :class:`libvcs.git.GitRepo` or
:class:`libvcs.hg.MercurialRepo`.
Usage Example::
>>> from libvcs.shortcuts import create_repo_from_pip_url
>>> r = create_repo_from_pip_url(
... pip_url='git+https://www.github.com/you/myrepo',
... repo_dir='/tmp/myrepo')
>>> r.update_repo()
|myrepo| (git) Repo directory for myrepo (git) does not exist @ \
/tmp/myrepo
|myrepo| (git) Cloning.
|myrepo| (git) git clone https://www.github.com/tony/myrepo \
/tmp/myrepo
Cloning into '/tmp/myrepo'...
Checking connectivity... done.
|myrepo| (git) git fetch
|myrepo| (git) git pull
Already up-to-date. | [
"r",
"Return",
"a",
"object",
"representation",
"of",
"a",
"VCS",
"repository",
"via",
"pip",
"-",
"style",
"url",
"."
] | train | https://github.com/vcs-python/libvcs/blob/f7dc055250199bac6be7439b1d2240583f0bb354/libvcs/shortcuts.py#L46-L80 |
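The pip-URL dispatch above is a plain prefix check; this stripped-down sketch returns scheme names instead of repository objects so the branching can be tested without cloning anything, and it is not the library's own function.

def vcs_from_pip_url(pip_url):
    # Same prefix test as create_repo_from_pip_url, minus the repo classes.
    for prefix, name in (('git+', 'git'), ('hg+', 'hg'), ('svn+', 'svn')):
        if pip_url.startswith(prefix):
            return name
    raise ValueError('invalid pip URL: %s' % pip_url)

print(vcs_from_pip_url('git+https://github.com/you/myrepo'))  # git
print(vcs_from_pip_url('svn+https://example.org/repo/trunk'))  # svn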
xapple/plumbing | plumbing/slurm/job.py | JobSLURM.set_paths | def set_paths(self, base_dir, script_path):
"""Set the directory, the script path and the outfile path"""
# Make absolute paths #
if 'change_dir' in self.kwargs:
self.kwargs['change_dir'] = DirectoryPath(os.path.abspath(self.kwargs['change_dir']))
if 'out_file' in self.kwargs:
self.kwargs['out_file'] = FilePath(os.path.abspath(self.kwargs['out_file']))
# In case there is a base directory #
if base_dir is not None:
self.base_dir = DirectoryPath(os.path.abspath(base_dir))
self.script_path = FilePath(base_dir + "run." + self.extensions[self.language])
self.kwargs['change_dir'] = base_dir
self.kwargs['out_file'] = FilePath(base_dir + "run.out")
# Other cases #
if base_dir is None and script_path is None: self.script_path = FilePath(new_temp_path())
if script_path is not None: self.script_path = FilePath(os.path.abspath(script_path)) | python | def set_paths(self, base_dir, script_path):
"""Set the directory, the script path and the outfile path"""
# Make absolute paths #
if 'change_dir' in self.kwargs:
self.kwargs['change_dir'] = DirectoryPath(os.path.abspath(self.kwargs['change_dir']))
if 'out_file' in self.kwargs:
self.kwargs['out_file'] = FilePath(os.path.abspath(self.kwargs['out_file']))
# In case there is a base directory #
if base_dir is not None:
self.base_dir = DirectoryPath(os.path.abspath(base_dir))
self.script_path = FilePath(base_dir + "run." + self.extensions[self.language])
self.kwargs['change_dir'] = base_dir
self.kwargs['out_file'] = FilePath(base_dir + "run.out")
# Other cases #
if base_dir is None and script_path is None: self.script_path = FilePath(new_temp_path())
if script_path is not None: self.script_path = FilePath(os.path.abspath(script_path)) | [
"def",
"set_paths",
"(",
"self",
",",
"base_dir",
",",
"script_path",
")",
":",
"# Make absolute paths #",
"if",
"'change_dir'",
"in",
"self",
".",
"kwargs",
":",
"self",
".",
"kwargs",
"[",
"'change_dir'",
"]",
"=",
"DirectoryPath",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"self",
".",
"kwargs",
"[",
"'change_dir'",
"]",
")",
")",
"if",
"'out_file'",
"in",
"self",
".",
"kwargs",
":",
"self",
".",
"kwargs",
"[",
"'out_file'",
"]",
"=",
"FilePath",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"self",
".",
"kwargs",
"[",
"'out_file'",
"]",
")",
")",
"# In case there is a base directory #",
"if",
"base_dir",
"is",
"not",
"None",
":",
"self",
".",
"base_dir",
"=",
"DirectoryPath",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"base_dir",
")",
")",
"self",
".",
"script_path",
"=",
"FilePath",
"(",
"base_dir",
"+",
"\"run.\"",
"+",
"self",
".",
"extensions",
"[",
"self",
".",
"language",
"]",
")",
"self",
".",
"kwargs",
"[",
"'change_dir'",
"]",
"=",
"base_dir",
"self",
".",
"kwargs",
"[",
"'out_file'",
"]",
"=",
"FilePath",
"(",
"base_dir",
"+",
"\"run.out\"",
")",
"# Other cases #",
"if",
"base_dir",
"is",
"None",
"and",
"script_path",
"is",
"None",
":",
"self",
".",
"script_path",
"=",
"FilePath",
"(",
"new_temp_path",
"(",
")",
")",
"if",
"script_path",
"is",
"not",
"None",
":",
"self",
".",
"script_path",
"=",
"FilePath",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"script_path",
")",
")"
] | Set the directory, the script path and the outfile path | [
"Set",
"the",
"directory",
"the",
"script",
"path",
"and",
"the",
"outfile",
"path"
] | train | https://github.com/xapple/plumbing/blob/4a7706c7722f5996d0ca366f191aff9ac145880a/plumbing/slurm/job.py#L121-L136 |
xapple/plumbing | plumbing/slurm/job.py | JobSLURM.slurm_params | def slurm_params(self):
"""The list of parameters to give to the `sbatch` command."""
# Main loop #
result = OrderedDict()
for param, info in self.slurm_headers.items():
if not info['needed'] and not param in self.kwargs: continue
if param in self.kwargs: result[param] = self.kwargs.get(param)
else: result[param] = info['default']
# Special cases #
if result.get('cluster') == 'halvan': result['partition'] = 'halvan'
# Return #
return result | python | def slurm_params(self):
"""The list of parameters to give to the `sbatch` command."""
# Main loop #
result = OrderedDict()
for param, info in self.slurm_headers.items():
if not info['needed'] and not param in self.kwargs: continue
if param in self.kwargs: result[param] = self.kwargs.get(param)
else: result[param] = info['default']
# Special cases #
if result.get('cluster') == 'halvan': result['partition'] = 'halvan'
# Return #
return result | [
"def",
"slurm_params",
"(",
"self",
")",
":",
"# Main loop #",
"result",
"=",
"OrderedDict",
"(",
")",
"for",
"param",
",",
"info",
"in",
"self",
".",
"slurm_headers",
".",
"items",
"(",
")",
":",
"if",
"not",
"info",
"[",
"'needed'",
"]",
"and",
"not",
"param",
"in",
"self",
".",
"kwargs",
":",
"continue",
"if",
"param",
"in",
"self",
".",
"kwargs",
":",
"result",
"[",
"param",
"]",
"=",
"self",
".",
"kwargs",
".",
"get",
"(",
"param",
")",
"else",
":",
"result",
"[",
"param",
"]",
"=",
"info",
"[",
"'default'",
"]",
"# Special cases #",
"if",
"result",
".",
"get",
"(",
"'cluster'",
")",
"==",
"'halvan'",
":",
"result",
"[",
"'partition'",
"]",
"=",
"'halvan'",
"# Return #",
"return",
"result"
] | The list of parameters to give to the `sbatch` command. | [
"The",
"list",
"of",
"parameters",
"to",
"give",
"to",
"the",
"sbatch",
"command",
"."
] | train | https://github.com/xapple/plumbing/blob/4a7706c7722f5996d0ca366f191aff9ac145880a/plumbing/slurm/job.py#L139-L150 |
xapple/plumbing | plumbing/slurm/job.py | JobSLURM.script | def script(self):
"""The script to be submitted to the SLURM queue."""
self.shebang_header = self.shebang_headers[self.language]
self.slurm_header = [self.slurm_headers[k]['tag'] % v for k,v in self.slurm_params.items()]
self.script_header = self.script_headers[self.language]
self.script_footer = self.script_footers[self.language]
return '\n'.join(flatter([self.shebang_header,
self.slurm_header,
self.script_header,
self.command,
self.script_footer])) | python | def script(self):
"""The script to be submitted to the SLURM queue."""
self.shebang_header = self.shebang_headers[self.language]
self.slurm_header = [self.slurm_headers[k]['tag'] % v for k,v in self.slurm_params.items()]
self.script_header = self.script_headers[self.language]
self.script_footer = self.script_footers[self.language]
return '\n'.join(flatter([self.shebang_header,
self.slurm_header,
self.script_header,
self.command,
self.script_footer])) | [
"def",
"script",
"(",
"self",
")",
":",
"self",
".",
"shebang_header",
"=",
"self",
".",
"shebang_headers",
"[",
"self",
".",
"language",
"]",
"self",
".",
"slurm_header",
"=",
"[",
"self",
".",
"slurm_headers",
"[",
"k",
"]",
"[",
"'tag'",
"]",
"%",
"v",
"for",
"k",
",",
"v",
"in",
"self",
".",
"slurm_params",
".",
"items",
"(",
")",
"]",
"self",
".",
"script_header",
"=",
"self",
".",
"script_headers",
"[",
"self",
".",
"language",
"]",
"self",
".",
"script_footer",
"=",
"self",
".",
"script_footers",
"[",
"self",
".",
"language",
"]",
"return",
"'\\n'",
".",
"join",
"(",
"flatter",
"(",
"[",
"self",
".",
"shebang_header",
",",
"self",
".",
"slurm_header",
",",
"self",
".",
"script_header",
",",
"self",
".",
"command",
",",
"self",
".",
"script_footer",
"]",
")",
")"
] | The script to be submitted to the SLURM queue. | [
"The",
"script",
"to",
"be",
"submitted",
"to",
"the",
"SLURM",
"queue",
"."
] | train | https://github.com/xapple/plumbing/blob/4a7706c7722f5996d0ca366f191aff9ac145880a/plumbing/slurm/job.py#L153-L163 |
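A rough sketch of how the SLURM header lines come together in the script property: each parameter is rendered through a '#SBATCH ...' format string. The tag strings and parameter names here are illustrative placeholders, since the actual slurm_headers table is not part of this excerpt.

# Illustrative tags only; the real JobSLURM keeps these in slurm_headers.
params = {'job_name': 'demo', 'time': '01:00:00', 'cores': 4}
tags = {'job_name': '#SBATCH -J %s', 'time': '#SBATCH -t %s', 'cores': '#SBATCH -n %s'}
slurm_header = [tags[k] % v for k, v in params.items()]
print('\n'.join(['#!/usr/bin/env bash'] + slurm_header))
# #!/usr/bin/env bash
# #SBATCH -J demo
# #SBATCH -t 01:00:00
# #SBATCH -n 4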
xapple/plumbing | plumbing/slurm/job.py | JobSLURM.make_script | def make_script(self):
"""Make the script and return a FilePath object pointing to the script above."""
self.script_path.write(self.script)
self.script_path.permissions.make_executable()
return self.script_path | python | def make_script(self):
"""Make the script and return a FilePath object pointing to the script above."""
self.script_path.write(self.script)
self.script_path.permissions.make_executable()
return self.script_path | [
"def",
"make_script",
"(",
"self",
")",
":",
"self",
".",
"script_path",
".",
"write",
"(",
"self",
".",
"script",
")",
"self",
".",
"script_path",
".",
"permissions",
".",
"make_executable",
"(",
")",
"return",
"self",
".",
"script_path"
] | Make the script and return a FilePath object pointing to the script above. | [
"Make",
"the",
"script",
"and",
"return",
"a",
"FilePath",
"object",
"pointing",
"to",
"the",
"script",
"above",
"."
] | train | https://github.com/xapple/plumbing/blob/4a7706c7722f5996d0ca366f191aff9ac145880a/plumbing/slurm/job.py#L165-L169 |
xapple/plumbing | plumbing/slurm/job.py | JobSLURM.status | def status(self):
"""What is the status of the job ?"""
# If there is no script it is either ready or a lost duplicate #
if not self.script_path.exists:
if self.name in jobs.names: return "DUPLICATE"
if self.name not in jobs.names: return "READY"
# It is submitted already #
if self.name in jobs.names:
if jobs[self.name]['type'] == 'queued': return "QUEUED"
if jobs[self.name]['type'] == 'running': return "RUNNING"
# So the script exists for sure but it is not in the queue #
if not self.kwargs['out_file'].exists: return "ABORTED"
# Let's look in log file #
if 'CANCELED' in self.log_tail: return "CANCELLED"
if 'slurmstepd: error' in self.log_tail: return "CANCELLED"
# It all looks good #
if 'SLURM: end at' in self.log_tail: return "FINISHED"
# At this point we have no idea #
return "INTERUPTED" | python | def status(self):
"""What is the status of the job ?"""
# If there is no script it is either ready or a lost duplicate #
if not self.script_path.exists:
if self.name in jobs.names: return "DUPLICATE"
if self.name not in jobs.names: return "READY"
# It is submitted already #
if self.name in jobs.names:
if jobs[self.name]['type'] == 'queued': return "QUEUED"
if jobs[self.name]['type'] == 'running': return "RUNNING"
# So the script exists for sure but it is not in the queue #
if not self.kwargs['out_file'].exists: return "ABORTED"
# Let's look in log file #
if 'CANCELED' in self.log_tail: return "CANCELLED"
if 'slurmstepd: error' in self.log_tail: return "CANCELLED"
# It all looks good #
if 'SLURM: end at' in self.log_tail: return "FINISHED"
# At this point we have no idea #
return "INTERUPTED" | [
"def",
"status",
"(",
"self",
")",
":",
"# If there is no script it is either ready or a lost duplicate #",
"if",
"not",
"self",
".",
"script_path",
".",
"exists",
":",
"if",
"self",
".",
"name",
"in",
"jobs",
".",
"names",
":",
"return",
"\"DUPLICATE\"",
"if",
"self",
".",
"name",
"not",
"in",
"jobs",
".",
"names",
":",
"return",
"\"READY\"",
"# It is submitted already #",
"if",
"self",
".",
"name",
"in",
"jobs",
".",
"names",
":",
"if",
"jobs",
"[",
"self",
".",
"name",
"]",
"[",
"'type'",
"]",
"==",
"'queued'",
":",
"return",
"\"QUEUED\"",
"if",
"jobs",
"[",
"self",
".",
"name",
"]",
"[",
"'type'",
"]",
"==",
"'running'",
":",
"return",
"\"RUNNING\"",
"# So the script exists for sure but it is not in the queue #",
"if",
"not",
"self",
".",
"kwargs",
"[",
"'out_file'",
"]",
".",
"exists",
":",
"return",
"\"ABORTED\"",
"# Let's look in log file #",
"if",
"'CANCELED'",
"in",
"self",
".",
"log_tail",
":",
"return",
"\"CANCELLED\"",
"if",
"'slurmstepd: error'",
"in",
"self",
".",
"log_tail",
":",
"return",
"\"CANCELLED\"",
"# It all looks good #",
"if",
"'SLURM: end at'",
"in",
"self",
".",
"log_tail",
":",
"return",
"\"FINISHED\"",
"# At this point we have no idea #",
"return",
"\"INTERUPTED\""
] | What is the status of the job ? | [
"What",
"is",
"the",
"status",
"of",
"the",
"job",
"?"
] | train | https://github.com/xapple/plumbing/blob/4a7706c7722f5996d0ca366f191aff9ac145880a/plumbing/slurm/job.py#L183-L201 |
xapple/plumbing | plumbing/slurm/job.py | JobSLURM.info | def info(self):
"""Get the existing job information dictionary"""
if self.name not in jobs: return {'status': self.status}
else: return jobs[self.name] | python | def info(self):
"""Get the existing job information dictionary"""
if self.name not in jobs: return {'status': self.status}
else: return jobs[self.name] | [
"def",
"info",
"(",
"self",
")",
":",
"if",
"self",
".",
"name",
"not",
"in",
"jobs",
":",
"return",
"{",
"'status'",
":",
"self",
".",
"status",
"}",
"else",
":",
"return",
"jobs",
"[",
"self",
".",
"name",
"]"
] | Get the existing job information dictionary | [
"Get",
"the",
"existing",
"job",
"information",
"dictionary"
] | train | https://github.com/xapple/plumbing/blob/4a7706c7722f5996d0ca366f191aff9ac145880a/plumbing/slurm/job.py#L204-L207 |
xapple/plumbing | plumbing/slurm/job.py | JobSLURM.run | def run(self):
"""Will call self.launch() after performing some checks"""
# Check already exists #
if self.status == "READY": return self.launch()
# Check name conflict #
if self.status == "DUPLICATE": message = "Job with same name '%s' already in queue, but we lost the script."
if self.status == "QUEUED": message = "Job '%s' already in queue."
if self.status == "RUNNING": message = "Job '%s' already running."
if self.status == "FINISHED": message = "Job '%s' already ended successfully."
if self.status == "ABORTED": message = "Job '%s' was killed without any output file (?)."
if self.status == "CANCELED": message = "Job '%s' was canceled or killed while running."
if self.status == "INTERUPTED": message = "Job '%s' is not running. We don't know why. Look at the log file."
print Color.i_red + message % (self.name,) + Color.end
print "Job might have run already (?). Not starting." | python | def run(self):
"""Will call self.launch() after performing some checks"""
# Check already exists #
if self.status == "READY": return self.launch()
# Check name conflict #
if self.status == "DUPLICATE": message = "Job with same name '%s' already in queue, but we lost the script."
if self.status == "QUEUED": message = "Job '%s' already in queue."
if self.status == "RUNNING": message = "Job '%s' already running."
if self.status == "FINISHED": message = "Job '%s' already ended successfully."
if self.status == "ABORTED": message = "Job '%s' was killed without any output file (?)."
if self.status == "CANCELED": message = "Job '%s' was canceled or killed while running."
if self.status == "INTERUPTED": message = "Job '%s' is not running. We don't know why. Look at the log file."
print Color.i_red + message % (self.name,) + Color.end
print "Job might have run already (?). Not starting." | [
"def",
"run",
"(",
"self",
")",
":",
"# Check already exists #",
"if",
"self",
".",
"status",
"==",
"\"READY\"",
":",
"return",
"self",
".",
"launch",
"(",
")",
"# Check name conflict #",
"if",
"self",
".",
"status",
"==",
"\"DUPLICATE\"",
":",
"message",
"=",
"\"Job with same name '%s' already in queue, but we lost the script.\"",
"if",
"self",
".",
"status",
"==",
"\"QUEUED\"",
":",
"message",
"=",
"\"Job '%s' already in queue.\"",
"if",
"self",
".",
"status",
"==",
"\"RUNNING\"",
":",
"message",
"=",
"\"Job '%s' already running.\"",
"if",
"self",
".",
"status",
"==",
"\"FINISHED\"",
":",
"message",
"=",
"\"Job '%s' already ended successfully.\"",
"if",
"self",
".",
"status",
"==",
"\"ABORTED\"",
":",
"message",
"=",
"\"Job '%s' was killed without any output file (?).\"",
"if",
"self",
".",
"status",
"==",
"\"CANCELED\"",
":",
"message",
"=",
"\"Job '%s' was canceled or killed while running.\"",
"if",
"self",
".",
"status",
"==",
"\"INTERUPTED\"",
":",
"message",
"=",
"\"Job '%s' is not running. We don't know why. Look at the log file.\"",
"print",
"Color",
".",
"i_red",
"+",
"message",
"%",
"(",
"self",
".",
"name",
",",
")",
"+",
"Color",
".",
"end",
"print",
"\"Job might have run already (?). Not starting.\""
] | Will call self.launch() after performing some checks | [
"Will",
"call",
"self",
".",
"launch",
"()",
"after",
"performing",
"some",
"checks"
] | train | https://github.com/xapple/plumbing/blob/4a7706c7722f5996d0ca366f191aff9ac145880a/plumbing/slurm/job.py#L210-L223 |
xapple/plumbing | plumbing/slurm/job.py | JobSLURM.launch | def launch(self):
"""Make the script file and return the newly created job id"""
# Make script file #
self.make_script()
# Do it #
sbatch_out = sh.sbatch(self.script_path)
jobs.expire()
# Message #
print Color.i_blu + "SLURM:" + Color.end + " " + str(sbatch_out),
# Return id #
self.id = int(re.findall("Submitted batch job ([0-9]+)", str(sbatch_out))[0])
return self.id | python | def launch(self):
"""Make the script file and return the newly created job id"""
# Make script file #
self.make_script()
# Do it #
sbatch_out = sh.sbatch(self.script_path)
jobs.expire()
# Message #
print Color.i_blu + "SLURM:" + Color.end + " " + str(sbatch_out),
# Return id #
self.id = int(re.findall("Submitted batch job ([0-9]+)", str(sbatch_out))[0])
return self.id | [
"def",
"launch",
"(",
"self",
")",
":",
"# Make script file #",
"self",
".",
"make_script",
"(",
")",
"# Do it #",
"sbatch_out",
"=",
"sh",
".",
"sbatch",
"(",
"self",
".",
"script_path",
")",
"jobs",
".",
"expire",
"(",
")",
"# Message #",
"print",
"Color",
".",
"i_blu",
"+",
"\"SLURM:\"",
"+",
"Color",
".",
"end",
"+",
"\" \"",
"+",
"str",
"(",
"sbatch_out",
")",
",",
"# Return id #",
"self",
".",
"id",
"=",
"int",
"(",
"re",
".",
"findall",
"(",
"\"Submitted batch job ([0-9]+)\"",
",",
"str",
"(",
"sbatch_out",
")",
")",
"[",
"0",
"]",
")",
"return",
"self",
".",
"id"
] | Make the script file and return the newly created job id | [
"Make",
"the",
"script",
"file",
"and",
"return",
"the",
"newly",
"created",
"job",
"id"
] | train | https://github.com/xapple/plumbing/blob/4a7706c7722f5996d0ca366f191aff9ac145880a/plumbing/slurm/job.py#L225-L236 |
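The job id in launch is recovered from sbatch's stdout with a regular expression; here is the same pattern in isolation, fed a typical 'Submitted batch job N' reply.

import re

sbatch_out = 'Submitted batch job 123456\n'  # sample sbatch reply
job_id = int(re.findall(r'Submitted batch job ([0-9]+)', sbatch_out)[0])
print(job_id)  # 123456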
xapple/plumbing | plumbing/slurm/job.py | JobSLURM.run_locally | def run_locally(self):
"""A convenience method to run the same result as a SLURM job
but locally in a non-blocking way. Useful for testing."""
self.thread = threading.Thread(target=self.execute_locally)
self.thread.daemon = True # So that they die when we die
self.thread.start() | python | def run_locally(self):
"""A convenience method to run the same result as a SLURM job
but locally in a non-blocking way. Useful for testing."""
self.thread = threading.Thread(target=self.execute_locally)
self.thread.daemon = True # So that they die when we die
self.thread.start() | [
"def",
"run_locally",
"(",
"self",
")",
":",
"self",
".",
"thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"self",
".",
"execute_locally",
")",
"self",
".",
"thread",
".",
"daemon",
"=",
"True",
"# So that they die when we die",
"self",
".",
"thread",
".",
"start",
"(",
")"
] | A convenience method to run the same result as a SLURM job
but locally in a non-blocking way. Useful for testing. | [
"A",
"convenience",
"method",
"to",
"run",
"the",
"same",
"result",
"as",
"a",
"SLURM",
"job",
"but",
"locally",
"in",
"a",
"non",
"-",
"blocking",
"way",
".",
"Useful",
"for",
"testing",
"."
] | train | https://github.com/xapple/plumbing/blob/4a7706c7722f5996d0ca366f191aff9ac145880a/plumbing/slurm/job.py#L248-L253 |
xapple/plumbing | plumbing/slurm/job.py | JobSLURM.execute_locally | def execute_locally(self):
"""Runs the equivalent command locally in a blocking way."""
# Make script file #
self.make_script()
# Do it #
with open(self.kwargs['out_file'], 'w') as handle:
sh.python(self.script_path, _out=handle, _err=handle) | python | def execute_locally(self):
"""Runs the equivalent command locally in a blocking way."""
# Make script file #
self.make_script()
# Do it #
with open(self.kwargs['out_file'], 'w') as handle:
sh.python(self.script_path, _out=handle, _err=handle) | [
"def",
"execute_locally",
"(",
"self",
")",
":",
"# Make script file #",
"self",
".",
"make_script",
"(",
")",
"# Do it #",
"with",
"open",
"(",
"self",
".",
"kwargs",
"[",
"'out_file'",
"]",
",",
"'w'",
")",
"as",
"handle",
":",
"sh",
".",
"python",
"(",
"self",
".",
"script_path",
",",
"_out",
"=",
"handle",
",",
"_err",
"=",
"handle",
")"
] | Runs the equivalent command locally in a blocking way. | [
"Runs",
"the",
"equivalent",
"command",
"locally",
"in",
"a",
"blocking",
"way",
"."
] | train | https://github.com/xapple/plumbing/blob/4a7706c7722f5996d0ca366f191aff9ac145880a/plumbing/slurm/job.py#L255-L261 |
xapple/plumbing | plumbing/slurm/job.py | JobSLURM.wait_locally | def wait_locally(self):
"""If you have run the query in a non-blocking way, call this method to pause
until the query is finished."""
try: self.thread.join(sys.maxint) # maxint timeout so that we can Ctrl-C them
except KeyboardInterrupt: print "Stopped waiting on job '%s'" % self.kwargs['job_name'] | python | def wait_locally(self):
"""If you have run the query in a non-blocking way, call this method to pause
until the query is finished."""
try: self.thread.join(sys.maxint) # maxint timeout so that we can Ctrl-C them
except KeyboardInterrupt: print "Stopped waiting on job '%s'" % self.kwargs['job_name'] | [
"def",
"wait_locally",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"thread",
".",
"join",
"(",
"sys",
".",
"maxint",
")",
"# maxint timeout so that we can Ctrl-C them",
"except",
"KeyboardInterrupt",
":",
"print",
"\"Stopped waiting on job '%s'\"",
"%",
"self",
".",
"kwargs",
"[",
"'job_name'",
"]"
] | If you have run the query in a non-blocking way, call this method to pause
until the query is finished. | [
"If",
"you",
"have",
"run",
"the",
"query",
"in",
"a",
"non",
"-",
"blocking",
"way",
"call",
"this",
"method",
"to",
"pause",
"until",
"the",
"query",
"is",
"finished",
"."
] | train | https://github.com/xapple/plumbing/blob/4a7706c7722f5996d0ca366f191aff9ac145880a/plumbing/slurm/job.py#L263-L267 |
Infinidat/infi.projector | src/infi/projector/plugins/builtins/version/__init__.py | VersionPlugin.replace_version_tag | def replace_version_tag(self):
"""find the next major/minor/trivial version number if applicable"""
version_tag = self.arguments.get('<version>')
special_keywords = ['current', 'latest']
if version_tag in special_keywords:
logger.error("releasing version '{}' is disallowed. Did you mean 'version upload'?".format(version_tag))
raise SystemExit(1)
placeholders = dict(major=0, minor=1, trivial=2)
placeholder = placeholders.get(version_tag)
if placeholder is None:
return version_tag
current_version = self.get_git_describe().lstrip('v')
version_numbers = current_version.split('-')[0].split('.')
version_numbers = [int(item) for item in version_numbers]
version_numbers = version_numbers[:placeholder + 1]
while len(version_numbers) < 3:
version_numbers.append(0)
version_numbers[placeholder] += 1
return '.'.join([str(item) for item in version_numbers[:2 if placeholder < 2 else 3]]) | python | def replace_version_tag(self):
"""find the next major/minor/trivial version number if applicable"""
version_tag = self.arguments.get('<version>')
special_keywords = ['current', 'latest']
if version_tag in special_keywords:
logger.error("releasing version '{}' is disallowed. Did you mean 'version upload'?".format(version_tag))
raise SystemExit(1)
placeholders = dict(major=0, minor=1, trivial=2)
placeholder = placeholders.get(version_tag)
if placeholder is None:
return version_tag
current_version = self.get_git_describe().lstrip('v')
version_numbers = current_version.split('-')[0].split('.')
version_numbers = [int(item) for item in version_numbers]
version_numbers = version_numbers[:placeholder + 1]
while len(version_numbers) < 3:
version_numbers.append(0)
version_numbers[placeholder] += 1
return '.'.join([str(item) for item in version_numbers[:2 if placeholder < 2 else 3]]) | [
"def",
"replace_version_tag",
"(",
"self",
")",
":",
"version_tag",
"=",
"self",
".",
"arguments",
".",
"get",
"(",
"'<version>'",
")",
"special_keywords",
"=",
"[",
"'current'",
",",
"'latest'",
"]",
"if",
"version_tag",
"in",
"special_keywords",
":",
"logger",
".",
"error",
"(",
"\"releasing version '{}' is disallowed. Did you mean 'version upload'?\"",
".",
"format",
"(",
"version_tag",
")",
")",
"raise",
"SystemExit",
"(",
"1",
")",
"placeholders",
"=",
"dict",
"(",
"major",
"=",
"0",
",",
"minor",
"=",
"1",
",",
"trivial",
"=",
"2",
")",
"placeholder",
"=",
"placeholders",
".",
"get",
"(",
"version_tag",
")",
"if",
"placeholder",
"is",
"None",
":",
"return",
"version_tag",
"current_version",
"=",
"self",
".",
"get_git_describe",
"(",
")",
".",
"lstrip",
"(",
"'v'",
")",
"version_numbers",
"=",
"current_version",
".",
"split",
"(",
"'-'",
")",
"[",
"0",
"]",
".",
"split",
"(",
"'.'",
")",
"version_numbers",
"=",
"[",
"int",
"(",
"item",
")",
"for",
"item",
"in",
"version_numbers",
"]",
"version_numbers",
"=",
"version_numbers",
"[",
":",
"placeholder",
"+",
"1",
"]",
"while",
"len",
"(",
"version_numbers",
")",
"<",
"3",
":",
"version_numbers",
".",
"append",
"(",
"0",
")",
"version_numbers",
"[",
"placeholder",
"]",
"+=",
"1",
"return",
"'.'",
".",
"join",
"(",
"[",
"str",
"(",
"item",
")",
"for",
"item",
"in",
"version_numbers",
"[",
":",
"2",
"if",
"placeholder",
"<",
"2",
"else",
"3",
"]",
"]",
")"
] | find the next major/minor/trivial version number if applicable | [
"find",
"the",
"next",
"major",
"/",
"minor",
"/",
"trivial",
"version",
"number",
"if",
"applicable"
] | train | https://github.com/Infinidat/infi.projector/blob/4a0d098f4f8f14ffb3e7536c7515fa79a21c1134/src/infi/projector/plugins/builtins/version/__init__.py#L40-L58 |
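The version bump in replace_version_tag is easy to check by hand; this standalone restatement (renamed bump, with an invented 'git describe' value) repeats the same arithmetic outside the plugin.

def bump(current, keyword):
    # Same placeholder table as replace_version_tag: major/minor/trivial.
    places = {'major': 0, 'minor': 1, 'trivial': 2}
    i = places[keyword]
    nums = [int(x) for x in current.lstrip('v').split('-')[0].split('.')][:i + 1]
    while len(nums) < 3:
        nums.append(0)
    nums[i] += 1
    return '.'.join(str(x) for x in nums[:2 if i < 2 else 3])

print(bump('v1.4.2-13-gabc123', 'major'))    # 2.0
print(bump('v1.4.2-13-gabc123', 'minor'))    # 1.5
print(bump('v1.4.2-13-gabc123', 'trivial'))  # 1.4.3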
Infinidat/infi.projector | src/infi/projector/commandline_parser/__init__.py | parse_docopt_string | def parse_docopt_string(docopt_string):
"""returns a 2-tuple (usage, options)"""
from re import match, DOTALL
only_usage_pattern = r"""\s+Usage:\s+(?P<usage>.*)\s+"""
usage_and_options_pattern = r"""\s+Usage:\s+(?P<usage>.*)\s+Options:\s+(?P<options>.*)\s+"""
usage, options = '', ''
if match(usage_and_options_pattern, docopt_string, DOTALL):
usage = match(usage_and_options_pattern, docopt_string, DOTALL).groupdict()['usage']
options = match(usage_and_options_pattern, docopt_string, DOTALL).groupdict()['options']
elif match(only_usage_pattern, docopt_string, DOTALL):
usage = match(only_usage_pattern, docopt_string, DOTALL).groupdict()['usage']
return usage, options | python | def parse_docopt_string(docopt_string):
"""returns a 2-tuple (usage, options)"""
from re import match, DOTALL
only_usage_pattern = r"""\s+Usage:\s+(?P<usage>.*)\s+"""
usage_and_options_pattern = r"""\s+Usage:\s+(?P<usage>.*)\s+Options:\s+(?P<options>.*)\s+"""
usage, options = '', ''
if match(usage_and_options_pattern, docopt_string, DOTALL):
usage = match(usage_and_options_pattern, docopt_string, DOTALL).groupdict()['usage']
options = match(usage_and_options_pattern, docopt_string, DOTALL).groupdict()['options']
elif match(only_usage_pattern, docopt_string, DOTALL):
usage = match(only_usage_pattern, docopt_string, DOTALL).groupdict()['usage']
return usage, options | [
"def",
"parse_docopt_string",
"(",
"docopt_string",
")",
":",
"from",
"re",
"import",
"match",
",",
"DOTALL",
"only_usage_pattern",
"=",
"r\"\"\"\\s+Usage:\\s+(?P<usage>.*)\\s+\"\"\"",
"usage_and_options_pattern",
"=",
"r\"\"\"\\s+Usage:\\s+(?P<usage>.*)\\s+Options:\\s+(?P<options>.*)\\s+\"\"\"",
"usage",
",",
"options",
"=",
"''",
",",
"''",
"if",
"match",
"(",
"usage_and_options_pattern",
",",
"docopt_string",
",",
"DOTALL",
")",
":",
"usage",
"=",
"match",
"(",
"usage_and_options_pattern",
",",
"docopt_string",
",",
"DOTALL",
")",
".",
"groupdict",
"(",
")",
"[",
"'usage'",
"]",
"options",
"=",
"match",
"(",
"usage_and_options_pattern",
",",
"docopt_string",
",",
"DOTALL",
")",
".",
"groupdict",
"(",
")",
"[",
"'options'",
"]",
"elif",
"match",
"(",
"only_usage_pattern",
",",
"docopt_string",
",",
"DOTALL",
")",
":",
"usage",
"=",
"match",
"(",
"only_usage_pattern",
",",
"docopt_string",
",",
"DOTALL",
")",
".",
"groupdict",
"(",
")",
"[",
"'usage'",
"]",
"return",
"usage",
",",
"options"
] | returns a 2-tuple (usage, options) | [
"returns",
"a",
"2",
"-",
"tuple",
"(",
"usage",
"options",
")"
] | train | https://github.com/Infinidat/infi.projector/blob/4a0d098f4f8f14ffb3e7536c7515fa79a21c1134/src/infi/projector/commandline_parser/__init__.py#L8-L19 |
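A quick check of the usage/options split, assuming the infi.projector package is importable in the current environment; the docopt text is a made-up two-section example.

from infi.projector.commandline_parser import parse_docopt_string

doc = """
Usage:
    di fetch <info-url>

Options:
    --cache-dir=DIR  cache directory
"""
usage, options = parse_docopt_string(doc)
print(usage.strip())    # di fetch <info-url>
print(options.strip())  # --cache-dir=DIR  cache directory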
matllubos/django-is-core | is_core/filters/__init__.py | UIFilterMixin.get_widget | def get_widget(self, request):
"""
Returns concrete widget that will be used for rendering table filter.
"""
widget = self.widget
if isinstance(widget, type):
widget = widget()
return widget | python | def get_widget(self, request):
"""
Returns concrete widget that will be used for rendering table filter.
"""
widget = self.widget
if isinstance(widget, type):
widget = widget()
return widget | [
"def",
"get_widget",
"(",
"self",
",",
"request",
")",
":",
"widget",
"=",
"self",
".",
"widget",
"if",
"isinstance",
"(",
"widget",
",",
"type",
")",
":",
"widget",
"=",
"widget",
"(",
")",
"return",
"widget"
] | Returns concrete widget that will be used for rendering table filter. | [
"Returns",
"concrete",
"widget",
"that",
"will",
"be",
"used",
"for",
"rendering",
"table",
"filter",
"."
] | train | https://github.com/matllubos/django-is-core/blob/3f87ec56a814738683c732dce5f07e0328c2300d/is_core/filters/__init__.py#L38-L45 |
sunlightlabs/django-mediasync | mediasync/backends/s3.py | Client.remote_media_url | def remote_media_url(self, with_ssl=False):
"""
Returns the base remote media URL. In this case, we can safely make
some assumptions on the URL string based on bucket names, and having
public ACL on.
args:
with_ssl: (bool) If True, return an HTTPS url.
"""
protocol = 'http' if with_ssl is False else 'https'
url = (self.aws_bucket_cname and "%s://%s" or "%s://s3.amazonaws.com/%s") % (protocol, self.aws_bucket)
if self.aws_prefix:
url = "%s/%s" % (url, self.aws_prefix)
return url | python | def remote_media_url(self, with_ssl=False):
"""
Returns the base remote media URL. In this case, we can safely make
some assumptions on the URL string based on bucket names, and having
public ACL on.
args:
with_ssl: (bool) If True, return an HTTPS url.
"""
protocol = 'http' if with_ssl is False else 'https'
url = (self.aws_bucket_cname and "%s://%s" or "%s://s3.amazonaws.com/%s") % (protocol, self.aws_bucket)
if self.aws_prefix:
url = "%s/%s" % (url, self.aws_prefix)
return url | [
"def",
"remote_media_url",
"(",
"self",
",",
"with_ssl",
"=",
"False",
")",
":",
"protocol",
"=",
"'http'",
"if",
"with_ssl",
"is",
"False",
"else",
"'https'",
"url",
"=",
"(",
"self",
".",
"aws_bucket_cname",
"and",
"\"%s://%s\"",
"or",
"\"%s://s3.amazonaws.com/%s\"",
")",
"%",
"(",
"protocol",
",",
"self",
".",
"aws_bucket",
")",
"if",
"self",
".",
"aws_prefix",
":",
"url",
"=",
"\"%s/%s\"",
"%",
"(",
"url",
",",
"self",
".",
"aws_prefix",
")",
"return",
"url"
] | Returns the base remote media URL. In this case, we can safely make
some assumptions on the URL string based on bucket names, and having
public ACL on.
args:
with_ssl: (bool) If True, return an HTTPS url. | [
"Returns",
"the",
"base",
"remote",
"media",
"URL",
".",
"In",
"this",
"case",
"we",
"can",
"safely",
"make",
"some",
"assumptions",
"on",
"the",
"URL",
"string",
"based",
"on",
"bucket",
"names",
"and",
"having",
"public",
"ACL",
"on",
".",
"args",
":",
"with_ssl",
":",
"(",
"bool",
")",
"If",
"True",
"return",
"an",
"HTTPS",
"url",
"."
] | train | https://github.com/sunlightlabs/django-mediasync/blob/aa8ce4cfff757bbdb488463c64c0863cca6a1932/mediasync/backends/s3.py#L43-L56 |
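A standalone sketch of the URL selection done by remote_media_url above, with the settings passed in directly instead of read from the mediasync Client (whose construction is not shown in this row); the bucket names are illustrative.

def _remote_media_url(aws_bucket, aws_bucket_cname=False, aws_prefix='', with_ssl=False):
    # Same and/or template selection as the method above.
    protocol = 'http' if with_ssl is False else 'https'
    url = (aws_bucket_cname and "%s://%s" or "%s://s3.amazonaws.com/%s") % (protocol, aws_bucket)
    if aws_prefix:
        url = "%s/%s" % (url, aws_prefix)
    return url

print(_remote_media_url('media.example.org', aws_bucket_cname=True))       # http://media.example.org
print(_remote_media_url('my-bucket', aws_prefix='static', with_ssl=True))  # https://s3.amazonaws.com/my-bucket/static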
softwarefactory-project/distroinfo | scripts/di.py | distroinfo | def distroinfo(cargs, version=__version__):
"""
distroinfo Command-Line Interface
"""
code = 1
args = docopt(__doc__, argv=cargs)
try:
if args['--version']:
if not version:
version = 'N/A'
print(version)
code = 0
elif args['fetch']:
code = fetch(
info_url=args['<info-url>'],
info_files=args['<info-file>'],
cache_dir=args['--cache-dir'],
fetcher=args['--fetcher'],
)
elif args['dump']:
code = dump(
info_url=args['<info-url>'],
info_files=args['<info-file>'],
yaml_out=args['--yaml-out'],
json_out=args['--json-out'],
cache_dir=args['--cache-dir'],
fetcher=args['--fetcher'],
)
except (
exception.InvalidInfoFormat,
KeyboardInterrupt,
) as ex:
code = getattr(ex, 'exit_code', code)
print("")
print(str(ex) or type(ex).__name__)
return code | python | def distroinfo(cargs, version=__version__):
"""
distroinfo Command-Line Interface
"""
code = 1
args = docopt(__doc__, argv=cargs)
try:
if args['--version']:
if not version:
version = 'N/A'
print(version)
code = 0
elif args['fetch']:
code = fetch(
info_url=args['<info-url>'],
info_files=args['<info-file>'],
cache_dir=args['--cache-dir'],
fetcher=args['--fetcher'],
)
elif args['dump']:
code = dump(
info_url=args['<info-url>'],
info_files=args['<info-file>'],
yaml_out=args['--yaml-out'],
json_out=args['--json-out'],
cache_dir=args['--cache-dir'],
fetcher=args['--fetcher'],
)
except (
exception.InvalidInfoFormat,
KeyboardInterrupt,
) as ex:
code = getattr(ex, 'exit_code', code)
print("")
print(str(ex) or type(ex).__name__)
return code | [
"def",
"distroinfo",
"(",
"cargs",
",",
"version",
"=",
"__version__",
")",
":",
"code",
"=",
"1",
"args",
"=",
"docopt",
"(",
"__doc__",
",",
"argv",
"=",
"cargs",
")",
"try",
":",
"if",
"args",
"[",
"'--version'",
"]",
":",
"if",
"not",
"version",
":",
"version",
"=",
"'N/A'",
"print",
"(",
"version",
")",
"code",
"=",
"0",
"elif",
"args",
"[",
"'fetch'",
"]",
":",
"code",
"=",
"fetch",
"(",
"info_url",
"=",
"args",
"[",
"'<info-url>'",
"]",
",",
"info_files",
"=",
"args",
"[",
"'<info-file>'",
"]",
",",
"cache_dir",
"=",
"args",
"[",
"'--cache-dir'",
"]",
",",
"fetcher",
"=",
"args",
"[",
"'--fetcher'",
"]",
",",
")",
"elif",
"args",
"[",
"'dump'",
"]",
":",
"code",
"=",
"dump",
"(",
"info_url",
"=",
"args",
"[",
"'<info-url>'",
"]",
",",
"info_files",
"=",
"args",
"[",
"'<info-file>'",
"]",
",",
"yaml_out",
"=",
"args",
"[",
"'--yaml-out'",
"]",
",",
"json_out",
"=",
"args",
"[",
"'--json-out'",
"]",
",",
"cache_dir",
"=",
"args",
"[",
"'--cache-dir'",
"]",
",",
"fetcher",
"=",
"args",
"[",
"'--fetcher'",
"]",
",",
")",
"except",
"(",
"exception",
".",
"InvalidInfoFormat",
",",
"KeyboardInterrupt",
",",
")",
"as",
"ex",
":",
"code",
"=",
"getattr",
"(",
"ex",
",",
"'exit_code'",
",",
"code",
")",
"print",
"(",
"\"\"",
")",
"print",
"(",
"str",
"(",
"ex",
")",
"or",
"type",
"(",
"ex",
")",
".",
"__name__",
")",
"return",
"code"
] | distroinfo Command-Line Interface | [
"distroinfo",
"Command",
"-",
"Line",
"Interface"
] | train | https://github.com/softwarefactory-project/distroinfo/blob/86a7419232a3376157c06e70528ec627e03ff82a/scripts/di.py#L95-L132 |
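A minimal invocation sketch for the distroinfo() entry point above; only the --version branch is exercised because the script's docopt usage string is not visible in this row, and both the import path and the accepted argument form are assumptions.

# Hedged sketch: assumes scripts/ is importable and that the docopt usage
# string accepts a bare --version flag.
from di import distroinfo

exit_code = distroinfo(['--version'])  # prints the version (or 'N/A') and returns 0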
matllubos/django-is-core | is_core/rest/resource.py | RESTResourceMixin._get_error_response | def _get_error_response(self, exception):
"""
Trasform pyston exceptions to Is-core exceptions and raise it
"""
response_exceptions = {
MimerDataException: HTTPBadRequestResponseException,
NotAllowedException: HTTPForbiddenResponseException,
UnsupportedMediaTypeException: HTTPUnsupportedMediaTypeResponseException,
Http404: Http404,
ResourceNotFoundException: Http404,
NotAllowedMethodException: HTTPMethodNotAllowedResponseException,
DuplicateEntryException: HTTPDuplicateResponseException,
ConflictException: HTTPDuplicateResponseException,
}
response_exception = response_exceptions.get(type(exception))
if response_exception:
raise response_exception
return super(RESTResourceMixin, self)._get_error_response(exception) | python | def _get_error_response(self, exception):
"""
Trasform pyston exceptions to Is-core exceptions and raise it
"""
response_exceptions = {
MimerDataException: HTTPBadRequestResponseException,
NotAllowedException: HTTPForbiddenResponseException,
UnsupportedMediaTypeException: HTTPUnsupportedMediaTypeResponseException,
Http404: Http404,
ResourceNotFoundException: Http404,
NotAllowedMethodException: HTTPMethodNotAllowedResponseException,
DuplicateEntryException: HTTPDuplicateResponseException,
ConflictException: HTTPDuplicateResponseException,
}
response_exception = response_exceptions.get(type(exception))
if response_exception:
raise response_exception
return super(RESTResourceMixin, self)._get_error_response(exception) | [
"def",
"_get_error_response",
"(",
"self",
",",
"exception",
")",
":",
"response_exceptions",
"=",
"{",
"MimerDataException",
":",
"HTTPBadRequestResponseException",
",",
"NotAllowedException",
":",
"HTTPForbiddenResponseException",
",",
"UnsupportedMediaTypeException",
":",
"HTTPUnsupportedMediaTypeResponseException",
",",
"Http404",
":",
"Http404",
",",
"ResourceNotFoundException",
":",
"Http404",
",",
"NotAllowedMethodException",
":",
"HTTPMethodNotAllowedResponseException",
",",
"DuplicateEntryException",
":",
"HTTPDuplicateResponseException",
",",
"ConflictException",
":",
"HTTPDuplicateResponseException",
",",
"}",
"response_exception",
"=",
"response_exceptions",
".",
"get",
"(",
"type",
"(",
"exception",
")",
")",
"if",
"response_exception",
":",
"raise",
"response_exception",
"return",
"super",
"(",
"RESTResourceMixin",
",",
"self",
")",
".",
"_get_error_response",
"(",
"exception",
")"
] | Trasform pyston exceptions to Is-core exceptions and raise it | [
"Trasform",
"pyston",
"exceptions",
"to",
"Is",
"-",
"core",
"exceptions",
"and",
"raise",
"it"
] | train | https://github.com/matllubos/django-is-core/blob/3f87ec56a814738683c732dce5f07e0328c2300d/is_core/rest/resource.py#L138-L155 |
dgtony/afg | afg/scenarios.py | Supervisor.reprompt_error | def reprompt_error(self, message=None):
"""
Intended to be used in case of erroneous input data
"""
try:
session_id = session.sessionId
self.session_machines.rollback_fsm(session_id)
current_state = self.session_machines.current_state(session_id)
if message is None:
err_msg = choice(self._scenario_steps[current_state]['reprompt'])
else:
err_msg = message
return question(err_msg)
except UninitializedStateMachine as e:
logger.error(e)
return statement(INTERNAL_ERROR_MSG) | python | def reprompt_error(self, message=None):
"""
Intended to be used in case of erroneous input data
"""
try:
session_id = session.sessionId
self.session_machines.rollback_fsm(session_id)
current_state = self.session_machines.current_state(session_id)
if message is None:
err_msg = choice(self._scenario_steps[current_state]['reprompt'])
else:
err_msg = message
return question(err_msg)
except UninitializedStateMachine as e:
logger.error(e)
return statement(INTERNAL_ERROR_MSG) | [
"def",
"reprompt_error",
"(",
"self",
",",
"message",
"=",
"None",
")",
":",
"try",
":",
"session_id",
"=",
"session",
".",
"sessionId",
"self",
".",
"session_machines",
".",
"rollback_fsm",
"(",
"session_id",
")",
"current_state",
"=",
"self",
".",
"session_machines",
".",
"current_state",
"(",
"session_id",
")",
"if",
"message",
"is",
"None",
":",
"err_msg",
"=",
"choice",
"(",
"self",
".",
"_scenario_steps",
"[",
"current_state",
"]",
"[",
"'reprompt'",
"]",
")",
"else",
":",
"err_msg",
"=",
"message",
"return",
"question",
"(",
"err_msg",
")",
"except",
"UninitializedStateMachine",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"e",
")",
"return",
"statement",
"(",
"INTERNAL_ERROR_MSG",
")"
] | Intended to be used in case of erroneous input data | [
"Intended",
"to",
"be",
"used",
"in",
"case",
"of",
"erroneous",
"input",
"data"
] | train | https://github.com/dgtony/afg/blob/831ad4c6d158610393dc21e74277d4ed6a004afd/afg/scenarios.py#L116-L131 |
dgtony/afg | afg/scenarios.py | Supervisor.move_to_step | def move_to_step(self, step):
"""
Use in cases when you need to move in given step depending on input
"""
if step not in self._scenario_steps.keys():
raise UndefinedState("step {} not defined in scenario".format(step))
try:
session_id = session.sessionId
self.session_machines.set_state(session_id, step)
except UninitializedStateMachine as e:
logger.error(e)
return statement(INTERNAL_ERROR_MSG) | python | def move_to_step(self, step):
"""
Use in cases when you need to move in given step depending on input
"""
if step not in self._scenario_steps.keys():
raise UndefinedState("step {} not defined in scenario".format(step))
try:
session_id = session.sessionId
self.session_machines.set_state(session_id, step)
except UninitializedStateMachine as e:
logger.error(e)
return statement(INTERNAL_ERROR_MSG) | [
"def",
"move_to_step",
"(",
"self",
",",
"step",
")",
":",
"if",
"step",
"not",
"in",
"self",
".",
"_scenario_steps",
".",
"keys",
"(",
")",
":",
"raise",
"UndefinedState",
"(",
"\"step {} not defined in scenario\"",
".",
"format",
"(",
"step",
")",
")",
"try",
":",
"session_id",
"=",
"session",
".",
"sessionId",
"self",
".",
"session_machines",
".",
"set_state",
"(",
"session_id",
",",
"step",
")",
"except",
"UninitializedStateMachine",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"e",
")",
"return",
"statement",
"(",
"INTERNAL_ERROR_MSG",
")"
] | Use in cases when you need to move in given step depending on input | [
"Use",
"in",
"cases",
"when",
"you",
"need",
"to",
"move",
"in",
"given",
"step",
"depending",
"on",
"input"
] | train | https://github.com/dgtony/afg/blob/831ad4c6d158610393dc21e74277d4ed6a004afd/afg/scenarios.py#L133-L144 |
dgtony/afg | afg/scenarios.py | Supervisor.get_current_state | def get_current_state(self):
"""
Get current state for user session or None if session doesn't exist
"""
try:
session_id = session.sessionId
return self.session_machines.current_state(session_id)
except UninitializedStateMachine as e:
logger.error(e) | python | def get_current_state(self):
"""
Get current state for user session or None if session doesn't exist
"""
try:
session_id = session.sessionId
return self.session_machines.current_state(session_id)
except UninitializedStateMachine as e:
logger.error(e) | [
"def",
"get_current_state",
"(",
"self",
")",
":",
"try",
":",
"session_id",
"=",
"session",
".",
"sessionId",
"return",
"self",
".",
"session_machines",
".",
"current_state",
"(",
"session_id",
")",
"except",
"UninitializedStateMachine",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"e",
")"
] | Get current state for user session or None if session doesn't exist | [
"Get",
"current",
"state",
"for",
"user",
"session",
"or",
"None",
"if",
"session",
"doesn",
"t",
"exist"
] | train | https://github.com/dgtony/afg/blob/831ad4c6d158610393dc21e74277d4ed6a004afd/afg/scenarios.py#L146-L154 |
dgtony/afg | afg/scenarios.py | Supervisor.get_help | def get_help(self):
"""
Get context help, depending on the current step. If no help for current step
was specified in scenario description file, default one will be returned.
"""
current_state = self.get_current_state()
if current_state is None:
return statement(INTERNAL_ERROR_MSG)
else:
try:
return choice(self._scenario_steps[current_state]['help'])
except KeyError:
return choice(self._default_help) | python | def get_help(self):
"""
Get context help, depending on the current step. If no help for current step
was specified in scenario description file, default one will be returned.
"""
current_state = self.get_current_state()
if current_state is None:
return statement(INTERNAL_ERROR_MSG)
else:
try:
return choice(self._scenario_steps[current_state]['help'])
except KeyError:
return choice(self._default_help) | [
"def",
"get_help",
"(",
"self",
")",
":",
"current_state",
"=",
"self",
".",
"get_current_state",
"(",
")",
"if",
"current_state",
"is",
"None",
":",
"return",
"statement",
"(",
"INTERNAL_ERROR_MSG",
")",
"else",
":",
"try",
":",
"return",
"choice",
"(",
"self",
".",
"_scenario_steps",
"[",
"current_state",
"]",
"[",
"'help'",
"]",
")",
"except",
"KeyError",
":",
"return",
"choice",
"(",
"self",
".",
"_default_help",
")"
] | Get context help, depending on the current step. If no help for current step
was specified in scenario description file, default one will be returned. | [
"Get",
"context",
"help",
"depending",
"on",
"the",
"current",
"step",
".",
"If",
"no",
"help",
"for",
"current",
"step",
"was",
"specified",
"in",
"scenario",
"description",
"file",
"default",
"one",
"will",
"be",
"returned",
"."
] | train | https://github.com/dgtony/afg/blob/831ad4c6d158610393dc21e74277d4ed6a004afd/afg/scenarios.py#L156-L168 |
confirm/ansibleci | ansibleci/runner.py | Runner.run | def run(self):
'''
Runs all enabled tests.
'''
# Run all tests.
for cls in self.get_test_classes():
# Print informational message.
self.logger.info('Running {cls.__name__} test...'.format(cls=cls))
# Create new test instance.
test = cls(runner=self)
# Run test and evaluate result.
if test._run():
self.logger.passed('Test {cls.__name__} succeeded!'.format(cls=cls))
else:
self.logger.failed('Test {cls.__name__} failed!'.format(cls=cls))
self.has_passed = False
# Print summary.
if self.has_passed:
self.logger.passed('Summary: All tests passed!')
else:
self.logger.failed('Summary: One or more tests failed!')
return self.has_passed | python | def run(self):
'''
Runs all enabled tests.
'''
# Run all tests.
for cls in self.get_test_classes():
# Print informational message.
self.logger.info('Running {cls.__name__} test...'.format(cls=cls))
# Create new test instance.
test = cls(runner=self)
# Run test and evaluate result.
if test._run():
self.logger.passed('Test {cls.__name__} succeeded!'.format(cls=cls))
else:
self.logger.failed('Test {cls.__name__} failed!'.format(cls=cls))
self.has_passed = False
# Print summary.
if self.has_passed:
self.logger.passed('Summary: All tests passed!')
else:
self.logger.failed('Summary: One or more tests failed!')
return self.has_passed | [
"def",
"run",
"(",
"self",
")",
":",
"# Run all tests.",
"for",
"cls",
"in",
"self",
".",
"get_test_classes",
"(",
")",
":",
"# Print informational message.",
"self",
".",
"logger",
".",
"info",
"(",
"'Running {cls.__name__} test...'",
".",
"format",
"(",
"cls",
"=",
"cls",
")",
")",
"# Create new test instance.",
"test",
"=",
"cls",
"(",
"runner",
"=",
"self",
")",
"# Run test and evaluate result.",
"if",
"test",
".",
"_run",
"(",
")",
":",
"self",
".",
"logger",
".",
"passed",
"(",
"'Test {cls.__name__} succeeded!'",
".",
"format",
"(",
"cls",
"=",
"cls",
")",
")",
"else",
":",
"self",
".",
"logger",
".",
"failed",
"(",
"'Test {cls.__name__} failed!'",
".",
"format",
"(",
"cls",
"=",
"cls",
")",
")",
"self",
".",
"has_passed",
"=",
"False",
"# Print summary.",
"if",
"self",
".",
"has_passed",
":",
"self",
".",
"logger",
".",
"passed",
"(",
"'Summary: All tests passed!'",
")",
"else",
":",
"self",
".",
"logger",
".",
"failed",
"(",
"'Summary: One or more tests failed!'",
")",
"return",
"self",
".",
"has_passed"
] | Runs all enabled tests. | [
"Runs",
"all",
"enabled",
"tests",
"."
] | train | https://github.com/confirm/ansibleci/blob/6a53ae8c4a4653624977e146092422857f661b8f/ansibleci/runner.py#L71-L97 |
xapple/plumbing | plumbing/dependencies.py | check_executable | def check_executable(tool_name):
"""Raises an warning if the executable *tool_name* is not found."""
result = subprocess.call(['which', tool_name], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if result != 0:
message = "The executable '%s' cannot be found in your $PATH" % tool_name
raise Exception(message) | python | def check_executable(tool_name):
"""Raises an warning if the executable *tool_name* is not found."""
result = subprocess.call(['which', tool_name], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if result != 0:
message = "The executable '%s' cannot be found in your $PATH" % tool_name
raise Exception(message) | [
"def",
"check_executable",
"(",
"tool_name",
")",
":",
"result",
"=",
"subprocess",
".",
"call",
"(",
"[",
"'which'",
",",
"tool_name",
"]",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
"if",
"result",
"!=",
"0",
":",
"message",
"=",
"\"The executable '%s' cannot be found in your $PATH\"",
"%",
"tool_name",
"raise",
"Exception",
"(",
"message",
")"
] | Raises an warning if the executable *tool_name* is not found. | [
"Raises",
"an",
"warning",
"if",
"the",
"executable",
"*",
"tool_name",
"*",
"is",
"not",
"found",
"."
] | train | https://github.com/xapple/plumbing/blob/4a7706c7722f5996d0ca366f191aff9ac145880a/plumbing/dependencies.py#L20-L25 |
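Usage sketch for check_executable above; the import path follows this row's file path (plumbing/dependencies.py).

from plumbing.dependencies import check_executable  # path taken from this row

check_executable('git')  # silent when `which git` succeeds
try:
    check_executable('definitely-not-a-real-tool')
except Exception as err:
    print(err)  # The executable '...' cannot be found in your $PATH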
xapple/plumbing | plumbing/dependencies.py | check_module | def check_module(mod_name):
"""Calls sys.exit() if the module *mod_name* is not found."""
# Special cases #
if mod_name in module_name: mod_name = module_name[mod_name]
# Use a try except block #
try:
__import__(mod_name)
except ImportError as e:
if str(e) != 'No module named %s' % mod_name: raise e
print 'You do not seem to have the "%s" package properly installed.' \
' Either you never installed it or your $PYTHONPATH is not set up correctly.' \
' For more instructions see the README file. (%s)' % (mod_name, e)
sys.exit() | python | def check_module(mod_name):
"""Calls sys.exit() if the module *mod_name* is not found."""
# Special cases #
if mod_name in module_name: mod_name = module_name[mod_name]
# Use a try except block #
try:
__import__(mod_name)
except ImportError as e:
if str(e) != 'No module named %s' % mod_name: raise e
print 'You do not seem to have the "%s" package properly installed.' \
' Either you never installed it or your $PYTHONPATH is not set up correctly.' \
' For more instructions see the README file. (%s)' % (mod_name, e)
sys.exit() | [
"def",
"check_module",
"(",
"mod_name",
")",
":",
"# Special cases #",
"if",
"mod_name",
"in",
"module_name",
":",
"mod_name",
"=",
"module_name",
"[",
"mod_name",
"]",
"# Use a try except block #",
"try",
":",
"__import__",
"(",
"mod_name",
")",
"except",
"ImportError",
"as",
"e",
":",
"if",
"str",
"(",
"e",
")",
"!=",
"'No module named %s'",
"%",
"mod_name",
":",
"raise",
"e",
"print",
"'You do not seem to have the \"%s\" package properly installed.'",
"' Either you never installed it or your $PYTHONPATH is not set up correctly.'",
"' For more instructions see the README file. (%s)'",
"%",
"(",
"mod_name",
",",
"e",
")",
"sys",
".",
"exit",
"(",
")"
] | Calls sys.exit() if the module *mod_name* is not found. | [
"Calls",
"sys",
".",
"exit",
"()",
"if",
"the",
"module",
"*",
"mod_name",
"*",
"is",
"not",
"found",
"."
] | train | https://github.com/xapple/plumbing/blob/4a7706c7722f5996d0ca366f191aff9ac145880a/plumbing/dependencies.py#L28-L40 |
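The function above uses Python 2 print statements, so it only runs under Python 2; a hedged Python 3 rendition of the same check is sketched below (the module_name alias table and the special-case re-raise are omitted for brevity).

import sys

def check_module_py3(mod_name):
    """Exit if the module *mod_name* cannot be imported (Python 3 variant)."""
    try:
        __import__(mod_name)
    except ImportError as err:
        print('You do not seem to have the "%s" package properly installed. (%s)'
              % (mod_name, err))
        sys.exit()

check_module_py3('json')  # standard-library module, passes silently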
inveniosoftware/invenio-iiif | invenio_iiif/utils.py | iiif_image_key | def iiif_image_key(obj):
"""Generate the IIIF image key."""
if isinstance(obj, ObjectVersion):
bucket_id = obj.bucket_id
version_id = obj.version_id
key = obj.key
else:
bucket_id = obj.get('bucket')
version_id = obj.get('version_id')
key = obj.get('key')
return u'{}:{}:{}'.format(
bucket_id,
version_id,
key,
) | python | def iiif_image_key(obj):
"""Generate the IIIF image key."""
if isinstance(obj, ObjectVersion):
bucket_id = obj.bucket_id
version_id = obj.version_id
key = obj.key
else:
bucket_id = obj.get('bucket')
version_id = obj.get('version_id')
key = obj.get('key')
return u'{}:{}:{}'.format(
bucket_id,
version_id,
key,
) | [
"def",
"iiif_image_key",
"(",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"ObjectVersion",
")",
":",
"bucket_id",
"=",
"obj",
".",
"bucket_id",
"version_id",
"=",
"obj",
".",
"version_id",
"key",
"=",
"obj",
".",
"key",
"else",
":",
"bucket_id",
"=",
"obj",
".",
"get",
"(",
"'bucket'",
")",
"version_id",
"=",
"obj",
".",
"get",
"(",
"'version_id'",
")",
"key",
"=",
"obj",
".",
"get",
"(",
"'key'",
")",
"return",
"u'{}:{}:{}'",
".",
"format",
"(",
"bucket_id",
",",
"version_id",
",",
"key",
",",
")"
] | Generate the IIIF image key. | [
"Generate",
"the",
"IIIF",
"image",
"key",
"."
] | train | https://github.com/inveniosoftware/invenio-iiif/blob/e4f2f93eaabdc8e2efea81c239ab76d481191959/invenio_iiif/utils.py#L18-L32 |
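An illustrative call to iiif_image_key above using a plain dict (the function only formats its inputs); the bucket, version and key values are made up.

key = iiif_image_key({'bucket': 'b1', 'version_id': 'v1', 'key': 'photo.png'})
print(key)  # b1:v1:photo.png  (bucket:version:key)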
inveniosoftware/invenio-iiif | invenio_iiif/utils.py | ui_iiif_image_url | def ui_iiif_image_url(obj, version='v2', region='full', size='full',
rotation=0, quality='default', image_format='png'):
"""Generate IIIF image URL from the UI application."""
return u'{prefix}{version}/{identifier}/{region}/{size}/{rotation}/' \
u'{quality}.{image_format}'.format(
prefix=current_app.config['IIIF_UI_URL'],
version=version,
identifier=quote(
iiif_image_key(obj).encode('utf8'), safe=':'),
region=region,
size=size,
rotation=rotation,
quality=quality,
image_format=image_format,
) | python | def ui_iiif_image_url(obj, version='v2', region='full', size='full',
rotation=0, quality='default', image_format='png'):
"""Generate IIIF image URL from the UI application."""
return u'{prefix}{version}/{identifier}/{region}/{size}/{rotation}/' \
u'{quality}.{image_format}'.format(
prefix=current_app.config['IIIF_UI_URL'],
version=version,
identifier=quote(
iiif_image_key(obj).encode('utf8'), safe=':'),
region=region,
size=size,
rotation=rotation,
quality=quality,
image_format=image_format,
) | [
"def",
"ui_iiif_image_url",
"(",
"obj",
",",
"version",
"=",
"'v2'",
",",
"region",
"=",
"'full'",
",",
"size",
"=",
"'full'",
",",
"rotation",
"=",
"0",
",",
"quality",
"=",
"'default'",
",",
"image_format",
"=",
"'png'",
")",
":",
"return",
"u'{prefix}{version}/{identifier}/{region}/{size}/{rotation}/'",
"u'{quality}.{image_format}'",
".",
"format",
"(",
"prefix",
"=",
"current_app",
".",
"config",
"[",
"'IIIF_UI_URL'",
"]",
",",
"version",
"=",
"version",
",",
"identifier",
"=",
"quote",
"(",
"iiif_image_key",
"(",
"obj",
")",
".",
"encode",
"(",
"'utf8'",
")",
",",
"safe",
"=",
"':'",
")",
",",
"region",
"=",
"region",
",",
"size",
"=",
"size",
",",
"rotation",
"=",
"rotation",
",",
"quality",
"=",
"quality",
",",
"image_format",
"=",
"image_format",
",",
")"
] | Generate IIIF image URL from the UI application. | [
"Generate",
"IIIF",
"image",
"URL",
"from",
"the",
"UI",
"application",
"."
] | train | https://github.com/inveniosoftware/invenio-iiif/blob/e4f2f93eaabdc8e2efea81c239ab76d481191959/invenio_iiif/utils.py#L35-L49 |
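A hedged sketch of the URL produced by ui_iiif_image_url above; the IIIF_UI_URL value and the object fields are invented, and an application context is required because the function reads current_app.config.

from flask import Flask
from invenio_iiif.utils import ui_iiif_image_url  # module path taken from this row

app = Flask(__name__)
app.config['IIIF_UI_URL'] = 'https://example.org/iiif/'  # assumed config value

with app.app_context():
    url = ui_iiif_image_url({'bucket': 'b1', 'version_id': 'v1', 'key': 'photo.png'},
                            size='!300,300')
    print(url)  # https://example.org/iiif/v2/b1:v1:photo.png/full/!300,300/0/default.png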
IwoHerka/sexpr | sexpr/types/non_terminal.py | NonTerminal.matches | def matches(self, sexp):
'''
Body of a non-terminal is always a :class:`Sequence`. For an s-expr
to match, it must be of the form::
['name'] + [sexpr-0, ..., sexpr-n]
where the first list contains a name of the non-terminal,
and the second one matches its body sequence.
'''
if sexp and isinstance(sexp, list) and self.name == sexp[0]:
return self.body.matches(sexp[1:])
return False | python | def matches(self, sexp):
'''
Body of a non-terminal is always a :class:`Sequence`. For an s-expr
to match, it must be of the form::
['name'] + [sexpr-0, ..., sexpr-n]
where the first list contains a name of the non-terminal,
and the second one matches its body sequence.
'''
if sexp and isinstance(sexp, list) and self.name == sexp[0]:
return self.body.matches(sexp[1:])
return False | [
"def",
"matches",
"(",
"self",
",",
"sexp",
")",
":",
"if",
"sexp",
"and",
"isinstance",
"(",
"sexp",
",",
"list",
")",
"and",
"self",
".",
"name",
"==",
"sexp",
"[",
"0",
"]",
":",
"return",
"self",
".",
"body",
".",
"matches",
"(",
"sexp",
"[",
"1",
":",
"]",
")",
"return",
"False"
] | Body of a non-terminal is always a :class:`Sequence`. For an s-expr
to match, it must be of the form::
['name'] + [sexpr-0, ..., sexpr-n]
where the first list contains a name of the non-terminal,
and the second one matches its body sequence. | [
"Body",
"of",
"a",
"non",
"-",
"terminal",
"is",
"always",
"a",
":",
"class",
":",
"Sequence",
".",
"For",
"an",
"s",
"-",
"expr",
"to",
"match",
"it",
"must",
"be",
"of",
"the",
"form",
"::"
] | train | https://github.com/IwoHerka/sexpr/blob/28e32f543a127bbbf832b2dba7cb93f9e57db3b6/sexpr/types/non_terminal.py#L9-L21 |
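A standalone illustration of the matching rule documented above, using a plain function and a lambda in place of the library's NonTerminal and Sequence classes.

def matches_nonterminal(name, body_matches, sexp):
    # Mirrors NonTerminal.matches above: the head must equal the rule name
    # and the tail must satisfy the body sequence.
    if sexp and isinstance(sexp, list) and name == sexp[0]:
        return body_matches(sexp[1:])
    return False

body = lambda tail: len(tail) == 2  # stand-in for a body Sequence of two items
print(matches_nonterminal('assign', body, ['assign', 'x', 1]))  # True
print(matches_nonterminal('assign', body, ['let', 'x', 1]))     # False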
inveniosoftware/invenio-iiif | invenio_iiif/previewer.py | preview | def preview(file):
"""Render appropriate template with embed flag."""
params = deepcopy(current_app.config['IIIF_PREVIEWER_PARAMS'])
if 'image_format' not in params:
params['image_format'] = \
'png' if file.has_extensions('.png') else 'jpg'
return render_template(
current_app.config['IIIF_PREVIEW_TEMPLATE'],
file=file,
file_url=ui_iiif_image_url(
file.file,
**params
)
) | python | def preview(file):
"""Render appropriate template with embed flag."""
params = deepcopy(current_app.config['IIIF_PREVIEWER_PARAMS'])
if 'image_format' not in params:
params['image_format'] = \
'png' if file.has_extensions('.png') else 'jpg'
return render_template(
current_app.config['IIIF_PREVIEW_TEMPLATE'],
file=file,
file_url=ui_iiif_image_url(
file.file,
**params
)
) | [
"def",
"preview",
"(",
"file",
")",
":",
"params",
"=",
"deepcopy",
"(",
"current_app",
".",
"config",
"[",
"'IIIF_PREVIEWER_PARAMS'",
"]",
")",
"if",
"'image_format'",
"not",
"in",
"params",
":",
"params",
"[",
"'image_format'",
"]",
"=",
"'png'",
"if",
"file",
".",
"has_extensions",
"(",
"'.png'",
")",
"else",
"'jpg'",
"return",
"render_template",
"(",
"current_app",
".",
"config",
"[",
"'IIIF_PREVIEW_TEMPLATE'",
"]",
",",
"file",
"=",
"file",
",",
"file_url",
"=",
"ui_iiif_image_url",
"(",
"file",
".",
"file",
",",
"*",
"*",
"params",
")",
")"
] | Render appropriate template with embed flag. | [
"Render",
"appropriate",
"template",
"with",
"embed",
"flag",
"."
] | train | https://github.com/inveniosoftware/invenio-iiif/blob/e4f2f93eaabdc8e2efea81c239ab76d481191959/invenio_iiif/previewer.py#L35-L48 |
intelligenia/modeltranslation | modeltranslation/templatetags/modeltranslation_tags.py | get_translated_attribute | def get_translated_attribute(instance, attr):
"""
Wraps Django Model __getattribute__ method making translation in templates less painful
"""
# If its class has no translatable fields, returns attribute
try:
if not hasattr(instance._meta, "translatable_fields") or len(getattr(instance._meta,"translatable_fields"))==0:
return getattr(instance, attr)
except AttributeError:
return instance
# Translatable fields of this instance
translatable_fields = instance._meta.translatable_fields
# Current language
cur_language = get_language()
lang = cur_language.title().lower()
# If current language is default language, returns attribute
if lang == settings.LANGUAGE_CODE:
return getattr(instance, attr)
# Otherwise, if a translation is NOT needed for attr atribute, get attribute
if not attr in translatable_fields:
return getattr(instance, attr)
# Gets field translations of this instance and return the translated attribute
field_translation = _get_fieldtranslations(instance, field=attr, lang=lang)
if field_translation:
if not field_translation.is_fuzzy:
return field_translation.translation
return getattr(instance, attr) | python | def get_translated_attribute(instance, attr):
"""
Wraps Django Model __getattribute__ method making translation in templates less painful
"""
# If its class has no translatable fields, returns attribute
try:
if not hasattr(instance._meta, "translatable_fields") or len(getattr(instance._meta,"translatable_fields"))==0:
return getattr(instance, attr)
except AttributeError:
return instance
# Translatable fields of this instance
translatable_fields = instance._meta.translatable_fields
# Current language
cur_language = get_language()
lang = cur_language.title().lower()
# If current language is default language, returns attribute
if lang == settings.LANGUAGE_CODE:
return getattr(instance, attr)
# Otherwise, if a translation is NOT needed for attr atribute, get attribute
if not attr in translatable_fields:
return getattr(instance, attr)
# Gets field translations of this instance and return the translated attribute
field_translation = _get_fieldtranslations(instance, field=attr, lang=lang)
if field_translation:
if not field_translation.is_fuzzy:
return field_translation.translation
return getattr(instance, attr) | [
"def",
"get_translated_attribute",
"(",
"instance",
",",
"attr",
")",
":",
"# If its class has no translatable fields, returns attribute",
"try",
":",
"if",
"not",
"hasattr",
"(",
"instance",
".",
"_meta",
",",
"\"translatable_fields\"",
")",
"or",
"len",
"(",
"getattr",
"(",
"instance",
".",
"_meta",
",",
"\"translatable_fields\"",
")",
")",
"==",
"0",
":",
"return",
"getattr",
"(",
"instance",
",",
"attr",
")",
"except",
"AttributeError",
":",
"return",
"instance",
"# Translatable fields of this instance",
"translatable_fields",
"=",
"instance",
".",
"_meta",
".",
"translatable_fields",
"# Current language",
"cur_language",
"=",
"get_language",
"(",
")",
"lang",
"=",
"cur_language",
".",
"title",
"(",
")",
".",
"lower",
"(",
")",
"# If current language is default language, returns attribute",
"if",
"lang",
"==",
"settings",
".",
"LANGUAGE_CODE",
":",
"return",
"getattr",
"(",
"instance",
",",
"attr",
")",
"# Otherwise, if a translation is NOT needed for attr atribute, get attribute",
"if",
"not",
"attr",
"in",
"translatable_fields",
":",
"return",
"getattr",
"(",
"instance",
",",
"attr",
")",
"# Gets field translations of this instance and return the translated attribute",
"field_translation",
"=",
"_get_fieldtranslations",
"(",
"instance",
",",
"field",
"=",
"attr",
",",
"lang",
"=",
"lang",
")",
"if",
"field_translation",
":",
"if",
"not",
"field_translation",
".",
"is_fuzzy",
":",
"return",
"field_translation",
".",
"translation",
"return",
"getattr",
"(",
"instance",
",",
"attr",
")"
] | Wraps Django Model __getattribute__ method making translation in templates less painful | [
"Wraps",
"Django",
"Model",
"__getattribute__",
"method",
"making",
"translation",
"in",
"templates",
"less",
"painful"
] | train | https://github.com/intelligenia/modeltranslation/blob/64d6adeb537747321d5020efedf5d7e0d135862d/modeltranslation/templatetags/modeltranslation_tags.py#L15-L47 |
The-Politico/django-slackchat-serializer | slackchat/authentication.py | secure | def secure(view):
"""Set an auth decorator applied for views.
If DEBUG is on, we serve the view without authenticating.
Default is 'django.contrib.auth.decorators.login_required'.
Can also be 'django.contrib.admin.views.decorators.staff_member_required'
or a custom decorator.
"""
AUTH = getattr(
settings,
'SLACKCHAT_AUTH_DECORATOR',
'django.contrib.admin.views.decorators.staff_member_required'
)
auth_decorator = import_class(AUTH)
return method_decorator(auth_decorator, name='dispatch')(view) | python | def secure(view):
"""Set an auth decorator applied for views.
If DEBUG is on, we serve the view without authenticating.
Default is 'django.contrib.auth.decorators.login_required'.
Can also be 'django.contrib.admin.views.decorators.staff_member_required'
or a custom decorator.
"""
AUTH = getattr(
settings,
'SLACKCHAT_AUTH_DECORATOR',
'django.contrib.admin.views.decorators.staff_member_required'
)
auth_decorator = import_class(AUTH)
return method_decorator(auth_decorator, name='dispatch')(view) | [
"def",
"secure",
"(",
"view",
")",
":",
"AUTH",
"=",
"getattr",
"(",
"settings",
",",
"'SLACKCHAT_AUTH_DECORATOR'",
",",
"'django.contrib.admin.views.decorators.staff_member_required'",
")",
"auth_decorator",
"=",
"import_class",
"(",
"AUTH",
")",
"return",
"method_decorator",
"(",
"auth_decorator",
",",
"name",
"=",
"'dispatch'",
")",
"(",
"view",
")"
] | Set an auth decorator applied for views.
If DEBUG is on, we serve the view without authenticating.
Default is 'django.contrib.auth.decorators.login_required'.
Can also be 'django.contrib.admin.views.decorators.staff_member_required'
or a custom decorator. | [
"Set",
"an",
"auth",
"decorator",
"applied",
"for",
"views",
".",
"If",
"DEBUG",
"is",
"on",
"we",
"serve",
"the",
"view",
"without",
"authenticating",
".",
"Default",
"is",
"django",
".",
"contrib",
".",
"auth",
".",
"decorators",
".",
"login_required",
".",
"Can",
"also",
"be",
"django",
".",
"contrib",
".",
"admin",
".",
"views",
".",
"decorators",
".",
"staff_member_required",
"or",
"a",
"custom",
"decorator",
"."
] | train | https://github.com/The-Politico/django-slackchat-serializer/blob/9a41e0477d1bc7bb2ec3f8af40baddf8d4230d40/slackchat/authentication.py#L46-L59 |
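A hedged sketch of wrapping a class-based view with secure() above inside a Django project's URLconf; the view class and route are hypothetical, and with the default setting shown above the decorator applied is Django's staff_member_required.

from django.urls import path
from django.views.generic import TemplateView
from slackchat.authentication import secure  # module path taken from this row

class ChannelView(TemplateView):  # hypothetical view
    template_name = 'channel.html'

urlpatterns = [
    path('channel/', secure(ChannelView).as_view()),  # dispatch() now requires staff login
]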
intelligenia/modeltranslation | modeltranslation/views.py | import_translations | def import_translations(request, language):
"""
Importa las traducciones a partir de un archivo PO. Ten en cuenta
que el archivo PO ha de ser generado desde esta aplicación, de forma
que los comentarios sirvan como id de traducción (lo metemos nosotros
en la exportación).
"""
def _import_po_file(uploadedfile, lang):
lines = []
for line in uploadedfile:
lines.append(line)
num_lines = len(lines)
prog_ctxt = re.compile(r"msgctxt\s+\"(?P<id>\d+)--(?P<model>\w+)--(?P<object_id>\d+)--(?P<field>\w+)\"")
prog_msgid = re.compile(r"msgid\s+\"(?P<source_text>.+)\"$")
prog_msgstr = re.compile(r"msgstr\s+(?P<trans>.+)")
i = 0
while i < num_lines:
line = lines[i]
result = prog_ctxt.match(line)
if result:
id = result.group("id")
is_fuzzy = (lines[i-1] == "#, fuzzy\n")
source_text = lines[i+1]
translation_line = lines[i+2]
# Traducción
g = prog_msgstr.match(translation_line)
if g is None:
i += 1
continue
translation = g.group("trans").replace("msgstr","")[1:-1].replace("\\\"","\"").replace('\\\'','\'')
# Obtención de la traducción a partir del id
try:
field_trans = FieldTranslation.objects.get(id=id)
except FieldTranslation.DoesNotExist:
source_text = source_text.replace("msgid","")[1:-1].replace("\\\"","\"").replace('\\\'','\'')
source_md5 = hashlib.md5(source_text.encode("utf-8")).hexdigest()
field_trans = FieldTranslation(model=result.group("model"), object_id=result.group("object_id"), field=result.group("field"), lang=lang, source_text=source_text, source_md5=source_md5)
# Establecemos la traducción y si es fuzzy
field_trans.translation = translation
field_trans.is_fuzzy = is_fuzzy
field_trans.save()
#print translation
#print is_fuzzy
i += 4
i += 1
# Elimina traducciones que no estén asociadas a ningún objeto
FieldTranslation.delete_orphan_translations()
# Acceso obligatoriamente por POST
if request.method != "POST":
return HttpResponseRedirect(reverse("modeltranslation:admin_url"))
form = ImportTranslationsForm(request.POST, request.FILES)
if form.is_valid():
_import_po_file(request.FILES['file'], language)
#cache = TransCache.factory()
#cache.clear()
return HttpResponseRedirect(reverse("modeltranslation:view_all_url",args=(language,"all")))
return HttpResponseRedirect(reverse("modeltranslation:admin_url")) | python | def import_translations(request, language):
"""
Importa las traducciones a partir de un archivo PO. Ten en cuenta
que el archivo PO ha de ser generado desde esta aplicación, de forma
que los comentarios sirvan como id de traducción (lo metemos nosotros
en la exportación).
"""
def _import_po_file(uploadedfile, lang):
lines = []
for line in uploadedfile:
lines.append(line)
num_lines = len(lines)
prog_ctxt = re.compile(r"msgctxt\s+\"(?P<id>\d+)--(?P<model>\w+)--(?P<object_id>\d+)--(?P<field>\w+)\"")
prog_msgid = re.compile(r"msgid\s+\"(?P<source_text>.+)\"$")
prog_msgstr = re.compile(r"msgstr\s+(?P<trans>.+)")
i = 0
while i < num_lines:
line = lines[i]
result = prog_ctxt.match(line)
if result:
id = result.group("id")
is_fuzzy = (lines[i-1] == "#, fuzzy\n")
source_text = lines[i+1]
translation_line = lines[i+2]
# Traducción
g = prog_msgstr.match(translation_line)
if g is None:
i += 1
continue
translation = g.group("trans").replace("msgstr","")[1:-1].replace("\\\"","\"").replace('\\\'','\'')
# Obtención de la traducción a partir del id
try:
field_trans = FieldTranslation.objects.get(id=id)
except FieldTranslation.DoesNotExist:
source_text = source_text.replace("msgid","")[1:-1].replace("\\\"","\"").replace('\\\'','\'')
source_md5 = hashlib.md5(source_text.encode("utf-8")).hexdigest()
field_trans = FieldTranslation(model=result.group("model"), object_id=result.group("object_id"), field=result.group("field"), lang=lang, source_text=source_text, source_md5=source_md5)
# Establecemos la traducción y si es fuzzy
field_trans.translation = translation
field_trans.is_fuzzy = is_fuzzy
field_trans.save()
#print translation
#print is_fuzzy
i += 4
i += 1
# Elimina traducciones que no estén asociadas a ningún objeto
FieldTranslation.delete_orphan_translations()
# Acceso obligatoriamente por POST
if request.method != "POST":
return HttpResponseRedirect(reverse("modeltranslation:admin_url"))
form = ImportTranslationsForm(request.POST, request.FILES)
if form.is_valid():
_import_po_file(request.FILES['file'], language)
#cache = TransCache.factory()
#cache.clear()
return HttpResponseRedirect(reverse("modeltranslation:view_all_url",args=(language,"all")))
return HttpResponseRedirect(reverse("modeltranslation:admin_url")) | [
"def",
"import_translations",
"(",
"request",
",",
"language",
")",
":",
"def",
"_import_po_file",
"(",
"uploadedfile",
",",
"lang",
")",
":",
"lines",
"=",
"[",
"]",
"for",
"line",
"in",
"uploadedfile",
":",
"lines",
".",
"append",
"(",
"line",
")",
"num_lines",
"=",
"len",
"(",
"lines",
")",
"prog_ctxt",
"=",
"re",
".",
"compile",
"(",
"r\"msgctxt\\s+\\\"(?P<id>\\d+)--(?P<model>\\w+)--(?P<object_id>\\d+)--(?P<field>\\w+)\\\"\"",
")",
"prog_msgid",
"=",
"re",
".",
"compile",
"(",
"r\"msgid\\s+\\\"(?P<source_text>.+)\\\"$\"",
")",
"prog_msgstr",
"=",
"re",
".",
"compile",
"(",
"r\"msgstr\\s+(?P<trans>.+)\"",
")",
"i",
"=",
"0",
"while",
"i",
"<",
"num_lines",
":",
"line",
"=",
"lines",
"[",
"i",
"]",
"result",
"=",
"prog_ctxt",
".",
"match",
"(",
"line",
")",
"if",
"result",
":",
"id",
"=",
"result",
".",
"group",
"(",
"\"id\"",
")",
"is_fuzzy",
"=",
"(",
"lines",
"[",
"i",
"-",
"1",
"]",
"==",
"\"#, fuzzy\\n\"",
")",
"source_text",
"=",
"lines",
"[",
"i",
"+",
"1",
"]",
"translation_line",
"=",
"lines",
"[",
"i",
"+",
"2",
"]",
"# Traducción",
"g",
"=",
"prog_msgstr",
".",
"match",
"(",
"translation_line",
")",
"if",
"g",
"is",
"None",
":",
"i",
"+=",
"1",
"continue",
"translation",
"=",
"g",
".",
"group",
"(",
"\"trans\"",
")",
".",
"replace",
"(",
"\"msgstr\"",
",",
"\"\"",
")",
"[",
"1",
":",
"-",
"1",
"]",
".",
"replace",
"(",
"\"\\\\\\\"\"",
",",
"\"\\\"\"",
")",
".",
"replace",
"(",
"'\\\\\\''",
",",
"'\\''",
")",
"# Obtención de la traducción a partir del id",
"try",
":",
"field_trans",
"=",
"FieldTranslation",
".",
"objects",
".",
"get",
"(",
"id",
"=",
"id",
")",
"except",
"FieldTranslation",
".",
"DoesNotExist",
":",
"source_text",
"=",
"source_text",
".",
"replace",
"(",
"\"msgid\"",
",",
"\"\"",
")",
"[",
"1",
":",
"-",
"1",
"]",
".",
"replace",
"(",
"\"\\\\\\\"\"",
",",
"\"\\\"\"",
")",
".",
"replace",
"(",
"'\\\\\\''",
",",
"'\\''",
")",
"source_md5",
"=",
"hashlib",
".",
"md5",
"(",
"source_text",
".",
"encode",
"(",
"\"utf-8\"",
")",
")",
".",
"hexdigest",
"(",
")",
"field_trans",
"=",
"FieldTranslation",
"(",
"model",
"=",
"result",
".",
"group",
"(",
"\"model\"",
")",
",",
"object_id",
"=",
"result",
".",
"group",
"(",
"\"object_id\"",
")",
",",
"field",
"=",
"result",
".",
"group",
"(",
"\"field\"",
")",
",",
"lang",
"=",
"lang",
",",
"source_text",
"=",
"source_text",
",",
"source_md5",
"=",
"source_md5",
")",
"# Establecemos la traducción y si es fuzzy",
"field_trans",
".",
"translation",
"=",
"translation",
"field_trans",
".",
"is_fuzzy",
"=",
"is_fuzzy",
"field_trans",
".",
"save",
"(",
")",
"#print translation",
"#print is_fuzzy",
"i",
"+=",
"4",
"i",
"+=",
"1",
"# Elimina traducciones que no estén asociadas a ningún objeto",
"FieldTranslation",
".",
"delete_orphan_translations",
"(",
")",
"# Acceso obligatoriamente por POST",
"if",
"request",
".",
"method",
"!=",
"\"POST\"",
":",
"return",
"HttpResponseRedirect",
"(",
"reverse",
"(",
"\"modeltranslation:admin_url\"",
")",
")",
"form",
"=",
"ImportTranslationsForm",
"(",
"request",
".",
"POST",
",",
"request",
".",
"FILES",
")",
"if",
"form",
".",
"is_valid",
"(",
")",
":",
"_import_po_file",
"(",
"request",
".",
"FILES",
"[",
"'file'",
"]",
",",
"language",
")",
"#cache = TransCache.factory()",
"#cache.clear()",
"return",
"HttpResponseRedirect",
"(",
"reverse",
"(",
"\"modeltranslation:view_all_url\"",
",",
"args",
"=",
"(",
"language",
",",
"\"all\"",
")",
")",
")",
"return",
"HttpResponseRedirect",
"(",
"reverse",
"(",
"\"modeltranslation:admin_url\"",
")",
")"
] | Importa las traducciones a partir de un archivo PO. Ten en cuenta
que el archivo PO ha de ser generado desde esta aplicación, de forma
que los comentarios sirvan como id de traducción (lo metemos nosotros
en la exportación). | [
"Importa",
"las",
"traducciones",
"a",
"partir",
"de",
"un",
"archivo",
"PO",
".",
"Ten",
"en",
"cuenta",
"que",
"el",
"archivo",
"PO",
"ha",
"de",
"ser",
"generado",
"desde",
"esta",
"aplicación",
"de",
"forma",
"que",
"los",
"comentarios",
"sirvan",
"como",
"id",
"de",
"traducción",
"(",
"lo",
"metemos",
"nosotros",
"en",
"la",
"exportación",
")",
"."
] | train | https://github.com/intelligenia/modeltranslation/blob/64d6adeb537747321d5020efedf5d7e0d135862d/modeltranslation/views.py#L117-L183 |
cloud-compose/cloud-compose-cluster | cloudcompose/cluster/commands/cli.py | up | def up(cloud_init, use_snapshots, upgrade_image, snapshot_cluster, snapshot_time):
"""
creates a new cluster
"""
try:
cloud_config = CloudConfig()
ci = None
if cloud_init:
ci = CloudInit()
cloud_controller = CloudController(cloud_config)
cloud_controller.up(ci, use_snapshots, upgrade_image, snapshot_cluster, snapshot_time)
except CloudComposeException as ex:
print(ex) | python | def up(cloud_init, use_snapshots, upgrade_image, snapshot_cluster, snapshot_time):
"""
creates a new cluster
"""
try:
cloud_config = CloudConfig()
ci = None
if cloud_init:
ci = CloudInit()
cloud_controller = CloudController(cloud_config)
cloud_controller.up(ci, use_snapshots, upgrade_image, snapshot_cluster, snapshot_time)
except CloudComposeException as ex:
print(ex) | [
"def",
"up",
"(",
"cloud_init",
",",
"use_snapshots",
",",
"upgrade_image",
",",
"snapshot_cluster",
",",
"snapshot_time",
")",
":",
"try",
":",
"cloud_config",
"=",
"CloudConfig",
"(",
")",
"ci",
"=",
"None",
"if",
"cloud_init",
":",
"ci",
"=",
"CloudInit",
"(",
")",
"cloud_controller",
"=",
"CloudController",
"(",
"cloud_config",
")",
"cloud_controller",
".",
"up",
"(",
"ci",
",",
"use_snapshots",
",",
"upgrade_image",
",",
"snapshot_cluster",
",",
"snapshot_time",
")",
"except",
"CloudComposeException",
"as",
"ex",
":",
"print",
"(",
"ex",
")"
] | creates a new cluster | [
"creates",
"a",
"new",
"cluster"
] | train | https://github.com/cloud-compose/cloud-compose-cluster/blob/b6042419e778f3bf2257915def067972e5ce72cc/cloudcompose/cluster/commands/cli.py#L18-L32 |
cloud-compose/cloud-compose-cluster | cloudcompose/cluster/commands/cli.py | down | def down(force):
"""
destroys an existing cluster
"""
try:
cloud_config = CloudConfig()
cloud_controller = CloudController(cloud_config)
cloud_controller.down(force)
except CloudComposeException as ex:
print(ex) | python | def down(force):
"""
destroys an existing cluster
"""
try:
cloud_config = CloudConfig()
cloud_controller = CloudController(cloud_config)
cloud_controller.down(force)
except CloudComposeException as ex:
print(ex) | [
"def",
"down",
"(",
"force",
")",
":",
"try",
":",
"cloud_config",
"=",
"CloudConfig",
"(",
")",
"cloud_controller",
"=",
"CloudController",
"(",
"cloud_config",
")",
"cloud_controller",
".",
"down",
"(",
"force",
")",
"except",
"CloudComposeException",
"as",
"ex",
":",
"print",
"(",
"ex",
")"
] | destroys an existing cluster | [
"destroys",
"an",
"existing",
"cluster"
] | train | https://github.com/cloud-compose/cloud-compose-cluster/blob/b6042419e778f3bf2257915def067972e5ce72cc/cloudcompose/cluster/commands/cli.py#L36-L45 |
cloud-compose/cloud-compose-cluster | cloudcompose/cluster/commands/cli.py | cleanup | def cleanup():
"""
deletes launch configs and auto scaling group
"""
try:
cloud_config = CloudConfig()
cloud_controller = CloudController(cloud_config)
cloud_controller.cleanup()
except CloudComposeException as ex:
print(ex) | python | def cleanup():
"""
deletes launch configs and auto scaling group
"""
try:
cloud_config = CloudConfig()
cloud_controller = CloudController(cloud_config)
cloud_controller.cleanup()
except CloudComposeException as ex:
print(ex) | [
"def",
"cleanup",
"(",
")",
":",
"try",
":",
"cloud_config",
"=",
"CloudConfig",
"(",
")",
"cloud_controller",
"=",
"CloudController",
"(",
"cloud_config",
")",
"cloud_controller",
".",
"cleanup",
"(",
")",
"except",
"CloudComposeException",
"as",
"ex",
":",
"print",
"(",
"ex",
")"
] | deletes launch configs and auto scaling group | [
"deletes",
"launch",
"configs",
"and",
"auto",
"scaling",
"group"
] | train | https://github.com/cloud-compose/cloud-compose-cluster/blob/b6042419e778f3bf2257915def067972e5ce72cc/cloudcompose/cluster/commands/cli.py#L48-L57 |
cloud-compose/cloud-compose-cluster | cloudcompose/cluster/commands/cli.py | build | def build():
"""
builds the cloud_init script
"""
try:
cloud_config = CloudConfig()
config_data = cloud_config.config_data('cluster')
cloud_init = CloudInit()
print(cloud_init.build(config_data))
except CloudComposeException as ex:
print(ex) | python | def build():
"""
builds the cloud_init script
"""
try:
cloud_config = CloudConfig()
config_data = cloud_config.config_data('cluster')
cloud_init = CloudInit()
print(cloud_init.build(config_data))
except CloudComposeException as ex:
print(ex) | [
"def",
"build",
"(",
")",
":",
"try",
":",
"cloud_config",
"=",
"CloudConfig",
"(",
")",
"config_data",
"=",
"cloud_config",
".",
"config_data",
"(",
"'cluster'",
")",
"cloud_init",
"=",
"CloudInit",
"(",
")",
"print",
"(",
"cloud_init",
".",
"build",
"(",
"config_data",
")",
")",
"except",
"CloudComposeException",
"as",
"ex",
":",
"print",
"(",
"ex",
")"
] | builds the cloud_init script | [
"builds",
"the",
"cloud_init",
"script"
] | train | https://github.com/cloud-compose/cloud-compose-cluster/blob/b6042419e778f3bf2257915def067972e5ce72cc/cloudcompose/cluster/commands/cli.py#L60-L70 |
epandurski/flask_signalbus | flask_signalbus/signalbus_cli.py | flush | def flush(signal_names, exclude, wait):
"""Send pending signals over the message bus.
If a list of SIGNAL_NAMES is specified, flushes only those
signals. If no SIGNAL_NAMES are specified, flushes all signals.
"""
signalbus = current_app.extensions['signalbus']
signal_names = set(signal_names)
exclude = set(exclude)
models_to_flush = signalbus.get_signal_models()
if signal_names and exclude:
click.echo('Warning: Specified both SIGNAL_NAMES and exclude option.')
if signal_names:
wrong_signal_names = signal_names - {m.__name__ for m in models_to_flush}
models_to_flush = [m for m in models_to_flush if m.__name__ in signal_names]
else:
wrong_signal_names = exclude - {m.__name__ for m in models_to_flush}
for name in wrong_signal_names:
click.echo('Warning: A signal with name "{}" does not exist.'.format(name))
models_to_flush = [m for m in models_to_flush if m.__name__ not in exclude]
logger = logging.getLogger(__name__)
try:
if wait is not None:
signal_count = signalbus.flush(models_to_flush, wait=max(0.0, wait))
else:
signal_count = signalbus.flush(models_to_flush)
except Exception:
logger.exception('Caught error while sending pending signals.')
sys.exit(1)
if signal_count == 1:
logger.warning('%i signal has been successfully processed.', signal_count)
elif signal_count > 1:
logger.warning('%i signals have been successfully processed.', signal_count) | python | def flush(signal_names, exclude, wait):
"""Send pending signals over the message bus.
If a list of SIGNAL_NAMES is specified, flushes only those
signals. If no SIGNAL_NAMES are specified, flushes all signals.
"""
signalbus = current_app.extensions['signalbus']
signal_names = set(signal_names)
exclude = set(exclude)
models_to_flush = signalbus.get_signal_models()
if signal_names and exclude:
click.echo('Warning: Specified both SIGNAL_NAMES and exclude option.')
if signal_names:
wrong_signal_names = signal_names - {m.__name__ for m in models_to_flush}
models_to_flush = [m for m in models_to_flush if m.__name__ in signal_names]
else:
wrong_signal_names = exclude - {m.__name__ for m in models_to_flush}
for name in wrong_signal_names:
click.echo('Warning: A signal with name "{}" does not exist.'.format(name))
models_to_flush = [m for m in models_to_flush if m.__name__ not in exclude]
logger = logging.getLogger(__name__)
try:
if wait is not None:
signal_count = signalbus.flush(models_to_flush, wait=max(0.0, wait))
else:
signal_count = signalbus.flush(models_to_flush)
except Exception:
logger.exception('Caught error while sending pending signals.')
sys.exit(1)
if signal_count == 1:
logger.warning('%i signal has been successfully processed.', signal_count)
elif signal_count > 1:
logger.warning('%i signals have been successfully processed.', signal_count) | [
"def",
"flush",
"(",
"signal_names",
",",
"exclude",
",",
"wait",
")",
":",
"signalbus",
"=",
"current_app",
".",
"extensions",
"[",
"'signalbus'",
"]",
"signal_names",
"=",
"set",
"(",
"signal_names",
")",
"exclude",
"=",
"set",
"(",
"exclude",
")",
"models_to_flush",
"=",
"signalbus",
".",
"get_signal_models",
"(",
")",
"if",
"signal_names",
"and",
"exclude",
":",
"click",
".",
"echo",
"(",
"'Warning: Specified both SIGNAL_NAMES and exclude option.'",
")",
"if",
"signal_names",
":",
"wrong_signal_names",
"=",
"signal_names",
"-",
"{",
"m",
".",
"__name__",
"for",
"m",
"in",
"models_to_flush",
"}",
"models_to_flush",
"=",
"[",
"m",
"for",
"m",
"in",
"models_to_flush",
"if",
"m",
".",
"__name__",
"in",
"signal_names",
"]",
"else",
":",
"wrong_signal_names",
"=",
"exclude",
"-",
"{",
"m",
".",
"__name__",
"for",
"m",
"in",
"models_to_flush",
"}",
"for",
"name",
"in",
"wrong_signal_names",
":",
"click",
".",
"echo",
"(",
"'Warning: A signal with name \"{}\" does not exist.'",
".",
"format",
"(",
"name",
")",
")",
"models_to_flush",
"=",
"[",
"m",
"for",
"m",
"in",
"models_to_flush",
"if",
"m",
".",
"__name__",
"not",
"in",
"exclude",
"]",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"try",
":",
"if",
"wait",
"is",
"not",
"None",
":",
"signal_count",
"=",
"signalbus",
".",
"flush",
"(",
"models_to_flush",
",",
"wait",
"=",
"max",
"(",
"0.0",
",",
"wait",
")",
")",
"else",
":",
"signal_count",
"=",
"signalbus",
".",
"flush",
"(",
"models_to_flush",
")",
"except",
"Exception",
":",
"logger",
".",
"exception",
"(",
"'Caught error while sending pending signals.'",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"if",
"signal_count",
"==",
"1",
":",
"logger",
".",
"warning",
"(",
"'%i signal has been successfully processed.'",
",",
"signal_count",
")",
"elif",
"signal_count",
">",
"1",
":",
"logger",
".",
"warning",
"(",
"'%i signals have been successfully processed.'",
",",
"signal_count",
")"
] | Send pending signals over the message bus.
If a list of SIGNAL_NAMES is specified, flushes only those
signals. If no SIGNAL_NAMES are specified, flushes all signals. | [
"Send",
"pending",
"signals",
"over",
"the",
"message",
"bus",
"."
] | train | https://github.com/epandurski/flask_signalbus/blob/253800118443821a40404f04416422b076d62b6e/flask_signalbus/signalbus_cli.py#L21-L55 |
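A hedged sketch of doing programmatically what the flush command above does, using only the extension methods that appear in this row (get_signal_models and flush); it must run inside a Flask application context with flask_signalbus initialised.

from flask import current_app

def flush_all_pending_signals():
    signalbus = current_app.extensions['signalbus']
    models = signalbus.get_signal_models()  # every registered signal model
    return signalbus.flush(models)          # number of signals processed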
epandurski/flask_signalbus | flask_signalbus/signalbus_cli.py | flushmany | def flushmany():
"""Send a potentially huge number of pending signals over the message bus.
This command assumes that the number of pending signals might be
huge, so that they might not fit into memory. However, it is not
very smart in handling concurrent senders. It is mostly useful
when recovering from long periods of disconnectedness from the
message bus.
"""
signalbus = current_app.extensions['signalbus']
signal_count = signalbus.flushmany()
logger = logging.getLogger(__name__)
if signal_count == 1:
logger.warning('%i signal has been successfully processed.', signal_count)
elif signal_count > 1:
logger.warning('%i signals have been successfully processed.', signal_count) | python | def flushmany():
"""Send a potentially huge number of pending signals over the message bus.
This command assumes that the number of pending signals might be
huge, so that they might not fit into memory. However, it is not
very smart in handling concurrent senders. It is mostly useful
when recovering from long periods of disconnectedness from the
message bus.
"""
signalbus = current_app.extensions['signalbus']
signal_count = signalbus.flushmany()
logger = logging.getLogger(__name__)
if signal_count == 1:
logger.warning('%i signal has been successfully processed.', signal_count)
elif signal_count > 1:
logger.warning('%i signals have been successfully processed.', signal_count) | [
"def",
"flushmany",
"(",
")",
":",
"signalbus",
"=",
"current_app",
".",
"extensions",
"[",
"'signalbus'",
"]",
"signal_count",
"=",
"signalbus",
".",
"flushmany",
"(",
")",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"if",
"signal_count",
"==",
"1",
":",
"logger",
".",
"warning",
"(",
"'%i signal has been successfully processed.'",
",",
"signal_count",
")",
"elif",
"signal_count",
">",
"1",
":",
"logger",
".",
"warning",
"(",
"'%i signals have been successfully processed.'",
",",
"signal_count",
")"
] | Send a potentially huge number of pending signals over the message bus.
This command assumes that the number of pending signals might be
huge, so that they might not fit into memory. However, it is not
very smart in handling concurrent senders. It is mostly useful
when recovering from long periods of disconnectedness from the
message bus. | [
"Send",
"a",
"potentially",
"huge",
"number",
"of",
"pending",
"signals",
"over",
"the",
"message",
"bus",
"."
] | train | https://github.com/epandurski/flask_signalbus/blob/253800118443821a40404f04416422b076d62b6e/flask_signalbus/signalbus_cli.py#L60-L77 |
epandurski/flask_signalbus | flask_signalbus/signalbus_cli.py | signals | def signals():
"""Show all signal types."""
signalbus = current_app.extensions['signalbus']
for signal_model in signalbus.get_signal_models():
click.echo(signal_model.__name__) | python | def signals():
"""Show all signal types."""
signalbus = current_app.extensions['signalbus']
for signal_model in signalbus.get_signal_models():
click.echo(signal_model.__name__) | [
"def",
"signals",
"(",
")",
":",
"signalbus",
"=",
"current_app",
".",
"extensions",
"[",
"'signalbus'",
"]",
"for",
"signal_model",
"in",
"signalbus",
".",
"get_signal_models",
"(",
")",
":",
"click",
".",
"echo",
"(",
"signal_model",
".",
"__name__",
")"
] | Show all signal types. | [
"Show",
"all",
"signal",
"types",
"."
] | train | https://github.com/epandurski/flask_signalbus/blob/253800118443821a40404f04416422b076d62b6e/flask_signalbus/signalbus_cli.py#L82-L87 |
epandurski/flask_signalbus | flask_signalbus/signalbus_cli.py | pending | def pending():
"""Show the number of pending signals by signal type."""
signalbus = current_app.extensions['signalbus']
pending = []
total_pending = 0
for signal_model in signalbus.get_signal_models():
count = signal_model.query.count()
if count > 0:
pending.append((count, signal_model.__name__))
total_pending += count
if pending:
pending.sort()
max_chars = len(str(pending[-1][0]))
for n, signal_name in pending:
click.echo('{} of type "{}"'.format(str(n).rjust(max_chars), signal_name))
click.echo(25 * '-')
click.echo('Total pending: {} '.format(total_pending)) | python | def pending():
"""Show the number of pending signals by signal type."""
signalbus = current_app.extensions['signalbus']
pending = []
total_pending = 0
for signal_model in signalbus.get_signal_models():
count = signal_model.query.count()
if count > 0:
pending.append((count, signal_model.__name__))
total_pending += count
if pending:
pending.sort()
max_chars = len(str(pending[-1][0]))
for n, signal_name in pending:
click.echo('{} of type "{}"'.format(str(n).rjust(max_chars), signal_name))
click.echo(25 * '-')
click.echo('Total pending: {} '.format(total_pending)) | [
"def",
"pending",
"(",
")",
":",
"signalbus",
"=",
"current_app",
".",
"extensions",
"[",
"'signalbus'",
"]",
"pending",
"=",
"[",
"]",
"total_pending",
"=",
"0",
"for",
"signal_model",
"in",
"signalbus",
".",
"get_signal_models",
"(",
")",
":",
"count",
"=",
"signal_model",
".",
"query",
".",
"count",
"(",
")",
"if",
"count",
">",
"0",
":",
"pending",
".",
"append",
"(",
"(",
"count",
",",
"signal_model",
".",
"__name__",
")",
")",
"total_pending",
"+=",
"count",
"if",
"pending",
":",
"pending",
".",
"sort",
"(",
")",
"max_chars",
"=",
"len",
"(",
"str",
"(",
"pending",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
")",
")",
"for",
"n",
",",
"signal_name",
"in",
"pending",
":",
"click",
".",
"echo",
"(",
"'{} of type \"{}\"'",
".",
"format",
"(",
"str",
"(",
"n",
")",
".",
"rjust",
"(",
"max_chars",
")",
",",
"signal_name",
")",
")",
"click",
".",
"echo",
"(",
"25",
"*",
"'-'",
")",
"click",
".",
"echo",
"(",
"'Total pending: {} '",
".",
"format",
"(",
"total_pending",
")",
")"
] | Show the number of pending signals by signal type. | [
"Show",
"the",
"number",
"of",
"pending",
"signals",
"by",
"signal",
"type",
"."
] | train | https://github.com/epandurski/flask_signalbus/blob/253800118443821a40404f04416422b076d62b6e/flask_signalbus/signalbus_cli.py#L92-L109 |
intelligenia/modeltranslation | modeltranslation/transcache.py | TransCache._create_key | def _create_key(lang, instance):
"""Crea la clave única de la caché"""
model_name = instance.__class__.__name__
return "{0}__{1}_{2}".format(lang,model_name,instance.id) | python | def _create_key(lang, instance):
"""Crea la clave única de la caché"""
model_name = instance.__class__.__name__
return "{0}__{1}_{2}".format(lang,model_name,instance.id) | [
"def",
"_create_key",
"(",
"lang",
",",
"instance",
")",
":",
"model_name",
"=",
"instance",
".",
"__class__",
".",
"__name__",
"return",
"\"{0}__{1}_{2}\"",
".",
"format",
"(",
"lang",
",",
"model_name",
",",
"instance",
".",
"id",
")"
] | Crea la clave única de la caché | [
"Crea",
"la",
"clave",
"única",
"de",
"la",
"caché"
] | train | https://github.com/intelligenia/modeltranslation/blob/64d6adeb537747321d5020efedf5d7e0d135862d/modeltranslation/transcache.py#L34-L37 |
intelligenia/modeltranslation | modeltranslation/transcache.py | TransCache._cache_is_expired | def _cache_is_expired():
"""Indica si la caché está caducada"""
now = timezone.now()
timediff = TransCache.SINGLETON_CREATION_DATETIME - now
return (timediff.total_seconds() > TransCache.SINGLETON_EXPIRATION_MAX_SECONDS) | python | def _cache_is_expired():
"""Indica si la caché está caducada"""
now = timezone.now()
timediff = TransCache.SINGLETON_CREATION_DATETIME - now
return (timediff.total_seconds() > TransCache.SINGLETON_EXPIRATION_MAX_SECONDS) | [
"def",
"_cache_is_expired",
"(",
")",
":",
"now",
"=",
"timezone",
".",
"now",
"(",
")",
"timediff",
"=",
"TransCache",
".",
"SINGLETON_CREATION_DATETIME",
"-",
"now",
"return",
"(",
"timediff",
".",
"total_seconds",
"(",
")",
">",
"TransCache",
".",
"SINGLETON_EXPIRATION_MAX_SECONDS",
")"
] | Indica si la caché está caducada | [
"Indica",
"si",
"la",
"caché",
"está",
"caducada"
] | train | https://github.com/intelligenia/modeltranslation/blob/64d6adeb537747321d5020efedf5d7e0d135862d/modeltranslation/transcache.py#L40-L44 |
intelligenia/modeltranslation | modeltranslation/transcache.py | TransCache.factory | def factory():
"""Factoría del singleton, o crea una nueva o devuelve la existente"""
if not TransCache.SINGLETON or TransCache._cache_is_expired():
TransCache.SINGLETON = TransCache()
TransCache.SINGLETON_CREATION_DATETIME = timezone.now()
return TransCache.SINGLETON | python | def factory():
"""Factoría del singleton, o crea una nueva o devuelve la existente"""
if not TransCache.SINGLETON or TransCache._cache_is_expired():
TransCache.SINGLETON = TransCache()
TransCache.SINGLETON_CREATION_DATETIME = timezone.now()
return TransCache.SINGLETON | [
"def",
"factory",
"(",
")",
":",
"if",
"not",
"TransCache",
".",
"SINGLETON",
"or",
"TransCache",
".",
"_cache_is_expired",
"(",
")",
":",
"TransCache",
".",
"SINGLETON",
"=",
"TransCache",
"(",
")",
"TransCache",
".",
"SINGLETON_CREATION_DATETIME",
"=",
"timezone",
".",
"now",
"(",
")",
"return",
"TransCache",
".",
"SINGLETON"
] | Factoría del singleton, o crea una nueva o devuelve la existente | [
"Factoría",
"del",
"singleton",
"o",
"crea",
"una",
"nueva",
"o",
"devuelve",
"la",
"existente"
] | train | https://github.com/intelligenia/modeltranslation/blob/64d6adeb537747321d5020efedf5d7e0d135862d/modeltranslation/transcache.py#L55-L61 |
intelligenia/modeltranslation | modeltranslation/transcache.py | TransCache.set | def set(self, lang, instance):
"""
Establece en la instancia actual los atributos de traducción
y la almacena en un diccionario de claves _create_key y valores
el objeto con los atributos dinámicos.
"""
if self._cache_is_too_big():
self.cache = {}
instance_key = TransCache._create_key(lang, instance)
instance._translations_are_cached = True
instance.load_translations(lang=lang)
self.cache[instance_key] = instance | python | def set(self, lang, instance):
"""
Establece en la instancia actual los atributos de traducción
y la almacena en un diccionario de claves _create_key y valores
el objeto con los atributos dinámicos.
"""
if self._cache_is_too_big():
self.cache = {}
instance_key = TransCache._create_key(lang, instance)
instance._translations_are_cached = True
instance.load_translations(lang=lang)
self.cache[instance_key] = instance | [
"def",
"set",
"(",
"self",
",",
"lang",
",",
"instance",
")",
":",
"if",
"self",
".",
"_cache_is_too_big",
"(",
")",
":",
"self",
".",
"cache",
"=",
"{",
"}",
"instance_key",
"=",
"TransCache",
".",
"_create_key",
"(",
"lang",
",",
"instance",
")",
"instance",
".",
"_translations_are_cached",
"=",
"True",
"instance",
".",
"load_translations",
"(",
"lang",
"=",
"lang",
")",
"self",
".",
"cache",
"[",
"instance_key",
"]",
"=",
"instance"
] | Establece en la instancia actual los atributos de traducción
y la almacena en un diccionario de claves _create_key y valores
el objeto con los atributos dinámicos. | [
"Establece",
"en",
"la",
"instancia",
"actual",
"los",
"atributos",
"de",
"traducción",
"y",
"la",
"almacena",
"en",
"un",
"diccionario",
"de",
"claves",
"_create_key",
"y",
"valores",
"el",
"objeto",
"con",
"los",
"atributos",
"dinámicos",
"."
] | train | https://github.com/intelligenia/modeltranslation/blob/64d6adeb537747321d5020efedf5d7e0d135862d/modeltranslation/transcache.py#L68-L79 |
intelligenia/modeltranslation | modeltranslation/transcache.py | TransCache.has | def has(self, lang, instance):
"""
Indica si la caché tiene un objeto igual a éste con los
atributos dinámicos de traducción
"""
instance_key = TransCache._create_key(lang, instance)
return instance_key in self.cache | python | def has(self, lang, instance):
"""
Indica si la caché tiene un objeto igual a éste con los
atributos dinámicos de traducción
"""
instance_key = TransCache._create_key(lang, instance)
return instance_key in self.cache | [
"def",
"has",
"(",
"self",
",",
"lang",
",",
"instance",
")",
":",
"instance_key",
"=",
"TransCache",
".",
"_create_key",
"(",
"lang",
",",
"instance",
")",
"return",
"instance_key",
"in",
"self",
".",
"cache"
] | Indica si la caché tiene un objeto igual a éste con los
atributos dinámicos de traducción | [
"Indica",
"si",
"la",
"caché",
"tiene",
"un",
"objeto",
"igual",
"a",
"éste",
"con",
"los",
"atributos",
"dinámicos",
"de",
"traducción"
] | train | https://github.com/intelligenia/modeltranslation/blob/64d6adeb537747321d5020efedf5d7e0d135862d/modeltranslation/transcache.py#L81-L87 |
intelligenia/modeltranslation | modeltranslation/transcache.py | TransCache.get | def get(self, lang, instance):
"""
Obtiene una instancia igual a ésta, pero con los atributos
dinámicos de traduccción
"""
instance_key = TransCache._create_key(lang, instance)
return self.cache[instance_key] | python | def get(self, lang, instance):
"""
Obtiene una instancia igual a ésta, pero con los atributos
dinámicos de traduccción
"""
instance_key = TransCache._create_key(lang, instance)
return self.cache[instance_key] | [
"def",
"get",
"(",
"self",
",",
"lang",
",",
"instance",
")",
":",
"instance_key",
"=",
"TransCache",
".",
"_create_key",
"(",
"lang",
",",
"instance",
")",
"return",
"self",
".",
"cache",
"[",
"instance_key",
"]"
] | Obtiene una instancia igual a ésta, pero con los atributos
dinámicos de traduccción | [
"Obtiene",
"una",
"instancia",
"igual",
"a",
"ésta",
"pero",
"con",
"los",
"atributos",
"dinámicos",
"de",
"traduccción"
] | train | https://github.com/intelligenia/modeltranslation/blob/64d6adeb537747321d5020efedf5d7e0d135862d/modeltranslation/transcache.py#L89-L95 |
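
A sketch of how the TransCache methods in the records above compose; `article` is a hypothetical translatable model instance (anything exposing `load_translations()`, which `set()` calls), and the import path follows this record's file path (modeltranslation/transcache.py).

from modeltranslation.transcache import TransCache

cache = TransCache.factory()          # returns the singleton, rebuilding it when expired

# `article` is assumed to be a model instance with id and load_translations()
if not cache.has('es', article):
    cache.set('es', article)          # loads the 'es' translations and caches the instance
article = cache.get('es', article)    # cached instance carrying the translated attributes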
inveniosoftware/invenio-iiif | invenio_iiif/tasks.py | create_thumbnail | def create_thumbnail(uuid, thumbnail_width):
"""Create the thumbnail for an image."""
# size = '!' + thumbnail_width + ','
size = thumbnail_width + ',' # flask_iiif doesn't support ! at the moment
thumbnail = IIIFImageAPI.get('v2', uuid, size, 0, 'default', 'jpg') | python | def create_thumbnail(uuid, thumbnail_width):
"""Create the thumbnail for an image."""
# size = '!' + thumbnail_width + ','
size = thumbnail_width + ',' # flask_iiif doesn't support ! at the moment
thumbnail = IIIFImageAPI.get('v2', uuid, size, 0, 'default', 'jpg') | [
"def",
"create_thumbnail",
"(",
"uuid",
",",
"thumbnail_width",
")",
":",
"# size = '!' + thumbnail_width + ','",
"size",
"=",
"thumbnail_width",
"+",
"','",
"# flask_iiif doesn't support ! at the moment",
"thumbnail",
"=",
"IIIFImageAPI",
".",
"get",
"(",
"'v2'",
",",
"uuid",
",",
"size",
",",
"0",
",",
"'default'",
",",
"'jpg'",
")"
] | Create the thumbnail for an image. | [
"Create",
"the",
"thumbnail",
"for",
"an",
"image",
"."
] | train | https://github.com/inveniosoftware/invenio-iiif/blob/e4f2f93eaabdc8e2efea81c239ab76d481191959/invenio_iiif/tasks.py#L18-L22 |
kserhii/money-parser | money_parser/__init__.py | price_str | def price_str(raw_price, default=_not_defined, dec_point='.'):
"""Search and clean price value.
Convert raw price string presented in any localization
as a valid number string with an optional decimal point.
If raw price does not contain valid price value or contains
more than one price value, then return default value.
If default value not set, then raise ValueError.
Examples:
12.007 => 12007
00012,33 => 12.33
+1 => 1
- 520.05 => -520.05
1,000,777.5 => 1000777.5
1.777.000,99 => 1777000.99
1 234 567.89 => 1234567.89
99.77.11.000,1 => 997711000.1
NIO5,242 => 5242
Not a MINUS-.45 => 45
42 \t \n => 42
=> <default>
1...2 => <default>
:param str raw_price: string that contains price value.
:param default: value that will be returned if raw price not valid.
:param str dec_point: symbol that separate integer and fractional parts.
:return: cleaned price string.
:raise ValueError: error if raw price not valid and default value not set.
"""
def _error_or_default(err_msg):
if default == _not_defined:
raise ValueError(err_msg)
return default
# check and clean
if not isinstance(raw_price, str):
return _error_or_default(
'Wrong raw price type "{price_type}" '
'(expected type "str")'.format(price_type=type(raw_price)))
price = re.sub('\s', '', raw_price)
cleaned_price = _CLEANED_PRICE_RE.findall(price)
if len(cleaned_price) == 0:
return _error_or_default(
'Raw price value "{price}" does not contain '
'valid price digits'.format(price=raw_price))
if len(cleaned_price) > 1:
return _error_or_default(
'Raw price value "{price}" contains '
'more than one price value'.format(price=raw_price))
price = cleaned_price[0]
# clean truncated decimal (e.g. 99. -> 99)
price = price.rstrip('.,')
# get sign
sign = ''
if price[0] in {'-', '+'}:
sign, price = price[0], price[1:]
sign = '-' if sign == '-' else ''
# extract fractional digits
fractional = _FRACTIONAL_PRICE_RE.match(price)
if fractional:
integer, fraction = fractional.groups()
else:
integer, fraction = price, ''
# leave only digits in the integer part of the price
integer = re.sub('\D', '', integer)
# remove leading zeros (e.g. 007 -> 7, but 0.1 -> 0.1)
integer = integer.lstrip('0')
if integer == '':
integer = '0'
# construct price
price = sign + integer
if fraction:
price = ''.join((price, dec_point, fraction))
return price | python | def price_str(raw_price, default=_not_defined, dec_point='.'):
"""Search and clean price value.
Convert raw price string presented in any localization
as a valid number string with an optional decimal point.
If raw price does not contain valid price value or contains
more than one price value, then return default value.
If default value not set, then raise ValueError.
Examples:
12.007 => 12007
00012,33 => 12.33
+1 => 1
- 520.05 => -520.05
1,000,777.5 => 1000777.5
1.777.000,99 => 1777000.99
1 234 567.89 => 1234567.89
99.77.11.000,1 => 997711000.1
NIO5,242 => 5242
Not a MINUS-.45 => 45
42 \t \n => 42
=> <default>
1...2 => <default>
:param str raw_price: string that contains price value.
:param default: value that will be returned if raw price not valid.
:param str dec_point: symbol that separate integer and fractional parts.
:return: cleaned price string.
:raise ValueError: error if raw price not valid and default value not set.
"""
def _error_or_default(err_msg):
if default == _not_defined:
raise ValueError(err_msg)
return default
# check and clean
if not isinstance(raw_price, str):
return _error_or_default(
'Wrong raw price type "{price_type}" '
'(expected type "str")'.format(price_type=type(raw_price)))
price = re.sub('\s', '', raw_price)
cleaned_price = _CLEANED_PRICE_RE.findall(price)
if len(cleaned_price) == 0:
return _error_or_default(
'Raw price value "{price}" does not contain '
'valid price digits'.format(price=raw_price))
if len(cleaned_price) > 1:
return _error_or_default(
'Raw price value "{price}" contains '
'more than one price value'.format(price=raw_price))
price = cleaned_price[0]
# clean truncated decimal (e.g. 99. -> 99)
price = price.rstrip('.,')
# get sign
sign = ''
if price[0] in {'-', '+'}:
sign, price = price[0], price[1:]
sign = '-' if sign == '-' else ''
# extract fractional digits
fractional = _FRACTIONAL_PRICE_RE.match(price)
if fractional:
integer, fraction = fractional.groups()
else:
integer, fraction = price, ''
# leave only digits in the integer part of the price
integer = re.sub('\D', '', integer)
# remove leading zeros (e.g. 007 -> 7, but 0.1 -> 0.1)
integer = integer.lstrip('0')
if integer == '':
integer = '0'
# construct price
price = sign + integer
if fraction:
price = ''.join((price, dec_point, fraction))
return price | [
"def",
"price_str",
"(",
"raw_price",
",",
"default",
"=",
"_not_defined",
",",
"dec_point",
"=",
"'.'",
")",
":",
"def",
"_error_or_default",
"(",
"err_msg",
")",
":",
"if",
"default",
"==",
"_not_defined",
":",
"raise",
"ValueError",
"(",
"err_msg",
")",
"return",
"default",
"# check and clean",
"if",
"not",
"isinstance",
"(",
"raw_price",
",",
"str",
")",
":",
"return",
"_error_or_default",
"(",
"'Wrong raw price type \"{price_type}\" '",
"'(expected type \"str\")'",
".",
"format",
"(",
"price_type",
"=",
"type",
"(",
"raw_price",
")",
")",
")",
"price",
"=",
"re",
".",
"sub",
"(",
"'\\s'",
",",
"''",
",",
"raw_price",
")",
"cleaned_price",
"=",
"_CLEANED_PRICE_RE",
".",
"findall",
"(",
"price",
")",
"if",
"len",
"(",
"cleaned_price",
")",
"==",
"0",
":",
"return",
"_error_or_default",
"(",
"'Raw price value \"{price}\" does not contain '",
"'valid price digits'",
".",
"format",
"(",
"price",
"=",
"raw_price",
")",
")",
"if",
"len",
"(",
"cleaned_price",
")",
">",
"1",
":",
"return",
"_error_or_default",
"(",
"'Raw price value \"{price}\" contains '",
"'more than one price value'",
".",
"format",
"(",
"price",
"=",
"raw_price",
")",
")",
"price",
"=",
"cleaned_price",
"[",
"0",
"]",
"# clean truncated decimal (e.g. 99. -> 99)",
"price",
"=",
"price",
".",
"rstrip",
"(",
"'.,'",
")",
"# get sign",
"sign",
"=",
"''",
"if",
"price",
"[",
"0",
"]",
"in",
"{",
"'-'",
",",
"'+'",
"}",
":",
"sign",
",",
"price",
"=",
"price",
"[",
"0",
"]",
",",
"price",
"[",
"1",
":",
"]",
"sign",
"=",
"'-'",
"if",
"sign",
"==",
"'-'",
"else",
"''",
"# extract fractional digits",
"fractional",
"=",
"_FRACTIONAL_PRICE_RE",
".",
"match",
"(",
"price",
")",
"if",
"fractional",
":",
"integer",
",",
"fraction",
"=",
"fractional",
".",
"groups",
"(",
")",
"else",
":",
"integer",
",",
"fraction",
"=",
"price",
",",
"''",
"# leave only digits in the integer part of the price",
"integer",
"=",
"re",
".",
"sub",
"(",
"'\\D'",
",",
"''",
",",
"integer",
")",
"# remove leading zeros (e.g. 007 -> 7, but 0.1 -> 0.1)",
"integer",
"=",
"integer",
".",
"lstrip",
"(",
"'0'",
")",
"if",
"integer",
"==",
"''",
":",
"integer",
"=",
"'0'",
"# construct price",
"price",
"=",
"sign",
"+",
"integer",
"if",
"fraction",
":",
"price",
"=",
"''",
".",
"join",
"(",
"(",
"price",
",",
"dec_point",
",",
"fraction",
")",
")",
"return",
"price"
] | Search and clean price value.
Convert raw price string presented in any localization
as a valid number string with an optional decimal point.
If raw price does not contain valid price value or contains
more than one price value, then return default value.
If default value not set, then raise ValueError.
Examples:
12.007 => 12007
00012,33 => 12.33
+1 => 1
- 520.05 => -520.05
1,000,777.5 => 1000777.5
1.777.000,99 => 1777000.99
1 234 567.89 => 1234567.89
99.77.11.000,1 => 997711000.1
NIO5,242 => 5242
Not a MINUS-.45 => 45
42 \t \n => 42
=> <default>
1...2 => <default>
:param str raw_price: string that contains price value.
:param default: value that will be returned if raw price not valid.
:param str dec_point: symbol that separate integer and fractional parts.
:return: cleaned price string.
:raise ValueError: error if raw price not valid and default value not set. | [
"Search",
"and",
"clean",
"price",
"value",
"."
] | train | https://github.com/kserhii/money-parser/blob/d02da58eb3065c55b73c9a7e601ffb3e1448bcd1/money_parser/__init__.py#L15-L101 |
kserhii/money-parser | money_parser/__init__.py | price_dec | def price_dec(raw_price, default=_not_defined):
"""Price decimal value from raw string.
Extract price value from input raw string and
present as Decimal number.
If raw price does not contain valid price value or contains
more than one price value, then return default value.
If default value not set, then raise ValueError.
:param str raw_price: string that contains price value.
:param default: value that will be returned if raw price not valid.
:return: Decimal price value.
:raise ValueError: error if raw price not valid and default value not set.
"""
try:
price = price_str(raw_price)
return decimal.Decimal(price)
except ValueError as err:
if default == _not_defined:
raise err
return default | python | def price_dec(raw_price, default=_not_defined):
"""Price decimal value from raw string.
Extract price value from input raw string and
present as Decimal number.
If raw price does not contain valid price value or contains
more than one price value, then return default value.
If default value not set, then raise ValueError.
:param str raw_price: string that contains price value.
:param default: value that will be returned if raw price not valid.
:return: Decimal price value.
:raise ValueError: error if raw price not valid and default value not set.
"""
try:
price = price_str(raw_price)
return decimal.Decimal(price)
except ValueError as err:
if default == _not_defined:
raise err
return default | [
"def",
"price_dec",
"(",
"raw_price",
",",
"default",
"=",
"_not_defined",
")",
":",
"try",
":",
"price",
"=",
"price_str",
"(",
"raw_price",
")",
"return",
"decimal",
".",
"Decimal",
"(",
"price",
")",
"except",
"ValueError",
"as",
"err",
":",
"if",
"default",
"==",
"_not_defined",
":",
"raise",
"err",
"return",
"default"
] | Price decimal value from raw string.
Extract price value from input raw string and
present as Decimal number.
If raw price does not contain valid price value or contains
more than one price value, then return default value.
If default value not set, then raise ValueError.
:param str raw_price: string that contains price value.
:param default: value that will be returned if raw price not valid.
:return: Decimal price value.
:raise ValueError: error if raw price not valid and default value not set. | [
"Price",
"decimal",
"value",
"from",
"raw",
"string",
"."
] | train | https://github.com/kserhii/money-parser/blob/d02da58eb3065c55b73c9a7e601ffb3e1448bcd1/money_parser/__init__.py#L104-L127 |
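
A short usage sketch for the two helpers above; the expected values are taken directly from the examples listed in the price_str docstring, and the import path follows this record's module (money_parser/__init__.py).

from decimal import Decimal

from money_parser import price_str, price_dec

assert price_str('00012,33') == '12.33'            # leading zeros dropped, ',' read as the decimal point
assert price_str('- 520.05') == '-520.05'          # sign and stray whitespace handled
assert price_str('1...2', default=None) is None    # ambiguous input falls back to the given default
assert price_dec('1,000,777.5') == Decimal('1000777.5')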
ericholscher/django-kong | kong/views.py | flotify | def flotify(result, num=50):
"""
Return a list of (timestamp, duration) sets for test result.
"""
results = list(TestResult.objects.filter(test=result.test, site=result.site)[:num])
results.reverse()
return [[get_timestamp(result.run_date), result.duration/1000] for result in results] | python | def flotify(result, num=50):
"""
Return a list of (timestamp, duration) sets for test result.
"""
results = list(TestResult.objects.filter(test=result.test, site=result.site)[:num])
results.reverse()
return [[get_timestamp(result.run_date), result.duration/1000] for result in results] | [
"def",
"flotify",
"(",
"result",
",",
"num",
"=",
"50",
")",
":",
"results",
"=",
"list",
"(",
"TestResult",
".",
"objects",
".",
"filter",
"(",
"test",
"=",
"result",
".",
"test",
",",
"site",
"=",
"result",
".",
"site",
")",
"[",
":",
"num",
"]",
")",
"results",
".",
"reverse",
"(",
")",
"return",
"[",
"[",
"get_timestamp",
"(",
"result",
".",
"run_date",
")",
",",
"result",
".",
"duration",
"/",
"1000",
"]",
"for",
"result",
"in",
"results",
"]"
] | Return a list of (timestamp, duration) sets for test result. | [
"Return",
"a",
"list",
"of",
"(",
"timestamp",
"duration",
")",
"sets",
"for",
"test",
"result",
"."
] | train | https://github.com/ericholscher/django-kong/blob/02e1bc4332739387adc1d7b2f4aeadd7f7db896f/kong/views.py#L38-L44 |
ericholscher/django-kong | kong/plugins/munin.py | Plugin.__get_dynamic_attr | def __get_dynamic_attr(self, attname, arg, default=None):
"""
Gets "something" from self, which could be an attribute or
a callable with either 0 or 1 arguments (besides self).
Stolen from django.contrib.syntication.feeds.Feed.
"""
try:
attr = getattr(self, attname)
except AttributeError:
return default
if callable(attr):
# Check func_code.co_argcount rather than try/excepting the
# function and catching the TypeError, because something inside
# the function may raise the TypeError. This technique is more
# accurate.
if hasattr(attr, 'func_code'):
argcount = attr.func_code.co_argcount
else:
argcount = attr.__call__.func_code.co_argcount
if argcount == 2: # one argument is 'self'
return attr(arg)
else:
return attr()
return attr | python | def __get_dynamic_attr(self, attname, arg, default=None):
"""
Gets "something" from self, which could be an attribute or
a callable with either 0 or 1 arguments (besides self).
Stolen from django.contrib.syntication.feeds.Feed.
"""
try:
attr = getattr(self, attname)
except AttributeError:
return default
if callable(attr):
# Check func_code.co_argcount rather than try/excepting the
# function and catching the TypeError, because something inside
# the function may raise the TypeError. This technique is more
# accurate.
if hasattr(attr, 'func_code'):
argcount = attr.func_code.co_argcount
else:
argcount = attr.__call__.func_code.co_argcount
if argcount == 2: # one argument is 'self'
return attr(arg)
else:
return attr()
return attr | [
"def",
"__get_dynamic_attr",
"(",
"self",
",",
"attname",
",",
"arg",
",",
"default",
"=",
"None",
")",
":",
"try",
":",
"attr",
"=",
"getattr",
"(",
"self",
",",
"attname",
")",
"except",
"AttributeError",
":",
"return",
"default",
"if",
"callable",
"(",
"attr",
")",
":",
"# Check func_code.co_argcount rather than try/excepting the",
"# function and catching the TypeError, because something inside",
"# the function may raise the TypeError. This technique is more",
"# accurate.",
"if",
"hasattr",
"(",
"attr",
",",
"'func_code'",
")",
":",
"argcount",
"=",
"attr",
".",
"func_code",
".",
"co_argcount",
"else",
":",
"argcount",
"=",
"attr",
".",
"__call__",
".",
"func_code",
".",
"co_argcount",
"if",
"argcount",
"==",
"2",
":",
"# one argument is 'self'",
"return",
"attr",
"(",
"arg",
")",
"else",
":",
"return",
"attr",
"(",
")",
"return",
"attr"
] | Gets "something" from self, which could be an attribute or
a callable with either 0 or 1 arguments (besides self).
Stolen from django.contrib.syntication.feeds.Feed. | [
"Gets",
"something",
"from",
"self",
"which",
"could",
"be",
"an",
"attribute",
"or",
"a",
"callable",
"with",
"either",
"0",
"or",
"1",
"arguments",
"(",
"besides",
"self",
")",
".",
"Stolen",
"from",
"django",
".",
"contrib",
".",
"syntication",
".",
"feeds",
".",
"Feed",
"."
] | train | https://github.com/ericholscher/django-kong/blob/02e1bc4332739387adc1d7b2f4aeadd7f7db896f/kong/plugins/munin.py#L55-L79 |
paksu/pytelegraf | telegraf/client.py | ClientBase.metric | def metric(self, measurement_name, values, tags=None, timestamp=None):
"""
Append global tags configured for the client to the tags given then
converts the data into InfluxDB Line protocol and sends to to socket
"""
if not measurement_name or values in (None, {}):
# Don't try to send empty data
return
tags = tags or {}
# Do a shallow merge of the metric tags and global tags
all_tags = dict(self.tags, **tags)
# Create a metric line from the input and then send it to socket
line = Line(measurement_name, values, all_tags, timestamp)
self.send(line.to_line_protocol()) | python | def metric(self, measurement_name, values, tags=None, timestamp=None):
"""
Append global tags configured for the client to the tags given then
converts the data into InfluxDB Line protocol and sends to to socket
"""
if not measurement_name or values in (None, {}):
# Don't try to send empty data
return
tags = tags or {}
# Do a shallow merge of the metric tags and global tags
all_tags = dict(self.tags, **tags)
# Create a metric line from the input and then send it to socket
line = Line(measurement_name, values, all_tags, timestamp)
self.send(line.to_line_protocol()) | [
"def",
"metric",
"(",
"self",
",",
"measurement_name",
",",
"values",
",",
"tags",
"=",
"None",
",",
"timestamp",
"=",
"None",
")",
":",
"if",
"not",
"measurement_name",
"or",
"values",
"in",
"(",
"None",
",",
"{",
"}",
")",
":",
"# Don't try to send empty data",
"return",
"tags",
"=",
"tags",
"or",
"{",
"}",
"# Do a shallow merge of the metric tags and global tags",
"all_tags",
"=",
"dict",
"(",
"self",
".",
"tags",
",",
"*",
"*",
"tags",
")",
"# Create a metric line from the input and then send it to socket",
"line",
"=",
"Line",
"(",
"measurement_name",
",",
"values",
",",
"all_tags",
",",
"timestamp",
")",
"self",
".",
"send",
"(",
"line",
".",
"to_line_protocol",
"(",
")",
")"
] | Append global tags configured for the client to the tags given then
converts the data into InfluxDB Line protocol and sends to to socket | [
"Append",
"global",
"tags",
"configured",
"for",
"the",
"client",
"to",
"the",
"tags",
"given",
"then",
"converts",
"the",
"data",
"into",
"InfluxDB",
"Line",
"protocol",
"and",
"sends",
"to",
"to",
"socket"
] | train | https://github.com/paksu/pytelegraf/blob/a5a326bd99902768be2bf10da7dde2dfa165c013/telegraf/client.py#L13-L29 |
paksu/pytelegraf | telegraf/client.py | TelegrafClient.send | def send(self, data):
"""
Sends the given data to the socket via UDP
"""
try:
self.socket.sendto(data.encode('utf8') + b'\n', (self.host, self.port))
except (socket.error, RuntimeError):
# Socket errors should fail silently so they don't affect anything else
pass | python | def send(self, data):
"""
Sends the given data to the socket via UDP
"""
try:
self.socket.sendto(data.encode('utf8') + b'\n', (self.host, self.port))
except (socket.error, RuntimeError):
# Socket errors should fail silently so they don't affect anything else
pass | [
"def",
"send",
"(",
"self",
",",
"data",
")",
":",
"try",
":",
"self",
".",
"socket",
".",
"sendto",
"(",
"data",
".",
"encode",
"(",
"'utf8'",
")",
"+",
"b'\\n'",
",",
"(",
"self",
".",
"host",
",",
"self",
".",
"port",
")",
")",
"except",
"(",
"socket",
".",
"error",
",",
"RuntimeError",
")",
":",
"# Socket errors should fail silently so they don't affect anything else",
"pass"
] | Sends the given data to the socket via UDP | [
"Sends",
"the",
"given",
"data",
"to",
"the",
"socket",
"via",
"UDP"
] | train | https://github.com/paksu/pytelegraf/blob/a5a326bd99902768be2bf10da7dde2dfa165c013/telegraf/client.py#L44-L52 |
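
A usage sketch tying ClientBase.metric() to the UDP transport above; the constructor arguments (host, port, tags) are an assumption, since only metric() and send() appear in these records.

from telegraf.client import TelegrafClient

# Global tags passed here are merged into every metric's tags by metric().
client = TelegrafClient(host='localhost', port=8094, tags={'service': 'billing'})

# Serialized to line protocol and written to the UDP socket by send();
# socket errors are swallowed, so this does not raise if no listener is running.
client.metric('orders', {'count': 3, 'total': 49.90}, tags={'region': 'eu'})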
paksu/pytelegraf | telegraf/client.py | HttpClient.send | def send(self, data):
"""
Send the data in a separate thread via HTTP POST.
HTTP introduces some overhead, so to avoid blocking the main thread,
this issues the request in the background.
"""
self.future_session.post(url=self.url, data=data) | python | def send(self, data):
"""
Send the data in a separate thread via HTTP POST.
HTTP introduces some overhead, so to avoid blocking the main thread,
this issues the request in the background.
"""
self.future_session.post(url=self.url, data=data) | [
"def",
"send",
"(",
"self",
",",
"data",
")",
":",
"self",
".",
"future_session",
".",
"post",
"(",
"url",
"=",
"self",
".",
"url",
",",
"data",
"=",
"data",
")"
] | Send the data in a separate thread via HTTP POST.
HTTP introduces some overhead, so to avoid blocking the main thread,
this issues the request in the background. | [
"Send",
"the",
"data",
"in",
"a",
"separate",
"thread",
"via",
"HTTP",
"POST",
".",
"HTTP",
"introduces",
"some",
"overhead",
"so",
"to",
"avoid",
"blocking",
"the",
"main",
"thread",
"this",
"issues",
"the",
"request",
"in",
"the",
"background",
"."
] | train | https://github.com/paksu/pytelegraf/blob/a5a326bd99902768be2bf10da7dde2dfa165c013/telegraf/client.py#L73-L79 |
paksu/pytelegraf | telegraf/protocol.py | Line.get_output_values | def get_output_values(self):
"""
Return an escaped string of comma separated value_name: value pairs
"""
# Handle primitive values here and implicitly convert them to a dict because
# it allows the API to be simpler.
# Also influxDB mandates that each value also has a name so the default name
# for any non-dict value is "value"
if not isinstance(self.values, dict):
metric_values = {'value': self.values}
else:
metric_values = self.values
# Sort the values in lexicographically by value name
sorted_values = sorted(metric_values.items())
# Remove None values
sorted_values = [(k, v) for k, v in sorted_values if v is not None]
return u",".join(u"{0}={1}".format(format_string(k), format_value(v)) for k, v in sorted_values) | python | def get_output_values(self):
"""
Return an escaped string of comma separated value_name: value pairs
"""
# Handle primitive values here and implicitly convert them to a dict because
# it allows the API to be simpler.
# Also influxDB mandates that each value also has a name so the default name
# for any non-dict value is "value"
if not isinstance(self.values, dict):
metric_values = {'value': self.values}
else:
metric_values = self.values
# Sort the values in lexicographically by value name
sorted_values = sorted(metric_values.items())
# Remove None values
sorted_values = [(k, v) for k, v in sorted_values if v is not None]
return u",".join(u"{0}={1}".format(format_string(k), format_value(v)) for k, v in sorted_values) | [
"def",
"get_output_values",
"(",
"self",
")",
":",
"# Handle primitive values here and implicitly convert them to a dict because",
"# it allows the API to be simpler.",
"# Also influxDB mandates that each value also has a name so the default name",
"# for any non-dict value is \"value\"",
"if",
"not",
"isinstance",
"(",
"self",
".",
"values",
",",
"dict",
")",
":",
"metric_values",
"=",
"{",
"'value'",
":",
"self",
".",
"values",
"}",
"else",
":",
"metric_values",
"=",
"self",
".",
"values",
"# Sort the values in lexicographically by value name",
"sorted_values",
"=",
"sorted",
"(",
"metric_values",
".",
"items",
"(",
")",
")",
"# Remove None values",
"sorted_values",
"=",
"[",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"sorted_values",
"if",
"v",
"is",
"not",
"None",
"]",
"return",
"u\",\"",
".",
"join",
"(",
"u\"{0}={1}\"",
".",
"format",
"(",
"format_string",
"(",
"k",
")",
",",
"format_value",
"(",
"v",
")",
")",
"for",
"k",
",",
"v",
"in",
"sorted_values",
")"
] | Return an escaped string of comma separated value_name: value pairs | [
"Return",
"an",
"escaped",
"string",
"of",
"comma",
"separated",
"value_name",
":",
"value",
"pairs"
] | train | https://github.com/paksu/pytelegraf/blob/a5a326bd99902768be2bf10da7dde2dfa165c013/telegraf/protocol.py#L24-L44 |
paksu/pytelegraf | telegraf/protocol.py | Line.get_output_tags | def get_output_tags(self):
"""
Return an escaped string of comma separated tag_name: tag_value pairs
Tags should be sorted by key before being sent for best performance. The sort should
match that from the Go bytes. Compare function (http://golang.org/pkg/bytes/#Compare).
"""
# Sort the tags in lexicographically by tag name
sorted_tags = sorted(self.tags.items())
# Finally render, escape and return the tag string
return u",".join(u"{0}={1}".format(format_string(k), format_string(v)) for k, v in sorted_tags) | python | def get_output_tags(self):
"""
Return an escaped string of comma separated tag_name: tag_value pairs
Tags should be sorted by key before being sent for best performance. The sort should
match that from the Go bytes. Compare function (http://golang.org/pkg/bytes/#Compare).
"""
# Sort the tags in lexicographically by tag name
sorted_tags = sorted(self.tags.items())
# Finally render, escape and return the tag string
return u",".join(u"{0}={1}".format(format_string(k), format_string(v)) for k, v in sorted_tags) | [
"def",
"get_output_tags",
"(",
"self",
")",
":",
"# Sort the tags in lexicographically by tag name",
"sorted_tags",
"=",
"sorted",
"(",
"self",
".",
"tags",
".",
"items",
"(",
")",
")",
"# Finally render, escape and return the tag string",
"return",
"u\",\"",
".",
"join",
"(",
"u\"{0}={1}\"",
".",
"format",
"(",
"format_string",
"(",
"k",
")",
",",
"format_string",
"(",
"v",
")",
")",
"for",
"k",
",",
"v",
"in",
"sorted_tags",
")"
] | Return an escaped string of comma separated tag_name: tag_value pairs
Tags should be sorted by key before being sent for best performance. The sort should
match that from the Go bytes. Compare function (http://golang.org/pkg/bytes/#Compare). | [
"Return",
"an",
"escaped",
"string",
"of",
"comma",
"separated",
"tag_name",
":",
"tag_value",
"pairs"
] | train | https://github.com/paksu/pytelegraf/blob/a5a326bd99902768be2bf10da7dde2dfa165c013/telegraf/protocol.py#L46-L58 |
paksu/pytelegraf | telegraf/protocol.py | Line.to_line_protocol | def to_line_protocol(self):
"""
Converts the given metrics as a single line of InfluxDB line protocol
"""
tags = self.get_output_tags()
return u"{0}{1} {2}{3}".format(
self.get_output_measurement(),
"," + tags if tags else '',
self.get_output_values(),
self.get_output_timestamp()
) | python | def to_line_protocol(self):
"""
Converts the given metrics as a single line of InfluxDB line protocol
"""
tags = self.get_output_tags()
return u"{0}{1} {2}{3}".format(
self.get_output_measurement(),
"," + tags if tags else '',
self.get_output_values(),
self.get_output_timestamp()
) | [
"def",
"to_line_protocol",
"(",
"self",
")",
":",
"tags",
"=",
"self",
".",
"get_output_tags",
"(",
")",
"return",
"u\"{0}{1} {2}{3}\"",
".",
"format",
"(",
"self",
".",
"get_output_measurement",
"(",
")",
",",
"\",\"",
"+",
"tags",
"if",
"tags",
"else",
"''",
",",
"self",
".",
"get_output_values",
"(",
")",
",",
"self",
".",
"get_output_timestamp",
"(",
")",
")"
] | Converts the given metrics as a single line of InfluxDB line protocol | [
"Converts",
"the",
"given",
"metrics",
"as",
"a",
"single",
"line",
"of",
"InfluxDB",
"line",
"protocol"
] | train | https://github.com/paksu/pytelegraf/blob/a5a326bd99902768be2bf10da7dde2dfa165c013/telegraf/protocol.py#L66-L77 |
paksu/pytelegraf | telegraf/utils.py | format_string | def format_string(key):
"""
Formats either measurement names, tag names or tag values.
Measurement name and any optional tags separated by commas. Measurement names, tag keys,
and tag values must escape any spaces, commas or equal signs using a backslash (\).
For example: \ and \,.
All tag values are stored as strings and should not be surrounded in quotes.
"""
if isinstance(key, basestring):
key = key.replace(",", "\,")
key = key.replace(" ", "\ ")
key = key.replace("=", "\=")
return key | python | def format_string(key):
"""
Formats either measurement names, tag names or tag values.
Measurement name and any optional tags separated by commas. Measurement names, tag keys,
and tag values must escape any spaces, commas or equal signs using a backslash (\).
For example: \ and \,.
All tag values are stored as strings and should not be surrounded in quotes.
"""
if isinstance(key, basestring):
key = key.replace(",", "\,")
key = key.replace(" ", "\ ")
key = key.replace("=", "\=")
return key | [
"def",
"format_string",
"(",
"key",
")",
":",
"if",
"isinstance",
"(",
"key",
",",
"basestring",
")",
":",
"key",
"=",
"key",
".",
"replace",
"(",
"\",\"",
",",
"\"\\,\"",
")",
"key",
"=",
"key",
".",
"replace",
"(",
"\" \"",
",",
"\"\\ \"",
")",
"key",
"=",
"key",
".",
"replace",
"(",
"\"=\"",
",",
"\"\\=\"",
")",
"return",
"key"
] | Formats either measurement names, tag names or tag values.
Measurement name and any optional tags separated by commas. Measurement names, tag keys,
and tag values must escape any spaces, commas or equal signs using a backslash (\).
For example: \ and \,.
All tag values are stored as strings and should not be surrounded in quotes. | [
"Formats",
"either",
"measurement",
"names",
"tag",
"names",
"or",
"tag",
"values",
"."
] | train | https://github.com/paksu/pytelegraf/blob/a5a326bd99902768be2bf10da7dde2dfa165c013/telegraf/utils.py#L8-L22 |
paksu/pytelegraf | telegraf/utils.py | format_value | def format_value(value):
"""
Integers are numeric values that do not include a decimal and are followed by a trailing i when inserted
(e.g. 1i, 345i, 2015i, -10i). Note that all values must have a trailing i.
If they do not they will be written as floats.
Floats are numeric values that are not followed by a trailing i. (e.g. 1, 1.0, -3.14, 6.0e5, 10).
Boolean values indicate true or false. Valid boolean strings for line protocol are
(t, T, true, True, TRUE, f, F, false, False and FALSE).
Strings are text values. All string field values must be surrounded in double-quotes ".
If the string contains a double-quote, the double-quote must be escaped with a backslash, e.g. \".
"""
if isinstance(value, basestring):
value = value.replace('"', '\"')
value = u'"{0}"'.format(value)
elif isinstance(value, bool):
value = str(value)
elif isinstance(value, int):
value = "{0}i".format(value)
elif isinstance(value, float):
value = str(value)
return value | python | def format_value(value):
"""
Integers are numeric values that do not include a decimal and are followed by a trailing i when inserted
(e.g. 1i, 345i, 2015i, -10i). Note that all values must have a trailing i.
If they do not they will be written as floats.
Floats are numeric values that are not followed by a trailing i. (e.g. 1, 1.0, -3.14, 6.0e5, 10).
Boolean values indicate true or false. Valid boolean strings for line protocol are
(t, T, true, True, TRUE, f, F, false, False and FALSE).
Strings are text values. All string field values must be surrounded in double-quotes ".
If the string contains a double-quote, the double-quote must be escaped with a backslash, e.g. \".
"""
if isinstance(value, basestring):
value = value.replace('"', '\"')
value = u'"{0}"'.format(value)
elif isinstance(value, bool):
value = str(value)
elif isinstance(value, int):
value = "{0}i".format(value)
elif isinstance(value, float):
value = str(value)
return value | [
"def",
"format_value",
"(",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"basestring",
")",
":",
"value",
"=",
"value",
".",
"replace",
"(",
"'\"'",
",",
"'\\\"'",
")",
"value",
"=",
"u'\"{0}\"'",
".",
"format",
"(",
"value",
")",
"elif",
"isinstance",
"(",
"value",
",",
"bool",
")",
":",
"value",
"=",
"str",
"(",
"value",
")",
"elif",
"isinstance",
"(",
"value",
",",
"int",
")",
":",
"value",
"=",
"\"{0}i\"",
".",
"format",
"(",
"value",
")",
"elif",
"isinstance",
"(",
"value",
",",
"float",
")",
":",
"value",
"=",
"str",
"(",
"value",
")",
"return",
"value"
] | Integers are numeric values that do not include a decimal and are followed by a trailing i when inserted
(e.g. 1i, 345i, 2015i, -10i). Note that all values must have a trailing i.
If they do not they will be written as floats.
Floats are numeric values that are not followed by a trailing i. (e.g. 1, 1.0, -3.14, 6.0e5, 10).
Boolean values indicate true or false. Valid boolean strings for line protocol are
(t, T, true, True, TRUE, f, F, false, False and FALSE).
Strings are text values. All string field values must be surrounded in double-quotes ".
If the string contains a double-quote, the double-quote must be escaped with a backslash, e.g. \". | [
"Integers",
"are",
"numeric",
"values",
"that",
"do",
"not",
"include",
"a",
"decimal",
"and",
"are",
"followed",
"by",
"a",
"trailing",
"i",
"when",
"inserted",
"(",
"e",
".",
"g",
".",
"1i",
"345i",
"2015i",
"-",
"10i",
")",
".",
"Note",
"that",
"all",
"values",
"must",
"have",
"a",
"trailing",
"i",
".",
"If",
"they",
"do",
"not",
"they",
"will",
"be",
"written",
"as",
"floats",
"."
] | train | https://github.com/paksu/pytelegraf/blob/a5a326bd99902768be2bf10da7dde2dfa165c013/telegraf/utils.py#L25-L48 |
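
A small demonstration of the escaping and typing rules described in the two telegraf/utils.py records above; the expected strings follow directly from the code shown.

from telegraf.utils import format_string, format_value

format_string('host name')   # 'host\ name'  -- spaces in measurement/tag names get a backslash
format_value(42)             # '42i'         -- integers carry a trailing i
format_value(12.5)           # '12.5'        -- floats are passed through as plain numbers
format_value(True)           # 'True'        -- booleans become their string form
format_value('web 1')        # '"web 1"'     -- string field values are double-quoted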