Dataset fields (with observed value statistics):

repository_name: string, length 7 to 55
func_path_in_repository: string, length 4 to 223
func_name: string, length 1 to 134
whole_func_string: string, length 75 to 104k
language: string, 1 distinct value
func_code_string: string, length 75 to 104k
func_code_tokens: sequence, length 19 to 28.4k
func_documentation_string: string, length 1 to 46.9k
func_documentation_tokens: sequence, length 1 to 1.97k
split_name: string, 1 distinct value
func_code_url: string, length 87 to 315
bierschenk/ode
ode/integrators.py
euler
def euler(dfun, xzero, timerange, timestep):
    '''Euler method integration. This function wraps the Euler class.

    :param dfun: derivative function of the system.
        The differential system arranged as a series of first-order
        equations: \\dot{X} = dfun(t, x)
    :param xzero: the initial condition of the system
    :param timerange: the start and end times as (starttime, endtime)
    :param timestep: the timestep
    :returns: t, x: as lists
    '''
    return zip(*list(Euler(dfun, xzero, timerange, timestep)))
python
[ "def", "euler", "(", "dfun", ",", "xzero", ",", "timerange", ",", "timestep", ")", ":", "return", "zip", "(", "*", "list", "(", "Euler", "(", "dfun", ",", "xzero", ",", "timerange", ",", "timestep", ")", ")", ")" ]
Euler method integration. This function wraps the Euler class. :param dfun: derivative function of the system. The differential system arranged as a series of first-order equations: \\dot{X} = dfun(t, x) :param xzero: the initial condition of the system :param timerange: the start and end times as (starttime, endtime) :param timestep: the timestep :returns: t, x: as lists
[ "Euler", "method", "integration", ".", "This", "function", "wraps", "the", "Euler", "class", "." ]
train
https://github.com/bierschenk/ode/blob/01fb714874926f0988a4bb250d2a0c8a2429e4f0/ode/integrators.py#L72-L88
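A minimal usage sketch for the euler wrapper above, assuming the module layout shown (ode/integrators.py) and that dfun returns the state derivative as a sequence, matching the \dot{X} = dfun(t, x) convention in the docstring; the decay system itself is an illustrative choice, not from the source:

# integrate dx/dt = -x from t = 0 to t = 2 with a 0.01 timestep
from ode.integrators import euler

def dfun(t, x):
    # derivative of each state variable; here a single first-order equation
    return [-x[0]]

t, x = euler(dfun, xzero=[1.0], timerange=(0, 2), timestep=0.01)
print(t[-1], x[-1])  # x[-1] should land close to exp(-2), about 0.135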
bierschenk/ode
ode/integrators.py
verlet
def verlet(dfun, xzero, vzero, timerange, timestep):
    '''Verlet method integration. This function wraps the Verlet class.

    :param dfun: second derivative function of the system.
        The differential system arranged as a series of second-order
        equations: \ddot{X} = dfun(t, x)
    :param xzero: the initial condition of the system
    :param vzero: the initial condition of the first derivative of the system
    :param timerange: the start and end times as (starttime, endtime)
    :param timestep: the timestep
    :returns: t, x, v: as lists.
    '''
    return zip(*list(Verlet(dfun, xzero, vzero, timerange, timestep)))
python
[ "def", "verlet", "(", "dfun", ",", "xzero", ",", "vzero", ",", "timerange", ",", "timestep", ")", ":", "return", "zip", "(", "*", "list", "(", "Verlet", "(", "dfun", ",", "xzero", ",", "vzero", ",", "timerange", ",", "timestep", ")", ")", ")" ]
Verlet method integration. This function wraps the Verlet class. :param dfun: second derivative function of the system. The differential system arranged as a series of second-order equations: \ddot{X} = dfun(t, x) :param xzero: the initial condition of the system :param vzero: the initial condition of the first derivative of the system :param timerange: the start and end times as (starttime, endtime) :param timestep: the timestep :returns: t, x, v: as lists.
[ "Verlet", "method", "integration", ".", "This", "function", "wraps", "the", "Verlet", "class", "." ]
train
https://github.com/bierschenk/ode/blob/01fb714874926f0988a4bb250d2a0c8a2429e4f0/ode/integrators.py#L145-L163
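A usage sketch for verlet under the same assumptions as the euler example; the unit harmonic oscillator \ddot{x} = -x is an illustrative system, not from the source:

from ode.integrators import verlet

def ddfun(t, x):
    # second derivative: simple harmonic oscillator, \ddot{x} = -x
    return [-x[0]]

t, x, v = verlet(ddfun, xzero=[1.0], vzero=[0.0],
                 timerange=(0, 6.28), timestep=0.001)
print(x[-1])  # after roughly one period the position should return near 1.0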
bierschenk/ode
ode/integrators.py
backwardeuler
def backwardeuler(dfun, xzero, timerange, timestep):
    '''Backward Euler method integration. This function wraps BackwardEuler.

    :param dfun: Derivative function of the system.
        The differential system arranged as a series of first-order
        equations: \dot{X} = dfun(t, x)
    :param xzero: The initial condition of the system.
    :param timerange: The start and end times as (starttime, endtime).
    :param timestep: The timestep.
    :returns: t, x: as lists.
    '''
    return zip(*list(BackwardEuler(dfun, xzero, timerange, timestep)))
python
[ "def", "backwardeuler", "(", "dfun", ",", "xzero", ",", "timerange", ",", "timestep", ")", ":", "return", "zip", "(", "*", "list", "(", "BackwardEuler", "(", "dfun", ",", "xzero", ",", "timerange", ",", "timestep", ")", ")", ")" ]
Backward Euler method integration. This function wraps BackwardEuler. :param dfun: Derivative function of the system. The differential system arranged as a series of first-order equations: \dot{X} = dfun(t, x) :param xzero: The initial condition of the system. :param timerange: The start and end times as (starttime, endtime). :param timestep: The timestep. :returns: t, x: as lists.
[ "Backward", "Euler", "method", "integration", ".", "This", "function", "wraps", "BackwardEuler", "." ]
train
https://github.com/bierschenk/ode/blob/01fb714874926f0988a4bb250d2a0c8a2429e4f0/ode/integrators.py#L228-L252
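Backward Euler is the implicit counterpart of the explicit method above, so it stays stable on stiff systems even with a coarse step. A hedged sketch, again assuming the module layout shown; the stiff decay system is illustrative:

from ode.integrators import backwardeuler

def dfun(t, x):
    # stiff linear decay: explicit Euler diverges once timestep > 2/50
    return [-50.0 * x[0]]

t, x = backwardeuler(dfun, xzero=[1.0], timerange=(0, 1), timestep=0.1)
print(x[-1])  # decays toward 0 without oscillating, despite the large step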
craigahobbs/chisel
src/chisel/app.py
Application.add_request
def add_request(self, request):
    """ Add a request object """

    # Duplicate request name?
    if request.name in self.requests:
        raise ValueError('redefinition of request "{0}"'.format(request.name))
    self.requests[request.name] = request

    # Add the request URLs
    for method, url in request.urls:
        # URL with arguments?
        if RE_URL_ARG.search(url):
            request_regex = '^' + RE_URL_ARG_ESC.sub(r'/(?P<\1>[^/]+)', re.escape(url)) + '$'
            self.__request_regex.append((method, re.compile(request_regex), request))
        else:
            request_key = (method, url)
            if request_key in self.__request_urls:
                raise ValueError('redefinition of request URL "{0}"'.format(url))
            self.__request_urls[request_key] = request
python
[ "def", "add_request", "(", "self", ",", "request", ")", ":", "# Duplicate request name?", "if", "request", ".", "name", "in", "self", ".", "requests", ":", "raise", "ValueError", "(", "'redefinition of request \"{0}\"'", ".", "format", "(", "request", ".", "name", ")", ")", "self", ".", "requests", "[", "request", ".", "name", "]", "=", "request", "# Add the request URLs", "for", "method", ",", "url", "in", "request", ".", "urls", ":", "# URL with arguments?", "if", "RE_URL_ARG", ".", "search", "(", "url", ")", ":", "request_regex", "=", "'^'", "+", "RE_URL_ARG_ESC", ".", "sub", "(", "r'/(?P<\\1>[^/]+)'", ",", "re", ".", "escape", "(", "url", ")", ")", "+", "'$'", "self", ".", "__request_regex", ".", "append", "(", "(", "method", ",", "re", ".", "compile", "(", "request_regex", ")", ",", "request", ")", ")", "else", ":", "request_key", "=", "(", "method", ",", "url", ")", "if", "request_key", "in", "self", ".", "__request_urls", ":", "raise", "ValueError", "(", "'redefinition of request URL \"{0}\"'", ".", "format", "(", "url", ")", ")", "self", ".", "__request_urls", "[", "request_key", "]", "=", "request" ]
Add a request object
[ "Add", "a", "request", "object" ]
train
https://github.com/craigahobbs/chisel/blob/d306a9eae2ff757647c6ca1c933bc944efa5c326/src/chisel/app.py#L44-L65
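The interesting step in add_request is turning a URL template with placeholders into a named-group regex. The chisel patterns RE_URL_ARG and RE_URL_ARG_ESC are not shown in this row, so the sketch below assumes a hypothetical {name}-style placeholder syntax (and Python 3.7+ re.escape behavior) purely to illustrate the escape-then-substitute trick:

import re

# hypothetical placeholder syntax: /users/{user_id}/posts/{post_id}
RE_URL_ARG = re.compile(r'/\{([A-Za-z]\w*)\}')
# after re.escape(), '{' and '}' carry a backslash, so match the escaped form
RE_URL_ARG_ESC = re.compile(r'/\\\{([A-Za-z]\w*)\\\}')

url = '/users/{user_id}/posts/{post_id}'
request_regex = '^' + RE_URL_ARG_ESC.sub(r'/(?P<\1>[^/]+)', re.escape(url)) + '$'
match = re.match(request_regex, '/users/42/posts/7')
print(match.groupdict())  # {'user_id': '42', 'post_id': '7'}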
craigahobbs/chisel
src/chisel/app.py
Context.add_header
def add_header(self, key, value):
    """ Add a response header """

    assert isinstance(key, str), 'header key must be of type str'
    assert isinstance(value, str), 'header value must be of type str'
    self.headers[key] = value
python
[ "def", "add_header", "(", "self", ",", "key", ",", "value", ")", ":", "assert", "isinstance", "(", "key", ",", "str", ")", ",", "'header key must be of type str'", "assert", "isinstance", "(", "value", ",", "str", ")", ",", "'header value must be of type str'", "self", ".", "headers", "[", "key", "]", "=", "value" ]
Add a response header
[ "Add", "a", "response", "header" ]
train
https://github.com/craigahobbs/chisel/blob/d306a9eae2ff757647c6ca1c933bc944efa5c326/src/chisel/app.py#L178-L185
craigahobbs/chisel
src/chisel/app.py
Context.response
def response(self, status, content_type, content, headers=None):
    """ Send an HTTP response """

    assert not isinstance(content, (str, bytes)), 'response content cannot be of type str or bytes'
    response_headers = [('Content-Type', content_type)]
    if headers:
        response_headers.extend(headers)
    self.start_response(status, response_headers)
    return content
python
[ "def", "response", "(", "self", ",", "status", ",", "content_type", ",", "content", ",", "headers", "=", "None", ")", ":", "assert", "not", "isinstance", "(", "content", ",", "(", "str", ",", "bytes", ")", ")", ",", "'response content cannot be of type str or bytes'", "response_headers", "=", "[", "(", "'Content-Type'", ",", "content_type", ")", "]", "if", "headers", ":", "response_headers", ".", "extend", "(", "headers", ")", "self", ".", "start_response", "(", "status", ",", "response_headers", ")", "return", "content" ]
Send an HTTP response
[ "Send", "an", "HTTP", "response" ]
train
https://github.com/craigahobbs/chisel/blob/d306a9eae2ff757647c6ca1c933bc944efa5c326/src/chisel/app.py#L200-L210
craigahobbs/chisel
src/chisel/app.py
Context.response_text
def response_text(self, status, text=None, content_type='text/plain', encoding='utf-8', headers=None):
    """ Send a plain-text response """

    if text is None:
        if isinstance(status, str):
            text = status
        else:
            text = status.phrase
    return self.response(status, content_type, [text.encode(encoding)], headers=headers)
python
[ "def", "response_text", "(", "self", ",", "status", ",", "text", "=", "None", ",", "content_type", "=", "'text/plain'", ",", "encoding", "=", "'utf-8'", ",", "headers", "=", "None", ")", ":", "if", "text", "is", "None", ":", "if", "isinstance", "(", "status", ",", "str", ")", ":", "text", "=", "status", "else", ":", "text", "=", "status", ".", "phrase", "return", "self", ".", "response", "(", "status", ",", "content_type", ",", "[", "text", ".", "encode", "(", "encoding", ")", "]", ",", "headers", "=", "headers", ")" ]
Send a plain-text response
[ "Send", "a", "plain", "-", "text", "response" ]
train
https://github.com/craigahobbs/chisel/blob/d306a9eae2ff757647c6ca1c933bc944efa5c326/src/chisel/app.py#L212-L222
craigahobbs/chisel
src/chisel/app.py
Context.response_json
def response_json(self, status, response, content_type='application/json', encoding='utf-8', headers=None, jsonp=None):
    """ Send a JSON response """

    encoder = JSONEncoder(
        check_circular=self.app.validate_output,
        allow_nan=False,
        sort_keys=True,
        indent=2 if self.app.pretty_output else None,
        separators=(',', ': ') if self.app.pretty_output else (',', ':')
    )
    content = encoder.encode(response)
    if jsonp:
        content_list = [jsonp.encode(encoding), b'(', content.encode(encoding), b');']
    else:
        content_list = [content.encode(encoding)]
    return self.response(status, content_type, content_list, headers=headers)
python
[ "def", "response_json", "(", "self", ",", "status", ",", "response", ",", "content_type", "=", "'application/json'", ",", "encoding", "=", "'utf-8'", ",", "headers", "=", "None", ",", "jsonp", "=", "None", ")", ":", "encoder", "=", "JSONEncoder", "(", "check_circular", "=", "self", ".", "app", ".", "validate_output", ",", "allow_nan", "=", "False", ",", "sort_keys", "=", "True", ",", "indent", "=", "2", "if", "self", ".", "app", ".", "pretty_output", "else", "None", ",", "separators", "=", "(", "','", ",", "': '", ")", "if", "self", ".", "app", ".", "pretty_output", "else", "(", "','", ",", "':'", ")", ")", "content", "=", "encoder", ".", "encode", "(", "response", ")", "if", "jsonp", ":", "content_list", "=", "[", "jsonp", ".", "encode", "(", "encoding", ")", ",", "b'('", ",", "content", ".", "encode", "(", "encoding", ")", ",", "b');'", "]", "else", ":", "content_list", "=", "[", "content", ".", "encode", "(", "encoding", ")", "]", "return", "self", ".", "response", "(", "status", ",", "content_type", ",", "content_list", ",", "headers", "=", "headers", ")" ]
Send a JSON response
[ "Send", "a", "JSON", "response" ]
train
https://github.com/craigahobbs/chisel/blob/d306a9eae2ff757647c6ca1c933bc944efa5c326/src/chisel/app.py#L224-L241
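The encoder arguments above are what separate compact wire output from human-readable output. This standalone snippet reproduces the two separator settings with the standard library JSONEncoder (chisel's identically named import is assumed to be the stdlib class):

from json import JSONEncoder

obj = {'b': 1, 'a': [1, 2]}

compact = JSONEncoder(allow_nan=False, sort_keys=True,
                      separators=(',', ':')).encode(obj)
pretty = JSONEncoder(allow_nan=False, sort_keys=True,
                     indent=2, separators=(',', ': ')).encode(obj)

print(compact)  # {"a":[1,2],"b":1}  - no whitespace, smallest payload
print(pretty)   # multi-line, 2-space indented form of the same document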
craigahobbs/chisel
src/chisel/app.py
Context.reconstruct_url
def reconstruct_url(self, path_info=None, query_string=None, relative=False):
    """ Reconstructs the request URL using the algorithm provided by PEP3333 """

    environ = self.environ

    if relative:
        url = ''
    else:
        url = environ['wsgi.url_scheme'] + '://'

        if environ.get('HTTP_HOST'):
            url += environ['HTTP_HOST']
        else:
            url += environ['SERVER_NAME']

            if environ['wsgi.url_scheme'] == 'https':
                if environ['SERVER_PORT'] != '443':
                    url += ':' + environ['SERVER_PORT']
            else:
                if environ['SERVER_PORT'] != '80':
                    url += ':' + environ['SERVER_PORT']

    url += quote(environ.get('SCRIPT_NAME', ''))
    if path_info is None:
        url += quote(environ.get('PATH_INFO', ''))
    else:
        url += path_info

    if query_string is None:
        if environ.get('QUERY_STRING'):
            url += '?' + environ['QUERY_STRING']
    else:
        if query_string:
            if isinstance(query_string, str):
                url += '?' + query_string
            else:
                url += '?' + encode_query_string(query_string)

    return url
python
[ "def", "reconstruct_url", "(", "self", ",", "path_info", "=", "None", ",", "query_string", "=", "None", ",", "relative", "=", "False", ")", ":", "environ", "=", "self", ".", "environ", "if", "relative", ":", "url", "=", "''", "else", ":", "url", "=", "environ", "[", "'wsgi.url_scheme'", "]", "+", "'://'", "if", "environ", ".", "get", "(", "'HTTP_HOST'", ")", ":", "url", "+=", "environ", "[", "'HTTP_HOST'", "]", "else", ":", "url", "+=", "environ", "[", "'SERVER_NAME'", "]", "if", "environ", "[", "'wsgi.url_scheme'", "]", "==", "'https'", ":", "if", "environ", "[", "'SERVER_PORT'", "]", "!=", "'443'", ":", "url", "+=", "':'", "+", "environ", "[", "'SERVER_PORT'", "]", "else", ":", "if", "environ", "[", "'SERVER_PORT'", "]", "!=", "'80'", ":", "url", "+=", "':'", "+", "environ", "[", "'SERVER_PORT'", "]", "url", "+=", "quote", "(", "environ", ".", "get", "(", "'SCRIPT_NAME'", ",", "''", ")", ")", "if", "path_info", "is", "None", ":", "url", "+=", "quote", "(", "environ", ".", "get", "(", "'PATH_INFO'", ",", "''", ")", ")", "else", ":", "url", "+=", "path_info", "if", "query_string", "is", "None", ":", "if", "environ", ".", "get", "(", "'QUERY_STRING'", ")", ":", "url", "+=", "'?'", "+", "environ", "[", "'QUERY_STRING'", "]", "else", ":", "if", "query_string", ":", "if", "isinstance", "(", "query_string", ",", "str", ")", ":", "url", "+=", "'?'", "+", "query_string", "else", ":", "url", "+=", "'?'", "+", "encode_query_string", "(", "query_string", ")", "return", "url" ]
Reconstructs the request URL using the algorithm provided by PEP3333
[ "Reconstructs", "the", "request", "URL", "using", "the", "algorithm", "provided", "by", "PEP3333" ]
train
https://github.com/craigahobbs/chisel/blob/d306a9eae2ff757647c6ca1c933bc944efa5c326/src/chisel/app.py#L243-L281
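Because reconstruct_url only reads from the WSGI environ dict, the PEP 3333 algorithm is easy to exercise with a hand-built environ. A condensed sketch of the same key precedence (HTTP_HOST wins over SERVER_NAME plus non-default port), not chisel's method verbatim:

from urllib.parse import quote

environ = {
    'wsgi.url_scheme': 'https',
    'SERVER_NAME': 'internal-host',
    'SERVER_PORT': '8443',
    'HTTP_HOST': 'example.com',   # takes precedence when present
    'SCRIPT_NAME': '/app',
    'PATH_INFO': '/search results',
    'QUERY_STRING': 'q=1',
}

url = environ['wsgi.url_scheme'] + '://'
if environ.get('HTTP_HOST'):
    url += environ['HTTP_HOST']
else:
    url += environ['SERVER_NAME']
    default_port = '443' if environ['wsgi.url_scheme'] == 'https' else '80'
    if environ['SERVER_PORT'] != default_port:
        url += ':' + environ['SERVER_PORT']
url += quote(environ.get('SCRIPT_NAME', '')) + quote(environ.get('PATH_INFO', ''))
if environ.get('QUERY_STRING'):
    url += '?' + environ['QUERY_STRING']

print(url)  # https://example.com/app/search%20results?q=1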
HolmesNL/confidence
confidence/io.py
read_xdg_config_dirs
def read_xdg_config_dirs(name, extension):
    """
    Read from files found in XDG-specified system-wide configuration paths,
    defaulting to ``/etc/xdg``. Depends on ``XDG_CONFIG_DIRS`` environment
    variable.

    :param name: application or configuration set name
    :param extension: file extension to look for
    :return: a `.Configuration` instance with values read from XDG-specified
        directories
    """
    # find optional value of ${XDG_CONFIG_DIRS}
    config_dirs = environ.get('XDG_CONFIG_DIRS')
    if config_dirs:
        # PATH-like env vars operate in decreasing precedence, reverse this path set to mimic the end result
        config_dirs = reversed(config_dirs.split(path.pathsep))
    else:
        # XDG spec: "If $XDG_CONFIG_DIRS is either not set or empty, a value equal to /etc/xdg should be used."
        config_dirs = ['/etc/xdg']

    # load a file from all config dirs, default to NotConfigured
    fname = '{name}.{extension}'.format(name=name, extension=extension)
    return loadf(*(path.join(config_dir, fname) for config_dir in config_dirs),
                 default=NotConfigured)
python
[ "def", "read_xdg_config_dirs", "(", "name", ",", "extension", ")", ":", "# find optional value of ${XDG_CONFIG_DIRS}", "config_dirs", "=", "environ", ".", "get", "(", "'XDG_CONFIG_DIRS'", ")", "if", "config_dirs", ":", "# PATH-like env vars operate in decreasing precedence, reverse this path set to mimic the end result", "config_dirs", "=", "reversed", "(", "config_dirs", ".", "split", "(", "path", ".", "pathsep", ")", ")", "else", ":", "# XDG spec: \"If $XDG_CONFIG_DIRS is either not set or empty, a value equal to /etc/xdg should be used.\"", "config_dirs", "=", "[", "'/etc/xdg'", "]", "# load a file from all config dirs, default to NotConfigured", "fname", "=", "'{name}.{extension}'", ".", "format", "(", "name", "=", "name", ",", "extension", "=", "extension", ")", "return", "loadf", "(", "*", "(", "path", ".", "join", "(", "config_dir", ",", "fname", ")", "for", "config_dir", "in", "config_dirs", ")", ",", "default", "=", "NotConfigured", ")" ]
Read from files found in XDG-specified system-wide configuration paths, defaulting to ``/etc/xdg``. Depends on ``XDG_CONFIG_DIRS`` environment variable. :param name: application or configuration set name :param extension: file extension to look for :return: a `.Configuration` instance with values read from XDG-specified directories
[ "Read", "from", "files", "found", "in", "XDG", "-", "specified", "system", "-", "wide", "configuration", "paths", "defaulting", "to", "/", "etc", "/", "xdg", ".", "Depends", "on", "XDG_CONFIG_DIRS", "environment", "variable", "." ]
train
https://github.com/HolmesNL/confidence/blob/e14d2d8769a01fa55676716f7a2f22714c2616d3/confidence/io.py#L12-L35
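The reversal in read_xdg_config_dirs is the subtle part: XDG_CONFIG_DIRS lists directories most-important-first, while the loadf call treats later files as more significant, so the list has to be flipped. A small illustration (the paths are made up):

import os

os.environ['XDG_CONFIG_DIRS'] = '/etc/xdg/override:/etc/xdg'
config_dirs = list(reversed(os.environ['XDG_CONFIG_DIRS'].split(os.pathsep)))
print(config_dirs)
# ['/etc/xdg', '/etc/xdg/override'] - least significant first, so values
# from /etc/xdg/override end up winning when the sources are merged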
HolmesNL/confidence
confidence/io.py
read_xdg_config_home
def read_xdg_config_home(name, extension):
    """
    Read from file found in XDG-specified configuration home directory,
    expanding to ``${HOME}/.config/name.extension`` by default. Depends on
    ``XDG_CONFIG_HOME`` or ``HOME`` environment variables.

    :param name: application or configuration set name
    :param extension: file extension to look for
    :return: a `.Configuration` instance, possibly `.NotConfigured`
    """
    # find optional value of ${XDG_CONFIG_HOME}
    config_home = environ.get('XDG_CONFIG_HOME')
    if not config_home:
        # XDG spec: "If $XDG_CONFIG_HOME is either not set or empty, a default equal to $HOME/.config should be used."
        # see https://specifications.freedesktop.org/basedir-spec/latest/ar01s03.html
        config_home = path.expanduser('~/.config')

    # expand to full path to configuration file in XDG config path
    return loadf(path.join(config_home, '{name}.{extension}'.format(name=name, extension=extension)),
                 default=NotConfigured)
python
[ "def", "read_xdg_config_home", "(", "name", ",", "extension", ")", ":", "# find optional value of ${XDG_CONFIG_HOME}", "config_home", "=", "environ", ".", "get", "(", "'XDG_CONFIG_HOME'", ")", "if", "not", "config_home", ":", "# XDG spec: \"If $XDG_CONFIG_HOME is either not set or empty, a default equal to $HOME/.config should be used.\"", "# see https://specifications.freedesktop.org/basedir-spec/latest/ar01s03.html", "config_home", "=", "path", ".", "expanduser", "(", "'~/.config'", ")", "# expand to full path to configuration file in XDG config path", "return", "loadf", "(", "path", ".", "join", "(", "config_home", ",", "'{name}.{extension}'", ".", "format", "(", "name", "=", "name", ",", "extension", "=", "extension", ")", ")", ",", "default", "=", "NotConfigured", ")" ]
Read from file found in XDG-specified configuration home directory, expanding to ``${HOME}/.config/name.extension`` by default. Depends on ``XDG_CONFIG_HOME`` or ``HOME`` environment variables. :param name: application or configuration set name :param extension: file extension to look for :return: a `.Configuration` instance, possibly `.NotConfigured`
[ "Read", "from", "file", "found", "in", "XDG", "-", "specified", "configuration", "home", "directory", "expanding", "to", "$", "{", "HOME", "}", "/", ".", "config", "/", "name", ".", "extension", "by", "default", ".", "Depends", "on", "XDG_CONFIG_HOME", "or", "HOME", "environment", "variables", "." ]
train
https://github.com/HolmesNL/confidence/blob/e14d2d8769a01fa55676716f7a2f22714c2616d3/confidence/io.py#L38-L57
HolmesNL/confidence
confidence/io.py
read_envvars
def read_envvars(name, extension):
    """
    Read environment variables starting with ``NAME_``, where subsequent
    underscores are interpreted as namespaces. Underscores can be retained as
    namespaces by doubling them up, e.g. ``NAME_SPA__CE_KEY`` would be
    accessible in the resulting `.Configuration` as ``c.spa_ce.key``, where
    ``c`` is the `.Configuration` instance.

    .. note::

        Environment variables are always `str`s, this function makes no
        effort to change this. All values read from environment variables
        will be `str` instances.

    :param name: environment variable prefix to look for (without the ``_``)
    :param extension: *(unused)*
    :return: a `.Configuration` instance, possibly `.NotConfigured`
    """
    prefix = '{}_'.format(name)
    prefix_len = len(prefix)
    envvar_file = '{}_config_file'.format(name)
    # create a new mapping from environment values starting with the prefix (but stripped of that prefix)
    values = {var.lower()[prefix_len:]: value
              for var, value in environ.items()
              # TODO: document ignoring envvar_file
              if var.lower().startswith(prefix) and var.lower() != envvar_file}
    # TODO: envvar values can only be str, how do we configure non-str values?
    if not values:
        return NotConfigured

    def dotted(name):
        # replace 'regular' underscores (those between alphanumeric characters) with dots first
        name = re.sub(r'([0-9A-Za-z])_([0-9A-Za-z])', r'\1.\2', name)
        # unescape double underscores back to a single one
        return re.sub(r'__', '_', name)

    return Configuration({dotted(name): value for name, value in values.items()})
python
[ "def", "read_envvars", "(", "name", ",", "extension", ")", ":", "prefix", "=", "'{}_'", ".", "format", "(", "name", ")", "prefix_len", "=", "len", "(", "prefix", ")", "envvar_file", "=", "'{}_config_file'", ".", "format", "(", "name", ")", "# create a new mapping from environment values starting with the prefix (but stripped of that prefix)", "values", "=", "{", "var", ".", "lower", "(", ")", "[", "prefix_len", ":", "]", ":", "value", "for", "var", ",", "value", "in", "environ", ".", "items", "(", ")", "# TODO: document ignoring envvar_file", "if", "var", ".", "lower", "(", ")", ".", "startswith", "(", "prefix", ")", "and", "var", ".", "lower", "(", ")", "!=", "envvar_file", "}", "# TODO: envvar values can only be str, how do we configure non-str values?", "if", "not", "values", ":", "return", "NotConfigured", "def", "dotted", "(", "name", ")", ":", "# replace 'regular' underscores (those between alphanumeric characters) with dots first", "name", "=", "re", ".", "sub", "(", "r'([0-9A-Za-z])_([0-9A-Za-z])'", ",", "r'\\1.\\2'", ",", "name", ")", "# unescape double underscores back to a single one", "return", "re", ".", "sub", "(", "r'__'", ",", "'_'", ",", "name", ")", "return", "Configuration", "(", "{", "dotted", "(", "name", ")", ":", "value", "for", "name", ",", "value", "in", "values", ".", "items", "(", ")", "}", ")" ]
Read environment variables starting with ``NAME_``, where subsequent underscores are interpreted as namespaces. Underscores can be retained as namespaces by doubling them up, e.g. ``NAME_SPA__CE_KEY`` would be accessible in the resulting `.Configuration` as ``c.spa_ce.key``, where ``c`` is the `.Configuration` instance. .. note:: Environment variables are always `str`s, this function makes no effort to change this. All values read from environment variables will be `str` instances. :param name: environment variable prefix to look for (without the ``_``) :param extension: *(unused)* :return: a `.Configuration` instance, possibly `.NotConfigured`
[ "Read", "environment", "variables", "starting", "with", "NAME_", "where", "subsequent", "underscores", "are", "interpreted", "as", "namespaces", ".", "Underscores", "can", "be", "retained", "as", "namespaces", "by", "doubling", "them", "up", "e", ".", "g", ".", "NAME_SPA__CE_KEY", "would", "be", "accessible", "in", "the", "resulting", ".", "Configuration", "as", "c", ".", "spa_ce", ".", "key", "where", "c", "is", "the", ".", "Configuration", "instance", "." ]
train
https://github.com/HolmesNL/confidence/blob/e14d2d8769a01fa55676716f7a2f22714c2616d3/confidence/io.py#L60-L96
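The dotted() helper is worth tracing once, since the double-underscore escape is easy to misread. This standalone copy of the helper from the function above shows NAME_SPA__CE_KEY becoming spa_ce.key as promised in the docstring (the NAME_ prefix is assumed to have been stripped and the name lowercased already):

import re

def dotted(name):
    # underscores between alphanumerics become namespace dots first...
    name = re.sub(r'([0-9A-Za-z])_([0-9A-Za-z])', r'\1.\2', name)
    # ...then doubled underscores collapse back to a literal underscore
    return re.sub(r'__', '_', name)

print(dotted('spa__ce_key'))  # spa_ce.key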
HolmesNL/confidence
confidence/io.py
read_envvar_file
def read_envvar_file(name, extension):
    """
    Read values from a file provided as an environment variable
    ``NAME_CONFIG_FILE``.

    :param name: environment variable prefix to look for (without the
        ``_CONFIG_FILE``)
    :param extension: *(unused)*
    :return: a `.Configuration`, possibly `.NotConfigured`
    """
    envvar_file = environ.get('{}_config_file'.format(name).upper())
    if envvar_file:
        # envvar set, load value as file
        return loadf(envvar_file)
    else:
        # envvar not set, return an empty source
        return NotConfigured
python
[ "def", "read_envvar_file", "(", "name", ",", "extension", ")", ":", "envvar_file", "=", "environ", ".", "get", "(", "'{}_config_file'", ".", "format", "(", "name", ")", ".", "upper", "(", ")", ")", "if", "envvar_file", ":", "# envvar set, load value as file", "return", "loadf", "(", "envvar_file", ")", "else", ":", "# envvar not set, return an empty source", "return", "NotConfigured" ]
Read values from a file provided as an environment variable ``NAME_CONFIG_FILE``. :param name: environment variable prefix to look for (without the ``_CONFIG_FILE``) :param extension: *(unused)* :return: a `.Configuration`, possibly `.NotConfigured`
[ "Read", "values", "from", "a", "file", "provided", "as", "a", "environment", "variable", "NAME_CONFIG_FILE", "." ]
train
https://github.com/HolmesNL/confidence/blob/e14d2d8769a01fa55676716f7a2f22714c2616d3/confidence/io.py#L99-L115
HolmesNL/confidence
confidence/io.py
read_envvar_dir
def read_envvar_dir(envvar, name, extension):
    """
    Read values from a file located in a directory specified by a particular
    environment variable. ``read_envvar_dir('HOME', 'example', 'yaml')`` would
    look for a file at ``/home/user/example.yaml``. When the environment
    variable isn't set or the file does not exist, `NotConfigured` will be
    returned.

    :param envvar: the environment variable to interpret as a directory
    :param name: application or configuration set name
    :param extension: file extension to look for
    :return: a `.Configuration`, possibly `.NotConfigured`
    """
    config_dir = environ.get(envvar)
    if not config_dir:
        return NotConfigured

    # envvar is set, construct full file path, expanding user to allow the envvar containing a value like ~/config
    config_path = path.join(path.expanduser(config_dir), '{name}.{extension}'.format(name=name, extension=extension))
    return loadf(config_path, default=NotConfigured)
python
[ "def", "read_envvar_dir", "(", "envvar", ",", "name", ",", "extension", ")", ":", "config_dir", "=", "environ", ".", "get", "(", "envvar", ")", "if", "not", "config_dir", ":", "return", "NotConfigured", "# envvar is set, construct full file path, expanding user to allow the envvar containing a value like ~/config", "config_path", "=", "path", ".", "join", "(", "path", ".", "expanduser", "(", "config_dir", ")", ",", "'{name}.{extension}'", ".", "format", "(", "name", "=", "name", ",", "extension", "=", "extension", ")", ")", "return", "loadf", "(", "config_path", ",", "default", "=", "NotConfigured", ")" ]
Read values from a file located in a directory specified by a particular environment variable. ``read_envvar_dir('HOME', 'example', 'yaml')`` would look for a file at ``/home/user/example.yaml``. When the environment variable isn't set or the file does not exist, `NotConfigured` will be returned. :param envvar: the environment variable to interpret as a directory :param name: application or configuration set name :param extension: file extension to look for :return: a `.Configuration`, possibly `.NotConfigured`
[ "Read", "values", "from", "a", "file", "located", "in", "a", "directory", "specified", "by", "a", "particular", "environment", "variable", ".", "read_envvar_dir", "(", "HOME", "example", "yaml", ")", "would", "look", "for", "a", "file", "at", "/", "home", "/", "user", "/", "example", ".", "yaml", ".", "When", "the", "environment", "variable", "isn", "t", "set", "or", "the", "file", "does", "not", "exist", "NotConfigured", "will", "be", "returned", "." ]
train
https://github.com/HolmesNL/confidence/blob/e14d2d8769a01fa55676716f7a2f22714c2616d3/confidence/io.py#L118-L137
HolmesNL/confidence
confidence/io.py
loaders
def loaders(*specifiers):
    """
    Generates loaders in the specified order.

    Arguments can be `.Locality` instances, producing the loader(s) available
    for that locality, `str` instances (used as file path templates) or
    `callable`s. These can be mixed:

    .. code-block:: python

        # define a load order using predefined user-local locations,
        # an explicit path, a template and a user-defined function
        load_order = loaders(Locality.user,
                             '/etc/defaults/hard-coded.yaml',
                             '/path/to/{name}.{extension}',
                             my_loader)

        # load configuration for name 'my-application' using the load order
        # defined above
        config = load_name('my-application', load_order=load_order)

    :param specifiers:
    :return: a `generator` of configuration loaders in the specified order
    """
    for specifier in specifiers:
        if isinstance(specifier, Locality):
            # localities can carry multiple loaders, flatten this
            yield from _LOADERS[specifier]
        else:
            # something not a locality, pass along verbatim
            yield specifier
python
[ "def", "loaders", "(", "*", "specifiers", ")", ":", "for", "specifier", "in", "specifiers", ":", "if", "isinstance", "(", "specifier", ",", "Locality", ")", ":", "# localities can carry multiple loaders, flatten this", "yield", "from", "_LOADERS", "[", "specifier", "]", "else", ":", "# something not a locality, pass along verbatim", "yield", "specifier" ]
Generates loaders in the specified order. Arguments can be `.Locality` instances, producing the loader(s) available for that locality, `str` instances (used as file path templates) or `callable`s. These can be mixed: .. code-block:: python # define a load order using predefined user-local locations, # an explicit path, a template and a user-defined function load_order = loaders(Locality.user, '/etc/defaults/hard-coded.yaml', '/path/to/{name}.{extension}', my_loader) # load configuration for name 'my-application' using the load order # defined above config = load_name('my-application', load_order=load_order) :param specifiers: :return: a `generator` of configuration loaders in the specified order
[ "Generates", "loaders", "in", "the", "specified", "order", "." ]
train
https://github.com/HolmesNL/confidence/blob/e14d2d8769a01fa55676716f7a2f22714c2616d3/confidence/io.py#L184-L214
HolmesNL/confidence
confidence/io.py
load
def load(*fps, missing=Missing.silent):
    """
    Read a `.Configuration` instance from file-like objects.

    :param fps: file-like objects (supporting ``.read()``)
    :param missing: policy to be used when a configured key is missing, either
        as a `.Missing` instance or a default value
    :return: a `.Configuration` instance providing values from *fps*
    :rtype: `.Configuration`
    """
    return Configuration(*(yaml.safe_load(fp.read()) for fp in fps),
                         missing=missing)
python
[ "def", "load", "(", "*", "fps", ",", "missing", "=", "Missing", ".", "silent", ")", ":", "return", "Configuration", "(", "*", "(", "yaml", ".", "safe_load", "(", "fp", ".", "read", "(", ")", ")", "for", "fp", "in", "fps", ")", ",", "missing", "=", "missing", ")" ]
Read a `.Configuration` instance from file-like objects. :param fps: file-like objects (supporting ``.read()``) :param missing: policy to be used when a configured key is missing, either as a `.Missing` instance or a default value :return: a `.Configuration` instance providing values from *fps* :rtype: `.Configuration`
[ "Read", "a", ".", "Configuration", "instance", "from", "file", "-", "like", "objects", "." ]
train
https://github.com/HolmesNL/confidence/blob/e14d2d8769a01fa55676716f7a2f22714c2616d3/confidence/io.py#L223-L233
HolmesNL/confidence
confidence/io.py
loadf
def loadf(*fnames, default=_NoDefault, missing=Missing.silent):
    """
    Read a `.Configuration` instance from named files.

    :param fnames: names of the files to ``open()``
    :param default: `dict` or `.Configuration` to use when a file does not
        exist (default is to raise a `FileNotFoundError`)
    :param missing: policy to be used when a configured key is missing, either
        as a `.Missing` instance or a default value
    :return: a `.Configuration` instance providing values from *fnames*
    :rtype: `.Configuration`
    """
    def readf(fname):
        if default is _NoDefault or path.exists(fname):
            # (attempt to) open fname if it exists OR if we're expected to raise an error on a missing file
            with open(fname, 'r') as fp:
                # default to empty dict, yaml.safe_load will return None for an empty document
                return yaml.safe_load(fp.read()) or {}
        else:
            return default

    return Configuration(*(readf(path.expanduser(fname)) for fname in fnames),
                         missing=missing)
python
[ "def", "loadf", "(", "*", "fnames", ",", "default", "=", "_NoDefault", ",", "missing", "=", "Missing", ".", "silent", ")", ":", "def", "readf", "(", "fname", ")", ":", "if", "default", "is", "_NoDefault", "or", "path", ".", "exists", "(", "fname", ")", ":", "# (attempt to) open fname if it exists OR if we're expected to raise an error on a missing file", "with", "open", "(", "fname", ",", "'r'", ")", "as", "fp", ":", "# default to empty dict, yaml.safe_load will return None for an empty document", "return", "yaml", ".", "safe_load", "(", "fp", ".", "read", "(", ")", ")", "or", "{", "}", "else", ":", "return", "default", "return", "Configuration", "(", "*", "(", "readf", "(", "path", ".", "expanduser", "(", "fname", ")", ")", "for", "fname", "in", "fnames", ")", ",", "missing", "=", "missing", ")" ]
Read a `.Configuration` instance from named files. :param fnames: names of the files to ``open()`` :param default: `dict` or `.Configuration` to use when a file does not exist (default is to raise a `FileNotFoundError`) :param missing: policy to be used when a configured key is missing, either as a `.Missing` instance or a default value :return: a `.Configuration` instance providing values from *fnames* :rtype: `.Configuration`
[ "Read", "a", ".", "Configuration", "instance", "from", "named", "files", "." ]
train
https://github.com/HolmesNL/confidence/blob/e14d2d8769a01fa55676716f7a2f22714c2616d3/confidence/io.py#L236-L257
HolmesNL/confidence
confidence/io.py
loads
def loads(*strings, missing=Missing.silent):
    """
    Read a `.Configuration` instance from strings.

    :param strings: configuration contents
    :param missing: policy to be used when a configured key is missing, either
        as a `.Missing` instance or a default value
    :return: a `.Configuration` instance providing values from *strings*
    :rtype: `.Configuration`
    """
    return Configuration(*(yaml.safe_load(string) for string in strings),
                         missing=missing)
python
[ "def", "loads", "(", "*", "strings", ",", "missing", "=", "Missing", ".", "silent", ")", ":", "return", "Configuration", "(", "*", "(", "yaml", ".", "safe_load", "(", "string", ")", "for", "string", "in", "strings", ")", ",", "missing", "=", "missing", ")" ]
Read a `.Configuration` instance from strings. :param strings: configuration contents :param missing: policy to be used when a configured key is missing, either as a `.Missing` instance or a default value :return: a `.Configuration` instance providing values from *strings* :rtype: `.Configuration`
[ "Read", "a", ".", "Configuration", "instance", "from", "strings", "." ]
train
https://github.com/HolmesNL/confidence/blob/e14d2d8769a01fa55676716f7a2f22714c2616d3/confidence/io.py#L260-L270
HolmesNL/confidence
confidence/io.py
load_name
def load_name(*names, load_order=DEFAULT_LOAD_ORDER, extension='yaml', missing=Missing.silent):
    """
    Read a `.Configuration` instance by name, trying to read from files in
    increasing significance. The default load order is `.system`, `.user`,
    `.application`, `.environment`.

    Multiple names are combined with multiple loaders using names as the
    'inner loop / selector', loading ``/etc/name1.yaml`` and
    ``/etc/name2.yaml`` before ``./name1.yaml`` and ``./name2.yaml``.

    :param names: application or configuration set names, in increasing
        significance
    :param load_order: ordered list of name templates or `callable`s, in
        increasing order of significance
    :param extension: file extension to be used
    :param missing: policy to be used when a configured key is missing, either
        as a `.Missing` instance or a default value
    :return: a `.Configuration` instance providing values loaded from *names*
        in *load_order* ordering
    """
    def generate_sources():
        # argument order for product matters, for names "foo" and "bar":
        # /etc/foo.yaml before /etc/bar.yaml, but both of them before ~/.foo.yaml and ~/.bar.yaml
        for source, name in product(load_order, names):
            if callable(source):
                yield source(name, extension)
            else:
                # expand user to turn ~/.name.yaml into /home/user/.name.yaml
                candidate = path.expanduser(source.format(name=name, extension=extension))
                yield loadf(candidate, default=NotConfigured)

    return Configuration(*generate_sources(), missing=missing)
python
[ "def", "load_name", "(", "*", "names", ",", "load_order", "=", "DEFAULT_LOAD_ORDER", ",", "extension", "=", "'yaml'", ",", "missing", "=", "Missing", ".", "silent", ")", ":", "def", "generate_sources", "(", ")", ":", "# argument order for product matters, for names \"foo\" and \"bar\":", "# /etc/foo.yaml before /etc/bar.yaml, but both of them before ~/.foo.yaml and ~/.bar.yaml", "for", "source", ",", "name", "in", "product", "(", "load_order", ",", "names", ")", ":", "if", "callable", "(", "source", ")", ":", "yield", "source", "(", "name", ",", "extension", ")", "else", ":", "# expand user to turn ~/.name.yaml into /home/user/.name.yaml", "candidate", "=", "path", ".", "expanduser", "(", "source", ".", "format", "(", "name", "=", "name", ",", "extension", "=", "extension", ")", ")", "yield", "loadf", "(", "candidate", ",", "default", "=", "NotConfigured", ")", "return", "Configuration", "(", "*", "generate_sources", "(", ")", ",", "missing", "=", "missing", ")" ]
Read a `.Configuration` instance by name, trying to read from files in increasing significance. The default load order is `.system`, `.user`, `.application`, `.environment`. Multiple names are combined with multiple loaders using names as the 'inner loop / selector', loading ``/etc/name1.yaml`` and ``/etc/name2.yaml`` before ``./name1.yaml`` and ``./name2.yaml``. :param names: application or configuration set names, in increasing significance :param load_order: ordered list of name templates or `callable`s, in increasing order of significance :param extension: file extension to be used :param missing: policy to be used when a configured key is missing, either as a `.Missing` instance or a default value :return: a `.Configuration` instance providing values loaded from *names* in *load_order* ordering
[ "Read", "a", ".", "Configuration", "instance", "by", "name", "trying", "to", "read", "from", "files", "in", "increasing", "significance", ".", "The", "default", "load", "order", "is", ".", "system", ".", "user", ".", "application", ".", "environment", "." ]
train
https://github.com/HolmesNL/confidence/blob/e14d2d8769a01fa55676716f7a2f22714c2616d3/confidence/io.py#L273-L304
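The comment in generate_sources describes the iteration order that itertools.product gives when load_order comes first; checking it directly makes the 'inner loop / selector' phrasing concrete (the two templates here are illustrative, not the actual DEFAULT_LOAD_ORDER):

from itertools import product

load_order = ('/etc/{name}.{extension}', '~/.{name}.{extension}')
names = ('foo', 'bar')

# product iterates its last argument fastest, so all names are tried
# per template before moving on to the next, more significant template
for source, name in product(load_order, names):
    print(source.format(name=name, extension='yaml'))
# /etc/foo.yaml
# /etc/bar.yaml
# ~/.foo.yaml
# ~/.bar.yaml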
hassa/BeatCop
beatcop.py
Lock.acquire
def acquire(self, block=True):
    """Acquire lock. Blocks until acquired if `block` is `True`, otherwise
    returns `False` if the lock could not be acquired."""
    while True:
        # Try to set the lock
        if self.redis.set(self.name, self.value, px=self.timeout, nx=True):
            # It's ours until the timeout now
            return True
        # Lock is taken
        if not block:
            return False
        # If blocking, try again in a bit
        time.sleep(self.sleep)
python
[ "def", "acquire", "(", "self", ",", "block", "=", "True", ")", ":", "while", "True", ":", "# Try to set the lock", "if", "self", ".", "redis", ".", "set", "(", "self", ".", "name", ",", "self", ".", "value", ",", "px", "=", "self", ".", "timeout", ",", "nx", "=", "True", ")", ":", "# It's ours until the timeout now", "return", "True", "# Lock is taken", "if", "not", "block", ":", "return", "False", "# If blocking, try again in a bit", "time", ".", "sleep", "(", "self", ".", "sleep", ")" ]
Acquire lock. Blocks until acquired if `block` is `True`, otherwise returns `False` if the lock could not be acquired.
[ "Acquire", "lock", ".", "Blocks", "until", "acquired", "if", "block", "is", "True", "otherwise", "returns", "False", "if", "the", "lock", "could", "not", "be", "acquired", "." ]
train
https://github.com/hassa/BeatCop/blob/bf7721e17a7828728b15c5833f047d858111197c/beatcop.py#L59-L70
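The acquire loop is the standard single-instance Redis lock pattern: SET with NX (only if the key is absent) and PX (millisecond expiry) in one atomic command. A hedged sketch with the redis-py client, using made-up names and timings:

import time
import redis  # redis-py client, assumed available and pointed at a live server

r = redis.Redis()
name, value = 'beatcop-lock', 'host-1234'  # value identifies this holder
timeout_ms, sleep_s = 10_000, 0.5

while True:
    # NX: only set if the key does not exist; PX: expire after timeout_ms
    if r.set(name, value, px=timeout_ms, nx=True):
        break  # lock acquired; it expires automatically if we die
    time.sleep(sleep_s)  # held by someone else, retry shortly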
hassa/BeatCop
beatcop.py
Lock.refresh
def refresh(self):
    """Refresh an existing lock to prevent it from expiring.
    Uses a Lua (EVAL) script to ensure only a lock which we own is being overwritten.
    Returns True if refresh succeeded, False if not."""
    keys = [self.name]
    args = [self.value, self.timeout]
    # Redis docs claim EVALs are atomic, and I'm inclined to believe it.
    if hasattr(self, '_refresh_script'):
        return self._refresh_script(keys=keys, args=args) == 1
    else:
        keys_and_args = keys + args
        return self.redis.eval(self.lua_refresh, len(keys), *keys_and_args)
python
[ "def", "refresh", "(", "self", ")", ":", "keys", "=", "[", "self", ".", "name", "]", "args", "=", "[", "self", ".", "value", ",", "self", ".", "timeout", "]", "# Redis docs claim EVALs are atomic, and I'm inclined to believe it.", "if", "hasattr", "(", "self", ",", "'_refresh_script'", ")", ":", "return", "self", ".", "_refresh_script", "(", "keys", "=", "keys", ",", "args", "=", "args", ")", "==", "1", "else", ":", "keys_and_args", "=", "keys", "+", "args", "return", "self", ".", "redis", ".", "eval", "(", "self", ".", "lua_refresh", ",", "len", "(", "keys", ")", ",", "*", "keys_and_args", ")" ]
Refresh an existing lock to prevent it from expiring. Uses a LUA (EVAL) script to ensure only a lock which we own is being overwritten. Returns True if refresh succeeded, False if not.
[ "Refresh", "an", "existing", "lock", "to", "prevent", "it", "from", "expiring", ".", "Uses", "a", "LUA", "(", "EVAL", ")", "script", "to", "ensure", "only", "a", "lock", "which", "we", "own", "is", "being", "overwritten", ".", "Returns", "True", "if", "refresh", "succeeded", "False", "if", "not", "." ]
train
https://github.com/hassa/BeatCop/blob/bf7721e17a7828728b15c5833f047d858111197c/beatcop.py#L72-L83
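The lua_refresh source is not included in this record; a typical check-and-set script for this pattern extends the expiry only while the stored value still matches ours, which is what makes a refresh safe against a stolen lock. A sketch using redis-py's register_script, matching the cached _refresh_script branch above (the script body is an assumption):

import redis

# Assumed script: only the current holder may extend the lock's expiry.
LUA_REFRESH = """
if redis.call("get", KEYS[1]) == ARGV[1] then
    return redis.call("pexpire", KEYS[1], ARGV[2])
else
    return 0
end
"""

conn = redis.StrictRedis()
refresh = conn.register_script(LUA_REFRESH)  # returns a callable Script object
still_ours = refresh(keys=['beatcop:demo'], args=['holder-1', 10000]) == 1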
hassa/BeatCop
beatcop.py
BeatCop.run
def run(self): """Run process if nobody else is, otherwise wait until we're needed. Never returns.""" log.info("Waiting for lock, currently held by %s", self.lock.who()) if self.lock.acquire(): log.info("Lock '%s' acquired", self.lockname) # We got the lock, so we make sure the process is running and keep refreshing the lock - if we ever stop for any reason, for example because our host died, the lock will soon expire. while True: if self.process is None: # Process not spawned yet self.process = self.spawn(self.command) log.info("Spawned PID %d", self.process.pid) child_status = self.process.poll() if child_status is not None: # Oops, process died on us. log.error("Child died with exit code %d", child_status) sys.exit(1) # Refresh lock and sleep if not self.lock.refresh(): who = self.lock.who() if who is None: if self.lock.acquire(block=False): log.warning("Lock refresh failed, but successfully re-acquired unclaimed lock") else: log.error("Lock refresh and subsequent re-acquire failed, giving up (Lock now held by %s)", self.lock.who()) self.cleanup() sys.exit(os.EX_UNAVAILABLE) else: log.error("Lock refresh failed, %s stole it - bailing out", self.lock.who()) self.cleanup() sys.exit(os.EX_UNAVAILABLE) time.sleep(self.sleep)
python
def run(self): """Run process if nobody else is, otherwise wait until we're needed. Never returns.""" log.info("Waiting for lock, currently held by %s", self.lock.who()) if self.lock.acquire(): log.info("Lock '%s' acquired", self.lockname) # We got the lock, so we make sure the process is running and keep refreshing the lock - if we ever stop for any reason, for example because our host died, the lock will soon expire. while True: if self.process is None: # Process not spawned yet self.process = self.spawn(self.command) log.info("Spawned PID %d", self.process.pid) child_status = self.process.poll() if child_status is not None: # Oops, process died on us. log.error("Child died with exit code %d", child_status) sys.exit(1) # Refresh lock and sleep if not self.lock.refresh(): who = self.lock.who() if who is None: if self.lock.acquire(block=False): log.warning("Lock refresh failed, but successfully re-acquired unclaimed lock") else: log.error("Lock refresh and subsequent re-acquire failed, giving up (Lock now held by %s)", self.lock.who()) self.cleanup() sys.exit(os.EX_UNAVAILABLE) else: log.error("Lock refresh failed, %s stole it - bailing out", self.lock.who()) self.cleanup() sys.exit(os.EX_UNAVAILABLE) time.sleep(self.sleep)
[ "def", "run", "(", "self", ")", ":", "log", ".", "info", "(", "\"Waiting for lock, currently held by %s\"", ",", "self", ".", "lock", ".", "who", "(", ")", ")", "if", "self", ".", "lock", ".", "acquire", "(", ")", ":", "log", ".", "info", "(", "\"Lock '%s' acquired\"", ",", "self", ".", "lockname", ")", "# We got the lock, so we make sure the process is running and keep refreshing the lock - if we ever stop for any reason, for example because our host died, the lock will soon expire.", "while", "True", ":", "if", "self", ".", "process", "is", "None", ":", "# Process not spawned yet", "self", ".", "process", "=", "self", ".", "spawn", "(", "self", ".", "command", ")", "log", ".", "info", "(", "\"Spawned PID %d\"", ",", "self", ".", "process", ".", "pid", ")", "child_status", "=", "self", ".", "process", ".", "poll", "(", ")", "if", "child_status", "is", "not", "None", ":", "# Oops, process died on us.", "log", ".", "error", "(", "\"Child died with exit code %d\"", ",", "child_status", ")", "sys", ".", "exit", "(", "1", ")", "# Refresh lock and sleep", "if", "not", "self", ".", "lock", ".", "refresh", "(", ")", ":", "who", "=", "self", ".", "lock", ".", "who", "(", ")", "if", "who", "is", "None", ":", "if", "self", ".", "lock", ".", "acquire", "(", "block", "=", "False", ")", ":", "log", ".", "warning", "(", "\"Lock refresh failed, but successfully re-acquired unclaimed lock\"", ")", "else", ":", "log", ".", "error", "(", "\"Lock refresh and subsequent re-acquire failed, giving up (Lock now held by %s)\"", ",", "self", ".", "lock", ".", "who", "(", ")", ")", "self", ".", "cleanup", "(", ")", "sys", ".", "exit", "(", "os", ".", "EX_UNAVAILABLE", ")", "else", ":", "log", ".", "error", "(", "\"Lock refresh failed, %s stole it - bailing out\"", ",", "self", ".", "lock", ".", "who", "(", ")", ")", "self", ".", "cleanup", "(", ")", "sys", ".", "exit", "(", "os", ".", "EX_UNAVAILABLE", ")", "time", ".", "sleep", "(", "self", ".", "sleep", ")" ]
Run process if nobody else is, otherwise wait until we're needed. Never returns.
[ "Run", "process", "if", "nobody", "else", "is", "otherwise", "wait", "until", "we", "re", "needed", ".", "Never", "returns", "." ]
train
https://github.com/hassa/BeatCop/blob/bf7721e17a7828728b15c5833f047d858111197c/beatcop.py#L140-L170
hassa/BeatCop
beatcop.py
BeatCop.spawn
def spawn(self, command): """Spawn process.""" if self.shell: args = command else: args = shlex.split(command) return subprocess.Popen(args, shell=self.shell)
python
def spawn(self, command): """Spawn process.""" if self.shell: args = command else: args = shlex.split(command) return subprocess.Popen(args, shell=self.shell)
[ "def", "spawn", "(", "self", ",", "command", ")", ":", "if", "self", ".", "shell", ":", "args", "=", "command", "else", ":", "args", "=", "shlex", ".", "split", "(", "command", ")", "return", "subprocess", ".", "Popen", "(", "args", ",", "shell", "=", "self", ".", "shell", ")" ]
Spawn process.
[ "Spawn", "process", "." ]
train
https://github.com/hassa/BeatCop/blob/bf7721e17a7828728b15c5833f047d858111197c/beatcop.py#L172-L178
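When self.shell is false, Popen needs the command pre-split into argv tokens, which is why spawn branches on it; shlex.split handles shell-style quoting:

import shlex

print(shlex.split('myjob --interval "5 minutes"'))
# ['myjob', '--interval', '5 minutes']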
hassa/BeatCop
beatcop.py
BeatCop.cleanup
def cleanup(self): """Clean up, making sure the process is stopped before we pack up and go home.""" if self.process is None: # Process wasn't running yet, so nothing to worry about return if self.process.poll() is None: log.info("Sending TERM to %d", self.process.pid) self.process.terminate() # Give process a second to terminate, if it didn't, kill it. start = time.clock() while time.clock() - start < 1.0: time.sleep(0.05) if self.process.poll() is not None: break else: log.info("Sending KILL to %d", self.process.pid) self.process.kill() assert self.process.poll() is not None
python
def cleanup(self): """Clean up, making sure the process is stopped before we pack up and go home.""" if self.process is None: # Process wasn't running yet, so nothing to worry about return if self.process.poll() is None: log.info("Sending TERM to %d", self.process.pid) self.process.terminate() # Give process a second to terminate, if it didn't, kill it. start = time.clock() while time.clock() - start < 1.0: time.sleep(0.05) if self.process.poll() is not None: break else: log.info("Sending KILL to %d", self.process.pid) self.process.kill() assert self.process.poll() is not None
[ "def", "cleanup", "(", "self", ")", ":", "if", "self", ".", "process", "is", "None", ":", "# Process wasn't running yet, so nothing to worry about", "return", "if", "self", ".", "process", ".", "poll", "(", ")", "is", "None", ":", "log", ".", "info", "(", "\"Sending TERM to %d\"", ",", "self", ".", "process", ".", "pid", ")", "self", ".", "process", ".", "terminate", "(", ")", "# Give process a second to terminate, if it didn't, kill it.", "start", "=", "time", ".", "clock", "(", ")", "while", "time", ".", "clock", "(", ")", "-", "start", "<", "1.0", ":", "time", ".", "sleep", "(", "0.05", ")", "if", "self", ".", "process", ".", "poll", "(", ")", "is", "not", "None", ":", "break", "else", ":", "log", ".", "info", "(", "\"Sending KILL to %d\"", ",", "self", ".", "process", ".", "pid", ")", "self", ".", "process", ".", "kill", "(", ")", "assert", "self", ".", "process", ".", "poll", "(", ")", "is", "not", "None" ]
Clean up, making sure the process is stopped before we pack up and go home.
[ "Clean", "up", "making", "sure", "the", "process", "is", "stopped", "before", "we", "pack", "up", "and", "go", "home", "." ]
train
https://github.com/hassa/BeatCop/blob/bf7721e17a7828728b15c5833f047d858111197c/beatcop.py#L180-L196
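One caveat on cleanup above: time.clock() was deprecated in Python 3.3 and removed in 3.8. A sketch of the same terminate-then-kill escalation on current Python, using Popen.wait's timeout in place of the manual polling loop (an equivalent rewrite, not the project's code):

import subprocess

def stop(process, grace=1.0):
    if process.poll() is None:
        process.terminate()
        try:
            # Give the child `grace` seconds to exit before escalating.
            process.wait(timeout=grace)
        except subprocess.TimeoutExpired:
            process.kill()
            process.wait()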
hassa/BeatCop
beatcop.py
BeatCop.handle_signal
def handle_signal(self, sig, frame): """Handles signals, surprisingly.""" if sig in [signal.SIGINT]: log.warning("Ctrl-C pressed, shutting down...") if sig in [signal.SIGTERM]: log.warning("SIGTERM received, shutting down...") self.cleanup() sys.exit(-sig)
python
def handle_signal(self, sig, frame): """Handles signals, surprisingly.""" if sig in [signal.SIGINT]: log.warning("Ctrl-C pressed, shutting down...") if sig in [signal.SIGTERM]: log.warning("SIGTERM received, shutting down...") self.cleanup() sys.exit(-sig)
[ "def", "handle_signal", "(", "self", ",", "sig", ",", "frame", ")", ":", "if", "sig", "in", "[", "signal", ".", "SIGINT", "]", ":", "log", ".", "warning", "(", "\"Ctrl-C pressed, shutting down...\"", ")", "if", "sig", "in", "[", "signal", ".", "SIGTERM", "]", ":", "log", ".", "warning", "(", "\"SIGTERM received, shutting down...\"", ")", "self", ".", "cleanup", "(", ")", "sys", ".", "exit", "(", "-", "sig", ")" ]
Handles signals, surprisingly.
[ "Handles", "signals", "surprisingly", "." ]
train
https://github.com/hassa/BeatCop/blob/bf7721e17a7828728b15c5833f047d858111197c/beatcop.py#L198-L205
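For the handler above to fire, it must be registered for each signal; the registration call is not part of this record, but the standard pattern is:

import signal
import sys

def handle_signal(sig, frame):
    print('caught signal %d, shutting down' % sig)
    sys.exit(-sig)

signal.signal(signal.SIGINT, handle_signal)
signal.signal(signal.SIGTERM, handle_signal)
signal.pause()  # block until a signal arrives (POSIX only)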
alfredodeza/notario
notario/engine.py
validate
def validate(data, schema, defined_keys=False): """ Main entry point for the validation engine. :param data: The incoming data, as a dictionary object. :param schema: The schema against which the data will be validated """ if isinstance(data, dict): validator = Validator(data, schema, defined_keys=defined_keys) validator.validate() else: raise TypeError('expected data to be of type dict, but got: %s' % type(data))
python
def validate(data, schema, defined_keys=False): """ Main entry point for the validation engine. :param data: The incoming data, as a dictionary object. :param schema: The schema against which the data will be validated """ if isinstance(data, dict): validator = Validator(data, schema, defined_keys=defined_keys) validator.validate() else: raise TypeError('expected data to be of type dict, but got: %s' % type(data))
[ "def", "validate", "(", "data", ",", "schema", ",", "defined_keys", "=", "False", ")", ":", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "validator", "=", "Validator", "(", "data", ",", "schema", ",", "defined_keys", "=", "defined_keys", ")", "validator", ".", "validate", "(", ")", "else", ":", "raise", "TypeError", "(", "'expected data to be of type dict, but got: %s'", "%", "type", "(", "data", ")", ")" ]
Main entry point for the validation engine. :param data: The incoming data, as a dictionary object. :param schema: The schema against which the data will be validated
[ "Main", "entry", "point", "for", "the", "validation", "engine", "." ]
train
https://github.com/alfredodeza/notario/blob/d5dc2edfcb75d9291ced3f2551f368c35dd31475/notario/engine.py#L343-L354
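notario schemas pair keys with expected values (either side may also be a callable validator). A minimal sketch of calling validate; the tuple schema shape and the exceptions import path follow the project's documented layout rather than this record:

from notario import validate
from notario.exceptions import Invalid

validate({'name': 'alfred'}, ('name', 'alfred'))  # passes silently

try:
    validate({'name': 'alfredo'}, ('name', 'alfred'))
except Invalid as error:
    print(error)

try:
    validate([], ('name', 'alfred'))  # guard above: data must be a dict
except TypeError as error:
    print(error)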
alfredodeza/notario
notario/engine.py
Validator.traverser
def traverser(self, data, schema, tree): """ Traverses the dictionary, recursing onto itself if it sees appropriate key/value pairs that indicate that there is a need for more validation in a branch below us. """ if hasattr(schema, '__validator_leaf__'): return schema(data, tree) if hasattr(schema, 'must_validate'): # cherry picking? if not len(schema.must_validate): reason = "must_validate attribute must not be empty" raise SchemaError(data, tree, reason=reason) data = sift(data, schema.must_validate) schema = self.sanitize_optionals(data, schema, tree) self.is_alpha_ordered(data, schema, tree) validated_indexes = [] skip_missing_indexes = getattr(schema, 'must_validate', False) if len(data) < len(schema): # we have missing required items in data, but we don't know # which ones so find what may fail: data_keys = [v[0] for v in data.values()] schema_keys = [v[0] for v in schema.values()] def enforce_once(data_keys, schema_key): # XXX Go through all the data keys and try and see if they pass # validation against the schema. At this point it is impossible # to know which data key corresponds to what schema key # (because schema keys can be a function/callable) so it is # a *very* naive way to try and detect which one might be # missing for data_key in data_keys: failed = None try: enforce(data_key, schema_key, tree, pair='key') return except Invalid: failed = data_key, schema_key if failed: return failed # if there are no callables in the schema keys, just # find the missing data key directly if all([not is_callable(s) for s in schema_keys]): for schema_key in schema_keys: if schema_key not in data_keys: msg = "required key in data is missing: %s" % str(schema_key) raise Invalid(None, tree, reason=msg, pair='key') for schema_key in schema_keys: failure = enforce_once(data_keys, schema_key) if failure: _, failed_schema_key = failure msg = "required key in data is missing: %s" % str(failed_schema_key) raise Invalid(None, tree, reason=msg, pair='key') for index in range(len(data)): self.length_equality(data, schema, index, tree) key, value = data[index] skey, svalue = schema[index] tree.append(key) # Validate the key before anything, to prevent recursing self.key_leaf(data[index], schema[index], tree) # If a dict is a value we need to recurse. # XXX Should we check isinstance(value, ndict) ? if isinstance(value, dict) and len(value): self.traverser(value, svalue, tree) else: self.value_leaf(data[index], schema[index], tree) if tree: tree.pop() validated_indexes.append(index) # XXX There is a chance we might have missing items from # the incoming data that are labeled as required from the schema # we should make sure *here* that we account for that and raise # the appropriate exception. Since the loop finished and everything # seems to have passed, this lack of check will give false positives. missing_indexes = set(schema.keys()).difference(validated_indexes) if missing_indexes: if skip_missing_indexes: return for i in missing_indexes: if not hasattr(schema[i], 'is_optional'): required_key = schema[i][0] tree.append('item[%s]' % i) msg = "required item in schema is missing: %s" % str(required_key) raise Invalid(required_key, tree, reason=msg, pair='key')
python
def traverser(self, data, schema, tree): """ Traverses the dictionary, recursing onto itself if it sees appropriate key/value pairs that indicate that there is a need for more validation in a branch below us. """ if hasattr(schema, '__validator_leaf__'): return schema(data, tree) if hasattr(schema, 'must_validate'): # cherry picking? if not len(schema.must_validate): reason = "must_validate attribute must not be empty" raise SchemaError(data, tree, reason=reason) data = sift(data, schema.must_validate) schema = self.sanitize_optionals(data, schema, tree) self.is_alpha_ordered(data, schema, tree) validated_indexes = [] skip_missing_indexes = getattr(schema, 'must_validate', False) if len(data) < len(schema): # we have missing required items in data, but we don't know # which ones so find what may fail: data_keys = [v[0] for v in data.values()] schema_keys = [v[0] for v in schema.values()] def enforce_once(data_keys, schema_key): # XXX Go through all the data keys and try and see if they pass # validation against the schema. At this point it is impossible # to know which data key corresponds to what schema key # (because schema keys can be a function/callable) so it is # a *very* naive way to try and detect which one might be # missing for data_key in data_keys: failed = None try: enforce(data_key, schema_key, tree, pair='key') return except Invalid: failed = data_key, schema_key if failed: return failed # if there are no callables in the schema keys, just # find the missing data key directly if all([not is_callable(s) for s in schema_keys]): for schema_key in schema_keys: if schema_key not in data_keys: msg = "required key in data is missing: %s" % str(schema_key) raise Invalid(None, tree, reason=msg, pair='key') for schema_key in schema_keys: failure = enforce_once(data_keys, schema_key) if failure: _, failed_schema_key = failure msg = "required key in data is missing: %s" % str(failed_schema_key) raise Invalid(None, tree, reason=msg, pair='key') for index in range(len(data)): self.length_equality(data, schema, index, tree) key, value = data[index] skey, svalue = schema[index] tree.append(key) # Validate the key before anything, to prevent recursing self.key_leaf(data[index], schema[index], tree) # If a dict is a value we need to recurse. # XXX Should we check isinstance(value, ndict) ? if isinstance(value, dict) and len(value): self.traverser(value, svalue, tree) else: self.value_leaf(data[index], schema[index], tree) if tree: tree.pop() validated_indexes.append(index) # XXX There is a chance we might have missing items from # the incoming data that are labeled as required from the schema # we should make sure *here* that we account for that and raise # the appropriate exception. Since the loop finished and everything # seems to have passed, this lack of check will give false positives. missing_indexes = set(schema.keys()).difference(validated_indexes) if missing_indexes: if skip_missing_indexes: return for i in missing_indexes: if not hasattr(schema[i], 'is_optional'): required_key = schema[i][0] tree.append('item[%s]' % i) msg = "required item in schema is missing: %s" % str(required_key) raise Invalid(required_key, tree, reason=msg, pair='key')
[ "def", "traverser", "(", "self", ",", "data", ",", "schema", ",", "tree", ")", ":", "if", "hasattr", "(", "schema", ",", "'__validator_leaf__'", ")", ":", "return", "schema", "(", "data", ",", "tree", ")", "if", "hasattr", "(", "schema", ",", "'must_validate'", ")", ":", "# cherry picking?", "if", "not", "len", "(", "schema", ".", "must_validate", ")", ":", "reason", "=", "\"must_validate attribute must not be empty\"", "raise", "SchemaError", "(", "data", ",", "tree", ",", "reason", "=", "reason", ")", "data", "=", "sift", "(", "data", ",", "schema", ".", "must_validate", ")", "schema", "=", "self", ".", "sanitize_optionals", "(", "data", ",", "schema", ",", "tree", ")", "self", ".", "is_alpha_ordered", "(", "data", ",", "schema", ",", "tree", ")", "validated_indexes", "=", "[", "]", "skip_missing_indexes", "=", "getattr", "(", "schema", ",", "'must_validate'", ",", "False", ")", "if", "len", "(", "data", ")", "<", "len", "(", "schema", ")", ":", "# we have missing required items in data, but we don't know", "# which ones so find what may fail:", "data_keys", "=", "[", "v", "[", "0", "]", "for", "v", "in", "data", ".", "values", "(", ")", "]", "schema_keys", "=", "[", "v", "[", "0", "]", "for", "v", "in", "schema", ".", "values", "(", ")", "]", "def", "enforce_once", "(", "data_keys", ",", "schema_key", ")", ":", "# XXX Go through all the data keys and try and see if they pass", "# validation against the schema. At this point it is impossible", "# to know which data key corresponds to what schema key", "# (because schema keys can be a function/callable) so it is", "# a *very* naive way to try and detect which one might be", "# missing", "for", "data_key", "in", "data_keys", ":", "failed", "=", "None", "try", ":", "enforce", "(", "data_key", ",", "schema_key", ",", "tree", ",", "pair", "=", "'key'", ")", "return", "except", "Invalid", ":", "failed", "=", "data_key", ",", "schema_key", "if", "failed", ":", "return", "failed", "# if there are no callables in the schema keys, just", "# find the missing data key directly", "if", "all", "(", "[", "not", "is_callable", "(", "s", ")", "for", "s", "in", "schema_keys", "]", ")", ":", "for", "schema_key", "in", "schema_keys", ":", "if", "schema_key", "not", "in", "data_keys", ":", "msg", "=", "\"required key in data is missing: %s\"", "%", "str", "(", "schema_key", ")", "raise", "Invalid", "(", "None", ",", "tree", ",", "reason", "=", "msg", ",", "pair", "=", "'key'", ")", "for", "schema_key", "in", "schema_keys", ":", "failure", "=", "enforce_once", "(", "data_keys", ",", "schema_key", ")", "if", "failure", ":", "_", ",", "failed_schema_key", "=", "failure", "msg", "=", "\"required key in data is missing: %s\"", "%", "str", "(", "failed_schema_key", ")", "raise", "Invalid", "(", "None", ",", "tree", ",", "reason", "=", "msg", ",", "pair", "=", "'key'", ")", "for", "index", "in", "range", "(", "len", "(", "data", ")", ")", ":", "self", ".", "length_equality", "(", "data", ",", "schema", ",", "index", ",", "tree", ")", "key", ",", "value", "=", "data", "[", "index", "]", "skey", ",", "svalue", "=", "schema", "[", "index", "]", "tree", ".", "append", "(", "key", ")", "# Validate the key before anything, to prevent recursing", "self", ".", "key_leaf", "(", "data", "[", "index", "]", ",", "schema", "[", "index", "]", ",", "tree", ")", "# If a dict is a value we need to recurse.", "# XXX Should we check isinstance(value, ndict) ?", "if", "isinstance", "(", "value", ",", "dict", ")", "and", "len", "(", "value", ")", ":", "self", ".", "traverser", "(", 
"value", ",", "svalue", ",", "tree", ")", "else", ":", "self", ".", "value_leaf", "(", "data", "[", "index", "]", ",", "schema", "[", "index", "]", ",", "tree", ")", "if", "tree", ":", "tree", ".", "pop", "(", ")", "validated_indexes", ".", "append", "(", "index", ")", "# XXX There is a chance we might have missing items from", "# the incoming data that are labeled as required from the schema", "# we should make sure *here* that we account for that and raise", "# the appropriate exception. Since the loop finished and everything", "# seems to have passed, this lack of check will give false positives.", "missing_indexes", "=", "set", "(", "schema", ".", "keys", "(", ")", ")", ".", "difference", "(", "validated_indexes", ")", "if", "missing_indexes", ":", "if", "skip_missing_indexes", ":", "return", "for", "i", "in", "missing_indexes", ":", "if", "not", "hasattr", "(", "schema", "[", "i", "]", ",", "'is_optional'", ")", ":", "required_key", "=", "schema", "[", "i", "]", "[", "0", "]", "tree", ".", "append", "(", "'item[%s]'", "%", "i", ")", "msg", "=", "\"required item in schema is missing: %s\"", "%", "str", "(", "required_key", ")", "raise", "Invalid", "(", "required_key", ",", "tree", ",", "reason", "=", "msg", ",", "pair", "=", "'key'", ")" ]
Traverses the dictionary, recursing onto itself if it sees appropriate key/value pairs that indicate that there is a need for more validation in a branch below us.
[ "Traverses", "the", "dictionary", "recursing", "onto", "itself", "if", "it", "sees", "appropriate", "key", "/", "value", "pairs", "that", "indicate", "that", "there", "is", "a", "need", "for", "more", "validation", "in", "a", "branch", "below", "us", "." ]
train
https://github.com/alfredodeza/notario/blob/d5dc2edfcb75d9291ced3f2551f368c35dd31475/notario/engine.py#L24-L118
alfredodeza/notario
notario/engine.py
Validator.key_leaf
def key_leaf(self, data, schema, tree): """ The deepest validation we can make in any given circumstance for a key. Does not recurse, it will just receive both values and the tree, passing them on to the :func:`enforce` function. """ key, value = data schema_key, schema_value = schema enforce(key, schema_key, tree, 'key')
python
def key_leaf(self, data, schema, tree): """ The deepest validation we can make in any given circumstance for a key. Does not recurse, it will just receive both values and the tree, passing them on to the :func:`enforce` function. """ key, value = data schema_key, schema_value = schema enforce(key, schema_key, tree, 'key')
[ "def", "key_leaf", "(", "self", ",", "data", ",", "schema", ",", "tree", ")", ":", "key", ",", "value", "=", "data", "schema_key", ",", "schema_value", "=", "schema", "enforce", "(", "key", ",", "schema_key", ",", "tree", ",", "'key'", ")" ]
The deepest validation we can make in any given circumstance for a key. Does not recurse, it will just receive both values and the tree, passing them on to the :func:`enforce` function.
[ "The", "deepest", "validation", "we", "can", "make", "in", "any", "given", "circumstance", "for", "a", "key", ".", "Does", "not", "recurse", "it", "will", "just", "receive", "both", "values", "and", "the", "tree", "passing", "them", "on", "to", "the", ":", "fun", ":", "enforce", "function", "." ]
train
https://github.com/alfredodeza/notario/blob/d5dc2edfcb75d9291ced3f2551f368c35dd31475/notario/engine.py#L121-L129
alfredodeza/notario
notario/engine.py
Validator.value_leaf
def value_leaf(self, data, schema, tree): """ The deepest validation we can make in any given circumstance for a value. Does not recurse, it will just receive both values and the tree, passing them on to the :func:`enforce` function. """ key, value = data schema_key, schema_value = schema if hasattr(schema_value, '__validator_leaf__'): return schema_value(value, tree) enforce(value, schema_value, tree, 'value')
python
def value_leaf(self, data, schema, tree): """ The deepest validation we can make in any given circumstance for a value. Does not recurse, it will just receive both values and the tree, passing them on to the :func:`enforce` function. """ key, value = data schema_key, schema_value = schema if hasattr(schema_value, '__validator_leaf__'): return schema_value(value, tree) enforce(value, schema_value, tree, 'value')
[ "def", "value_leaf", "(", "self", ",", "data", ",", "schema", ",", "tree", ")", ":", "key", ",", "value", "=", "data", "schema_key", ",", "schema_value", "=", "schema", "if", "hasattr", "(", "schema_value", ",", "'__validator_leaf__'", ")", ":", "return", "schema_value", "(", "value", ",", "tree", ")", "enforce", "(", "value", ",", "schema_value", ",", "tree", ",", "'value'", ")" ]
The deepest validation we can make in any given circumstance for a value. Does not recurse, it will just receive both values and the tree, passing them on to the :func:`enforce` function.
[ "The", "deepest", "validation", "we", "can", "make", "in", "any", "given", "circumstance", "for", "a", "value", ".", "Does", "not", "recurse", "it", "will", "just", "receive", "both", "values", "and", "the", "tree", "passing", "them", "on", "to", "the", ":", "fun", ":", "enforce", "function", "." ]
train
https://github.com/alfredodeza/notario/blob/d5dc2edfcb75d9291ced3f2551f368c35dd31475/notario/engine.py#L131-L142
RowleyGroup/pyqueue
pyqueue/utils.py
strfdelta
def strfdelta(tdelta, fmt): """ Used to format `datetime.timedelta` objects. Works just like `strftime` >>> strfdelta(duration, '%H:%M:%S') :param tdelta: Time duration which is an instance of datetime.timedelta :param fmt: The pattern to format the timedelta with :rtype: str """ substitutes = dict() hours, rem = divmod(tdelta.total_seconds(), 3600) minutes, seconds = divmod(rem, 60) substitutes["H"] = '{:02d}'.format(int(hours)) substitutes["M"] = '{:02d}'.format(int(minutes)) substitutes["S"] = '{:02d}'.format(int(seconds)) return DeltaTemplate(fmt).substitute(**substitutes)
python
def strfdelta(tdelta, fmt): """ Used to format `datetime.timedelta` objects. Works just like `strftime` >>> strfdelta(duration, '%H:%M:%S') :param tdelta: Time duration which is an instance of datetime.timedelta :param fmt: The pattern to format the timedelta with :rtype: str """ substitutes = dict() hours, rem = divmod(tdelta.total_seconds(), 3600) minutes, seconds = divmod(rem, 60) substitutes["H"] = '{:02d}'.format(int(hours)) substitutes["M"] = '{:02d}'.format(int(minutes)) substitutes["S"] = '{:02d}'.format(int(seconds)) return DeltaTemplate(fmt).substitute(**substitutes)
[ "def", "strfdelta", "(", "tdelta", ",", "fmt", ")", ":", "substitutes", "=", "dict", "(", ")", "hours", ",", "rem", "=", "divmod", "(", "tdelta", ".", "total_seconds", "(", ")", ",", "3600", ")", "minutes", ",", "seconds", "=", "divmod", "(", "rem", ",", "60", ")", "substitutes", "[", "\"H\"", "]", "=", "'{:02d}'", ".", "format", "(", "int", "(", "hours", ")", ")", "substitutes", "[", "\"M\"", "]", "=", "'{:02d}'", ".", "format", "(", "int", "(", "minutes", ")", ")", "substitutes", "[", "\"S\"", "]", "=", "'{:02d}'", ".", "format", "(", "int", "(", "seconds", ")", ")", "return", "DeltaTemplate", "(", "fmt", ")", ".", "substitute", "(", "*", "*", "substitutes", ")" ]
Used to format `datetime.timedelta` objects. Works just like `strftime` >>> strfdelta(duration, '%H:%M:%S') :param tdelta: Time duration which is an instance of datetime.timedelta :param fmt: The pattern to format the timedelta with :rtype: str
[ "Used", "to", "format", "datetime", ".", "timedelta", "objects", ".", "Works", "just", "like", "strftime" ]
train
https://github.com/RowleyGroup/pyqueue/blob/24de6e1b06b9626ed94d0d5a859bc71bd3afbb4f/pyqueue/utils.py#L15-L32
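DeltaTemplate is referenced but not defined in this record; the usual implementation is a string.Template subclass whose delimiter is '%', which is what makes a '%H:%M:%S' pattern work. A sketch under that assumption, with the strfdelta above in the same scope:

from datetime import timedelta
from string import Template

class DeltaTemplate(Template):
    delimiter = '%'  # '%H' becomes a placeholder named H

print(strfdelta(timedelta(hours=3, minutes=7, seconds=5), '%H:%M:%S'))  # 03:07:05
print(strfdelta(timedelta(days=2), '%H:%M:%S'))                         # 48:00:00

Note that hours are total elapsed hours, not hours-of-day, so a two-day duration renders as 48:00:00 rather than rolling over.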
RowleyGroup/pyqueue
pyqueue/utils.py
get_user_information
def get_user_information(): """ Returns the user's information :rtype: (str, int, str) """ try: import pwd _username = pwd.getpwuid(os.getuid())[0] _userid = os.getuid() _uname = os.uname()[1] except ImportError: import getpass _username = getpass.getuser() _userid = 0 import platform _uname = platform.node() return _username, _userid, _uname
python
def get_user_information(): """ Returns the user's information :rtype: (str, int, str) """ try: import pwd _username = pwd.getpwuid(os.getuid())[0] _userid = os.getuid() _uname = os.uname()[1] except ImportError: import getpass _username = getpass.getuser() _userid = 0 import platform _uname = platform.node() return _username, _userid, _uname
[ "def", "get_user_information", "(", ")", ":", "try", ":", "import", "pwd", "_username", "=", "pwd", ".", "getpwuid", "(", "os", ".", "getuid", "(", ")", ")", "[", "0", "]", "_userid", "=", "os", ".", "getuid", "(", ")", "_uname", "=", "os", ".", "uname", "(", ")", "[", "1", "]", "except", "ImportError", ":", "import", "getpass", "_username", "=", "getpass", ".", "getuser", "(", ")", "_userid", "=", "0", "import", "platform", "_uname", "=", "platform", ".", "node", "(", ")", "return", "_username", ",", "_userid", ",", "_uname" ]
Returns the user's information :rtype: (str, int, str)
[ "Returns", "the", "user", "s", "information" ]
train
https://github.com/RowleyGroup/pyqueue/blob/24de6e1b06b9626ed94d0d5a859bc71bd3afbb4f/pyqueue/utils.py#L35-L53
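The try/except above is a portability shim: the pwd module exists only on POSIX, so the ImportError branch is effectively the Windows path, where the uid is reported as a placeholder 0. Usage:

username, uid, host = get_user_information()
print('%s (uid %d) on %s' % (username, uid, host))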
oceanprotocol/osmosis-azure-driver
osmosis_azure_driver/computing_plugin.py
Plugin._login_azure_app_token
def _login_azure_app_token(client_id=None, client_secret=None, tenant_id=None): """ Authenticate APP using token credentials: https://docs.microsoft.com/en-us/python/azure/python-sdk-azure-authenticate?view=azure-python :return: ~ServicePrincipalCredentials credentials """ client_id = os.getenv('AZURE_CLIENT_ID') if not client_id else client_id client_secret = os.getenv('AZURE_CLIENT_SECRET') if not client_secret else client_secret tenant_id = os.getenv('AZURE_TENANT_ID') if not tenant_id else tenant_id credentials = ServicePrincipalCredentials( client_id=client_id, secret=client_secret, tenant=tenant_id, ) return credentials
python
def _login_azure_app_token(client_id=None, client_secret=None, tenant_id=None): """ Authenticate APP using token credentials: https://docs.microsoft.com/en-us/python/azure/python-sdk-azure-authenticate?view=azure-python :return: ~ServicePrincipalCredentials credentials """ client_id = os.getenv('AZURE_CLIENT_ID') if not client_id else client_id client_secret = os.getenv('AZURE_CLIENT_SECRET') if not client_secret else client_secret tenant_id = os.getenv('AZURE_TENANT_ID') if not tenant_id else tenant_id credentials = ServicePrincipalCredentials( client_id=client_id, secret=client_secret, tenant=tenant_id, ) return credentials
[ "def", "_login_azure_app_token", "(", "client_id", "=", "None", ",", "client_secret", "=", "None", ",", "tenant_id", "=", "None", ")", ":", "client_id", "=", "os", ".", "getenv", "(", "'AZURE_CLIENT_ID'", ")", "if", "not", "client_id", "else", "client_id", "client_secret", "=", "os", ".", "getenv", "(", "'AZURE_CLIENT_SECRET'", ")", "if", "not", "client_secret", "else", "client_secret", "tenant_id", "=", "os", ".", "getenv", "(", "'AZURE_TENANT_ID'", ")", "if", "not", "tenant_id", "else", "tenant_id", "credentials", "=", "ServicePrincipalCredentials", "(", "client_id", "=", "client_id", ",", "secret", "=", "client_secret", ",", "tenant", "=", "tenant_id", ",", ")", "return", "credentials" ]
Authenticate APP using token credentials: https://docs.microsoft.com/en-us/python/azure/python-sdk-azure-authenticate?view=azure-python :return: ~ServicePrincipalCredentials credentials
[ "Authenticate", "APP", "using", "token", "credentials", ":", "https", ":", "//", "docs", ".", "microsoft", ".", "com", "/", "en", "-", "us", "/", "python", "/", "azure", "/", "python", "-", "sdk", "-", "azure", "-", "authenticate?view", "=", "azure", "-", "python", ":", "return", ":", "~ServicePrincipalCredentials", "credentials" ]
train
https://github.com/oceanprotocol/osmosis-azure-driver/blob/36bcfa96547fb6117346b02b0ac6a74345c59695/osmosis_azure_driver/computing_plugin.py#L42-L56
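Note the precedence in the method above: an explicitly passed argument wins, and os.getenv is consulted only for arguments left as None. A usage sketch, assuming the method is a staticmethod as its self-less signature suggests; the placeholder values are obviously hypothetical:

import os

os.environ['AZURE_CLIENT_ID'] = '<service principal app id>'
os.environ['AZURE_CLIENT_SECRET'] = '<client secret>'
os.environ['AZURE_TENANT_ID'] = '<tenant id>'

credentials = Plugin._login_azure_app_token()  # all three fall back to the environment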
oceanprotocol/osmosis-azure-driver
osmosis_azure_driver/computing_plugin.py
Plugin.exec_container
def exec_container(self, asset_url, algorithm_url, resource_group_name, account_name, account_key, location, share_name_input='compute', share_name_output='output', docker_image='python:3.6-alpine', memory=1.5, cpu=1): """Prepare a docker image that will run in the cloud, mounting the asset and executing the algorithm. :param asset_url :param algorithm_url :param resource_group_name: :param account_name: :param account_key: :param share_name_input: :param share_name_output: :param location: """ try: container_group_name = 'compute' + str(int(time.time())) result_file = self._create_container_group(resource_group_name=resource_group_name, name=container_group_name, image=docker_image, location=location, memory=memory, cpu=cpu, algorithm=algorithm_url, asset=asset_url, input_mount_point='/input', output_moint_point='/output', account_name=account_name, account_key=account_key, share_name_input=share_name_input, share_name_output=share_name_output ) while self.client.container_groups.get(resource_group_name, container_group_name).provisioning_state != 'Succeeded': logging.info("Waiting to resources ") while self.client.container_groups.get(resource_group_name, container_group_name). \ containers[0].instance_view.current_state.state != 'Terminated': logging.info("Waiting to terminate") self.delete_vm(container_group_name, resource_group_name) return result_file except Exception: logging.error("There was a problem executing your container") raise Exception
python
def exec_container(self, asset_url, algorithm_url, resource_group_name, account_name, account_key, location, share_name_input='compute', share_name_output='output', docker_image='python:3.6-alpine', memory=1.5, cpu=1): """Prepare a docker image that will run in the cloud, mounting the asset and executing the algorithm. :param asset_url :param algorithm_url :param resource_group_name: :param account_name: :param account_key: :param share_name_input: :param share_name_output: :param location: """ try: container_group_name = 'compute' + str(int(time.time())) result_file = self._create_container_group(resource_group_name=resource_group_name, name=container_group_name, image=docker_image, location=location, memory=memory, cpu=cpu, algorithm=algorithm_url, asset=asset_url, input_mount_point='/input', output_moint_point='/output', account_name=account_name, account_key=account_key, share_name_input=share_name_input, share_name_output=share_name_output ) while self.client.container_groups.get(resource_group_name, container_group_name).provisioning_state != 'Succeeded': logging.info("Waiting to resources ") while self.client.container_groups.get(resource_group_name, container_group_name). \ containers[0].instance_view.current_state.state != 'Terminated': logging.info("Waiting to terminate") self.delete_vm(container_group_name, resource_group_name) return result_file except Exception: logging.error("There was a problem executing your container") raise Exception
[ "def", "exec_container", "(", "self", ",", "asset_url", ",", "algorithm_url", ",", "resource_group_name", ",", "account_name", ",", "account_key", ",", "location", ",", "share_name_input", "=", "'compute'", ",", "share_name_output", "=", "'output'", ",", "docker_image", "=", "'python:3.6-alpine'", ",", "memory", "=", "1.5", ",", "cpu", "=", "1", ")", ":", "try", ":", "container_group_name", "=", "'compute'", "+", "str", "(", "int", "(", "time", ".", "time", "(", ")", ")", ")", "result_file", "=", "self", ".", "_create_container_group", "(", "resource_group_name", "=", "resource_group_name", ",", "name", "=", "container_group_name", ",", "image", "=", "docker_image", ",", "location", "=", "location", ",", "memory", "=", "memory", ",", "cpu", "=", "cpu", ",", "algorithm", "=", "algorithm_url", ",", "asset", "=", "asset_url", ",", "input_mount_point", "=", "'/input'", ",", "output_moint_point", "=", "'/output'", ",", "account_name", "=", "account_name", ",", "account_key", "=", "account_key", ",", "share_name_input", "=", "share_name_input", ",", "share_name_output", "=", "share_name_output", ")", "while", "self", ".", "client", ".", "container_groups", ".", "get", "(", "resource_group_name", ",", "container_group_name", ")", ".", "provisioning_state", "!=", "'Succeeded'", ":", "logging", ".", "info", "(", "\"Waiting to resources \"", ")", "while", "self", ".", "client", ".", "container_groups", ".", "get", "(", "resource_group_name", ",", "container_group_name", ")", ".", "containers", "[", "0", "]", ".", "instance_view", ".", "current_state", ".", "state", "!=", "'Terminated'", ":", "logging", ".", "info", "(", "\"Waiting to terminate\"", ")", "self", ".", "delete_vm", "(", "container_group_name", ",", "resource_group_name", ")", "return", "result_file", "except", "Exception", ":", "logging", ".", "error", "(", "\"There was a problem executing your container\"", ")", "raise", "Exception" ]
Prepare a docker image that will run in the cloud, mounting the asset and executing the algorithm. :param asset_url :param algorithm_url :param resource_group_name: :param account_name: :param account_key: :param share_name_input: :param share_name_output: :param location:
[ "Prepare", "a", "docker", "image", "that", "will", "run", "in", "the", "cloud", "mounting", "the", "asset", "and", "executing", "the", "algorithm", ".", ":", "param", "asset_url", ":", "param", "algorithm_url", ":", "param", "resource_group_name", ":", ":", "param", "account_name", ":", ":", "param", "account_key", ":", ":", "param", "share_name_input", ":", ":", "param", "share_name_output", ":", ":", "param", "location", ":" ]
train
https://github.com/oceanprotocol/osmosis-azure-driver/blob/36bcfa96547fb6117346b02b0ac6a74345c59695/osmosis_azure_driver/computing_plugin.py#L135-L184
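The two while loops above poll the management API in a tight loop with no sleep, which hammers the service and floods the log at INFO level. A hedged alternative (not the project's code) is to poll with an interval and a deadline:

import time

def wait_for(predicate, interval=5.0, timeout=600.0):
    # Poll `predicate` every `interval` seconds until it is true or `timeout` passes.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return False

# e.g., with client, resource_group_name and container_group_name from the context above:
# wait_for(lambda: client.container_groups.get(resource_group_name,
#                                              container_group_name).provisioning_state == 'Succeeded')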
oceanprotocol/osmosis-azure-driver
osmosis_azure_driver/computing_plugin.py
Plugin.list_container_groups
def list_container_groups(self, resource_group_name): """Lists the container groups in the specified resource group. Arguments: resource_group_name {str} -- The name of the resource group containing the container group(s). """ print("Listing container groups in resource group '{0}'...".format(resource_group_name)) container_groups = self.client.container_groups.list_by_resource_group(resource_group_name) for container_group in container_groups: print(" {0}".format(container_group.name))
python
def list_container_groups(self, resource_group_name): """Lists the container groups in the specified resource group. Arguments: resource_group_name {str} -- The name of the resource group containing the container group(s). """ print("Listing container groups in resource group '{0}'...".format(resource_group_name)) container_groups = self.client.container_groups.list_by_resource_group(resource_group_name) for container_group in container_groups: print(" {0}".format(container_group.name))
[ "def", "list_container_groups", "(", "self", ",", "resource_group_name", ")", ":", "print", "(", "\"Listing container groups in resource group '{0}'...\"", ".", "format", "(", "resource_group_name", ")", ")", "container_groups", "=", "self", ".", "client", ".", "container_groups", ".", "list_by_resource_group", "(", "resource_group_name", ")", "for", "container_group", "in", "container_groups", ":", "print", "(", "\" {0}\"", ".", "format", "(", "container_group", ".", "name", ")", ")" ]
Lists the container groups in the specified resource group. Arguments: resource_group_name {str} -- The name of the resource group containing the container group(s).
[ "Lists", "the", "container", "groups", "in", "the", "specified", "resource", "group", "." ]
train
https://github.com/oceanprotocol/osmosis-azure-driver/blob/36bcfa96547fb6117346b02b0ac6a74345c59695/osmosis_azure_driver/computing_plugin.py#L210-L224
nhoffman/fastalite
fastalite/fastalite.py
fastalite
def fastalite(handle): """Return a sequence of namedtuple objects from a fasta file with attributes (id, description, seq) given open file-like object ``handle`` """ Seq = namedtuple('Seq', ['id', 'description', 'seq']) header, seq = '', [] for line in handle: if line.startswith('>'): if header: yield Seq(header.split()[0], header, ''.join(seq)) header, seq = line[1:].strip(), [] else: seq.append(line.strip()) if header and seq: yield Seq(header.split()[0], header, ''.join(seq))
python
def fastalite(handle): """Return a sequence of namedtuple objects from a fasta file with attributes (id, description, seq) given open file-like object ``handle`` """ Seq = namedtuple('Seq', ['id', 'description', 'seq']) header, seq = '', [] for line in handle: if line.startswith('>'): if header: yield Seq(header.split()[0], header, ''.join(seq)) header, seq = line[1:].strip(), [] else: seq.append(line.strip()) if header and seq: yield Seq(header.split()[0], header, ''.join(seq))
[ "def", "fastalite", "(", "handle", ")", ":", "Seq", "=", "namedtuple", "(", "'Seq'", ",", "[", "'id'", ",", "'description'", ",", "'seq'", "]", ")", "header", ",", "seq", "=", "''", ",", "[", "]", "for", "line", "in", "handle", ":", "if", "line", ".", "startswith", "(", "'>'", ")", ":", "if", "header", ":", "yield", "Seq", "(", "header", ".", "split", "(", ")", "[", "0", "]", ",", "header", ",", "''", ".", "join", "(", "seq", ")", ")", "header", ",", "seq", "=", "line", "[", "1", ":", "]", ".", "strip", "(", ")", ",", "[", "]", "else", ":", "seq", ".", "append", "(", "line", ".", "strip", "(", ")", ")", "if", "header", "and", "seq", ":", "yield", "Seq", "(", "header", ".", "split", "(", ")", "[", "0", "]", ",", "header", ",", "''", ".", "join", "(", "seq", ")", ")" ]
Return a sequence of namedtuple objects from a fasta file with attributes (id, description, seq) given open file-like object ``handle``
[ "Return", "a", "sequence", "of", "namedtuple", "objects", "from", "a", "fasta", "file", "with", "attributes", "(", "id", "description", "seq", ")", "given", "open", "file", "-", "like", "object", "handle" ]
train
https://github.com/nhoffman/fastalite/blob/d544a9e2b5150cf59f0f9651f6f3d659caf13848/fastalite/fastalite.py#L52-L71
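Because the parser simply accumulates lines between '>' headers, wrapped sequence lines are joined and the id is the first whitespace-delimited token of the header. A quick check with an in-memory file:

import io

fasta = io.StringIO('>seq1 first record\nACGT\nACGT\n>seq2\nGGCC\n')
for rec in fastalite(fasta):
    print(rec.id, rec.seq)
# seq1 ACGTACGT
# seq2 GGCC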
nhoffman/fastalite
fastalite/fastalite.py
fastqlite
def fastqlite(handle): """Return a sequence of namedtuple objects from a fastq file with attributes (id, description, seq, qual) given open file-like object ``handle``. This parser assumes that lines corresponding to sequences and quality scores are not wrapped. Raises ``ValueError`` for malformed records. See https://doi.org/10.1093/nar/gkp1137 for a discussion of the fastq format. """ Seq = namedtuple('Seq', ['id', 'description', 'seq', 'qual']) for i, chunk in enumerate(grouper(handle, 4, '')): description, seq, plus, qual = chunk seq, qual = seq.strip(), qual.strip() checks = [description.startswith('@'), seq, plus.startswith('+'), qual, len(seq) == len(qual)] if not all(checks): raise ValueError('Malformed record around line {}'.format(i * 4)) description = description[1:].strip() yield Seq(description.split()[0], description, seq, qual)
python
def fastqlite(handle): """Return a sequence of namedtuple objects from a fastq file with attributes (id, description, seq, qual) given open file-like object ``handle``. This parser assumes that lines corresponding to sequences and quality scores are not wrapped. Raises ``ValueError`` for malformed records. See https://doi.org/10.1093/nar/gkp1137 for a discussion of the fastq format. """ Seq = namedtuple('Seq', ['id', 'description', 'seq', 'qual']) for i, chunk in enumerate(grouper(handle, 4, '')): description, seq, plus, qual = chunk seq, qual = seq.strip(), qual.strip() checks = [description.startswith('@'), seq, plus.startswith('+'), qual, len(seq) == len(qual)] if not all(checks): raise ValueError('Malformed record around line {}'.format(i * 4)) description = description[1:].strip() yield Seq(description.split()[0], description, seq, qual)
[ "def", "fastqlite", "(", "handle", ")", ":", "Seq", "=", "namedtuple", "(", "'Seq'", ",", "[", "'id'", ",", "'description'", ",", "'seq'", ",", "'qual'", "]", ")", "for", "i", ",", "chunk", "in", "enumerate", "(", "grouper", "(", "handle", ",", "4", ",", "''", ")", ")", ":", "description", ",", "seq", ",", "plus", ",", "qual", "=", "chunk", "seq", ",", "qual", "=", "seq", ".", "strip", "(", ")", ",", "qual", ".", "strip", "(", ")", "checks", "=", "[", "description", ".", "startswith", "(", "'@'", ")", ",", "seq", ",", "plus", ".", "startswith", "(", "'+'", ")", ",", "qual", ",", "len", "(", "seq", ")", "==", "len", "(", "qual", ")", "]", "if", "not", "all", "(", "checks", ")", ":", "raise", "ValueError", "(", "'Malformed record around line {}'", ".", "format", "(", "i", "*", "4", ")", ")", "description", "=", "description", "[", "1", ":", "]", ".", "strip", "(", ")", "yield", "Seq", "(", "description", ".", "split", "(", ")", "[", "0", "]", ",", "description", ",", "seq", ",", "qual", ")" ]
Return a sequence of namedtuple objects from a fastq file with attributes (id, description, seq, qual) given open file-like object ``handle``. This parser assumes that lines corresponding to sequences and quality scores are not wrapped. Raises ``ValueError`` for malformed records. See https://doi.org/10.1093/nar/gkp1137 for a discussion of the fastq format.
[ "Return", "a", "sequence", "of", "namedtuple", "objects", "from", "a", "fastq", "file", "with", "attributes", "(", "id", "description", "seq", "qual", ")", "given", "open", "file", "-", "like", "object", "handle", ".", "This", "parser", "assumes", "that", "lines", "corresponding", "to", "sequences", "and", "quality", "scores", "are", "not", "wrapped", ".", "Raises", "ValueError", "for", "malformed", "records", "." ]
train
https://github.com/nhoffman/fastalite/blob/d544a9e2b5150cf59f0f9651f6f3d659caf13848/fastalite/fastalite.py#L81-L105
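fastqlite walks the file four lines at a time via the module's grouper helper (the itertools zip_longest recipe, defined elsewhere in fastalite.py), which is why wrapped records are rejected rather than reassembled. A quick check, including a malformed record:

import io

rec = next(fastqlite(io.StringIO('@read1 sample x\nACGT\n+\nIIII\n')))
print(rec.id, rec.description, rec.seq, rec.qual)
# read1 read1 sample x ACGT IIII

try:
    list(fastqlite(io.StringIO('@read1\nACGT\n+\nIII\n')))  # qual shorter than seq
except ValueError as error:
    print(error)  # Malformed record around line 0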
juiceinc/recipe
recipe/ingredients.py
Dimension.cauldron_extras
def cauldron_extras(self): """ Yield extra tuples containing a field name and a callable that takes a row """ for extra in super(Dimension, self).cauldron_extras: yield extra if self.formatters: prop = self.id + '_raw' else: prop = self.id_prop yield self.id + '_id', lambda row: getattr(row, prop)
python
def cauldron_extras(self): """ Yield extra tuples containing a field name and a callable that takes a row """ for extra in super(Dimension, self).cauldron_extras: yield extra if self.formatters: prop = self.id + '_raw' else: prop = self.id_prop yield self.id + '_id', lambda row: getattr(row, prop)
[ "def", "cauldron_extras", "(", "self", ")", ":", "for", "extra", "in", "super", "(", "Dimension", ",", "self", ")", ".", "cauldron_extras", ":", "yield", "extra", "if", "self", ".", "formatters", ":", "prop", "=", "self", ".", "id", "+", "'_raw'", "else", ":", "prop", "=", "self", ".", "id_prop", "yield", "self", ".", "id", "+", "'_id'", ",", "lambda", "row", ":", "getattr", "(", "row", ",", "prop", ")" ]
Yield extra tuples containing a field name and a callable that takes a row
[ "Yield", "extra", "tuples", "containing", "a", "field", "name", "and", "a", "callable", "that", "takes", "a", "row" ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/ingredients.py#L280-L292
juiceinc/recipe
recipe/ingredients.py
Dimension.make_column_suffixes
def make_column_suffixes(self): """ Make sure we have the right column suffixes. These will be appended to `id` when generating the query. """ if self.column_suffixes: return self.column_suffixes if len(self.columns) == 0: return () elif len(self.columns) == 1: if self.formatters: return '_raw', else: return '', elif len(self.columns) == 2: if self.formatters: return '_id', '_raw', else: return '_id', '', else: raise BadIngredient( 'column_suffixes must be supplied if there is ' 'more than one column' )
python
def make_column_suffixes(self): """ Make sure we have the right column suffixes. These will be appended to `id` when generating the query. """ if self.column_suffixes: return self.column_suffixes if len(self.columns) == 0: return () elif len(self.columns) == 1: if self.formatters: return '_raw', else: return '', elif len(self.columns) == 2: if self.formatters: return '_id', '_raw', else: return '_id', '', else: raise BadIngredient( 'column_suffixes must be supplied if there is ' 'more than one column' )
[ "def", "make_column_suffixes", "(", "self", ")", ":", "if", "self", ".", "column_suffixes", ":", "return", "self", ".", "column_suffixes", "if", "len", "(", "self", ".", "columns", ")", "==", "0", ":", "return", "(", ")", "elif", "len", "(", "self", ".", "columns", ")", "==", "1", ":", "if", "self", ".", "formatters", ":", "return", "'_raw'", ",", "else", ":", "return", "''", ",", "elif", "len", "(", "self", ".", "columns", ")", "==", "2", ":", "if", "self", ".", "formatters", ":", "return", "'_id'", ",", "'_raw'", ",", "else", ":", "return", "'_id'", ",", "''", ",", "else", ":", "raise", "BadIngredient", "(", "'column_suffixes must be supplied if there is '", "'more than one column'", ")" ]
Make sure we have the right column suffixes. These will be appended to `id` when generating the query.
[ "Make", "sure", "we", "have", "the", "right", "column", "suffixes", ".", "These", "will", "be", "appended", "to", "id", "when", "generating", "the", "query", "." ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/ingredients.py#L294-L319
juiceinc/recipe
recipe/shelf.py
parse_condition
def parse_condition( cond, selectable, aggregated=False, default_aggregation='sum' ): """Create a SQLAlchemy clause from a condition.""" if cond is None: return None else: if 'and' in cond: conditions = [ parse_condition( c, selectable, aggregated, default_aggregation ) for c in cond['and'] ] return and_(*conditions) elif 'or' in cond: conditions = [ parse_condition( c, selectable, aggregated, default_aggregation ) for c in cond['or'] ] return or_(*conditions) elif 'field' not in cond: raise BadIngredient('field must be defined in condition') field = parse_field( cond['field'], selectable, aggregated=aggregated, default_aggregation=default_aggregation ) if 'in' in cond: value = cond['in'] if isinstance(value, dict): raise BadIngredient('value for in must be a list') condition_expression = getattr(field, 'in_')(tuple(value)) elif 'gt' in cond: value = cond['gt'] if isinstance(value, (list, dict)): raise BadIngredient('conditional value must be a scalar') condition_expression = getattr(field, '__gt__')(value) elif 'gte' in cond: value = cond['gte'] if isinstance(value, (list, dict)): raise BadIngredient('conditional value must be a scalar') condition_expression = getattr(field, '__ge__')(value) elif 'lt' in cond: value = cond['lt'] if isinstance(value, (list, dict)): raise BadIngredient('conditional value must be a scalar') condition_expression = getattr(field, '__lt__')(value) elif 'lte' in cond: value = cond['lte'] if isinstance(value, (list, dict)): raise BadIngredient('conditional value must be a scalar') condition_expression = getattr(field, '__le__')(value) elif 'eq' in cond: value = cond['eq'] if isinstance(value, (list, dict)): raise BadIngredient('conditional value must be a scalar') condition_expression = getattr(field, '__eq__')(value) elif 'ne' in cond: value = cond['ne'] if isinstance(value, (list, dict)): raise BadIngredient('conditional value must be a scalar') condition_expression = getattr(field, '__ne__')(value) else: raise BadIngredient('Bad condition') return condition_expression
python
def parse_condition( cond, selectable, aggregated=False, default_aggregation='sum' ): """Create a SQLAlchemy clause from a condition.""" if cond is None: return None else: if 'and' in cond: conditions = [ parse_condition( c, selectable, aggregated, default_aggregation ) for c in cond['and'] ] return and_(*conditions) elif 'or' in cond: conditions = [ parse_condition( c, selectable, aggregated, default_aggregation ) for c in cond['or'] ] return or_(*conditions) elif 'field' not in cond: raise BadIngredient('field must be defined in condition') field = parse_field( cond['field'], selectable, aggregated=aggregated, default_aggregation=default_aggregation ) if 'in' in cond: value = cond['in'] if isinstance(value, dict): raise BadIngredient('value for in must be a list') condition_expression = getattr(field, 'in_')(tuple(value)) elif 'gt' in cond: value = cond['gt'] if isinstance(value, (list, dict)): raise BadIngredient('conditional value must be a scalar') condition_expression = getattr(field, '__gt__')(value) elif 'gte' in cond: value = cond['gte'] if isinstance(value, (list, dict)): raise BadIngredient('conditional value must be a scalar') condition_expression = getattr(field, '__ge__')(value) elif 'lt' in cond: value = cond['lt'] if isinstance(value, (list, dict)): raise BadIngredient('conditional value must be a scalar') condition_expression = getattr(field, '__lt__')(value) elif 'lte' in cond: value = cond['lte'] if isinstance(value, (list, dict)): raise BadIngredient('conditional value must be a scalar') condition_expression = getattr(field, '__le__')(value) elif 'eq' in cond: value = cond['eq'] if isinstance(value, (list, dict)): raise BadIngredient('conditional value must be a scalar') condition_expression = getattr(field, '__eq__')(value) elif 'ne' in cond: value = cond['ne'] if isinstance(value, (list, dict)): raise BadIngredient('conditional value must be a scalar') condition_expression = getattr(field, '__ne__')(value) else: raise BadIngredient('Bad condition') return condition_expression
[ "def", "parse_condition", "(", "cond", ",", "selectable", ",", "aggregated", "=", "False", ",", "default_aggregation", "=", "'sum'", ")", ":", "if", "cond", "is", "None", ":", "return", "None", "else", ":", "if", "'and'", "in", "cond", ":", "conditions", "=", "[", "parse_condition", "(", "c", ",", "selectable", ",", "aggregated", ",", "default_aggregation", ")", "for", "c", "in", "cond", "[", "'and'", "]", "]", "return", "and_", "(", "*", "conditions", ")", "elif", "'or'", "in", "cond", ":", "conditions", "=", "[", "parse_condition", "(", "c", ",", "selectable", ",", "aggregated", ",", "default_aggregation", ")", "for", "c", "in", "cond", "[", "'or'", "]", "]", "return", "or_", "(", "*", "conditions", ")", "elif", "'field'", "not", "in", "cond", ":", "raise", "BadIngredient", "(", "'field must be defined in condition'", ")", "field", "=", "parse_field", "(", "cond", "[", "'field'", "]", ",", "selectable", ",", "aggregated", "=", "aggregated", ",", "default_aggregation", "=", "default_aggregation", ")", "if", "'in'", "in", "cond", ":", "value", "=", "cond", "[", "'in'", "]", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "raise", "BadIngredient", "(", "'value for in must be a list'", ")", "condition_expression", "=", "getattr", "(", "field", ",", "'in_'", ")", "(", "tuple", "(", "value", ")", ")", "elif", "'gt'", "in", "cond", ":", "value", "=", "cond", "[", "'gt'", "]", "if", "isinstance", "(", "value", ",", "(", "list", ",", "dict", ")", ")", ":", "raise", "BadIngredient", "(", "'conditional value must be a scalar'", ")", "condition_expression", "=", "getattr", "(", "field", ",", "'__gt__'", ")", "(", "value", ")", "elif", "'gte'", "in", "cond", ":", "value", "=", "cond", "[", "'gte'", "]", "if", "isinstance", "(", "value", ",", "(", "list", ",", "dict", ")", ")", ":", "raise", "BadIngredient", "(", "'conditional value must be a scalar'", ")", "condition_expression", "=", "getattr", "(", "field", ",", "'__ge__'", ")", "(", "value", ")", "elif", "'lt'", "in", "cond", ":", "value", "=", "cond", "[", "'lt'", "]", "if", "isinstance", "(", "value", ",", "(", "list", ",", "dict", ")", ")", ":", "raise", "BadIngredient", "(", "'conditional value must be a scalar'", ")", "condition_expression", "=", "getattr", "(", "field", ",", "'__lt__'", ")", "(", "value", ")", "elif", "'lte'", "in", "cond", ":", "value", "=", "cond", "[", "'lte'", "]", "if", "isinstance", "(", "value", ",", "(", "list", ",", "dict", ")", ")", ":", "raise", "BadIngredient", "(", "'conditional value must be a scalar'", ")", "condition_expression", "=", "getattr", "(", "field", ",", "'__le__'", ")", "(", "value", ")", "elif", "'eq'", "in", "cond", ":", "value", "=", "cond", "[", "'eq'", "]", "if", "isinstance", "(", "value", ",", "(", "list", ",", "dict", ")", ")", ":", "raise", "BadIngredient", "(", "'conditional value must be a scalar'", ")", "condition_expression", "=", "getattr", "(", "field", ",", "'__eq__'", ")", "(", "value", ")", "elif", "'ne'", "in", "cond", ":", "value", "=", "cond", "[", "'ne'", "]", "if", "isinstance", "(", "value", ",", "(", "list", ",", "dict", ")", ")", ":", "raise", "BadIngredient", "(", "'conditional value must be a scalar'", ")", "condition_expression", "=", "getattr", "(", "field", ",", "'__ne__'", ")", "(", "value", ")", "else", ":", "raise", "BadIngredient", "(", "'Bad condition'", ")", "return", "condition_expression" ]
Create a SQLAlchemy clause from a condition.
[ "Create", "a", "SQLAlchemy", "clause", "from", "a", "condition", "." ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/shelf.py#L32-L100
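For orientation, a minimal sketch of driving parse_condition; MyTable and its age column are hypothetical stand-ins, not part of this source:

# Hypothetical: MyTable is any SQLAlchemy declarative model with an `age` column.
cond = {'and': [{'field': 'age', 'gt': 13}, {'field': 'age', 'lte': 19}]}
clause = parse_condition(cond, MyTable)
# roughly equivalent to and_(MyTable.age > 13, MyTable.age <= 19)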
juiceinc/recipe
recipe/shelf.py
tokenize
def tokenize(s): """ Tokenize a string by splitting it on +, -, * and / >>> tokenize('this + that') ['this', 'PLUS', 'that'] >>> tokenize('this+that') ['this', 'PLUS', 'that'] >>> tokenize('this+that-other') ['this', 'PLUS', 'that', 'MINUS', 'other'] """ # Crude tokenization s = s.replace('+', ' PLUS ').replace('-', ' MINUS ') \ .replace('/', ' DIVIDE ').replace('*', ' MULTIPLY ') words = [w for w in s.split(' ') if w] return words
python
def tokenize(s): """ Tokenize a string by splitting it on +, -, * and / >>> tokenize('this + that') ['this', 'PLUS', 'that'] >>> tokenize('this+that') ['this', 'PLUS', 'that'] >>> tokenize('this+that-other') ['this', 'PLUS', 'that', 'MINUS', 'other'] """ # Crude tokenization s = s.replace('+', ' PLUS ').replace('-', ' MINUS ') \ .replace('/', ' DIVIDE ').replace('*', ' MULTIPLY ') words = [w for w in s.split(' ') if w] return words
[ "def", "tokenize", "(", "s", ")", ":", "# Crude tokenization", "s", "=", "s", ".", "replace", "(", "'+'", ",", "' PLUS '", ")", ".", "replace", "(", "'-'", ",", "' MINUS '", ")", ".", "replace", "(", "'/'", ",", "' DIVIDE '", ")", ".", "replace", "(", "'*'", ",", "' MULTIPLY '", ")", "words", "=", "[", "w", "for", "w", "in", "s", ".", "split", "(", "' '", ")", "if", "w", "]", "return", "words" ]
Tokenize a string by splitting it on +, -, * and / >>> tokenize('this + that') ['this', 'PLUS', 'that'] >>> tokenize('this+that') ['this', 'PLUS', 'that'] >>> tokenize('this+that-other') ['this', 'PLUS', 'that', 'MINUS', 'other']
[ "Tokenize", "a", "string", "by", "splitting", "it", "by", "+", "and", "-" ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/shelf.py#L103-L120
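A usage note derived from the doctests above and the `/` and `*` replacements in the body:

tokenize('this + that')       # ['this', 'PLUS', 'that']
tokenize('this+that-other')   # ['this', 'PLUS', 'that', 'MINUS', 'other']
tokenize('revenue/quantity')  # ['revenue', 'DIVIDE', 'quantity']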
juiceinc/recipe
recipe/shelf.py
_find_in_columncollection
def _find_in_columncollection(columns, name): """ Find a column in a column collection by name or _label""" for col in columns: if col.name == name or getattr(col, '_label', None) == name: return col return None
python
def _find_in_columncollection(columns, name): """ Find a column in a column collection by name or _label""" for col in columns: if col.name == name or getattr(col, '_label', None) == name: return col return None
[ "def", "_find_in_columncollection", "(", "columns", ",", "name", ")", ":", "for", "col", "in", "columns", ":", "if", "col", ".", "name", "==", "name", "or", "getattr", "(", "col", ",", "'_label'", ",", "None", ")", "==", "name", ":", "return", "col", "return", "None" ]
Find a column in a column collection by name or _label
[ "Find", "a", "column", "in", "a", "column", "collection", "by", "name", "or", "_label" ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/shelf.py#L123-L128
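A minimal sketch of the helper in isolation; my_table is an assumed SQLAlchemy Table:

col = _find_in_columncollection(my_table.columns, 'state')
if col is None:
    pass  # find_column (below) treats None as 'not found' and keeps looking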
juiceinc/recipe
recipe/shelf.py
find_column
def find_column(selectable, name): """ Find a column named `name` in selectable :param selectable: :param name: :return: A column object """ from recipe import Recipe if isinstance(selectable, Recipe): selectable = selectable.subquery() # Selectable is a table if isinstance(selectable, DeclarativeMeta): col = getattr(selectable, name, None) if col is not None: return col col = _find_in_columncollection(selectable.__table__.columns, name) if col is not None: return col # Selectable is a sqlalchemy subquery elif hasattr(selectable, 'c' ) and isinstance(selectable.c, ImmutableColumnCollection): col = getattr(selectable.c, name, None) if col is not None: return col col = _find_in_columncollection(selectable.c, name) if col is not None: return col raise BadIngredient('Can not find {} in {}'.format(name, selectable))
python
def find_column(selectable, name): """ Find a column named `name` in selectable :param selectable: :param name: :return: A column object """ from recipe import Recipe if isinstance(selectable, Recipe): selectable = selectable.subquery() # Selectable is a table if isinstance(selectable, DeclarativeMeta): col = getattr(selectable, name, None) if col is not None: return col col = _find_in_columncollection(selectable.__table__.columns, name) if col is not None: return col # Selectable is a sqlalchemy subquery elif hasattr(selectable, 'c' ) and isinstance(selectable.c, ImmutableColumnCollection): col = getattr(selectable.c, name, None) if col is not None: return col col = _find_in_columncollection(selectable.c, name) if col is not None: return col raise BadIngredient('Can not find {} in {}'.format(name, selectable))
[ "def", "find_column", "(", "selectable", ",", "name", ")", ":", "from", "recipe", "import", "Recipe", "if", "isinstance", "(", "selectable", ",", "Recipe", ")", ":", "selectable", "=", "selectable", ".", "subquery", "(", ")", "# Selectable is a table", "if", "isinstance", "(", "selectable", ",", "DeclarativeMeta", ")", ":", "col", "=", "getattr", "(", "selectable", ",", "name", ",", "None", ")", "if", "col", "is", "not", "None", ":", "return", "col", "col", "=", "_find_in_columncollection", "(", "selectable", ".", "__table__", ".", "columns", ",", "name", ")", "if", "col", "is", "not", "None", ":", "return", "col", "# Selectable is a sqlalchemy subquery", "elif", "hasattr", "(", "selectable", ",", "'c'", ")", "and", "isinstance", "(", "selectable", ".", "c", ",", "ImmutableColumnCollection", ")", ":", "col", "=", "getattr", "(", "selectable", ".", "c", ",", "name", ",", "None", ")", "if", "col", "is", "not", "None", ":", "return", "col", "col", "=", "_find_in_columncollection", "(", "selectable", ".", "c", ",", "name", ")", "if", "col", "is", "not", "None", ":", "return", "col", "raise", "BadIngredient", "(", "'Can not find {} in {}'", ".", "format", "(", "name", ",", "selectable", ")", ")" ]
Find a column named `name` in selectable :param selectable: :param name: :return: A column object
[ "Find", "a", "column", "named", "name", "in", "selectable" ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/shelf.py#L131-L165
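A hedged example; MyTable is a hypothetical declarative model with a `state` column:

state_col = find_column(MyTable, 'state')  # the mapped column object
find_column(MyTable, 'missing')            # raises BadIngredient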
juiceinc/recipe
recipe/shelf.py
parse_field
def parse_field(fld, selectable, aggregated=True, default_aggregation='sum'):
    """ Parse a field object from yaml into a sqlalchemy expression """
    # An aggregation is a callable that takes a single field expression
    # None will perform no aggregation
    aggregation_lookup = {
        'sum': func.sum,
        'min': func.min,
        'max': func.max,
        'avg': func.avg,
        'count': func.count,
        'count_distinct': lambda fld: func.count(distinct(fld)),
        'month': lambda fld: func.date_trunc('month', fld),
        'week': lambda fld: func.date_trunc('week', fld),
        'year': lambda fld: func.date_trunc('year', fld),
        'quarter': lambda fld: func.date_trunc('quarter', fld),
        'age': lambda fld: func.date_part('year', func.age(fld)),
        None: lambda fld: fld,
    }

    # Ensure that the dictionary contains:
    # {
    #     'value': str,
    #     'aggregation': str|None,
    #     'condition': dict|None
    # }
    if isinstance(fld, basestring):
        fld = {
            'value': fld,
        }
    if not isinstance(fld, dict):
        raise BadIngredient('fields must be a string or a dict')
    if 'value' not in fld:
        raise BadIngredient('fields must contain a value')
    if not isinstance(fld['value'], basestring):
        raise BadIngredient('field value must be a string')

    # Ensure a condition
    if 'condition' in fld:
        if not isinstance(fld['condition'], dict) and \
                not fld['condition'] is None:
            raise BadIngredient('condition must be null or an object')
    else:
        fld['condition'] = None

    # Ensure an aggregation
    initial_aggregation = default_aggregation if aggregated else None
    if 'aggregation' in fld:
        if not isinstance(fld['aggregation'], basestring) and \
                not fld['aggregation'] is None:
            raise BadIngredient('aggregation must be null or a string')
        if fld['aggregation'] is None:
            fld['aggregation'] = initial_aggregation
    else:
        fld['aggregation'] = initial_aggregation

    value = fld.get('value', None)
    if value is None:
        raise BadIngredient('field value is not defined')

    field_parts = []
    for word in tokenize(value):
        if word in ('MINUS', 'PLUS', 'DIVIDE', 'MULTIPLY'):
            field_parts.append(word)
        else:
            field_parts.append(find_column(selectable, word))

    if not field_parts:
        raise BadIngredient('field is not defined.')
    # Fields should have an odd number of parts
    if len(field_parts) % 2 != 1:
        raise BadIngredient('field does not have the right number of parts')

    field = field_parts[0]
    if len(field_parts) > 1:
        # if we need to add and subtract from the field
        # join the field parts into pairs, for instance if field parts is
        # [MyTable.first, 'MINUS', MyTable.second, 'PLUS', MyTable.third]
        # we will get two pairs here
        # [('MINUS', MyTable.second), ('PLUS', MyTable.third)]
        for operator, other_field in zip(field_parts[1::2], field_parts[2::2]):
            if operator == 'PLUS':
                field = field.__add__(other_field)
            elif operator == 'MINUS':
                field = field.__sub__(other_field)
            elif operator == 'DIVIDE':
                field = field.__div__(other_field)
            elif operator == 'MULTIPLY':
                field = field.__mul__(other_field)
            else:
                raise BadIngredient('Unknown operator {}'.format(operator))

    # Handle the aggregator
    aggr = fld.get('aggregation', 'sum')
    if aggr is not None:
        aggr = aggr.strip()

    if aggr not in aggregation_lookup:
        raise BadIngredient('unknown aggregation {}'.format(aggr))

    aggregator = aggregation_lookup[aggr]

    condition = parse_condition(
        fld.get('condition', None),
        selectable,
        aggregated=False,
        default_aggregation=default_aggregation
    )

    if condition is not None:
        field = case([(condition, field)])

    return aggregator(field)
python
def parse_field(fld, selectable, aggregated=True, default_aggregation='sum'):
    """ Parse a field object from yaml into a sqlalchemy expression """
    # An aggregation is a callable that takes a single field expression
    # None will perform no aggregation
    aggregation_lookup = {
        'sum': func.sum,
        'min': func.min,
        'max': func.max,
        'avg': func.avg,
        'count': func.count,
        'count_distinct': lambda fld: func.count(distinct(fld)),
        'month': lambda fld: func.date_trunc('month', fld),
        'week': lambda fld: func.date_trunc('week', fld),
        'year': lambda fld: func.date_trunc('year', fld),
        'quarter': lambda fld: func.date_trunc('quarter', fld),
        'age': lambda fld: func.date_part('year', func.age(fld)),
        None: lambda fld: fld,
    }

    # Ensure that the dictionary contains:
    # {
    #     'value': str,
    #     'aggregation': str|None,
    #     'condition': dict|None
    # }
    if isinstance(fld, basestring):
        fld = {
            'value': fld,
        }
    if not isinstance(fld, dict):
        raise BadIngredient('fields must be a string or a dict')
    if 'value' not in fld:
        raise BadIngredient('fields must contain a value')
    if not isinstance(fld['value'], basestring):
        raise BadIngredient('field value must be a string')

    # Ensure a condition
    if 'condition' in fld:
        if not isinstance(fld['condition'], dict) and \
                not fld['condition'] is None:
            raise BadIngredient('condition must be null or an object')
    else:
        fld['condition'] = None

    # Ensure an aggregation
    initial_aggregation = default_aggregation if aggregated else None
    if 'aggregation' in fld:
        if not isinstance(fld['aggregation'], basestring) and \
                not fld['aggregation'] is None:
            raise BadIngredient('aggregation must be null or a string')
        if fld['aggregation'] is None:
            fld['aggregation'] = initial_aggregation
    else:
        fld['aggregation'] = initial_aggregation

    value = fld.get('value', None)
    if value is None:
        raise BadIngredient('field value is not defined')

    field_parts = []
    for word in tokenize(value):
        if word in ('MINUS', 'PLUS', 'DIVIDE', 'MULTIPLY'):
            field_parts.append(word)
        else:
            field_parts.append(find_column(selectable, word))

    if not field_parts:
        raise BadIngredient('field is not defined.')
    # Fields should have an odd number of parts
    if len(field_parts) % 2 != 1:
        raise BadIngredient('field does not have the right number of parts')

    field = field_parts[0]
    if len(field_parts) > 1:
        # if we need to add and subtract from the field
        # join the field parts into pairs, for instance if field parts is
        # [MyTable.first, 'MINUS', MyTable.second, 'PLUS', MyTable.third]
        # we will get two pairs here
        # [('MINUS', MyTable.second), ('PLUS', MyTable.third)]
        for operator, other_field in zip(field_parts[1::2], field_parts[2::2]):
            if operator == 'PLUS':
                field = field.__add__(other_field)
            elif operator == 'MINUS':
                field = field.__sub__(other_field)
            elif operator == 'DIVIDE':
                field = field.__div__(other_field)
            elif operator == 'MULTIPLY':
                field = field.__mul__(other_field)
            else:
                raise BadIngredient('Unknown operator {}'.format(operator))

    # Handle the aggregator
    aggr = fld.get('aggregation', 'sum')
    if aggr is not None:
        aggr = aggr.strip()

    if aggr not in aggregation_lookup:
        raise BadIngredient('unknown aggregation {}'.format(aggr))

    aggregator = aggregation_lookup[aggr]

    condition = parse_condition(
        fld.get('condition', None),
        selectable,
        aggregated=False,
        default_aggregation=default_aggregation
    )

    if condition is not None:
        field = case([(condition, field)])

    return aggregator(field)
[ "def", "parse_field", "(", "fld", ",", "selectable", ",", "aggregated", "=", "True", ",", "default_aggregation", "=", "'sum'", ")", ":", "# An aggregation is a callable that takes a single field expression", "# None will perform no aggregation", "aggregation_lookup", "=", "{", "'sum'", ":", "func", ".", "sum", ",", "'min'", ":", "func", ".", "min", ",", "'max'", ":", "func", ".", "max", ",", "'avg'", ":", "func", ".", "avg", ",", "'count'", ":", "func", ".", "count", ",", "'count_distinct'", ":", "lambda", "fld", ":", "func", ".", "count", "(", "distinct", "(", "fld", ")", ")", ",", "'month'", ":", "lambda", "fld", ":", "func", ".", "date_trunc", "(", "'month'", ",", "fld", ")", ",", "'week'", ":", "lambda", "fld", ":", "func", ".", "date_trunc", "(", "'week'", ",", "fld", ")", ",", "'year'", ":", "lambda", "fld", ":", "func", ".", "date_trunc", "(", "'year'", ",", "fld", ")", ",", "'quarter'", ":", "lambda", "fld", ":", "func", ".", "date_trunc", "(", "'quarter'", ",", "fld", ")", ",", "'age'", ":", "lambda", "fld", ":", "func", ".", "date_part", "(", "'year'", ",", "func", ".", "age", "(", "fld", ")", ")", ",", "None", ":", "lambda", "fld", ":", "fld", ",", "}", "# Ensure that the dictionary contains:", "# {", "# 'value': str,", "# 'aggregation': str|None,", "# 'condition': dict|None", "# }", "if", "isinstance", "(", "fld", ",", "basestring", ")", ":", "fld", "=", "{", "'value'", ":", "fld", ",", "}", "if", "not", "isinstance", "(", "fld", ",", "dict", ")", ":", "raise", "BadIngredient", "(", "'fields must be a string or a dict'", ")", "if", "'value'", "not", "in", "fld", ":", "raise", "BadIngredient", "(", "'fields must contain a value'", ")", "if", "not", "isinstance", "(", "fld", "[", "'value'", "]", ",", "basestring", ")", ":", "raise", "BadIngredient", "(", "'field value must be a string'", ")", "# Ensure a condition", "if", "'condition'", "in", "fld", ":", "if", "not", "isinstance", "(", "fld", "[", "'condition'", "]", ",", "dict", ")", "and", "not", "fld", "[", "'condition'", "]", "is", "None", ":", "raise", "BadIngredient", "(", "'condition must be null or an object'", ")", "else", ":", "fld", "[", "'condition'", "]", "=", "None", "# Ensure an aggregation", "initial_aggregation", "=", "default_aggregation", "if", "aggregated", "else", "None", "if", "'aggregation'", "in", "fld", ":", "if", "not", "isinstance", "(", "fld", "[", "'aggregation'", "]", ",", "basestring", ")", "and", "not", "fld", "[", "'aggregation'", "]", "is", "None", ":", "raise", "BadIngredient", "(", "'aggregation must be null or an string'", ")", "if", "fld", "[", "'aggregation'", "]", "is", "None", ":", "fld", "[", "'aggregation'", "]", "=", "initial_aggregation", "else", ":", "fld", "[", "'aggregation'", "]", "=", "initial_aggregation", "value", "=", "fld", ".", "get", "(", "'value'", ",", "None", ")", "if", "value", "is", "None", ":", "raise", "BadIngredient", "(", "'field value is not defined'", ")", "field_parts", "=", "[", "]", "for", "word", "in", "tokenize", "(", "value", ")", ":", "if", "word", "in", "(", "'MINUS'", ",", "'PLUS'", ",", "'DIVIDE'", ",", "'MULTIPLY'", ")", ":", "field_parts", ".", "append", "(", "word", ")", "else", ":", "field_parts", ".", "append", "(", "find_column", "(", "selectable", ",", "word", ")", ")", "if", "len", "(", "field_parts", ")", "is", "None", ":", "raise", "BadIngredient", "(", "'field is not defined.'", ")", "# Fields should have an odd number of parts", "if", "len", "(", "field_parts", ")", "%", "2", "!=", "1", ":", "raise", "BadIngredient", "(", "'field does not 
have the right number of parts'", ")", "field", "=", "field_parts", "[", "0", "]", "if", "len", "(", "field_parts", ")", ">", "1", ":", "# if we need to add and subtract from the field", "# join the field parts into pairs, for instance if field parts is", "# [MyTable.first, 'MINUS', MyTable.second, 'PLUS', MyTable.third]", "# we will get two pairs here", "# [('MINUS', MyTable.second), ('PLUS', MyTable.third)]", "for", "operator", ",", "other_field", "in", "zip", "(", "field_parts", "[", "1", ":", ":", "2", "]", ",", "field_parts", "[", "2", ":", ":", "2", "]", ")", ":", "if", "operator", "==", "'PLUS'", ":", "field", "=", "field", ".", "__add__", "(", "other_field", ")", "elif", "operator", "==", "'MINUS'", ":", "field", "=", "field", ".", "__sub__", "(", "other_field", ")", "elif", "operator", "==", "'DIVIDE'", ":", "field", "=", "field", ".", "__div__", "(", "other_field", ")", "elif", "operator", "==", "'MULTIPLY'", ":", "field", "=", "field", ".", "__mul__", "(", "other_field", ")", "else", ":", "raise", "BadIngredient", "(", "'Unknown operator {}'", ".", "format", "(", "operator", ")", ")", "# Handle the aggregator", "aggr", "=", "fld", ".", "get", "(", "'aggregation'", ",", "'sum'", ")", "if", "aggr", "is", "not", "None", ":", "aggr", "=", "aggr", ".", "strip", "(", ")", "if", "aggr", "not", "in", "aggregation_lookup", ":", "raise", "BadIngredient", "(", "'unknown aggregation {}'", ".", "format", "(", "aggr", ")", ")", "aggregator", "=", "aggregation_lookup", "[", "aggr", "]", "condition", "=", "parse_condition", "(", "fld", ".", "get", "(", "'condition'", ",", "None", ")", ",", "selectable", ",", "aggregated", "=", "False", ",", "default_aggregation", "=", "default_aggregation", ")", "if", "condition", "is", "not", "None", ":", "field", "=", "case", "(", "[", "(", "condition", ",", "field", ")", "]", ")", "return", "aggregator", "(", "field", ")" ]
Parse a field object from yaml into a sqlalchemy expression
[ "Parse", "a", "field", "object", "from", "yaml", "into", "a", "sqlalchemy", "expression" ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/shelf.py#L168-L279
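Sketches of the input shapes parse_field accepts, assuming a hypothetical MyTable with numeric `revenue` and `quantity` columns:

parse_field('revenue', MyTable)                                   # func.sum(MyTable.revenue)
parse_field({'value': 'revenue', 'aggregation': 'max'}, MyTable)  # func.max(MyTable.revenue)
parse_field('revenue/quantity', MyTable)                          # func.sum(MyTable.revenue / MyTable.quantity)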
juiceinc/recipe
recipe/shelf.py
ingredient_from_dict
def ingredient_from_dict(ingr_dict, selectable):
    """Create an ingredient from a dictionary.

    This object will be deserialized from yaml """

    # TODO: This is deprecated in favor of
    # ingredient_from_validated_dict

    # Describe the required params for each kind of ingredient
    # The key is the parameter name, the value is one of
    # field: A parse_field with aggregation=False
    # aggregated_field: A parse_field with aggregation=True
    # condition: A parse_condition
    params_lookup = {
        'Dimension': {
            'field': 'field'
        },
        'LookupDimension': {
            'field': 'field'
        },
        'IdValueDimension': OrderedDict(id_field='field', field='field'),
        'Metric': {
            'field': 'aggregated_field'
        },
        'DivideMetric': OrderedDict(
            numerator_field='aggregated_field',
            denominator_field='aggregated_field'
        ),
        'WtdAvgMetric': OrderedDict(field='field', weight='field')
    }

    format_lookup = {
        'comma': ',.0f',
        'dollar': '$,.0f',
        'percent': '.0%',
        'comma1': ',.1f',
        'dollar1': '$,.1f',
        'percent1': '.1%',
        'comma2': ',.2f',
        'dollar2': '$,.2f',
        'percent2': '.2%',
    }

    kind = ingr_dict.pop('kind', 'Metric')
    IngredientClass = ingredient_class_for_name(kind)

    if IngredientClass is None:
        raise BadIngredient('Unknown ingredient kind')

    params = params_lookup.get(kind, {'field': 'field'})

    args = []
    for k, v in iteritems(params):
        # All the params must be in the dict
        if k not in ingr_dict:
            raise BadIngredient(
                '{} must be defined to make a {}'.format(k, kind)
            )
        if v == 'field':
            statement = parse_field(
                ingr_dict.pop(k, None), selectable, aggregated=False
            )
        elif v == 'aggregated_field':
            statement = parse_field(
                ingr_dict.pop(k, None), selectable, aggregated=True
            )
        elif v == 'condition':
            statement = parse_condition(
                ingr_dict.pop(k, None), selectable, aggregated=True
            )
        else:
            raise BadIngredient('Do not know what this is')
        args.append(statement)

    # Remaining properties in ingr_dict are treated as keyword args
    # If the format string exists in format_lookup, use the value otherwise
    # use the original format
    if 'format' in ingr_dict:
        ingr_dict['format'] = format_lookup.get(
            ingr_dict['format'], ingr_dict['format']
        )

    return IngredientClass(*args, **ingr_dict)
python
def ingredient_from_dict(ingr_dict, selectable):
    """Create an ingredient from a dictionary.

    This object will be deserialized from yaml """

    # TODO: This is deprecated in favor of
    # ingredient_from_validated_dict

    # Describe the required params for each kind of ingredient
    # The key is the parameter name, the value is one of
    # field: A parse_field with aggregation=False
    # aggregated_field: A parse_field with aggregation=True
    # condition: A parse_condition
    params_lookup = {
        'Dimension': {
            'field': 'field'
        },
        'LookupDimension': {
            'field': 'field'
        },
        'IdValueDimension': OrderedDict(id_field='field', field='field'),
        'Metric': {
            'field': 'aggregated_field'
        },
        'DivideMetric': OrderedDict(
            numerator_field='aggregated_field',
            denominator_field='aggregated_field'
        ),
        'WtdAvgMetric': OrderedDict(field='field', weight='field')
    }

    format_lookup = {
        'comma': ',.0f',
        'dollar': '$,.0f',
        'percent': '.0%',
        'comma1': ',.1f',
        'dollar1': '$,.1f',
        'percent1': '.1%',
        'comma2': ',.2f',
        'dollar2': '$,.2f',
        'percent2': '.2%',
    }

    kind = ingr_dict.pop('kind', 'Metric')
    IngredientClass = ingredient_class_for_name(kind)

    if IngredientClass is None:
        raise BadIngredient('Unknown ingredient kind')

    params = params_lookup.get(kind, {'field': 'field'})

    args = []
    for k, v in iteritems(params):
        # All the params must be in the dict
        if k not in ingr_dict:
            raise BadIngredient(
                '{} must be defined to make a {}'.format(k, kind)
            )
        if v == 'field':
            statement = parse_field(
                ingr_dict.pop(k, None), selectable, aggregated=False
            )
        elif v == 'aggregated_field':
            statement = parse_field(
                ingr_dict.pop(k, None), selectable, aggregated=True
            )
        elif v == 'condition':
            statement = parse_condition(
                ingr_dict.pop(k, None), selectable, aggregated=True
            )
        else:
            raise BadIngredient('Do not know what this is')
        args.append(statement)

    # Remaining properties in ingr_dict are treated as keyword args
    # If the format string exists in format_lookup, use the value otherwise
    # use the original format
    if 'format' in ingr_dict:
        ingr_dict['format'] = format_lookup.get(
            ingr_dict['format'], ingr_dict['format']
        )

    return IngredientClass(*args, **ingr_dict)
[ "def", "ingredient_from_dict", "(", "ingr_dict", ",", "selectable", ")", ":", "# TODO: This is deprecated in favor of", "# ingredient_from_validated_dict", "# Describe the required params for each kind of ingredient", "# The key is the parameter name, the value is one of", "# field: A parse_field with aggregation=False", "# aggregated_field: A parse_field with aggregation=True", "# condition: A parse_condition", "params_lookup", "=", "{", "'Dimension'", ":", "{", "'field'", ":", "'field'", "}", ",", "'LookupDimension'", ":", "{", "'field'", ":", "'field'", "}", ",", "'IdValueDimension'", ":", "OrderedDict", "(", "id_field", "=", "'field'", ",", "field", "=", "'field'", ")", ",", "'Metric'", ":", "{", "'field'", ":", "'aggregated_field'", "}", ",", "'DivideMetric'", ":", "OrderedDict", "(", "numerator_field", "=", "'aggregated_field'", ",", "denominator_field", "=", "'aggregated_field'", ")", ",", "'WtdAvgMetric'", ":", "OrderedDict", "(", "field", "=", "'field'", ",", "weight", "=", "'field'", ")", "}", "format_lookup", "=", "{", "'comma'", ":", "',.0f'", ",", "'dollar'", ":", "'$,.0f'", ",", "'percent'", ":", "'.0%'", ",", "'comma1'", ":", "',.1f'", ",", "'dollar1'", ":", "'$,.1f'", ",", "'percent1'", ":", "'.1%'", ",", "'comma2'", ":", "',.2f'", ",", "'dollar2'", ":", "'$,.2f'", ",", "'percent2'", ":", "'.2%'", ",", "}", "kind", "=", "ingr_dict", ".", "pop", "(", "'kind'", ",", "'Metric'", ")", "IngredientClass", "=", "ingredient_class_for_name", "(", "kind", ")", "if", "IngredientClass", "is", "None", ":", "raise", "BadIngredient", "(", "'Unknown ingredient kind'", ")", "params", "=", "params_lookup", ".", "get", "(", "kind", ",", "{", "'field'", ":", "'field'", "}", ")", "args", "=", "[", "]", "for", "k", ",", "v", "in", "iteritems", "(", "params", ")", ":", "# All the params must be in the dict", "if", "k", "not", "in", "ingr_dict", ":", "raise", "BadIngredient", "(", "'{} must be defined to make a {}'", ".", "format", "(", "k", ",", "kind", ")", ")", "if", "v", "==", "'field'", ":", "statement", "=", "parse_field", "(", "ingr_dict", ".", "pop", "(", "k", ",", "None", ")", ",", "selectable", ",", "aggregated", "=", "False", ")", "elif", "v", "==", "'aggregated_field'", ":", "statement", "=", "parse_field", "(", "ingr_dict", ".", "pop", "(", "k", ",", "None", ")", ",", "selectable", ",", "aggregated", "=", "True", ")", "elif", "v", "==", "'condition'", ":", "statement", "=", "parse_condition", "(", "ingr_dict", ".", "pop", "(", "k", ",", "None", ")", ",", "selectable", ",", "aggregated", "=", "True", ")", "else", ":", "raise", "BadIngredient", "(", "'Do not know what this is'", ")", "args", ".", "append", "(", "statement", ")", "# Remaining properties in ingr_dict are treated as keyword args", "# If the format string exists in format_lookup, use the value otherwise", "# use the original format", "if", "'format'", "in", "ingr_dict", ":", "ingr_dict", "[", "'format'", "]", "=", "format_lookup", ".", "get", "(", "ingr_dict", "[", "'format'", "]", ",", "ingr_dict", "[", "'format'", "]", ")", "return", "IngredientClass", "(", "*", "args", ",", "*", "*", "ingr_dict", ")" ]
Create an ingredient from a dictionary. This object will be deserialized from yaml
[ "Create", "an", "ingredient", "from", "an", "dictionary", "." ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/shelf.py#L282-L368
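A hedged sketch of configs this (deprecated) constructor accepts; MyTable is hypothetical:

ingredient_from_dict({'kind': 'Dimension', 'field': 'state'}, MyTable)
ingredient_from_dict(
    {'kind': 'Metric', 'field': 'population', 'format': 'comma'}, MyTable
)  # 'comma' is rewritten to the format string ',.0f' before the Metric is constructed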
juiceinc/recipe
recipe/shelf.py
parse_validated_field
def parse_validated_field(fld, selectable): """ Converts a validated field to sqlalchemy. Field references are looked up in selectable """ aggr_fn = IngredientValidator.aggregation_lookup[fld['aggregation']] field = find_column(selectable, fld['value']) for operator in fld.get('operators', []): op = operator['operator'] other_field = parse_validated_field(operator['field'], selectable) field = IngredientValidator.operator_lookup[op](field)(other_field) condition = fld.get('condition', None) if condition: condition = parse_condition(condition, selectable) field = case([(condition, field)]) field = aggr_fn(field) return field
python
def parse_validated_field(fld, selectable): """ Converts a validated field to sqlalchemy. Field references are looked up in selectable """ aggr_fn = IngredientValidator.aggregation_lookup[fld['aggregation']] field = find_column(selectable, fld['value']) for operator in fld.get('operators', []): op = operator['operator'] other_field = parse_validated_field(operator['field'], selectable) field = IngredientValidator.operator_lookup[op](field)(other_field) condition = fld.get('condition', None) if condition: condition = parse_condition(condition, selectable) field = case([(condition, field)]) field = aggr_fn(field) return field
[ "def", "parse_validated_field", "(", "fld", ",", "selectable", ")", ":", "aggr_fn", "=", "IngredientValidator", ".", "aggregation_lookup", "[", "fld", "[", "'aggregation'", "]", "]", "field", "=", "find_column", "(", "selectable", ",", "fld", "[", "'value'", "]", ")", "for", "operator", "in", "fld", ".", "get", "(", "'operators'", ",", "[", "]", ")", ":", "op", "=", "operator", "[", "'operator'", "]", "other_field", "=", "parse_validated_field", "(", "operator", "[", "'field'", "]", ",", "selectable", ")", "field", "=", "IngredientValidator", ".", "operator_lookup", "[", "op", "]", "(", "field", ")", "(", "other_field", ")", "condition", "=", "fld", ".", "get", "(", "'condition'", ",", "None", ")", "if", "condition", ":", "condition", "=", "parse_condition", "(", "condition", ",", "selectable", ")", "field", "=", "case", "(", "[", "(", "condition", ",", "field", ")", "]", ")", "field", "=", "aggr_fn", "(", "field", ")", "return", "field" ]
Converts a validated field to sqlalchemy. Field references are looked up in selectable
[ "Converts", "a", "validated", "field", "to", "sqlalchemy", ".", "Field", "references", "are", "looked", "up", "in", "selectable" ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/shelf.py#L371-L389
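A minimal sketch, assuming the validator's aggregation_lookup includes 'sum' (mirroring parse_field's lookup above):

fld = {'value': 'revenue', 'aggregation': 'sum', 'operators': []}
expr = parse_validated_field(fld, MyTable)  # func.sum(MyTable.revenue)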
juiceinc/recipe
recipe/shelf.py
ingredient_from_validated_dict
def ingredient_from_validated_dict(ingr_dict, selectable):
    """ Create an ingredient from a dictionary.

    This object will be deserialized from yaml """
    validator = IngredientValidator(schema=ingr_dict['kind'])
    if not validator.validate(ingr_dict):
        raise Exception(validator.errors)
    ingr_dict = validator.document

    kind = ingr_dict.pop('kind', 'Metric')
    IngredientClass = ingredient_class_for_name(kind)

    if IngredientClass is None:
        raise BadIngredient('Unknown ingredient kind')

    args = []
    for fld in ingr_dict.pop('_fields', []):
        args.append(parse_validated_field(ingr_dict.pop(fld), selectable))

    return IngredientClass(*args, **ingr_dict)
python
def ingredient_from_validated_dict(ingr_dict, selectable):
    """ Create an ingredient from a dictionary.

    This object will be deserialized from yaml """
    validator = IngredientValidator(schema=ingr_dict['kind'])
    if not validator.validate(ingr_dict):
        raise Exception(validator.errors)
    ingr_dict = validator.document

    kind = ingr_dict.pop('kind', 'Metric')
    IngredientClass = ingredient_class_for_name(kind)

    if IngredientClass is None:
        raise BadIngredient('Unknown ingredient kind')

    args = []
    for fld in ingr_dict.pop('_fields', []):
        args.append(parse_validated_field(ingr_dict.pop(fld), selectable))

    return IngredientClass(*args, **ingr_dict)
[ "def", "ingredient_from_validated_dict", "(", "ingr_dict", ",", "selectable", ")", ":", "validator", "=", "IngredientValidator", "(", "schema", "=", "ingr_dict", "[", "'kind'", "]", ")", "if", "not", "validator", ".", "validate", "(", "ingr_dict", ")", ":", "raise", "Exception", "(", "validator", ".", "errors", ")", "ingr_dict", "=", "validator", ".", "document", "kind", "=", "ingr_dict", ".", "pop", "(", "'kind'", ",", "'Metric'", ")", "IngredientClass", "=", "ingredient_class_for_name", "(", "kind", ")", "if", "IngredientClass", "is", "None", ":", "raise", "BadIngredient", "(", "'Unknown ingredient kind'", ")", "args", "=", "[", "]", "for", "fld", "in", "ingr_dict", ".", "pop", "(", "'_fields'", ",", "[", "]", ")", ":", "args", ".", "append", "(", "parse_validated_field", "(", "ingr_dict", ".", "pop", "(", "fld", ")", ",", "selectable", ")", ")", "return", "IngredientClass", "(", "*", "args", ",", "*", "*", "ingr_dict", ")" ]
Create an ingredient from a dictionary. This object will be deserialized from yaml
[ "Create", "an", "ingredient", "from", "an", "dictionary", "." ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/shelf.py#L392-L412
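A hedged sketch; the '_fields' list naming the positional field parameters is filled in by the validator, not by the caller:

metric = ingredient_from_validated_dict(
    {'kind': 'Metric', 'field': 'population'}, MyTable
)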
juiceinc/recipe
recipe/shelf.py
AutomaticShelf
def AutomaticShelf(table): """Given a SQLAlchemy Table, automatically generate a Shelf with metrics and dimensions based on its schema. """ if hasattr(table, '__table__'): table = table.__table__ config = introspect_table(table) return Shelf.from_config(config, table)
python
def AutomaticShelf(table): """Given a SQLAlchemy Table, automatically generate a Shelf with metrics and dimensions based on its schema. """ if hasattr(table, '__table__'): table = table.__table__ config = introspect_table(table) return Shelf.from_config(config, table)
[ "def", "AutomaticShelf", "(", "table", ")", ":", "if", "hasattr", "(", "table", ",", "'__table__'", ")", ":", "table", "=", "table", ".", "__table__", "config", "=", "introspect_table", "(", "table", ")", "return", "Shelf", ".", "from_config", "(", "config", ",", "table", ")" ]
Given a SQLAlchemy Table, automatically generate a Shelf with metrics and dimensions based on its schema.
[ "Given", "a", "SQLAlchemy", "Table", "automatically", "generate", "a", "Shelf", "with", "metrics", "and", "dimensions", "based", "on", "its", "schema", "." ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/shelf.py#L764-L771
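For example, with a hypothetical Census model:

shelf = AutomaticShelf(Census)
# String columns become Dimensions; Integer/Float columns become Metrics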
juiceinc/recipe
recipe/shelf.py
introspect_table
def introspect_table(table): """Given a SQLAlchemy Table object, return a Shelf description suitable for passing to Shelf.from_config. """ d = {} for c in table.columns: if isinstance(c.type, String): d[c.name] = {'kind': 'Dimension', 'field': c.name} if isinstance(c.type, (Integer, Float)): d[c.name] = {'kind': 'Metric', 'field': c.name} return d
python
def introspect_table(table): """Given a SQLAlchemy Table object, return a Shelf description suitable for passing to Shelf.from_config. """ d = {} for c in table.columns: if isinstance(c.type, String): d[c.name] = {'kind': 'Dimension', 'field': c.name} if isinstance(c.type, (Integer, Float)): d[c.name] = {'kind': 'Metric', 'field': c.name} return d
[ "def", "introspect_table", "(", "table", ")", ":", "d", "=", "{", "}", "for", "c", "in", "table", ".", "columns", ":", "if", "isinstance", "(", "c", ".", "type", ",", "String", ")", ":", "d", "[", "c", ".", "name", "]", "=", "{", "'kind'", ":", "'Dimension'", ",", "'field'", ":", "c", ".", "name", "}", "if", "isinstance", "(", "c", ".", "type", ",", "(", "Integer", ",", "Float", ")", ")", ":", "d", "[", "c", ".", "name", "]", "=", "{", "'kind'", ":", "'Metric'", ",", "'field'", ":", "c", ".", "name", "}", "return", "d" ]
Given a SQLAlchemy Table object, return a Shelf description suitable for passing to Shelf.from_config.
[ "Given", "a", "SQLAlchemy", "Table", "object", "return", "a", "Shelf", "description", "suitable", "for", "passing", "to", "Shelf", ".", "from_config", "." ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/shelf.py#L774-L784
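A self-contained sketch using only standard SQLAlchemy:

from sqlalchemy import Column, Integer, MetaData, String, Table

t = Table(
    'census', MetaData(),
    Column('state', String), Column('population', Integer)
)
introspect_table(t)
# {'state': {'kind': 'Dimension', 'field': 'state'},
#  'population': {'kind': 'Metric', 'field': 'population'}}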
juiceinc/recipe
recipe/shelf.py
Shelf.pop
def pop(self, k, d=_POP_DEFAULT): """Pop an ingredient off of this shelf.""" if d is _POP_DEFAULT: return self._ingredients.pop(k) else: return self._ingredients.pop(k, d)
python
def pop(self, k, d=_POP_DEFAULT): """Pop an ingredient off of this shelf.""" if d is _POP_DEFAULT: return self._ingredients.pop(k) else: return self._ingredients.pop(k, d)
[ "def", "pop", "(", "self", ",", "k", ",", "d", "=", "_POP_DEFAULT", ")", ":", "if", "d", "is", "_POP_DEFAULT", ":", "return", "self", ".", "_ingredients", ".", "pop", "(", "k", ")", "else", ":", "return", "self", ".", "_ingredients", ".", "pop", "(", "k", ",", "d", ")" ]
Pop an ingredient off of this shelf.
[ "Pop", "an", "ingredient", "off", "of", "this", "shelf", "." ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/shelf.py#L531-L536
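Usage mirrors dict.pop:

shelf.pop('state')        # removes and returns the ingredient; KeyError if absent
shelf.pop('state', None)  # returns the default instead of raising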
juiceinc/recipe
recipe/shelf.py
Shelf.dimension_ids
def dimension_ids(self): """ Return the Dimensions on this shelf in the order in which they were used.""" return self._sorted_ingredients([ d.id for d in self.values() if isinstance(d, Dimension) ])
python
def dimension_ids(self): """ Return the Dimensions on this shelf in the order in which they were used.""" return self._sorted_ingredients([ d.id for d in self.values() if isinstance(d, Dimension) ])
[ "def", "dimension_ids", "(", "self", ")", ":", "return", "self", ".", "_sorted_ingredients", "(", "[", "d", ".", "id", "for", "d", "in", "self", ".", "values", "(", ")", "if", "isinstance", "(", "d", ",", "Dimension", ")", "]", ")" ]
Return the Dimensions on this shelf in the order in which they were used.
[ "Return", "the", "Dimensions", "on", "this", "shelf", "in", "the", "order", "in", "which", "they", "were", "used", "." ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/shelf.py#L545-L550
juiceinc/recipe
recipe/shelf.py
Shelf.metric_ids
def metric_ids(self): """ Return the Metrics on this shelf in the order in which they were used. """ return self._sorted_ingredients([ d.id for d in self.values() if isinstance(d, Metric) ])
python
def metric_ids(self): """ Return the Metrics on this shelf in the order in which they were used. """ return self._sorted_ingredients([ d.id for d in self.values() if isinstance(d, Metric) ])
[ "def", "metric_ids", "(", "self", ")", ":", "return", "self", ".", "_sorted_ingredients", "(", "[", "d", ".", "id", "for", "d", "in", "self", ".", "values", "(", ")", "if", "isinstance", "(", "d", ",", "Metric", ")", "]", ")" ]
Return the Metrics on this shelf in the order in which they were used.
[ "Return", "the", "Metrics", "on", "this", "shelf", "in", "the", "order", "in", "which", "they", "were", "used", "." ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/shelf.py#L553-L558
juiceinc/recipe
recipe/shelf.py
Shelf.filter_ids
def filter_ids(self):
    """ Return the Filters on this shelf in the order in which
    they were used. """
    return self._sorted_ingredients([
        d.id for d in self.values() if isinstance(d, Filter)
    ])
python
def filter_ids(self):
    """ Return the Filters on this shelf in the order in which
    they were used. """
    return self._sorted_ingredients([
        d.id for d in self.values() if isinstance(d, Filter)
    ])
[ "def", "filter_ids", "(", "self", ")", ":", "return", "self", ".", "_sorted_ingredients", "(", "[", "d", ".", "id", "for", "d", "in", "self", ".", "values", "(", ")", "if", "isinstance", "(", "d", ",", "Filter", ")", "]", ")" ]
Return the Filters on this shelf in the order in which they were used.
[ "Return", "the", "Metrics", "on", "this", "shelf", "in", "the", "order", "in", "which", "they", "were", "used", "." ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/shelf.py#L561-L566
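Assuming dimension_ids, metric_ids, and filter_ids are exposed as properties (their bodies take no arguments beyond self), a hypothetical shelf might report:

shelf.dimension_ids  # e.g. ('state',)
shelf.metric_ids     # e.g. ('population',)
shelf.filter_ids     # e.g. ('only_adults',)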
juiceinc/recipe
recipe/shelf.py
Shelf.from_config
def from_config( cls, obj, selectable, ingredient_constructor=ingredient_from_validated_dict, metadata=None ): """Create a shelf using a dict shelf definition. :param obj: A Python dictionary describing a Shelf. :param selectable: A SQLAlchemy Table, a Recipe, a table name, or a SQLAlchemy join to select from. :param metadata: If `selectable` is passed as a table name, then in order to introspect its schema, we must have the SQLAlchemy MetaData object to associate it with. :return: A shelf that contains the ingredients defined in obj. """ from recipe import Recipe if isinstance(selectable, Recipe): selectable = selectable.subquery() elif isinstance(selectable, basestring): if '.' in selectable: schema, tablename = selectable.split('.') else: schema, tablename = None, selectable selectable = Table( tablename, metadata, schema=schema, extend_existing=True, autoload=True ) d = {} for k, v in iteritems(obj): d[k] = ingredient_constructor(v, selectable) shelf = cls(d, select_from=selectable) return shelf
python
def from_config( cls, obj, selectable, ingredient_constructor=ingredient_from_validated_dict, metadata=None ): """Create a shelf using a dict shelf definition. :param obj: A Python dictionary describing a Shelf. :param selectable: A SQLAlchemy Table, a Recipe, a table name, or a SQLAlchemy join to select from. :param metadata: If `selectable` is passed as a table name, then in order to introspect its schema, we must have the SQLAlchemy MetaData object to associate it with. :return: A shelf that contains the ingredients defined in obj. """ from recipe import Recipe if isinstance(selectable, Recipe): selectable = selectable.subquery() elif isinstance(selectable, basestring): if '.' in selectable: schema, tablename = selectable.split('.') else: schema, tablename = None, selectable selectable = Table( tablename, metadata, schema=schema, extend_existing=True, autoload=True ) d = {} for k, v in iteritems(obj): d[k] = ingredient_constructor(v, selectable) shelf = cls(d, select_from=selectable) return shelf
[ "def", "from_config", "(", "cls", ",", "obj", ",", "selectable", ",", "ingredient_constructor", "=", "ingredient_from_validated_dict", ",", "metadata", "=", "None", ")", ":", "from", "recipe", "import", "Recipe", "if", "isinstance", "(", "selectable", ",", "Recipe", ")", ":", "selectable", "=", "selectable", ".", "subquery", "(", ")", "elif", "isinstance", "(", "selectable", ",", "basestring", ")", ":", "if", "'.'", "in", "selectable", ":", "schema", ",", "tablename", "=", "selectable", ".", "split", "(", "'.'", ")", "else", ":", "schema", ",", "tablename", "=", "None", ",", "selectable", "selectable", "=", "Table", "(", "tablename", ",", "metadata", ",", "schema", "=", "schema", ",", "extend_existing", "=", "True", ",", "autoload", "=", "True", ")", "d", "=", "{", "}", "for", "k", ",", "v", "in", "iteritems", "(", "obj", ")", ":", "d", "[", "k", "]", "=", "ingredient_constructor", "(", "v", ",", "selectable", ")", "shelf", "=", "cls", "(", "d", ",", "select_from", "=", "selectable", ")", "return", "shelf" ]
Create a shelf using a dict shelf definition. :param obj: A Python dictionary describing a Shelf. :param selectable: A SQLAlchemy Table, a Recipe, a table name, or a SQLAlchemy join to select from. :param metadata: If `selectable` is passed as a table name, then in order to introspect its schema, we must have the SQLAlchemy MetaData object to associate it with. :return: A shelf that contains the ingredients defined in obj.
[ "Create", "a", "shelf", "using", "a", "dict", "shelf", "definition", "." ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/shelf.py#L600-L638
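A hedged sketch of both calling styles; Census and metadata are assumptions:

config = {
    'state': {'kind': 'Dimension', 'field': 'state'},
    'population': {'kind': 'Metric', 'field': 'population'},
}
shelf = Shelf.from_config(config, Census)
# or reflect a table by name; schema-qualified names are split on '.':
shelf = Shelf.from_config(config, 'public.census', metadata=metadata)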
juiceinc/recipe
recipe/shelf.py
Shelf.from_yaml
def from_yaml(cls, yaml_str, selectable, **kwargs): """Create a shelf using a yaml shelf definition. :param yaml_str: A string containing yaml ingredient definitions. :param selectable: A SQLAlchemy Table, a Recipe, or a SQLAlchemy join to select from. :return: A shelf that contains the ingredients defined in yaml_str. """ obj = safe_load(yaml_str) return cls.from_config( obj, selectable, ingredient_constructor=ingredient_from_dict, **kwargs )
python
def from_yaml(cls, yaml_str, selectable, **kwargs): """Create a shelf using a yaml shelf definition. :param yaml_str: A string containing yaml ingredient definitions. :param selectable: A SQLAlchemy Table, a Recipe, or a SQLAlchemy join to select from. :return: A shelf that contains the ingredients defined in yaml_str. """ obj = safe_load(yaml_str) return cls.from_config( obj, selectable, ingredient_constructor=ingredient_from_dict, **kwargs )
[ "def", "from_yaml", "(", "cls", ",", "yaml_str", ",", "selectable", ",", "*", "*", "kwargs", ")", ":", "obj", "=", "safe_load", "(", "yaml_str", ")", "return", "cls", ".", "from_config", "(", "obj", ",", "selectable", ",", "ingredient_constructor", "=", "ingredient_from_dict", ",", "*", "*", "kwargs", ")" ]
Create a shelf using a yaml shelf definition. :param yaml_str: A string containing yaml ingredient definitions. :param selectable: A SQLAlchemy Table, a Recipe, or a SQLAlchemy join to select from. :return: A shelf that contains the ingredients defined in yaml_str.
[ "Create", "a", "shelf", "using", "a", "yaml", "shelf", "definition", "." ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/shelf.py#L641-L655
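For example, with a hypothetical Census selectable:

yaml_str = '''
state:
    kind: Dimension
    field: state
population:
    kind: Metric
    field: population
'''
shelf = Shelf.from_yaml(yaml_str, Census)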
juiceinc/recipe
recipe/shelf.py
Shelf.from_validated_yaml
def from_validated_yaml(cls, yaml_str, selectable, **kwargs): """Create a shelf using a yaml shelf definition. :param yaml_str: A string containing yaml ingredient definitions. :param selectable: A SQLAlchemy Table, a Recipe, or a SQLAlchemy join to select from. :return: A shelf that contains the ingredients defined in yaml_str. """ obj = safe_load(yaml_str) return cls.from_config(obj, selectable, **kwargs)
python
def from_validated_yaml(cls, yaml_str, selectable, **kwargs): """Create a shelf using a yaml shelf definition. :param yaml_str: A string containing yaml ingredient definitions. :param selectable: A SQLAlchemy Table, a Recipe, or a SQLAlchemy join to select from. :return: A shelf that contains the ingredients defined in yaml_str. """ obj = safe_load(yaml_str) return cls.from_config(obj, selectable, **kwargs)
[ "def", "from_validated_yaml", "(", "cls", ",", "yaml_str", ",", "selectable", ",", "*", "*", "kwargs", ")", ":", "obj", "=", "safe_load", "(", "yaml_str", ")", "return", "cls", ".", "from_config", "(", "obj", ",", "selectable", ",", "*", "*", "kwargs", ")" ]
Create a shelf using a yaml shelf definition. :param yaml_str: A string containing yaml ingredient definitions. :param selectable: A SQLAlchemy Table, a Recipe, or a SQLAlchemy join to select from. :return: A shelf that contains the ingredients defined in yaml_str.
[ "Create", "a", "shelf", "using", "a", "yaml", "shelf", "definition", "." ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/shelf.py#L658-L667
juiceinc/recipe
recipe/shelf.py
Shelf.find
def find(self, obj, filter_to_class=Ingredient, constructor=None): """ Find an Ingredient, optionally using the shelf. :param obj: A string or Ingredient :param filter_to_class: The Ingredient subclass that obj must be an instance of :param constructor: An optional callable for building Ingredients from obj :return: An Ingredient of subclass `filter_to_class` """ if callable(constructor): obj = constructor(obj, shelf=self) if isinstance(obj, basestring): set_descending = obj.startswith('-') if set_descending: obj = obj[1:] if obj not in self: raise BadRecipe("{} doesn't exist on the shelf".format(obj)) ingredient = self[obj] if not isinstance(ingredient, filter_to_class): raise BadRecipe('{} is not a {}'.format(obj, filter_to_class)) if set_descending: ingredient.ordering = 'desc' return ingredient elif isinstance(obj, filter_to_class): return obj else: raise BadRecipe('{} is not a {}'.format(obj, filter_to_class))
python
def find(self, obj, filter_to_class=Ingredient, constructor=None): """ Find an Ingredient, optionally using the shelf. :param obj: A string or Ingredient :param filter_to_class: The Ingredient subclass that obj must be an instance of :param constructor: An optional callable for building Ingredients from obj :return: An Ingredient of subclass `filter_to_class` """ if callable(constructor): obj = constructor(obj, shelf=self) if isinstance(obj, basestring): set_descending = obj.startswith('-') if set_descending: obj = obj[1:] if obj not in self: raise BadRecipe("{} doesn't exist on the shelf".format(obj)) ingredient = self[obj] if not isinstance(ingredient, filter_to_class): raise BadRecipe('{} is not a {}'.format(obj, filter_to_class)) if set_descending: ingredient.ordering = 'desc' return ingredient elif isinstance(obj, filter_to_class): return obj else: raise BadRecipe('{} is not a {}'.format(obj, filter_to_class))
[ "def", "find", "(", "self", ",", "obj", ",", "filter_to_class", "=", "Ingredient", ",", "constructor", "=", "None", ")", ":", "if", "callable", "(", "constructor", ")", ":", "obj", "=", "constructor", "(", "obj", ",", "shelf", "=", "self", ")", "if", "isinstance", "(", "obj", ",", "basestring", ")", ":", "set_descending", "=", "obj", ".", "startswith", "(", "'-'", ")", "if", "set_descending", ":", "obj", "=", "obj", "[", "1", ":", "]", "if", "obj", "not", "in", "self", ":", "raise", "BadRecipe", "(", "\"{} doesn't exist on the shelf\"", ".", "format", "(", "obj", ")", ")", "ingredient", "=", "self", "[", "obj", "]", "if", "not", "isinstance", "(", "ingredient", ",", "filter_to_class", ")", ":", "raise", "BadRecipe", "(", "'{} is not a {}'", ".", "format", "(", "obj", ",", "filter_to_class", ")", ")", "if", "set_descending", ":", "ingredient", ".", "ordering", "=", "'desc'", "return", "ingredient", "elif", "isinstance", "(", "obj", ",", "filter_to_class", ")", ":", "return", "obj", "else", ":", "raise", "BadRecipe", "(", "'{} is not a {}'", ".", "format", "(", "obj", ",", "filter_to_class", ")", ")" ]
Find an Ingredient, optionally using the shelf. :param obj: A string or Ingredient :param filter_to_class: The Ingredient subclass that obj must be an instance of :param constructor: An optional callable for building Ingredients from obj :return: An Ingredient of subclass `filter_to_class`
[ "Find", "an", "Ingredient", "optionally", "using", "the", "shelf", "." ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/shelf.py#L669-L702
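A short sketch of the lookup behaviors:

shelf.find('state', Dimension)    # the ingredient keyed 'state'
shelf.find('-state', Dimension)   # same ingredient, with ordering set to 'desc'
shelf.find('missing', Dimension)  # raises BadRecipe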
juiceinc/recipe
recipe/shelf.py
Shelf.brew_query_parts
def brew_query_parts(self): """ Make columns, group_bys, filters, havings """ columns, group_bys, filters, havings = [], [], set(), set() for ingredient in self.ingredients(): if ingredient.query_columns: columns.extend(ingredient.query_columns) if ingredient.group_by: group_bys.extend(ingredient.group_by) if ingredient.filters: filters.update(ingredient.filters) if ingredient.havings: havings.update(ingredient.havings) return { 'columns': columns, 'group_bys': group_bys, 'filters': filters, 'havings': havings, }
python
def brew_query_parts(self): """ Make columns, group_bys, filters, havings """ columns, group_bys, filters, havings = [], [], set(), set() for ingredient in self.ingredients(): if ingredient.query_columns: columns.extend(ingredient.query_columns) if ingredient.group_by: group_bys.extend(ingredient.group_by) if ingredient.filters: filters.update(ingredient.filters) if ingredient.havings: havings.update(ingredient.havings) return { 'columns': columns, 'group_bys': group_bys, 'filters': filters, 'havings': havings, }
[ "def", "brew_query_parts", "(", "self", ")", ":", "columns", ",", "group_bys", ",", "filters", ",", "havings", "=", "[", "]", ",", "[", "]", ",", "set", "(", ")", ",", "set", "(", ")", "for", "ingredient", "in", "self", ".", "ingredients", "(", ")", ":", "if", "ingredient", ".", "query_columns", ":", "columns", ".", "extend", "(", "ingredient", ".", "query_columns", ")", "if", "ingredient", ".", "group_by", ":", "group_bys", ".", "extend", "(", "ingredient", ".", "group_by", ")", "if", "ingredient", ".", "filters", ":", "filters", ".", "update", "(", "ingredient", ".", "filters", ")", "if", "ingredient", ".", "havings", ":", "havings", ".", "update", "(", "ingredient", ".", "havings", ")", "return", "{", "'columns'", ":", "columns", ",", "'group_bys'", ":", "group_bys", ",", "'filters'", ":", "filters", ",", "'havings'", ":", "havings", ",", "}" ]
Make columns, group_bys, filters, havings
[ "Make", "columns", "group_bys", "filters", "havings" ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/shelf.py#L704-L723
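The shape of the returned mapping, sketched:

parts = shelf.brew_query_parts()
parts['columns']    # list of column expressions
parts['group_bys']  # list of group-by expressions
parts['filters']    # set of filter clauses
parts['havings']    # set of having clauses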
juiceinc/recipe
recipe/shelf.py
Shelf.enchant
def enchant(self, list, cache_context=None): """ Add any calculated values to each row of a resultset generating a new namedtuple :param list: a list of row results :param cache_context: optional extra context for caching :return: a list with ingredient.cauldron_extras added for all ingredients """ enchantedlist = [] if list: sample_item = list[0] # Extra fields to add to each row # With extra callables extra_fields, extra_callables = [], [] for ingredient in self.values(): if not isinstance(ingredient, (Dimension, Metric)): continue if cache_context: ingredient.cache_context += str(cache_context) for extra_field, extra_callable in ingredient.cauldron_extras: extra_fields.append(extra_field) extra_callables.append(extra_callable) # Mixin the extra fields keyed_tuple = lightweight_named_tuple( 'result', sample_item._fields + tuple(extra_fields) ) # Iterate over the results and build a new namedtuple for each row for row in list: values = row + tuple(fn(row) for fn in extra_callables) enchantedlist.append(keyed_tuple(values)) return enchantedlist
python
def enchant(self, list, cache_context=None): """ Add any calculated values to each row of a resultset generating a new namedtuple :param list: a list of row results :param cache_context: optional extra context for caching :return: a list with ingredient.cauldron_extras added for all ingredients """ enchantedlist = [] if list: sample_item = list[0] # Extra fields to add to each row # With extra callables extra_fields, extra_callables = [], [] for ingredient in self.values(): if not isinstance(ingredient, (Dimension, Metric)): continue if cache_context: ingredient.cache_context += str(cache_context) for extra_field, extra_callable in ingredient.cauldron_extras: extra_fields.append(extra_field) extra_callables.append(extra_callable) # Mixin the extra fields keyed_tuple = lightweight_named_tuple( 'result', sample_item._fields + tuple(extra_fields) ) # Iterate over the results and build a new namedtuple for each row for row in list: values = row + tuple(fn(row) for fn in extra_callables) enchantedlist.append(keyed_tuple(values)) return enchantedlist
[ "def", "enchant", "(", "self", ",", "list", ",", "cache_context", "=", "None", ")", ":", "enchantedlist", "=", "[", "]", "if", "list", ":", "sample_item", "=", "list", "[", "0", "]", "# Extra fields to add to each row", "# With extra callables", "extra_fields", ",", "extra_callables", "=", "[", "]", ",", "[", "]", "for", "ingredient", "in", "self", ".", "values", "(", ")", ":", "if", "not", "isinstance", "(", "ingredient", ",", "(", "Dimension", ",", "Metric", ")", ")", ":", "continue", "if", "cache_context", ":", "ingredient", ".", "cache_context", "+=", "str", "(", "cache_context", ")", "for", "extra_field", ",", "extra_callable", "in", "ingredient", ".", "cauldron_extras", ":", "extra_fields", ".", "append", "(", "extra_field", ")", "extra_callables", ".", "append", "(", "extra_callable", ")", "# Mixin the extra fields", "keyed_tuple", "=", "lightweight_named_tuple", "(", "'result'", ",", "sample_item", ".", "_fields", "+", "tuple", "(", "extra_fields", ")", ")", "# Iterate over the results and build a new namedtuple for each row", "for", "row", "in", "list", ":", "values", "=", "row", "+", "tuple", "(", "fn", "(", "row", ")", "for", "fn", "in", "extra_callables", ")", "enchantedlist", ".", "append", "(", "keyed_tuple", "(", "values", ")", ")", "return", "enchantedlist" ]
Add any calculated values to each row of a resultset generating a new namedtuple :param list: a list of row results :param cache_context: optional extra context for caching :return: a list with ingredient.cauldron_extras added for all ingredients
[ "Add", "any", "calculated", "values", "to", "each", "row", "of", "a", "resultset", "generating", "a", "new", "namedtuple" ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/shelf.py#L725-L761
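A hedged sketch; rows stands in for any list of result namedtuples:

rows = shelf.enchant(rows)
# each row is rebuilt with the ingredients' cauldron_extras appended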
juiceinc/recipe
recipe/extensions.py
AutomaticFilters.apply_automatic_filters
def apply_automatic_filters(self, value): """Toggles whether automatic filters are applied to a recipe. The following will disable automatic filters for this recipe:: recipe.apply_automatic_filters(False) """ if self.apply != value: self.dirty = True self.apply = value return self.recipe
python
def apply_automatic_filters(self, value): """Toggles whether automatic filters are applied to a recipe. The following will disable automatic filters for this recipe:: recipe.apply_automatic_filters(False) """ if self.apply != value: self.dirty = True self.apply = value return self.recipe
[ "def", "apply_automatic_filters", "(", "self", ",", "value", ")", ":", "if", "self", ".", "apply", "!=", "value", ":", "self", ".", "dirty", "=", "True", "self", ".", "apply", "=", "value", "return", "self", ".", "recipe" ]
Toggles whether automatic filters are applied to a recipe. The following will disable automatic filters for this recipe:: recipe.apply_automatic_filters(False)
[ "Toggles", "whether", "automatic", "filters", "are", "applied", "to", "a", "recipe", ".", "The", "following", "will", "disable", "automatic", "filters", "for", "this", "recipe", "::" ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/extensions.py#L213-L222
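Because the method returns the recipe, calls chain; a hypothetical sketch:

recipe.apply_automatic_filters(False).all()  # run with automatic filters disabled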
juiceinc/recipe
recipe/extensions.py
AutomaticFilters.automatic_filters
def automatic_filters(self, value): """Sets a dictionary of automatic filters to apply to this recipe. If your recipe uses a shelf that has dimensions 'state' and 'gender' you could filter the data to Men in California and New Hampshire with:: shelf = Shelf({ 'state': Dimension(Census.state), 'gender': Dimension(Census.gender), 'population': Metric(func.sum(Census.population)), }) recipe = Recipe(shelf=shelf) recipe.dimensions('state').metrics('population').automatic_filters({ 'state': ['California', 'New Hampshire'], 'gender': 'M' }) Automatic filter keys can optionally include an ``operator``. **List operators** If the value provided in the automatic_filter dictionary is a list, the following operators are available. The default operator is ``in``:: in (default) notin between (requires a list of two items) **Scalar operators** If the value provided in the automatic_filter dictionary is a scalar (a string, integer, or number), the following operators are available. The default operator is ``eq``:: eq (equal) (the default) ne (not equal) lt (less than) lte (less than or equal) gt (greater than) gte (greater than or equal) **An example using operators** Here's an example that filters to states that start with the letters A-C:: shelf = Shelf({ 'state': Dimension(Census.state), 'gender': Dimension(Census.gender), 'population': Metric(func.sum(Census.population)), }) recipe = Recipe(shelf=shelf) recipe.dimensions('state').metrics('population').automatic_filters({ 'state__lt': 'D' }) """ assert isinstance(value, dict) self._automatic_filters = value self.dirty = True return self.recipe
python
def automatic_filters(self, value): """Sets a dictionary of automatic filters to apply to this recipe. If your recipe uses a shelf that has dimensions 'state' and 'gender' you could filter the data to Men in California and New Hampshire with:: shelf = Shelf({ 'state': Dimension(Census.state), 'gender': Dimension(Census.gender), 'population': Metric(func.sum(Census.population)), }) recipe = Recipe(shelf=shelf) recipe.dimensions('state').metrics('population').automatic_filters({ 'state': ['California', 'New Hampshire'], 'gender': 'M' }) Automatic filter keys can optionally include an ``operator``. **List operators** If the value provided in the automatic_filter dictionary is a list, the following operators are available. The default operator is ``in``:: in (default) notin between (requires a list of two items) **Scalar operators** If the value provided in the automatic_filter dictionary is a scalar (a string, integer, or number), the following operators are available. The default operator is ``eq``:: eq (equal) (the default) ne (not equal) lt (less than) lte (less than or equal) gt (greater than) gte (greater than or equal) **An example using operators** Here's an example that filters to states that start with the letters A-C:: shelf = Shelf({ 'state': Dimension(Census.state), 'gender': Dimension(Census.gender), 'population': Metric(func.sum(Census.population)), }) recipe = Recipe(shelf=shelf) recipe.dimensions('state').metrics('population').automatic_filters({ 'state__lt': 'D' }) """ assert isinstance(value, dict) self._automatic_filters = value self.dirty = True return self.recipe
[ "def", "automatic_filters", "(", "self", ",", "value", ")", ":", "assert", "isinstance", "(", "value", ",", "dict", ")", "self", ".", "_automatic_filters", "=", "value", "self", ".", "dirty", "=", "True", "return", "self", ".", "recipe" ]
Sets a dictionary of automatic filters to apply to this recipe. If your recipe uses a shelf that has dimensions 'state' and 'gender' you could filter the data to Men in California and New Hampshire with:: shelf = Shelf({ 'state': Dimension(Census.state), 'gender': Dimension(Census.gender), 'population': Metric(func.sum(Census.population)), }) recipe = Recipe(shelf=shelf) recipe.dimensions('state').metrics('population').automatic_filters({ 'state': ['California', 'New Hampshire'], 'gender': 'M' }) Automatic filter keys can optionally include an ``operator``. **List operators** If the value provided in the automatic_filter dictionary is a list, the following operators are available. The default operator is ``in``:: in (default) notin between (requires a list of two items) **Scalar operators** If the value provided in the automatic_filter dictionary is a scalar (a string, integer, or number), the following operators are available. The default operator is ``eq``:: eq (equal) (the default) ne (not equal) lt (less than) lte (less than or equal) gt (greater than) gte (greater than or equal) **An example using operators** Here's an example that filters to states that start with the letters A-C:: shelf = Shelf({ 'state': Dimension(Census.state), 'gender': Dimension(Census.gender), 'population': Metric(func.sum(Census.population)), }) recipe = Recipe(shelf=shelf) recipe.dimensions('state').metrics('population').automatic_filters({ 'state__lt': 'D' })
[ "Sets", "a", "dictionary", "of", "automatic", "filters", "to", "apply", "to", "this", "recipe", ".", "If", "your", "recipe", "uses", "a", "shelf", "that", "has", "dimensions", "state", "and", "gender", "you", "could", "filter", "the", "data", "to", "Men", "in", "California", "and", "New", "Hampshire", "with", "::" ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/extensions.py#L224-L282
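A hedged sketch combining the documented list and scalar operators in one filter dictionary; the shelf is the Census shelf from the docstring above.

recipe = Recipe(shelf=shelf).dimensions('state').metrics('population')
recipe.automatic_filters({
    'state__notin': ['Alaska', 'Hawaii'],  # list operator
    'gender': 'F',                         # scalar value, default eq operator
})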
juiceinc/recipe
recipe/extensions.py
SummarizeOver.modify_postquery_parts
def modify_postquery_parts(self, postquery_parts): """ Take a recipe that has dimensions Resummarize it over one of the dimensions returning averages of the metrics. """ if self._summarize_over is None: return postquery_parts assert self._summarize_over in self.recipe.dimension_ids # Start with a subquery subq = postquery_parts['query'].subquery(name='summarize') summarize_over_dim = set(( self._summarize_over, self._summarize_over + '_id', self._summarize_over + '_raw' )) dim_column_names = set(dim for dim in self.recipe.dimension_ids).union( set(dim + '_id' for dim in self.recipe.dimension_ids) ).union(set(dim + '_raw' for dim in self.recipe.dimension_ids)) used_dim_column_names = dim_column_names - summarize_over_dim # Build a new query around the subquery group_by_columns = [ col for col in subq.c if col.name in used_dim_column_names ] # Generate columns for the metric, remapping the aggregation function # count -> sum # sum -> sum # avg -> avg # Metrics can override the summary aggregation by providing a # metric.meta.summary_aggregation callable parameter metric_columns = [] for col in subq.c: if col.name not in dim_column_names: met = self.recipe._cauldron.find(col.name, Metric) summary_aggregation = met.meta.get('summary_aggregation', None) if summary_aggregation is None: if str(met.expression).startswith(u'avg'): summary_aggregation = func.avg elif str(met.expression).startswith(u'count'): summary_aggregation = func.sum elif str(met.expression).startswith(u'sum'): summary_aggregation = func.sum if summary_aggregation is None: # We don't know how to aggregate this metric in a summary raise BadRecipe( u'Provide a summary_aggregation for metric' u' {}'.format(col.name) ) metric_columns.append(summary_aggregation(col).label(col.name)) # Find the ordering columns and apply them to the new query order_by_columns = [] for col in postquery_parts['query']._order_by: subq_col = getattr( subq.c, col.name, getattr(subq.c, col.name + '_raw', None) ) if subq_col is not None: order_by_columns.append(subq_col) postquery_parts['query'] = self.recipe._session.query( *(group_by_columns + metric_columns) ).group_by(*group_by_columns).order_by(*order_by_columns) # Remove the summarized dimension self.recipe._cauldron.pop(self._summarize_over, None) return postquery_parts
python
def modify_postquery_parts(self, postquery_parts): """ Take a recipe that has dimensions Resummarize it over one of the dimensions returning averages of the metrics. """ if self._summarize_over is None: return postquery_parts assert self._summarize_over in self.recipe.dimension_ids # Start with a subquery subq = postquery_parts['query'].subquery(name='summarize') summarize_over_dim = set(( self._summarize_over, self._summarize_over + '_id', self._summarize_over + '_raw' )) dim_column_names = set(dim for dim in self.recipe.dimension_ids).union( set(dim + '_id' for dim in self.recipe.dimension_ids) ).union(set(dim + '_raw' for dim in self.recipe.dimension_ids)) used_dim_column_names = dim_column_names - summarize_over_dim # Build a new query around the subquery group_by_columns = [ col for col in subq.c if col.name in used_dim_column_names ] # Generate columns for the metric, remapping the aggregation function # count -> sum # sum -> sum # avg -> avg # Metrics can override the summary aggregation by providing a # metric.meta.summary_aggregation callable parameter metric_columns = [] for col in subq.c: if col.name not in dim_column_names: met = self.recipe._cauldron.find(col.name, Metric) summary_aggregation = met.meta.get('summary_aggregation', None) if summary_aggregation is None: if str(met.expression).startswith(u'avg'): summary_aggregation = func.avg elif str(met.expression).startswith(u'count'): summary_aggregation = func.sum elif str(met.expression).startswith(u'sum'): summary_aggregation = func.sum if summary_aggregation is None: # We don't know how to aggregate this metric in a summary raise BadRecipe( u'Provide a summary_aggregation for metric' u' {}'.format(col.name) ) metric_columns.append(summary_aggregation(col).label(col.name)) # Find the ordering columns and apply them to the new query order_by_columns = [] for col in postquery_parts['query']._order_by: subq_col = getattr( subq.c, col.name, getattr(subq.c, col.name + '_raw', None) ) if subq_col is not None: order_by_columns.append(subq_col) postquery_parts['query'] = self.recipe._session.query( *(group_by_columns + metric_columns) ).group_by(*group_by_columns).order_by(*order_by_columns) # Remove the summarized dimension self.recipe._cauldron.pop(self._summarize_over, None) return postquery_parts
[ "def", "modify_postquery_parts", "(", "self", ",", "postquery_parts", ")", ":", "if", "self", ".", "_summarize_over", "is", "None", ":", "return", "postquery_parts", "assert", "self", ".", "_summarize_over", "in", "self", ".", "recipe", ".", "dimension_ids", "# Start with a subquery", "subq", "=", "postquery_parts", "[", "'query'", "]", ".", "subquery", "(", "name", "=", "'summarize'", ")", "summarize_over_dim", "=", "set", "(", "(", "self", ".", "_summarize_over", ",", "self", ".", "_summarize_over", "+", "'_id'", ",", "self", ".", "_summarize_over", "+", "'_raw'", ")", ")", "dim_column_names", "=", "set", "(", "dim", "for", "dim", "in", "self", ".", "recipe", ".", "dimension_ids", ")", ".", "union", "(", "set", "(", "dim", "+", "'_id'", "for", "dim", "in", "self", ".", "recipe", ".", "dimension_ids", ")", ")", ".", "union", "(", "set", "(", "dim", "+", "'_raw'", "for", "dim", "in", "self", ".", "recipe", ".", "dimension_ids", ")", ")", "used_dim_column_names", "=", "dim_column_names", "-", "summarize_over_dim", "# Build a new query around the subquery", "group_by_columns", "=", "[", "col", "for", "col", "in", "subq", ".", "c", "if", "col", ".", "name", "in", "used_dim_column_names", "]", "# Generate columns for the metric, remapping the aggregation function", "# count -> sum", "# sum -> sum", "# avg -> avg", "# Metrics can override the summary aggregation by providing a", "# metric.meta.summary_aggregation callable parameter", "metric_columns", "=", "[", "]", "for", "col", "in", "subq", ".", "c", ":", "if", "col", ".", "name", "not", "in", "dim_column_names", ":", "met", "=", "self", ".", "recipe", ".", "_cauldron", ".", "find", "(", "col", ".", "name", ",", "Metric", ")", "summary_aggregation", "=", "met", ".", "meta", ".", "get", "(", "'summary_aggregation'", ",", "None", ")", "if", "summary_aggregation", "is", "None", ":", "if", "str", "(", "met", ".", "expression", ")", ".", "startswith", "(", "u'avg'", ")", ":", "summary_aggregation", "=", "func", ".", "avg", "elif", "str", "(", "met", ".", "expression", ")", ".", "startswith", "(", "u'count'", ")", ":", "summary_aggregation", "=", "func", ".", "sum", "elif", "str", "(", "met", ".", "expression", ")", ".", "startswith", "(", "u'sum'", ")", ":", "summary_aggregation", "=", "func", ".", "sum", "if", "summary_aggregation", "is", "None", ":", "# We don't know how to aggregate this metric in a summary", "raise", "BadRecipe", "(", "u'Provide a summary_aggregation for metric'", "u' {}'", ".", "format", "(", "col", ".", "name", ")", ")", "metric_columns", ".", "append", "(", "summary_aggregation", "(", "col", ")", ".", "label", "(", "col", ".", "name", ")", ")", "# Find the ordering columns and apply them to the new query", "order_by_columns", "=", "[", "]", "for", "col", "in", "postquery_parts", "[", "'query'", "]", ".", "_order_by", ":", "subq_col", "=", "getattr", "(", "subq", ".", "c", ",", "col", ".", "name", ",", "getattr", "(", "subq", ".", "c", ",", "col", ".", "name", "+", "'_raw'", ",", "None", ")", ")", "if", "subq_col", "is", "not", "None", ":", "order_by_columns", ".", "append", "(", "subq_col", ")", "postquery_parts", "[", "'query'", "]", "=", "self", ".", "recipe", ".", "_session", ".", "query", "(", "*", "(", "group_by_columns", "+", "metric_columns", ")", ")", ".", "group_by", "(", "*", "group_by_columns", ")", ".", "order_by", "(", "*", "order_by_columns", ")", "# Remove the summarized dimension", "self", ".", "recipe", ".", "_cauldron", ".", "pop", "(", "self", ".", "_summarize_over", ",", "None", ")", "return", 
"postquery_parts" ]
Take a recipe that has dimensions. Resummarize it over one of the dimensions, returning averages of the metrics.
[ "Take", "a", "recipe", "that", "has", "dimensions", "Resummarize", "it", "over", "one", "of", "the", "dimensions", "returning", "averages", "of", "the", "metrics", "." ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/extensions.py#L334-L403
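A hedged usage sketch; the summarize_over() builder name, the extension_classes wiring, and the import path are assumptions inferred from the _summarize_over attribute above, not confirmed API.

# Assumed import path and builder name, inferred from the code above.
from recipe.extensions import SummarizeOver

recipe = (
    Recipe(shelf=shelf, extension_classes=[SummarizeOver])
    .dimensions('state', 'gender')
    .metrics('population')
    .summarize_over('gender')  # assumed builder; collapses the gender dimension
)
# count/sum metrics are re-aggregated with func.sum and avg metrics with
# func.avg, unless metric.meta.summary_aggregation overrides the choice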
juiceinc/recipe
recipe/extensions.py
Anonymize.anonymize
def anonymize(self, value): """ Should this recipe be anonymized""" assert isinstance(value, bool) if self._anonymize != value: self.dirty = True self._anonymize = value # Builder pattern must return the recipe return self.recipe
python
def anonymize(self, value): """ Should this recipe be anonymized""" assert isinstance(value, bool) if self._anonymize != value: self.dirty = True self._anonymize = value # Builder pattern must return the recipe return self.recipe
[ "def", "anonymize", "(", "self", ",", "value", ")", ":", "assert", "isinstance", "(", "value", ",", "bool", ")", "if", "self", ".", "_anonymize", "!=", "value", ":", "self", ".", "dirty", "=", "True", "self", ".", "_anonymize", "=", "value", "# Builder pattern must return the recipe", "return", "self", ".", "recipe" ]
Should this recipe be anonymized
[ "Should", "this", "recipe", "be", "anonymized" ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/extensions.py#L430-L439
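Because the setter returns the recipe, anonymize chains like the other builders. A hedged sketch follows; the shelf is a hypothetical stand-in and the Anonymize extension is assumed to be active.

recipe = Recipe(shelf=shelf).dimensions('name').metrics('population')
recipe.anonymize(True)   # flips the flag and marks the recipe dirty
recipe.anonymize(True)   # same value again: no change, recipe stays clean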
juiceinc/recipe
recipe/extensions.py
Anonymize.add_ingredients
def add_ingredients(self): """ Put the anonymizers in the last position of formatters """ for ingredient in self.recipe._cauldron.values(): if hasattr(ingredient.meta, 'anonymizer'): anonymizer = ingredient.meta.anonymizer # Build a FakerAnonymizer if we have a string if isinstance(anonymizer, basestring): # Check for extra parameters kwargs = {} anonymizer_locale = getattr( ingredient.meta, 'anonymizer_locale', None ) anonymizer_postprocessor = getattr( ingredient.meta, 'anonymizer_postprocessor', None ) if anonymizer_postprocessor is not None: kwargs['postprocessor'] = anonymizer_postprocessor if anonymizer_locale is not None: kwargs['locale'] = anonymizer_locale anonymizer = FakerAnonymizer(anonymizer, **kwargs) # Strip out all FakerAnonymizers ingredient.formatters = [ f for f in ingredient.formatters if not isinstance(f, FakerAnonymizer) ] if self._anonymize: if ingredient.meta.anonymizer not in ingredient.formatters: ingredient.formatters.append(anonymizer) else: if ingredient.meta.anonymizer in ingredient.formatters: ingredient.formatters.remove(anonymizer)
python
def add_ingredients(self): """ Put the anonymizers in the last position of formatters """ for ingredient in self.recipe._cauldron.values(): if hasattr(ingredient.meta, 'anonymizer'): anonymizer = ingredient.meta.anonymizer # Build a FakerAnonymizer if we have a string if isinstance(anonymizer, basestring): # Check for extra parameters kwargs = {} anonymizer_locale = getattr( ingredient.meta, 'anonymizer_locale', None ) anonymizer_postprocessor = getattr( ingredient.meta, 'anonymizer_postprocessor', None ) if anonymizer_postprocessor is not None: kwargs['postprocessor'] = anonymizer_postprocessor if anonymizer_locale is not None: kwargs['locale'] = anonymizer_locale anonymizer = FakerAnonymizer(anonymizer, **kwargs) # Strip out all FakerAnonymizers ingredient.formatters = [ f for f in ingredient.formatters if not isinstance(f, FakerAnonymizer) ] if self._anonymize: if ingredient.meta.anonymizer not in ingredient.formatters: ingredient.formatters.append(anonymizer) else: if ingredient.meta.anonymizer in ingredient.formatters: ingredient.formatters.remove(anonymizer)
[ "def", "add_ingredients", "(", "self", ")", ":", "for", "ingredient", "in", "self", ".", "recipe", ".", "_cauldron", ".", "values", "(", ")", ":", "if", "hasattr", "(", "ingredient", ".", "meta", ",", "'anonymizer'", ")", ":", "anonymizer", "=", "ingredient", ".", "meta", ".", "anonymizer", "# Build a FakerAnonymizer if we have a string", "if", "isinstance", "(", "anonymizer", ",", "basestring", ")", ":", "# Check for extra parameters", "kwargs", "=", "{", "}", "anonymizer_locale", "=", "getattr", "(", "ingredient", ".", "meta", ",", "'anonymizer_locale'", ",", "None", ")", "anonymizer_postprocessor", "=", "getattr", "(", "ingredient", ".", "meta", ",", "'anonymizer_postprocessor'", ",", "None", ")", "if", "anonymizer_postprocessor", "is", "not", "None", ":", "kwargs", "[", "'postprocessor'", "]", "=", "anonymizer_postprocessor", "if", "anonymizer_locale", "is", "not", "None", ":", "kwargs", "[", "'locale'", "]", "=", "anonymizer_locale", "anonymizer", "=", "FakerAnonymizer", "(", "anonymizer", ",", "*", "*", "kwargs", ")", "# Strip out all FakerAnonymizers", "ingredient", ".", "formatters", "=", "[", "f", "for", "f", "in", "ingredient", ".", "formatters", "if", "not", "isinstance", "(", "f", ",", "FakerAnonymizer", ")", "]", "if", "self", ".", "_anonymize", ":", "if", "ingredient", ".", "meta", ".", "anonymizer", "not", "in", "ingredient", ".", "formatters", ":", "ingredient", ".", "formatters", ".", "append", "(", "anonymizer", ")", "else", ":", "if", "ingredient", ".", "meta", ".", "anonymizer", "in", "ingredient", ".", "formatters", ":", "ingredient", ".", "formatters", ".", "remove", "(", "anonymizer", ")" ]
Put the anonymizers in the last position of formatters
[ "Put", "the", "anonymizers", "in", "the", "last", "position", "of", "formatters" ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/extensions.py#L441-L475
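A hedged sketch of the meta fields add_ingredients inspects; whether Dimension forwards these keyword arguments into ingredient.meta is an assumption, and the faker pattern is illustrative.

name = Dimension(
    Census.name,
    anonymizer='{{name}}',               # a string gets wrapped in a FakerAnonymizer
    anonymizer_locale='en_US',           # optional; forwarded as locale=
    anonymizer_postprocessor=str.title,  # optional; forwarded as postprocessor=
)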
juiceinc/recipe
recipe/extensions.py
BlendRecipe.blend
def blend(self, blend_recipe, join_base, join_blend): """Blend a recipe into the base recipe. This performs an inner join of the blend_recipe to the base recipe's SQL. """ assert isinstance(blend_recipe, Recipe) self.blend_recipes.append(blend_recipe) self.blend_types.append('inner') self.blend_criteria.append((join_base, join_blend)) self.dirty = True return self.recipe
python
def blend(self, blend_recipe, join_base, join_blend): """Blend a recipe into the base recipe. This performs an inner join of the blend_recipe to the base recipe's SQL. """ assert isinstance(blend_recipe, Recipe) self.blend_recipes.append(blend_recipe) self.blend_types.append('inner') self.blend_criteria.append((join_base, join_blend)) self.dirty = True return self.recipe
[ "def", "blend", "(", "self", ",", "blend_recipe", ",", "join_base", ",", "join_blend", ")", ":", "assert", "isinstance", "(", "blend_recipe", ",", "Recipe", ")", "self", ".", "blend_recipes", ".", "append", "(", "blend_recipe", ")", "self", ".", "blend_types", ".", "append", "(", "'inner'", ")", "self", ".", "blend_criteria", ".", "append", "(", "(", "join_base", ",", "join_blend", ")", ")", "self", ".", "dirty", "=", "True", "return", "self", ".", "recipe" ]
Blend a recipe into the base recipe. This performs an inner join of the blend_recipe to the base recipe's SQL.
[ "Blend", "a", "recipe", "into", "the", "base", "recipe", ".", "This", "performs", "an", "inner", "join", "of", "the", "blend_recipe", "to", "the", "base", "recipe", "s", "SQL", "." ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/extensions.py#L498-L509
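A hedged sketch of blending two recipes on a shared dimension; both shelves are hypothetical stand-ins and the extension wiring is assumed.

base = Recipe(shelf=shelf, extension_classes=[BlendRecipe]) \
    .dimensions('state').metrics('population')
incomes = Recipe(shelf=income_shelf).dimensions('state').metrics('avg_income')
base.blend(incomes, join_base='state', join_blend='state')  # inner join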
juiceinc/recipe
recipe/extensions.py
BlendRecipe.modify_postquery_parts
def modify_postquery_parts(self, postquery_parts): """ Make the comparison recipe a subquery that is left joined to the base recipe using dimensions that are shared between the recipes. Hoist the metric from the comparison recipe up to the base query while adding the suffix. """ if not self.blend_recipes: return postquery_parts for blend_recipe, blend_type, blend_criteria in \ zip(self.blend_recipes, self.blend_types, self.blend_criteria): join_base, join_blend = blend_criteria blend_subq = blend_recipe.subquery() # For all metrics in the blend recipe # Use the metric in the base recipe and # Add the metric columns to the base recipe for m in blend_recipe.metric_ids: met = blend_recipe._cauldron[m] self.recipe._cauldron.use(met) for suffix in met.make_column_suffixes(): col = getattr(blend_subq.c, met.id, None) if col is not None: postquery_parts['query'] = postquery_parts[ 'query' ].add_columns(col.label(met.id + suffix)) else: raise BadRecipe( '{} could not be found in .blend() ' 'recipe subquery'.format(id + suffix) ) # For all dimensions in the blend recipe # Use the dimension in the base recipe and # Add the dimension columns and group_by to the base recipe # Ignore the join_blend dimension for d in blend_recipe.dimension_ids: if d == join_blend: continue dim = blend_recipe._cauldron[d] self.recipe._cauldron.use(dim) for suffix in dim.make_column_suffixes(): col = getattr(blend_subq.c, dim.id, None) if col is not None: postquery_parts['query'] = postquery_parts[ 'query' ].add_columns(col.label(dim.id + suffix)) postquery_parts['query'] = postquery_parts[ 'query' ].group_by(col) else: raise BadRecipe( '{} could not be found in .blend() ' 'recipe subquery'.format(id + suffix) ) base_dim = self.recipe._cauldron[join_base] blend_dim = blend_recipe._cauldron[join_blend] base_col = base_dim.columns[0] blend_col = getattr(blend_subq.c, blend_dim.id_prop, None) if blend_col is None: raise BadRecipe( 'Can\'t find join property for {} dimension in \ blend recipe'.format(blend_dim.id_prop) ) if blend_type == 'outer': postquery_parts['query'] = postquery_parts['query'] \ .outerjoin(blend_subq, base_col == blend_col) else: postquery_parts['query'] = postquery_parts['query'] \ .join(blend_subq, base_col == blend_col) return postquery_parts
python
def modify_postquery_parts(self, postquery_parts): """ Make the comparison recipe a subquery that is left joined to the base recipe using dimensions that are shared between the recipes. Hoist the metric from the comparison recipe up to the base query while adding the suffix. """ if not self.blend_recipes: return postquery_parts for blend_recipe, blend_type, blend_criteria in \ zip(self.blend_recipes, self.blend_types, self.blend_criteria): join_base, join_blend = blend_criteria blend_subq = blend_recipe.subquery() # For all metrics in the blend recipe # Use the metric in the base recipe and # Add the metric columns to the base recipe for m in blend_recipe.metric_ids: met = blend_recipe._cauldron[m] self.recipe._cauldron.use(met) for suffix in met.make_column_suffixes(): col = getattr(blend_subq.c, met.id, None) if col is not None: postquery_parts['query'] = postquery_parts[ 'query' ].add_columns(col.label(met.id + suffix)) else: raise BadRecipe( '{} could not be found in .blend() ' 'recipe subquery'.format(id + suffix) ) # For all dimensions in the blend recipe # Use the dimension in the base recipe and # Add the dimension columns and group_by to the base recipe # Ignore the join_blend dimension for d in blend_recipe.dimension_ids: if d == join_blend: continue dim = blend_recipe._cauldron[d] self.recipe._cauldron.use(dim) for suffix in dim.make_column_suffixes(): col = getattr(blend_subq.c, dim.id, None) if col is not None: postquery_parts['query'] = postquery_parts[ 'query' ].add_columns(col.label(dim.id + suffix)) postquery_parts['query'] = postquery_parts[ 'query' ].group_by(col) else: raise BadRecipe( '{} could not be found in .blend() ' 'recipe subquery'.format(id + suffix) ) base_dim = self.recipe._cauldron[join_base] blend_dim = blend_recipe._cauldron[join_blend] base_col = base_dim.columns[0] blend_col = getattr(blend_subq.c, blend_dim.id_prop, None) if blend_col is None: raise BadRecipe( 'Can\'t find join property for {} dimension in \ blend recipe'.format(blend_dim.id_prop) ) if blend_type == 'outer': postquery_parts['query'] = postquery_parts['query'] \ .outerjoin(blend_subq, base_col == blend_col) else: postquery_parts['query'] = postquery_parts['query'] \ .join(blend_subq, base_col == blend_col) return postquery_parts
[ "def", "modify_postquery_parts", "(", "self", ",", "postquery_parts", ")", ":", "if", "not", "self", ".", "blend_recipes", ":", "return", "postquery_parts", "for", "blend_recipe", ",", "blend_type", ",", "blend_criteria", "in", "zip", "(", "self", ".", "blend_recipes", ",", "self", ".", "blend_types", ",", "self", ".", "blend_criteria", ")", ":", "join_base", ",", "join_blend", "=", "blend_criteria", "blend_subq", "=", "blend_recipe", ".", "subquery", "(", ")", "# For all metrics in the blend recipe", "# Use the metric in the base recipe and", "# Add the metric columns to the base recipe", "for", "m", "in", "blend_recipe", ".", "metric_ids", ":", "met", "=", "blend_recipe", ".", "_cauldron", "[", "m", "]", "self", ".", "recipe", ".", "_cauldron", ".", "use", "(", "met", ")", "for", "suffix", "in", "met", ".", "make_column_suffixes", "(", ")", ":", "col", "=", "getattr", "(", "blend_subq", ".", "c", ",", "met", ".", "id", ",", "None", ")", "if", "col", "is", "not", "None", ":", "postquery_parts", "[", "'query'", "]", "=", "postquery_parts", "[", "'query'", "]", ".", "add_columns", "(", "col", ".", "label", "(", "met", ".", "id", "+", "suffix", ")", ")", "else", ":", "raise", "BadRecipe", "(", "'{} could not be found in .blend() '", "'recipe subquery'", ".", "format", "(", "id", "+", "suffix", ")", ")", "# For all dimensions in the blend recipe", "# Use the dimension in the base recipe and", "# Add the dimension columns and group_by to the base recipe", "# Ignore the join_blend dimension", "for", "d", "in", "blend_recipe", ".", "dimension_ids", ":", "if", "d", "==", "join_blend", ":", "continue", "dim", "=", "blend_recipe", ".", "_cauldron", "[", "d", "]", "self", ".", "recipe", ".", "_cauldron", ".", "use", "(", "dim", ")", "for", "suffix", "in", "dim", ".", "make_column_suffixes", "(", ")", ":", "col", "=", "getattr", "(", "blend_subq", ".", "c", ",", "dim", ".", "id", ",", "None", ")", "if", "col", "is", "not", "None", ":", "postquery_parts", "[", "'query'", "]", "=", "postquery_parts", "[", "'query'", "]", ".", "add_columns", "(", "col", ".", "label", "(", "dim", ".", "id", "+", "suffix", ")", ")", "postquery_parts", "[", "'query'", "]", "=", "postquery_parts", "[", "'query'", "]", ".", "group_by", "(", "col", ")", "else", ":", "raise", "BadRecipe", "(", "'{} could not be found in .blend() '", "'recipe subquery'", ".", "format", "(", "id", "+", "suffix", ")", ")", "base_dim", "=", "self", ".", "recipe", ".", "_cauldron", "[", "join_base", "]", "blend_dim", "=", "blend_recipe", ".", "_cauldron", "[", "join_blend", "]", "base_col", "=", "base_dim", ".", "columns", "[", "0", "]", "blend_col", "=", "getattr", "(", "blend_subq", ".", "c", ",", "blend_dim", ".", "id_prop", ",", "None", ")", "if", "blend_col", "is", "None", ":", "raise", "BadRecipe", "(", "'Can\\'t find join property for {} dimension in \\\n blend recipe'", ".", "format", "(", "blend_dim", ".", "id_prop", ")", ")", "if", "blend_type", "==", "'outer'", ":", "postquery_parts", "[", "'query'", "]", "=", "postquery_parts", "[", "'query'", "]", ".", "outerjoin", "(", "blend_subq", ",", "base_col", "==", "blend_col", ")", "else", ":", "postquery_parts", "[", "'query'", "]", "=", "postquery_parts", "[", "'query'", "]", ".", "join", "(", "blend_subq", ",", "base_col", "==", "blend_col", ")", "return", "postquery_parts" ]
Make the comparison recipe a subquery that is left joined to the base recipe using dimensions that are shared between the recipes. Hoist the metric from the comparison recipe up to the base query while adding the suffix.
[ "Make", "the", "comparison", "recipe", "a", "subquery", "that", "is", "left", "joined", "to", "the", "base", "recipe", "using", "dimensions", "that", "are", "shared", "between", "the", "recipes", "." ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/extensions.py#L525-L605
juiceinc/recipe
recipe/extensions.py
CompareRecipe.compare
def compare(self, compare_recipe, suffix='_compare'): """Adds a comparison recipe to a base recipe.""" assert isinstance(compare_recipe, Recipe) assert isinstance(suffix, basestring) self.compare_recipe.append(compare_recipe) self.suffix.append(suffix) self.dirty = True return self.recipe
python
def compare(self, compare_recipe, suffix='_compare'): """Adds a comparison recipe to a base recipe.""" assert isinstance(compare_recipe, Recipe) assert isinstance(suffix, basestring) self.compare_recipe.append(compare_recipe) self.suffix.append(suffix) self.dirty = True return self.recipe
[ "def", "compare", "(", "self", ",", "compare_recipe", ",", "suffix", "=", "'_compare'", ")", ":", "assert", "isinstance", "(", "compare_recipe", ",", "Recipe", ")", "assert", "isinstance", "(", "suffix", ",", "basestring", ")", "self", ".", "compare_recipe", ".", "append", "(", "compare_recipe", ")", "self", ".", "suffix", ".", "append", "(", "suffix", ")", "self", ".", "dirty", "=", "True", "return", "self", ".", "recipe" ]
Adds a comparison recipe to a base recipe.
[ "Adds", "a", "comparison", "recipe", "to", "a", "base", "recipe", "." ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/extensions.py#L624-L631
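A hedged sketch; per the modify_postquery_parts code below, the comparison recipe's metrics reappear on the base rows with the suffix (population becomes population_prior). Both shelves are hypothetical stand-ins.

base = Recipe(shelf=shelf, extension_classes=[CompareRecipe]) \
    .dimensions('state').metrics('population')
prior = Recipe(shelf=prior_year_shelf).dimensions('state').metrics('population')
base.compare(prior, suffix='_prior')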
juiceinc/recipe
recipe/extensions.py
CompareRecipe.modify_postquery_parts
def modify_postquery_parts(self, postquery_parts): """Make the comparison recipe a subquery that is left joined to the base recipe using dimensions that are shared between the recipes. Hoist the metric from the comparison recipe up to the base query while adding the suffix. """ if not self.compare_recipe: return postquery_parts for compare_recipe, compare_suffix in zip( self.compare_recipe, self.suffix ): comparison_subq = compare_recipe.subquery() # For all metrics in the comparison recipe # Use the metric in the base recipe and # Add the metric columns to the base recipe # Comparison metrics hoisted into the base recipe need an # aggregation function.The default is func.avg but # metrics can override this by provoding a # metric.meta.summary_aggregation callable parameter for m in compare_recipe.metric_ids: met = compare_recipe._cauldron[m] id = met.id met.id = id + compare_suffix summary_aggregation = met.meta.get( 'summary_aggregation', func.avg ) self.recipe._cauldron.use(met) for suffix in met.make_column_suffixes(): col = getattr(comparison_subq.c, id + suffix, None) if col is not None: postquery_parts['query'] = \ postquery_parts['query'].add_columns( summary_aggregation(col).label( met.id + suffix)) else: raise BadRecipe( '{} could not be found in .compare() ' 'recipe subquery'.format(id + suffix) ) join_conditions = [] for dim in compare_recipe.dimension_ids: if dim not in self.recipe.dimension_ids: raise BadRecipe( '{} dimension in comparison recipe must exist ' 'in base recipe' ) base_dim = self.recipe._cauldron[dim] compare_dim = compare_recipe._cauldron[dim] base_col = base_dim.columns[0] compare_col = getattr( comparison_subq.c, compare_dim.id_prop, None ) if compare_col is None: raise BadRecipe( 'Can\'t find join property for {} dimension in \ compare recipe'.format(compare_dim.id_prop) ) join_conditions.append(base_col == compare_col) join_clause = text('1=1') if join_conditions: join_clause = and_(*join_conditions) postquery_parts['query'] = postquery_parts['query'] \ .outerjoin(comparison_subq, join_clause) return postquery_parts
python
def modify_postquery_parts(self, postquery_parts): """Make the comparison recipe a subquery that is left joined to the base recipe using dimensions that are shared between the recipes. Hoist the metric from the comparison recipe up to the base query while adding the suffix. """ if not self.compare_recipe: return postquery_parts for compare_recipe, compare_suffix in zip( self.compare_recipe, self.suffix ): comparison_subq = compare_recipe.subquery() # For all metrics in the comparison recipe # Use the metric in the base recipe and # Add the metric columns to the base recipe # Comparison metrics hoisted into the base recipe need an # aggregation function.The default is func.avg but # metrics can override this by provoding a # metric.meta.summary_aggregation callable parameter for m in compare_recipe.metric_ids: met = compare_recipe._cauldron[m] id = met.id met.id = id + compare_suffix summary_aggregation = met.meta.get( 'summary_aggregation', func.avg ) self.recipe._cauldron.use(met) for suffix in met.make_column_suffixes(): col = getattr(comparison_subq.c, id + suffix, None) if col is not None: postquery_parts['query'] = \ postquery_parts['query'].add_columns( summary_aggregation(col).label( met.id + suffix)) else: raise BadRecipe( '{} could not be found in .compare() ' 'recipe subquery'.format(id + suffix) ) join_conditions = [] for dim in compare_recipe.dimension_ids: if dim not in self.recipe.dimension_ids: raise BadRecipe( '{} dimension in comparison recipe must exist ' 'in base recipe' ) base_dim = self.recipe._cauldron[dim] compare_dim = compare_recipe._cauldron[dim] base_col = base_dim.columns[0] compare_col = getattr( comparison_subq.c, compare_dim.id_prop, None ) if compare_col is None: raise BadRecipe( 'Can\'t find join property for {} dimension in \ compare recipe'.format(compare_dim.id_prop) ) join_conditions.append(base_col == compare_col) join_clause = text('1=1') if join_conditions: join_clause = and_(*join_conditions) postquery_parts['query'] = postquery_parts['query'] \ .outerjoin(comparison_subq, join_clause) return postquery_parts
[ "def", "modify_postquery_parts", "(", "self", ",", "postquery_parts", ")", ":", "if", "not", "self", ".", "compare_recipe", ":", "return", "postquery_parts", "for", "compare_recipe", ",", "compare_suffix", "in", "zip", "(", "self", ".", "compare_recipe", ",", "self", ".", "suffix", ")", ":", "comparison_subq", "=", "compare_recipe", ".", "subquery", "(", ")", "# For all metrics in the comparison recipe", "# Use the metric in the base recipe and", "# Add the metric columns to the base recipe", "# Comparison metrics hoisted into the base recipe need an", "# aggregation function.The default is func.avg but", "# metrics can override this by provoding a", "# metric.meta.summary_aggregation callable parameter", "for", "m", "in", "compare_recipe", ".", "metric_ids", ":", "met", "=", "compare_recipe", ".", "_cauldron", "[", "m", "]", "id", "=", "met", ".", "id", "met", ".", "id", "=", "id", "+", "compare_suffix", "summary_aggregation", "=", "met", ".", "meta", ".", "get", "(", "'summary_aggregation'", ",", "func", ".", "avg", ")", "self", ".", "recipe", ".", "_cauldron", ".", "use", "(", "met", ")", "for", "suffix", "in", "met", ".", "make_column_suffixes", "(", ")", ":", "col", "=", "getattr", "(", "comparison_subq", ".", "c", ",", "id", "+", "suffix", ",", "None", ")", "if", "col", "is", "not", "None", ":", "postquery_parts", "[", "'query'", "]", "=", "postquery_parts", "[", "'query'", "]", ".", "add_columns", "(", "summary_aggregation", "(", "col", ")", ".", "label", "(", "met", ".", "id", "+", "suffix", ")", ")", "else", ":", "raise", "BadRecipe", "(", "'{} could not be found in .compare() '", "'recipe subquery'", ".", "format", "(", "id", "+", "suffix", ")", ")", "join_conditions", "=", "[", "]", "for", "dim", "in", "compare_recipe", ".", "dimension_ids", ":", "if", "dim", "not", "in", "self", ".", "recipe", ".", "dimension_ids", ":", "raise", "BadRecipe", "(", "'{} dimension in comparison recipe must exist '", "'in base recipe'", ")", "base_dim", "=", "self", ".", "recipe", ".", "_cauldron", "[", "dim", "]", "compare_dim", "=", "compare_recipe", ".", "_cauldron", "[", "dim", "]", "base_col", "=", "base_dim", ".", "columns", "[", "0", "]", "compare_col", "=", "getattr", "(", "comparison_subq", ".", "c", ",", "compare_dim", ".", "id_prop", ",", "None", ")", "if", "compare_col", "is", "None", ":", "raise", "BadRecipe", "(", "'Can\\'t find join property for {} dimension in \\\n compare recipe'", ".", "format", "(", "compare_dim", ".", "id_prop", ")", ")", "join_conditions", ".", "append", "(", "base_col", "==", "compare_col", ")", "join_clause", "=", "text", "(", "'1=1'", ")", "if", "join_conditions", ":", "join_clause", "=", "and_", "(", "*", "join_conditions", ")", "postquery_parts", "[", "'query'", "]", "=", "postquery_parts", "[", "'query'", "]", ".", "outerjoin", "(", "comparison_subq", ",", "join_clause", ")", "return", "postquery_parts" ]
Make the comparison recipe a subquery that is left joined to the base recipe using dimensions that are shared between the recipes. Hoist the metric from the comparison recipe up to the base query while adding the suffix.
[ "Make", "the", "comparison", "recipe", "a", "subquery", "that", "is", "left", "joined", "to", "the", "base", "recipe", "using", "dimensions", "that", "are", "shared", "between", "the", "recipes", "." ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/extensions.py#L633-L705
oceanprotocol/osmosis-azure-driver
osmosis_azure_driver/data_plugin.py
Plugin.list
def list(self, container_or_share_name, container=None, account=None): """List the blobs/files inside a container/share_name. Args: container_or_share_name(str): Name of the container/share_name where we want to list the blobs/files. container(bool): flag to know it you are listing files or blobs. account(str): The name of the storage account. """ key = self.storage_client.storage_accounts.list_keys(self.resource_group_name, account).keys[0].value if container: bs = BlockBlobService(account_name=account, account_key=key) container_list = [] for i in bs.list_blobs(container_or_share_name).items: container_list.append(i.name) return container_list elif not container: fs = FileService(account_name=account, account_key=key) container_list = [] for i in fs.list_directories_and_files(container_or_share_name).items: container_list.append(i.name) return container_list else: raise ValueError("You have to pass a value for container param")
python
def list(self, container_or_share_name, container=None, account=None): """List the blobs/files inside a container/share_name. Args: container_or_share_name(str): Name of the container/share_name where we want to list the blobs/files. container(bool): flag to know it you are listing files or blobs. account(str): The name of the storage account. """ key = self.storage_client.storage_accounts.list_keys(self.resource_group_name, account).keys[0].value if container: bs = BlockBlobService(account_name=account, account_key=key) container_list = [] for i in bs.list_blobs(container_or_share_name).items: container_list.append(i.name) return container_list elif not container: fs = FileService(account_name=account, account_key=key) container_list = [] for i in fs.list_directories_and_files(container_or_share_name).items: container_list.append(i.name) return container_list else: raise ValueError("You have to pass a value for container param")
[ "def", "list", "(", "self", ",", "container_or_share_name", ",", "container", "=", "None", ",", "account", "=", "None", ")", ":", "key", "=", "self", ".", "storage_client", ".", "storage_accounts", ".", "list_keys", "(", "self", ".", "resource_group_name", ",", "account", ")", ".", "keys", "[", "0", "]", ".", "value", "if", "container", ":", "bs", "=", "BlockBlobService", "(", "account_name", "=", "account", ",", "account_key", "=", "key", ")", "container_list", "=", "[", "]", "for", "i", "in", "bs", ".", "list_blobs", "(", "container_or_share_name", ")", ".", "items", ":", "container_list", ".", "append", "(", "i", ".", "name", ")", "return", "container_list", "elif", "not", "container", ":", "fs", "=", "FileService", "(", "account_name", "=", "account", ",", "account_key", "=", "key", ")", "container_list", "=", "[", "]", "for", "i", "in", "fs", ".", "list_directories_and_files", "(", "container_or_share_name", ")", ".", "items", ":", "container_list", ".", "append", "(", "i", ".", "name", ")", "return", "container_list", "else", ":", "raise", "ValueError", "(", "\"You have to pass a value for container param\"", ")" ]
List the blobs/files inside a container/share_name. Args: container_or_share_name(str): Name of the container/share_name where we want to list the blobs/files. container(bool): flag to know if you are listing files or blobs. account(str): The name of the storage account.
[ "List", "the", "blobs", "/", "files", "inside", "a", "container", "/", "share_name", ".", "Args", ":", "container_or_share_name", "(", "str", ")", ":", "Name", "of", "the", "container", "/", "share_name", "where", "we", "want", "to", "list", "the", "blobs", "/", "files", ".", "container", "(", "bool", ")", ":", "flag", "to", "know", "it", "you", "are", "listing", "files", "or", "blobs", ".", "account", "(", "str", ")", ":", "The", "name", "of", "the", "storage", "account", "." ]
train
https://github.com/oceanprotocol/osmosis-azure-driver/blob/36bcfa96547fb6117346b02b0ac6a74345c59695/osmosis_azure_driver/data_plugin.py#L89-L110
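A hedged sketch against an assumed Plugin instance; the constructor arguments, account name, and container/share names are illustrative only.

plugin = Plugin(config)  # constructor signature is an assumption
blobs = plugin.list('mycontainer', container=True, account='myaccount')
files = plugin.list('myshare', container=False, account='myaccount')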
oceanprotocol/osmosis-azure-driver
osmosis_azure_driver/data_plugin.py
Plugin.generate_url
def generate_url(self, remote_file): """Sign a remote file to distribute. The azure url format is https://myaccount.blob.core.windows.net/mycontainer/myblob. Args: remote_file(str): The blob that we want to sign. """ parse_url = _parse_url(remote_file) key = self.storage_client.storage_accounts.list_keys(self.resource_group_name, parse_url.account).keys[0].value if parse_url.file_type == 'blob': bs = BlockBlobService(account_name=parse_url.account, account_key=key) sas_token = bs.generate_blob_shared_access_signature(parse_url.container_or_share_name, parse_url.file, permission=BlobPermissions.READ, expiry=datetime.utcnow() + timedelta(hours=24), ) source_blob_url = bs.make_blob_url(container_name=parse_url.container_or_share_name, blob_name=parse_url.file, sas_token=sas_token) return source_blob_url elif parse_url.file_type == 'file': fs = FileService(account_name=parse_url.account, account_key=key) sas_token = fs.generate_file_shared_access_signature(share_name=parse_url.container_or_share_name, directory_name=parse_url.path, file_name=parse_url.file, permission=BlobPermissions.READ, expiry=datetime.utcnow() + timedelta(hours=24), ) source_file_url = fs.make_file_url(share_name=parse_url.container_or_share_name, directory_name=parse_url.path, file_name=parse_url.file, sas_token=sas_token) return source_file_url else: raise ValueError("This azure storage type is not valid. It should be blob or file.")
python
def generate_url(self, remote_file): """Sign a remote file to distribute. The azure url format is https://myaccount.blob.core.windows.net/mycontainer/myblob. Args: remote_file(str): The blob that we want to sign. """ parse_url = _parse_url(remote_file) key = self.storage_client.storage_accounts.list_keys(self.resource_group_name, parse_url.account).keys[0].value if parse_url.file_type == 'blob': bs = BlockBlobService(account_name=parse_url.account, account_key=key) sas_token = bs.generate_blob_shared_access_signature(parse_url.container_or_share_name, parse_url.file, permission=BlobPermissions.READ, expiry=datetime.utcnow() + timedelta(hours=24), ) source_blob_url = bs.make_blob_url(container_name=parse_url.container_or_share_name, blob_name=parse_url.file, sas_token=sas_token) return source_blob_url elif parse_url.file_type == 'file': fs = FileService(account_name=parse_url.account, account_key=key) sas_token = fs.generate_file_shared_access_signature(share_name=parse_url.container_or_share_name, directory_name=parse_url.path, file_name=parse_url.file, permission=BlobPermissions.READ, expiry=datetime.utcnow() + timedelta(hours=24), ) source_file_url = fs.make_file_url(share_name=parse_url.container_or_share_name, directory_name=parse_url.path, file_name=parse_url.file, sas_token=sas_token) return source_file_url else: raise ValueError("This azure storage type is not valid. It should be blob or file.")
[ "def", "generate_url", "(", "self", ",", "remote_file", ")", ":", "parse_url", "=", "_parse_url", "(", "remote_file", ")", "key", "=", "self", ".", "storage_client", ".", "storage_accounts", ".", "list_keys", "(", "self", ".", "resource_group_name", ",", "parse_url", ".", "account", ")", ".", "keys", "[", "0", "]", ".", "value", "if", "parse_url", ".", "file_type", "==", "'blob'", ":", "bs", "=", "BlockBlobService", "(", "account_name", "=", "parse_url", ".", "account", ",", "account_key", "=", "key", ")", "sas_token", "=", "bs", ".", "generate_blob_shared_access_signature", "(", "parse_url", ".", "container_or_share_name", ",", "parse_url", ".", "file", ",", "permission", "=", "BlobPermissions", ".", "READ", ",", "expiry", "=", "datetime", ".", "utcnow", "(", ")", "+", "timedelta", "(", "hours", "=", "24", ")", ",", ")", "source_blob_url", "=", "bs", ".", "make_blob_url", "(", "container_name", "=", "parse_url", ".", "container_or_share_name", ",", "blob_name", "=", "parse_url", ".", "file", ",", "sas_token", "=", "sas_token", ")", "return", "source_blob_url", "elif", "parse_url", ".", "file_type", "==", "'file'", ":", "fs", "=", "FileService", "(", "account_name", "=", "parse_url", ".", "account", ",", "account_key", "=", "key", ")", "sas_token", "=", "fs", ".", "generate_file_shared_access_signature", "(", "share_name", "=", "parse_url", ".", "container_or_share_name", ",", "directory_name", "=", "parse_url", ".", "path", ",", "file_name", "=", "parse_url", ".", "file", ",", "permission", "=", "BlobPermissions", ".", "READ", ",", "expiry", "=", "datetime", ".", "utcnow", "(", ")", "+", "timedelta", "(", "hours", "=", "24", ")", ",", ")", "source_file_url", "=", "fs", ".", "make_file_url", "(", "share_name", "=", "parse_url", ".", "container_or_share_name", ",", "directory_name", "=", "parse_url", ".", "path", ",", "file_name", "=", "parse_url", ".", "file", ",", "sas_token", "=", "sas_token", ")", "return", "source_file_url", "else", ":", "raise", "ValueError", "(", "\"This azure storage type is not valid. It should be blob or file.\"", ")" ]
Sign a remote file to distribute. The azure url format is https://myaccount.blob.core.windows.net/mycontainer/myblob. Args: remote_file(str): The blob that we want to sign.
[ "Sign", "a", "remote", "file", "to", "distribute", ".", "The", "azure", "url", "format", "is", "https", ":", "//", "myaccount", ".", "blob", ".", "core", ".", "windows", ".", "net", "/", "mycontainer", "/", "myblob", ".", "Args", ":", "remote_file", "(", "str", ")", ":", "The", "blob", "that", "we", "want", "to", "sign", "." ]
train
https://github.com/oceanprotocol/osmosis-azure-driver/blob/36bcfa96547fb6117346b02b0ac6a74345c59695/osmosis_azure_driver/data_plugin.py#L112-L145
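A hedged sketch; per the code above, the signed URL carries a read-only SAS token that expires 24 hours after generation. The account and blob names are illustrative.

url = plugin.generate_url(
    'https://myaccount.blob.core.windows.net/mycontainer/myblob')
# `url` can be handed out for direct download without sharing the account key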
oceanprotocol/osmosis-azure-driver
osmosis_azure_driver/data_plugin.py
Plugin.delete
def delete(self, remote_file): """Delete file from the cloud. The azure url format is https://myaccount.blob.core.windows.net/mycontainer/myblob. Args: remote_file(str): The path of the file to be deleted. Raises: :exc:`~..OsmosisError`: if the file is not uploaded correctly. """ if 'core.windows.net' not in remote_file: self.logger.error("Source or destination must be a azure storage url (format " "https://myaccount.blob.core.windows.net/mycontainer/myblob") raise OsmosisError parse_url = _parse_url(remote_file) key = self.storage_client.storage_accounts.list_keys(self.resource_group_name, parse_url.account).keys[0].value if parse_url.file_type == 'blob': bs = BlockBlobService(account_name=parse_url.account, account_key=key) return bs.delete_blob(parse_url.container_or_share_name, parse_url.file) elif parse_url.file_type == 'file': fs = FileService(account_name=parse_url.account, account_key=key) return fs.delete_file(parse_url.container_or_share_name, parse_url.path, parse_url.file) else: raise ValueError("This azure storage type is not valid. It should be blob or file.")
python
def delete(self, remote_file): """Delete file from the cloud. The azure url format is https://myaccount.blob.core.windows.net/mycontainer/myblob. Args: remote_file(str): The path of the file to be deleted. Raises: :exc:`~..OsmosisError`: if the file is not uploaded correctly. """ if 'core.windows.net' not in remote_file: self.logger.error("Source or destination must be a azure storage url (format " "https://myaccount.blob.core.windows.net/mycontainer/myblob") raise OsmosisError parse_url = _parse_url(remote_file) key = self.storage_client.storage_accounts.list_keys(self.resource_group_name, parse_url.account).keys[0].value if parse_url.file_type == 'blob': bs = BlockBlobService(account_name=parse_url.account, account_key=key) return bs.delete_blob(parse_url.container_or_share_name, parse_url.file) elif parse_url.file_type == 'file': fs = FileService(account_name=parse_url.account, account_key=key) return fs.delete_file(parse_url.container_or_share_name, parse_url.path, parse_url.file) else: raise ValueError("This azure storage type is not valid. It should be blob or file.")
[ "def", "delete", "(", "self", ",", "remote_file", ")", ":", "if", "'core.windows.net'", "not", "in", "remote_file", ":", "self", ".", "logger", ".", "error", "(", "\"Source or destination must be a azure storage url (format \"", "\"https://myaccount.blob.core.windows.net/mycontainer/myblob\"", ")", "raise", "OsmosisError", "parse_url", "=", "_parse_url", "(", "remote_file", ")", "key", "=", "self", ".", "storage_client", ".", "storage_accounts", ".", "list_keys", "(", "self", ".", "resource_group_name", ",", "parse_url", ".", "account", ")", ".", "keys", "[", "0", "]", ".", "value", "if", "parse_url", ".", "file_type", "==", "'blob'", ":", "bs", "=", "BlockBlobService", "(", "account_name", "=", "parse_url", ".", "account", ",", "account_key", "=", "key", ")", "return", "bs", ".", "delete_blob", "(", "parse_url", ".", "container_or_share_name", ",", "parse_url", ".", "file", ")", "elif", "parse_url", ".", "file_type", "==", "'file'", ":", "fs", "=", "FileService", "(", "account_name", "=", "parse_url", ".", "account", ",", "account_key", "=", "key", ")", "return", "fs", ".", "delete_file", "(", "parse_url", ".", "container_or_share_name", ",", "parse_url", ".", "path", ",", "parse_url", ".", "file", ")", "else", ":", "raise", "ValueError", "(", "\"This azure storage type is not valid. It should be blob or file.\"", ")" ]
Delete file from the cloud. The azure url format is https://myaccount.blob.core.windows.net/mycontainer/myblob. Args: remote_file(str): The path of the file to be deleted. Raises: :exc:`~..OsmosisError`: if the file is not deleted correctly.
[ "Delete", "file", "from", "the", "cloud", ".", "The", "azure", "url", "format", "is", "https", ":", "//", "myaccount", ".", "blob", ".", "core", ".", "windows", ".", "net", "/", "mycontainer", "/", "myblob", ".", "Args", ":", "remote_file", "(", "str", ")", ":", "The", "path", "of", "the", "file", "to", "be", "deleted", ".", "Raises", ":", ":", "exc", ":", "~", "..", "OsmosisError", ":", "if", "the", "file", "is", "not", "uploaded", "correctly", "." ]
train
https://github.com/oceanprotocol/osmosis-azure-driver/blob/36bcfa96547fb6117346b02b0ac6a74345c59695/osmosis_azure_driver/data_plugin.py#L147-L167
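A hedged sketch; per the guard in the code above, a URL outside core.windows.net raises OsmosisError before any account lookup. The URL is illustrative.

plugin.delete('https://myaccount.blob.core.windows.net/mycontainer/myblob')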
oceanprotocol/osmosis-azure-driver
osmosis_azure_driver/data_plugin.py
Plugin.copy
def copy(self, source_path, dest_path, account=None, group_name=None): """Copy file from a path to another path. The azure url format is https://myaccount.blob.core.windows.net/mycontainer/myblob. Args: source_path(str): The path of the file to be copied. dest_path(str): The destination path where the file is going to be allocated. Raises: :exc:`~..OsmosisError`: if the file is not uploaded correctly. """ if 'core.windows.net' not in source_path and 'core.windows.net' not in dest_path: self.logger.error("Source or destination must be a azure storage url (format " "https://myaccount.blob.core.windows.net/mycontainer/myblob") raise OsmosisError # Check if source exists and can read if 'core.windows.net' in source_path: parse_url = _parse_url(source_path) key = self.storage_client.storage_accounts.list_keys(self.resource_group_name, parse_url.account).keys[ 0].value if parse_url.file_type == 'blob': bs = BlockBlobService(account_name=parse_url.account, account_key=key) return bs.get_blob_to_path(parse_url.container_or_share_name, parse_url.file, dest_path) elif parse_url.file_type == 'file': fs = FileService(account_name=parse_url.account, account_key=key) return fs.get_file_to_path(parse_url.container_or_share_name, parse_url.path, parse_url.file, dest_path) else: raise ValueError("This azure storage type is not valid. It should be blob or file.") else: parse_url = _parse_url(dest_path) key = self.storage_client.storage_accounts.list_keys(self.resource_group_name, parse_url.account).keys[ 0].value if parse_url.file_type == 'blob': bs = BlockBlobService(account_name=parse_url.account, account_key=key) return bs.create_blob_from_path(parse_url.container_or_share_name, parse_url.file, source_path) elif parse_url.file_type == 'file': fs = FileService(account_name=parse_url.account, account_key=key) return fs.create_file_from_path(parse_url.container_or_share_name, parse_url.path, parse_url.file, source_path) else: raise ValueError("This azure storage type is not valid. It should be blob or file.")
python
def copy(self, source_path, dest_path, account=None, group_name=None): """Copy file from a path to another path. The azure url format is https://myaccount.blob.core.windows.net/mycontainer/myblob. Args: source_path(str): The path of the file to be copied. dest_path(str): The destination path where the file is going to be allocated. Raises: :exc:`~..OsmosisError`: if the file is not uploaded correctly. """ if 'core.windows.net' not in source_path and 'core.windows.net' not in dest_path: self.logger.error("Source or destination must be a azure storage url (format " "https://myaccount.blob.core.windows.net/mycontainer/myblob") raise OsmosisError # Check if source exists and can read if 'core.windows.net' in source_path: parse_url = _parse_url(source_path) key = self.storage_client.storage_accounts.list_keys(self.resource_group_name, parse_url.account).keys[ 0].value if parse_url.file_type == 'blob': bs = BlockBlobService(account_name=parse_url.account, account_key=key) return bs.get_blob_to_path(parse_url.container_or_share_name, parse_url.file, dest_path) elif parse_url.file_type == 'file': fs = FileService(account_name=parse_url.account, account_key=key) return fs.get_file_to_path(parse_url.container_or_share_name, parse_url.path, parse_url.file, dest_path) else: raise ValueError("This azure storage type is not valid. It should be blob or file.") else: parse_url = _parse_url(dest_path) key = self.storage_client.storage_accounts.list_keys(self.resource_group_name, parse_url.account).keys[ 0].value if parse_url.file_type == 'blob': bs = BlockBlobService(account_name=parse_url.account, account_key=key) return bs.create_blob_from_path(parse_url.container_or_share_name, parse_url.file, source_path) elif parse_url.file_type == 'file': fs = FileService(account_name=parse_url.account, account_key=key) return fs.create_file_from_path(parse_url.container_or_share_name, parse_url.path, parse_url.file, source_path) else: raise ValueError("This azure storage type is not valid. It should be blob or file.")
[ "def", "copy", "(", "self", ",", "source_path", ",", "dest_path", ",", "account", "=", "None", ",", "group_name", "=", "None", ")", ":", "if", "'core.windows.net'", "not", "in", "source_path", "and", "'core.windows.net'", "not", "in", "dest_path", ":", "self", ".", "logger", ".", "error", "(", "\"Source or destination must be a azure storage url (format \"", "\"https://myaccount.blob.core.windows.net/mycontainer/myblob\"", ")", "raise", "OsmosisError", "# Check if source exists and can read", "if", "'core.windows.net'", "in", "source_path", ":", "parse_url", "=", "_parse_url", "(", "source_path", ")", "key", "=", "self", ".", "storage_client", ".", "storage_accounts", ".", "list_keys", "(", "self", ".", "resource_group_name", ",", "parse_url", ".", "account", ")", ".", "keys", "[", "0", "]", ".", "value", "if", "parse_url", ".", "file_type", "==", "'blob'", ":", "bs", "=", "BlockBlobService", "(", "account_name", "=", "parse_url", ".", "account", ",", "account_key", "=", "key", ")", "return", "bs", ".", "get_blob_to_path", "(", "parse_url", ".", "container_or_share_name", ",", "parse_url", ".", "file", ",", "dest_path", ")", "elif", "parse_url", ".", "file_type", "==", "'file'", ":", "fs", "=", "FileService", "(", "account_name", "=", "parse_url", ".", "account", ",", "account_key", "=", "key", ")", "return", "fs", ".", "get_file_to_path", "(", "parse_url", ".", "container_or_share_name", ",", "parse_url", ".", "path", ",", "parse_url", ".", "file", ",", "dest_path", ")", "else", ":", "raise", "ValueError", "(", "\"This azure storage type is not valid. It should be blob or file.\"", ")", "else", ":", "parse_url", "=", "_parse_url", "(", "dest_path", ")", "key", "=", "self", ".", "storage_client", ".", "storage_accounts", ".", "list_keys", "(", "self", ".", "resource_group_name", ",", "parse_url", ".", "account", ")", ".", "keys", "[", "0", "]", ".", "value", "if", "parse_url", ".", "file_type", "==", "'blob'", ":", "bs", "=", "BlockBlobService", "(", "account_name", "=", "parse_url", ".", "account", ",", "account_key", "=", "key", ")", "return", "bs", ".", "create_blob_from_path", "(", "parse_url", ".", "container_or_share_name", ",", "parse_url", ".", "file", ",", "source_path", ")", "elif", "parse_url", ".", "file_type", "==", "'file'", ":", "fs", "=", "FileService", "(", "account_name", "=", "parse_url", ".", "account", ",", "account_key", "=", "key", ")", "return", "fs", ".", "create_file_from_path", "(", "parse_url", ".", "container_or_share_name", ",", "parse_url", ".", "path", ",", "parse_url", ".", "file", ",", "source_path", ")", "else", ":", "raise", "ValueError", "(", "\"This azure storage type is not valid. It should be blob or file.\"", ")" ]
Copy file from a path to another path. The azure url format is https://myaccount.blob.core.windows.net/mycontainer/myblob.

Args:
    source_path(str): The path of the file to be copied.
    dest_path(str): The destination path where the file is going to be allocated.

Raises:
    :exc:`~..OsmosisError`: if the file is not copied correctly.
[ "Copy", "file", "from", "a", "path", "to", "another", "path", ".", "The", "azure", "url", "format", "is", "https", ":", "//", "myaccount", ".", "blob", ".", "core", ".", "windows", ".", "net", "/", "mycontainer", "/", "myblob", ".", "Args", ":", "source_path", "(", "str", ")", ":", "The", "path", "of", "the", "file", "to", "be", "copied", ".", "dest_path", "(", "str", ")", ":", "The", "destination", "path", "where", "the", "file", "is", "going", "to", "be", "allocated", ".", "Raises", ":", ":", "exc", ":", "~", "..", "OsmosisError", ":", "if", "the", "file", "is", "not", "uploaded", "correctly", "." ]
train
https://github.com/oceanprotocol/osmosis-azure-driver/blob/36bcfa96547fb6117346b02b0ac6a74345c59695/osmosis_azure_driver/data_plugin.py#L169-L207
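A short usage sketch for the copy() method above. The plugin class name, its import path and constructor arguments are assumptions for illustration; only the copy() signature and the URL format come from the source.

# Hypothetical setup; the real plugin likely needs Azure credentials.
from osmosis_azure_driver.data_plugin import Plugin  # assumed class name

plugin = Plugin()  # assumed constructor

# Download: Azure blob URL -> local path.
plugin.copy(
    'https://myaccount.blob.core.windows.net/mycontainer/myblob',
    '/tmp/myblob',
)

# Upload: local path -> Azure blob URL.
plugin.copy(
    '/tmp/myblob',
    'https://myaccount.blob.core.windows.net/mycontainer/myblob-copy',
)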
juiceinc/recipe
recipe/utils.py
prettyprintable_sql
def prettyprintable_sql(statement, dialect=None, reindent=True): """ Generate an SQL expression string with bound parameters rendered inline for the given SQLAlchemy statement. The function can also receive a `sqlalchemy.orm.Query` object instead of statement. WARNING: Should only be used for debugging. Inlining parameters is not safe when handling user created data. """ if isinstance(statement, sqlalchemy.orm.Query): if dialect is None: dialect = statement.session.get_bind().dialect statement = statement.statement # Generate a class that can handle encoding if dialect: DialectKlass = dialect.__class__ else: DialectKlass = DefaultDialect class LiteralDialect(DialectKlass): colspecs = { # prevent various encoding explosions String: StringLiteral, # teach SA about how to literalize a datetime DateTime: StringLiteral, Date: StringLiteral, # don't format py2 long integers to NULL NullType: StringLiteral, } compiled = statement.compile( dialect=LiteralDialect(), compile_kwargs={ 'literal_binds': True } ) return sqlparse.format(str(compiled), reindent=reindent)
python
def prettyprintable_sql(statement, dialect=None, reindent=True): """ Generate an SQL expression string with bound parameters rendered inline for the given SQLAlchemy statement. The function can also receive a `sqlalchemy.orm.Query` object instead of statement. WARNING: Should only be used for debugging. Inlining parameters is not safe when handling user created data. """ if isinstance(statement, sqlalchemy.orm.Query): if dialect is None: dialect = statement.session.get_bind().dialect statement = statement.statement # Generate a class that can handle encoding if dialect: DialectKlass = dialect.__class__ else: DialectKlass = DefaultDialect class LiteralDialect(DialectKlass): colspecs = { # prevent various encoding explosions String: StringLiteral, # teach SA about how to literalize a datetime DateTime: StringLiteral, Date: StringLiteral, # don't format py2 long integers to NULL NullType: StringLiteral, } compiled = statement.compile( dialect=LiteralDialect(), compile_kwargs={ 'literal_binds': True } ) return sqlparse.format(str(compiled), reindent=reindent)
[ "def", "prettyprintable_sql", "(", "statement", ",", "dialect", "=", "None", ",", "reindent", "=", "True", ")", ":", "if", "isinstance", "(", "statement", ",", "sqlalchemy", ".", "orm", ".", "Query", ")", ":", "if", "dialect", "is", "None", ":", "dialect", "=", "statement", ".", "session", ".", "get_bind", "(", ")", ".", "dialect", "statement", "=", "statement", ".", "statement", "# Generate a class that can handle encoding", "if", "dialect", ":", "DialectKlass", "=", "dialect", ".", "__class__", "else", ":", "DialectKlass", "=", "DefaultDialect", "class", "LiteralDialect", "(", "DialectKlass", ")", ":", "colspecs", "=", "{", "# prevent various encoding explosions", "String", ":", "StringLiteral", ",", "# teach SA about how to literalize a datetime", "DateTime", ":", "StringLiteral", ",", "Date", ":", "StringLiteral", ",", "# don't format py2 long integers to NULL", "NullType", ":", "StringLiteral", ",", "}", "compiled", "=", "statement", ".", "compile", "(", "dialect", "=", "LiteralDialect", "(", ")", ",", "compile_kwargs", "=", "{", "'literal_binds'", ":", "True", "}", ")", "return", "sqlparse", ".", "format", "(", "str", "(", "compiled", ")", ",", "reindent", "=", "reindent", ")" ]
Generate an SQL expression string with bound parameters rendered inline for the given SQLAlchemy statement. The function can also receive a `sqlalchemy.orm.Query` object instead of statement. WARNING: Should only be used for debugging. Inlining parameters is not safe when handling user created data.
[ "Generate", "an", "SQL", "expression", "string", "with", "bound", "parameters", "rendered", "inline", "for", "the", "given", "SQLAlchemy", "statement", ".", "The", "function", "can", "also", "receive", "a", "sqlalchemy", ".", "orm", ".", "Query", "object", "instead", "of", "statement", "." ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/utils.py#L42-L78
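A minimal sketch of prettyprintable_sql() on an ORM query, assuming the function is importable from recipe.utils as the URL above suggests; the table and session are illustrative.

import sqlalchemy as sa
from sqlalchemy.orm import Session

from recipe.utils import prettyprintable_sql  # assumed public import

engine = sa.create_engine('sqlite://')
metadata = sa.MetaData()
users = sa.Table(
    'users', metadata,
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('name', sa.String),
)
metadata.create_all(engine)

session = Session(bind=engine)
query = session.query(users.c.name).filter(users.c.name == 'alice')

# The bound parameter 'alice' is rendered inline -- debugging only.
print(prettyprintable_sql(query))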
juiceinc/recipe
recipe/validators.py
IngredientValidator._normalize_coerce_to_format_with_lookup
def _normalize_coerce_to_format_with_lookup(self, v): """ Replace a format with a default """ try: return self.format_lookup.get(v, v) except TypeError: # v is something we can't lookup (like a list) return v
python
def _normalize_coerce_to_format_with_lookup(self, v): """ Replace a format with a default """ try: return self.format_lookup.get(v, v) except TypeError: # v is something we can't lookup (like a list) return v
[ "def", "_normalize_coerce_to_format_with_lookup", "(", "self", ",", "v", ")", ":", "try", ":", "return", "self", ".", "format_lookup", ".", "get", "(", "v", ",", "v", ")", "except", "TypeError", ":", "# v is something we can't lookup (like a list)", "return", "v" ]
Replace a format with a default
[ "Replace", "a", "format", "with", "a", "default" ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/validators.py#L66-L72
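The try/except TypeError in the coercion above is what lets unhashable values (like lists) pass through a dict lookup unchanged. A standalone illustration of the same pattern; the lookup contents here are made up:

format_lookup = {'comma': ',.0f', 'percent': '.1%'}  # illustrative aliases

def coerce_format(v):
    try:
        return format_lookup.get(v, v)
    except TypeError:
        # v is unhashable (e.g. a list), so it can't be a dict key
        return v

print(coerce_format('comma'))        # ',.0f'  -- known alias replaced
print(coerce_format('.2f'))          # '.2f'   -- unknown values pass through
print(coerce_format(['a', 'list']))  # ['a', 'list'] -- unhashable passes through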
juiceinc/recipe
recipe/validators.py
IngredientValidator._normalize_coerce_to_field_dict
def _normalize_coerce_to_field_dict(self, v):
        """ coerces strings to a dict {'value': str} """

        def tokenize(s):
            """ Tokenize a string by splitting it by +, -, / and *

            >>> tokenize('this + that')
            ['this', '+', 'that']

            >>> tokenize('this+that')
            ['this', '+', 'that']

            >>> tokenize('this+that-other')
            ['this', '+', 'that', '-', 'other']
            """
            # Crude tokenization
            s = s.replace('+', ' + ').replace('-', ' - ') \
                .replace('/', ' / ').replace('*', ' * ')
            words = [w for w in s.split(' ') if w]
            return words

        if isinstance(v, _str_type):
            field_parts = tokenize(v)
            field = field_parts[0]
            d = {'value': field}
            if len(field_parts) > 1:
                # if we need to add and subtract from the field join the field
                # parts into pairs, for instance if field parts is
                # [MyTable.first, '-', MyTable.second, '+', MyTable.third]
                # we will get two pairs here
                # [('-', MyTable.second), ('+', MyTable.third)]
                d['operators'] = []
                for operator, other_field in zip(
                    field_parts[1::2], field_parts[2::2]
                ):
                    d['operators'].append({
                        'operator': operator,
                        'field': {
                            'value': other_field
                        }
                    })
            return d
        else:
            return v
python
def _normalize_coerce_to_field_dict(self, v):
        """ coerces strings to a dict {'value': str} """

        def tokenize(s):
            """ Tokenize a string by splitting it by +, -, / and *

            >>> tokenize('this + that')
            ['this', '+', 'that']

            >>> tokenize('this+that')
            ['this', '+', 'that']

            >>> tokenize('this+that-other')
            ['this', '+', 'that', '-', 'other']
            """
            # Crude tokenization
            s = s.replace('+', ' + ').replace('-', ' - ') \
                .replace('/', ' / ').replace('*', ' * ')
            words = [w for w in s.split(' ') if w]
            return words

        if isinstance(v, _str_type):
            field_parts = tokenize(v)
            field = field_parts[0]
            d = {'value': field}
            if len(field_parts) > 1:
                # if we need to add and subtract from the field join the field
                # parts into pairs, for instance if field parts is
                # [MyTable.first, '-', MyTable.second, '+', MyTable.third]
                # we will get two pairs here
                # [('-', MyTable.second), ('+', MyTable.third)]
                d['operators'] = []
                for operator, other_field in zip(
                    field_parts[1::2], field_parts[2::2]
                ):
                    d['operators'].append({
                        'operator': operator,
                        'field': {
                            'value': other_field
                        }
                    })
            return d
        else:
            return v
[ "def", "_normalize_coerce_to_field_dict", "(", "self", ",", "v", ")", ":", "def", "tokenize", "(", "s", ")", ":", "\"\"\" Tokenize a string by splitting it by + and -\n\n >>> tokenize('this + that')\n ['this', '+', 'that']\n\n >>> tokenize('this+that')\n ['this', '+', 'that']\n\n >>> tokenize('this+that-other')\n ['this', '+', 'that', '-', 'other]\n \"\"\"", "# Crude tokenization", "s", "=", "s", ".", "replace", "(", "'+'", ",", "' + '", ")", ".", "replace", "(", "'-'", ",", "' - '", ")", ".", "replace", "(", "'/'", ",", "' / '", ")", ".", "replace", "(", "'*'", ",", "' * '", ")", "words", "=", "[", "w", "for", "w", "in", "s", ".", "split", "(", "' '", ")", "if", "w", "]", "return", "words", "if", "isinstance", "(", "v", ",", "_str_type", ")", ":", "field_parts", "=", "tokenize", "(", "v", ")", "field", "=", "field_parts", "[", "0", "]", "d", "=", "{", "'value'", ":", "field", "}", "if", "len", "(", "field_parts", ")", ">", "1", ":", "# if we need to add and subtract from the field join the field", "# parts into pairs, for instance if field parts is", "# [MyTable.first, '-', MyTable.second, '+', MyTable.third]", "# we will get two pairs here", "# [('-', MyTable.second), ('+', MyTable.third)]", "d", "[", "'operators'", "]", "=", "[", "]", "for", "operator", ",", "other_field", "in", "zip", "(", "field_parts", "[", "1", ":", ":", "2", "]", ",", "field_parts", "[", "2", ":", ":", "2", "]", ")", ":", "d", "[", "'operators'", "]", ".", "append", "(", "{", "'operator'", ":", "operator", ",", "'field'", ":", "{", "'value'", ":", "other_field", "}", "}", ")", "return", "d", "else", ":", "return", "v" ]
coerces strings to a dict {'value': str}
[ "coerces", "strings", "to", "a", "dict", "{", "value", ":", "str", "}" ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/validators.py#L80-L124
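Traced by hand from the code above (not run against the library), a compound field string coerces into a nested dict: the first token becomes 'value' and each operator/operand pair lands in 'operators'. The field names here are made up:

# 'sales - returns + fees' tokenizes to
# ['sales', '-', 'returns', '+', 'fees'] and should coerce to:
expected = {
    'value': 'sales',
    'operators': [
        {'operator': '-', 'field': {'value': 'returns'}},
        {'operator': '+', 'field': {'value': 'fees'}},
    ],
}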
juiceinc/recipe
recipe/validators.py
IngredientValidator._validate_type_scalar
def _validate_type_scalar(self, value):
        """ Validate that the value is a scalar: not a list or a dict """
        if isinstance(
            value, _int_types + (_str_type, float, date, datetime, bool)
        ):
            return True
python
def _validate_type_scalar(self, value):
        """ Validate that the value is a scalar: not a list or a dict """
        if isinstance(
            value, _int_types + (_str_type, float, date, datetime, bool)
        ):
            return True
[ "def", "_validate_type_scalar", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "_int_types", "+", "(", "_str_type", ",", "float", ",", "date", ",", "datetime", ",", "bool", ")", ")", ":", "return", "True" ]
Validate that the value is a scalar: not a list or a dict
[ "Is", "not", "a", "list", "or", "a", "dict" ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/validators.py#L132-L137
juiceinc/recipe
recipe/core.py
Recipe.from_config
def from_config(cls, shelf, obj, **kwargs): """ Construct a Recipe from a plain Python dictionary. Most of the directives only support named ingredients, specified as strings, and looked up on the shelf. But filters can be specified as objects. Additionally, each RecipeExtension can extract and handle data from the configuration. """ def subdict(d, keys): new = {} for k in keys: if k in d: new[k] = d[k] return new core_kwargs = subdict(obj, recipe_schema['schema'].keys()) core_kwargs = normalize_schema(recipe_schema, core_kwargs) core_kwargs['filters'] = [ parse_condition(filter, shelf.Meta.select_from) if isinstance(filter, dict) else filter for filter in obj.get('filters', []) ] core_kwargs.update(kwargs) recipe = cls(shelf=shelf, **core_kwargs) # Now let extensions handle their own stuff for ext in recipe.recipe_extensions: additional_schema = getattr(ext, 'recipe_schema', None) if additional_schema is not None: ext_data = subdict(obj, additional_schema.keys()) ext_data = normalize_dict(additional_schema, ext_data) recipe = ext.from_config(ext_data) return recipe
python
def from_config(cls, shelf, obj, **kwargs): """ Construct a Recipe from a plain Python dictionary. Most of the directives only support named ingredients, specified as strings, and looked up on the shelf. But filters can be specified as objects. Additionally, each RecipeExtension can extract and handle data from the configuration. """ def subdict(d, keys): new = {} for k in keys: if k in d: new[k] = d[k] return new core_kwargs = subdict(obj, recipe_schema['schema'].keys()) core_kwargs = normalize_schema(recipe_schema, core_kwargs) core_kwargs['filters'] = [ parse_condition(filter, shelf.Meta.select_from) if isinstance(filter, dict) else filter for filter in obj.get('filters', []) ] core_kwargs.update(kwargs) recipe = cls(shelf=shelf, **core_kwargs) # Now let extensions handle their own stuff for ext in recipe.recipe_extensions: additional_schema = getattr(ext, 'recipe_schema', None) if additional_schema is not None: ext_data = subdict(obj, additional_schema.keys()) ext_data = normalize_dict(additional_schema, ext_data) recipe = ext.from_config(ext_data) return recipe
[ "def", "from_config", "(", "cls", ",", "shelf", ",", "obj", ",", "*", "*", "kwargs", ")", ":", "def", "subdict", "(", "d", ",", "keys", ")", ":", "new", "=", "{", "}", "for", "k", "in", "keys", ":", "if", "k", "in", "d", ":", "new", "[", "k", "]", "=", "d", "[", "k", "]", "return", "new", "core_kwargs", "=", "subdict", "(", "obj", ",", "recipe_schema", "[", "'schema'", "]", ".", "keys", "(", ")", ")", "core_kwargs", "=", "normalize_schema", "(", "recipe_schema", ",", "core_kwargs", ")", "core_kwargs", "[", "'filters'", "]", "=", "[", "parse_condition", "(", "filter", ",", "shelf", ".", "Meta", ".", "select_from", ")", "if", "isinstance", "(", "filter", ",", "dict", ")", "else", "filter", "for", "filter", "in", "obj", ".", "get", "(", "'filters'", ",", "[", "]", ")", "]", "core_kwargs", ".", "update", "(", "kwargs", ")", "recipe", "=", "cls", "(", "shelf", "=", "shelf", ",", "*", "*", "core_kwargs", ")", "# Now let extensions handle their own stuff", "for", "ext", "in", "recipe", ".", "recipe_extensions", ":", "additional_schema", "=", "getattr", "(", "ext", ",", "'recipe_schema'", ",", "None", ")", "if", "additional_schema", "is", "not", "None", ":", "ext_data", "=", "subdict", "(", "obj", ",", "additional_schema", ".", "keys", "(", ")", ")", "ext_data", "=", "normalize_dict", "(", "additional_schema", ",", "ext_data", ")", "recipe", "=", "ext", ".", "from_config", "(", "ext_data", ")", "return", "recipe" ]
Construct a Recipe from a plain Python dictionary. Most of the directives only support named ingredients, specified as strings, and looked up on the shelf. But filters can be specified as objects. Additionally, each RecipeExtension can extract and handle data from the configuration.
[ "Construct", "a", "Recipe", "from", "a", "plain", "Python", "dictionary", "." ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/core.py#L145-L181
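An illustrative configuration for Recipe.from_config(). The shelf, the ingredient names and the 'in' operator are assumptions; strings are looked up on the shelf, while filter dicts go through parse_condition as shown above.

config = {
    'metrics': ['sales'],
    'dimensions': ['region'],
    'filters': [
        'high_value_only',  # named filter looked up on the shelf
        {'field': 'region', 'in': ['north', 'south']},  # parsed condition; operator name assumed
    ],
}
recipe = Recipe.from_config(shelf, config)  # shelf built elsewhere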
juiceinc/recipe
recipe/core.py
Recipe.shelf
def shelf(self, shelf=None): """ Defines a shelf to use for this recipe """ if shelf is None: self._shelf = Shelf({}) elif isinstance(shelf, Shelf): self._shelf = shelf elif isinstance(shelf, dict): self._shelf = Shelf(shelf) else: raise BadRecipe('shelf must be a dict or recipe.shelf.Shelf') if self._select_from is None and \ self._shelf.Meta.select_from is not None: self._select_from = self._shelf.Meta.select_from return self
python
def shelf(self, shelf=None): """ Defines a shelf to use for this recipe """ if shelf is None: self._shelf = Shelf({}) elif isinstance(shelf, Shelf): self._shelf = shelf elif isinstance(shelf, dict): self._shelf = Shelf(shelf) else: raise BadRecipe('shelf must be a dict or recipe.shelf.Shelf') if self._select_from is None and \ self._shelf.Meta.select_from is not None: self._select_from = self._shelf.Meta.select_from return self
[ "def", "shelf", "(", "self", ",", "shelf", "=", "None", ")", ":", "if", "shelf", "is", "None", ":", "self", ".", "_shelf", "=", "Shelf", "(", "{", "}", ")", "elif", "isinstance", "(", "shelf", ",", "Shelf", ")", ":", "self", ".", "_shelf", "=", "shelf", "elif", "isinstance", "(", "shelf", ",", "dict", ")", ":", "self", ".", "_shelf", "=", "Shelf", "(", "shelf", ")", "else", ":", "raise", "BadRecipe", "(", "'shelf must be a dict or recipe.shelf.Shelf'", ")", "if", "self", ".", "_select_from", "is", "None", "and", "self", ".", "_shelf", ".", "Meta", ".", "select_from", "is", "not", "None", ":", "self", ".", "_select_from", "=", "self", ".", "_shelf", ".", "Meta", ".", "select_from", "return", "self" ]
Defines a shelf to use for this recipe
[ "Defines", "a", "shelf", "to", "use", "for", "this", "recipe" ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/core.py#L217-L231
juiceinc/recipe
recipe/core.py
Recipe.metrics
def metrics(self, *metrics): """ Add a list of Metric ingredients to the query. These can either be Metric objects or strings representing metrics on the shelf. The Metric expression will be added to the query's select statement. The metric value is a property of each row of the result. :param metrics: Metrics to add to the recipe. Metrics can either be keys on the ``shelf`` or Metric objects :type metrics: list """ for m in metrics: self._cauldron.use(self._shelf.find(m, Metric)) self.dirty = True return self
python
def metrics(self, *metrics): """ Add a list of Metric ingredients to the query. These can either be Metric objects or strings representing metrics on the shelf. The Metric expression will be added to the query's select statement. The metric value is a property of each row of the result. :param metrics: Metrics to add to the recipe. Metrics can either be keys on the ``shelf`` or Metric objects :type metrics: list """ for m in metrics: self._cauldron.use(self._shelf.find(m, Metric)) self.dirty = True return self
[ "def", "metrics", "(", "self", ",", "*", "metrics", ")", ":", "for", "m", "in", "metrics", ":", "self", ".", "_cauldron", ".", "use", "(", "self", ".", "_shelf", ".", "find", "(", "m", ",", "Metric", ")", ")", "self", ".", "dirty", "=", "True", "return", "self" ]
Add a list of Metric ingredients to the query. These can either be Metric objects or strings representing metrics on the shelf. The Metric expression will be added to the query's select statement. The metric value is a property of each row of the result. :param metrics: Metrics to add to the recipe. Metrics can either be keys on the ``shelf`` or Metric objects :type metrics: list
[ "Add", "a", "list", "of", "Metric", "ingredients", "to", "the", "query", ".", "These", "can", "either", "be", "Metric", "objects", "or", "strings", "representing", "metrics", "on", "the", "shelf", "." ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/core.py#L233-L248
juiceinc/recipe
recipe/core.py
Recipe.dimensions
def dimensions(self, *dimensions): """ Add a list of Dimension ingredients to the query. These can either be Dimension objects or strings representing dimensions on the shelf. The Dimension expression will be added to the query's select statement and to the group_by. :param dimensions: Dimensions to add to the recipe. Dimensions can either be keys on the ``shelf`` or Dimension objects :type dimensions: list """ for d in dimensions: self._cauldron.use(self._shelf.find(d, Dimension)) self.dirty = True return self
python
def dimensions(self, *dimensions): """ Add a list of Dimension ingredients to the query. These can either be Dimension objects or strings representing dimensions on the shelf. The Dimension expression will be added to the query's select statement and to the group_by. :param dimensions: Dimensions to add to the recipe. Dimensions can either be keys on the ``shelf`` or Dimension objects :type dimensions: list """ for d in dimensions: self._cauldron.use(self._shelf.find(d, Dimension)) self.dirty = True return self
[ "def", "dimensions", "(", "self", ",", "*", "dimensions", ")", ":", "for", "d", "in", "dimensions", ":", "self", ".", "_cauldron", ".", "use", "(", "self", ".", "_shelf", ".", "find", "(", "d", ",", "Dimension", ")", ")", "self", ".", "dirty", "=", "True", "return", "self" ]
Add a list of Dimension ingredients to the query. These can either be Dimension objects or strings representing dimensions on the shelf. The Dimension expression will be added to the query's select statement and to the group_by. :param dimensions: Dimensions to add to the recipe. Dimensions can either be keys on the ``shelf`` or Dimension objects :type dimensions: list
[ "Add", "a", "list", "of", "Dimension", "ingredients", "to", "the", "query", ".", "These", "can", "either", "be", "Dimension", "objects", "or", "strings", "representing", "dimensions", "on", "the", "shelf", "." ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/core.py#L254-L270
juiceinc/recipe
recipe/core.py
Recipe.filters
def filters(self, *filters):
        """ Add a list of Filter ingredients to the query. These can either
        be Filter objects or strings representing filters on the service's
        shelf.

        ``.filters()`` are additive; calling .filters() more than once will
        add to the list of filters being used by the recipe.

        The Filter expression will be added to the query's where clause

        :param filters: Filters to add to the recipe. Filters can either be
          keys on the ``shelf`` or Filter objects
        :type filters: list
        """

        def filter_constructor(f, shelf=None):
            if isinstance(f, BinaryExpression):
                return Filter(f)
            else:
                return f

        for f in filters:
            self._cauldron.use(
                self._shelf.find(
                    f, (Filter, Having), constructor=filter_constructor
                )
            )
            self.dirty = True
        return self
python
def filters(self, *filters):
        """ Add a list of Filter ingredients to the query. These can either
        be Filter objects or strings representing filters on the service's
        shelf.

        ``.filters()`` are additive; calling .filters() more than once will
        add to the list of filters being used by the recipe.

        The Filter expression will be added to the query's where clause

        :param filters: Filters to add to the recipe. Filters can either be
          keys on the ``shelf`` or Filter objects
        :type filters: list
        """

        def filter_constructor(f, shelf=None):
            if isinstance(f, BinaryExpression):
                return Filter(f)
            else:
                return f

        for f in filters:
            self._cauldron.use(
                self._shelf.find(
                    f, (Filter, Having), constructor=filter_constructor
                )
            )
            self.dirty = True
        return self
[ "def", "filters", "(", "self", ",", "*", "filters", ")", ":", "def", "filter_constructor", "(", "f", ",", "shelf", "=", "None", ")", ":", "if", "isinstance", "(", "f", ",", "BinaryExpression", ")", ":", "return", "Filter", "(", "f", ")", "else", ":", "return", "f", "for", "f", "in", "filters", ":", "self", ".", "_cauldron", ".", "use", "(", "self", ".", "_shelf", ".", "find", "(", "f", ",", "(", "Filter", ",", "Having", ")", ",", "constructor", "=", "filter_constructor", ")", ")", "self", ".", "dirty", "=", "True", "return", "self" ]
Add a list of Filter ingredients to the query. These can either be Filter objects or strings representing filters on the service's shelf.

``.filters()`` are additive; calling .filters() more than once will add to the list of filters being used by the recipe.

The Filter expression will be added to the query's where clause

:param filters: Filters to add to the recipe. Filters can either be keys on the ``shelf`` or Filter objects
:type filters: list
[ "Add", "a", "list", "of", "Filter", "ingredients", "to", "the", "query", ".", "These", "can", "either", "be", "Filter", "objects", "or", "strings", "representing", "filters", "on", "the", "service", "s", "shelf", ".", ".", "filters", "()", "are", "additive", "calling", ".", "filters", "()", "more", "than", "once", "will", "add", "to", "the", "list", "of", "filters", "being", "used", "by", "the", "recipe", "." ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/core.py#L276-L305
juiceinc/recipe
recipe/core.py
Recipe.order_by
def order_by(self, *order_bys):
        """ Add a list of ingredients to order the query by. These can
        either be Dimension or Metric objects or strings representing
        order_bys on the shelf.

        The Order_by expression will be added to the query's order_by statement

        :param order_bys: Order_bys to add to the recipe. Order_bys can
          either be keys on the ``shelf`` or Dimension or Metric objects. If
          the key is prefixed by "-" the ordering will be descending.
        :type order_bys: list
        """

        # Order bys shouldn't be added to the _cauldron
        self._order_bys = []
        for ingr in order_bys:
            order_by = self._shelf.find(ingr, (Dimension, Metric))
            self._order_bys.append(order_by)

        self.dirty = True
        return self
python
def order_by(self, *order_bys):
        """ Add a list of ingredients to order the query by. These can
        either be Dimension or Metric objects or strings representing
        order_bys on the shelf.

        The Order_by expression will be added to the query's order_by statement

        :param order_bys: Order_bys to add to the recipe. Order_bys can
          either be keys on the ``shelf`` or Dimension or Metric objects. If
          the key is prefixed by "-" the ordering will be descending.
        :type order_bys: list
        """

        # Order bys shouldn't be added to the _cauldron
        self._order_bys = []
        for ingr in order_bys:
            order_by = self._shelf.find(ingr, (Dimension, Metric))
            self._order_bys.append(order_by)

        self.dirty = True
        return self
[ "def", "order_by", "(", "self", ",", "*", "order_bys", ")", ":", "# Order bys shouldn't be added to the _cauldron", "self", ".", "_order_bys", "=", "[", "]", "for", "ingr", "in", "order_bys", ":", "order_by", "=", "self", ".", "_shelf", ".", "find", "(", "ingr", ",", "(", "Dimension", ",", "Metric", ")", ")", "self", ".", "_order_bys", ".", "append", "(", "order_by", ")", "self", ".", "dirty", "=", "True", "return", "self" ]
Add a list of ingredients to order the query by. These can either be Dimension or Metric objects or strings representing order_bys on the shelf.

The Order_by expression will be added to the query's order_by statement

:param order_bys: Order_bys to add to the recipe. Order_bys can either be keys on the ``shelf`` or Dimension or Metric objects. If the key is prefixed by "-" the ordering will be descending.
:type order_bys: list
[ "Add", "a", "list", "of", "ingredients", "to", "order", "by", "to", "the", "query", ".", "These", "can", "either", "be", "Dimension", "or", "Metric", "objects", "or", "strings", "representing", "order_bys", "on", "the", "shelf", "." ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/core.py#L311-L333
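Because metrics(), dimensions(), filters() and order_by() above each return self, recipes build up through chaining. A sketch with made-up ingredient names; the Recipe constructor arguments are assumed:

recipe = (
    Recipe(shelf=shelf, session=session)  # assumed constructor kwargs
    .metrics('sales')
    .dimensions('region')
    .filters('high_value_only')
    .order_by('-sales')  # leading '-' orders descending, per the docstring
)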
juiceinc/recipe
recipe/core.py
Recipe.limit
def limit(self, limit): """ Limit the number of rows returned from the database. :param limit: The number of rows to return in the recipe. 0 will return all rows. :type limit: int """ if self._limit != limit: self.dirty = True self._limit = limit return self
python
def limit(self, limit): """ Limit the number of rows returned from the database. :param limit: The number of rows to return in the recipe. 0 will return all rows. :type limit: int """ if self._limit != limit: self.dirty = True self._limit = limit return self
[ "def", "limit", "(", "self", ",", "limit", ")", ":", "if", "self", ".", "_limit", "!=", "limit", ":", "self", ".", "dirty", "=", "True", "self", ".", "_limit", "=", "limit", "return", "self" ]
Limit the number of rows returned from the database. :param limit: The number of rows to return in the recipe. 0 will return all rows. :type limit: int
[ "Limit", "the", "number", "of", "rows", "returned", "from", "the", "database", "." ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/core.py#L345-L355
juiceinc/recipe
recipe/core.py
Recipe.offset
def offset(self, offset): """ Offset a number of rows before returning rows from the database. :param offset: The number of rows to offset in the recipe. 0 will return from the first available row :type offset: int """ if self._offset != offset: self.dirty = True self._offset = offset return self
python
def offset(self, offset): """ Offset a number of rows before returning rows from the database. :param offset: The number of rows to offset in the recipe. 0 will return from the first available row :type offset: int """ if self._offset != offset: self.dirty = True self._offset = offset return self
[ "def", "offset", "(", "self", ",", "offset", ")", ":", "if", "self", ".", "_offset", "!=", "offset", ":", "self", ".", "dirty", "=", "True", "self", ".", "_offset", "=", "offset", "return", "self" ]
Offset a number of rows before returning rows from the database. :param offset: The number of rows to offset in the recipe. 0 will return from the first available row :type offset: int
[ "Offset", "a", "number", "of", "rows", "before", "returning", "rows", "from", "the", "database", "." ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/core.py#L357-L367
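limit() and offset() together give simple pagination, and since each setter only flips the dirty flag when the value actually changes, re-requesting the same page is cheap. A small sketch; the page size is arbitrary:

PAGE_SIZE = 25

def page(recipe, n):
    # n is a zero-based page index
    return recipe.limit(PAGE_SIZE).offset(n * PAGE_SIZE)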
juiceinc/recipe
recipe/core.py
Recipe._is_postgres
def _is_postgres(self):
        """ Determine if the running engine is postgres """
        if self._is_postgres_engine is None:
            is_postgres_engine = False
            try:
                dialect = self.session.bind.engine.name
                if 'redshift' in dialect or 'postg' in dialect or 'pg' in \
                        dialect:
                    is_postgres_engine = True
            except Exception:
                pass
            self._is_postgres_engine = is_postgres_engine
        return self._is_postgres_engine
python
def _is_postgres(self):
        """ Determine if the running engine is postgres """
        if self._is_postgres_engine is None:
            is_postgres_engine = False
            try:
                dialect = self.session.bind.engine.name
                if 'redshift' in dialect or 'postg' in dialect or 'pg' in \
                        dialect:
                    is_postgres_engine = True
            except Exception:
                pass
            self._is_postgres_engine = is_postgres_engine
        return self._is_postgres_engine
[ "def", "_is_postgres", "(", "self", ")", ":", "if", "self", ".", "_is_postgres_engine", "is", "None", ":", "is_postgres_engine", "=", "False", "try", ":", "dialect", "=", "self", ".", "session", ".", "bind", ".", "engine", ".", "name", "if", "'redshift'", "in", "dialect", "or", "'postg'", "in", "dialect", "or", "'pg'", "in", "dialect", ":", "is_postgres_engine", "=", "True", "except", ":", "pass", "self", ".", "_is_postgres_engine", "=", "is_postgres_engine", "return", "self", ".", "_is_postgres_engine" ]
Determine if the running engine is postgres
[ "Determine", "if", "the", "running", "engine", "is", "postgres" ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/core.py#L373-L385
juiceinc/recipe
recipe/core.py
Recipe._prepare_order_bys
def _prepare_order_bys(self): """ Build a set of order by columns """ order_bys = OrderedSet() if self._order_bys: for ingredient in self._order_bys: if isinstance(ingredient, Dimension): # Reverse the ordering columns so that dimensions # order by their label rather than their id columns = reversed(ingredient.columns) else: columns = ingredient.columns for c in columns: order_by = c.desc() if ingredient.ordering == 'desc' else c if str(order_by) not in [str(o) for o in order_bys]: order_bys.add(order_by) return list(order_bys)
python
def _prepare_order_bys(self): """ Build a set of order by columns """ order_bys = OrderedSet() if self._order_bys: for ingredient in self._order_bys: if isinstance(ingredient, Dimension): # Reverse the ordering columns so that dimensions # order by their label rather than their id columns = reversed(ingredient.columns) else: columns = ingredient.columns for c in columns: order_by = c.desc() if ingredient.ordering == 'desc' else c if str(order_by) not in [str(o) for o in order_bys]: order_bys.add(order_by) return list(order_bys)
[ "def", "_prepare_order_bys", "(", "self", ")", ":", "order_bys", "=", "OrderedSet", "(", ")", "if", "self", ".", "_order_bys", ":", "for", "ingredient", "in", "self", ".", "_order_bys", ":", "if", "isinstance", "(", "ingredient", ",", "Dimension", ")", ":", "# Reverse the ordering columns so that dimensions", "# order by their label rather than their id", "columns", "=", "reversed", "(", "ingredient", ".", "columns", ")", "else", ":", "columns", "=", "ingredient", ".", "columns", "for", "c", "in", "columns", ":", "order_by", "=", "c", ".", "desc", "(", ")", "if", "ingredient", ".", "ordering", "==", "'desc'", "else", "c", "if", "str", "(", "order_by", ")", "not", "in", "[", "str", "(", "o", ")", "for", "o", "in", "order_bys", "]", ":", "order_bys", ".", "add", "(", "order_by", ")", "return", "list", "(", "order_bys", ")" ]
Build a set of order by columns
[ "Build", "a", "set", "of", "order", "by", "columns" ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/core.py#L387-L403
juiceinc/recipe
recipe/core.py
Recipe.query
def query(self): """ Generates a query using the ingredients supplied by the recipe. :return: A SQLAlchemy query """ if len(self._cauldron.ingredients()) == 0: raise BadRecipe('No ingredients have been added to this recipe') if not self.dirty and self._query: return self._query # Step 1: Gather up global filters and user filters and # apply them as if they had been added to recipe().filters(...) for extension in self.recipe_extensions: extension.add_ingredients() # Step 2: Build the query (now that it has all the filters # and apply any blend recipes # Get the parts of the query from the cauldron # We don't need to regather order_bys recipe_parts = self._cauldron.brew_query_parts() recipe_parts['order_bys'] = self._prepare_order_bys() for extension in self.recipe_extensions: recipe_parts = extension.modify_recipe_parts(recipe_parts) # Start building the query query = self._session.query(*recipe_parts['columns']) if self._select_from is not None: query = query.select_from(self._select_from) recipe_parts['query'] = query \ .group_by(*recipe_parts['group_bys']) \ .order_by(*recipe_parts['order_bys']) \ .filter(*recipe_parts['filters']) if recipe_parts['havings']: for having in recipe_parts['havings']: recipe_parts['query'] = recipe_parts['query'].having(having) for extension in self.recipe_extensions: recipe_parts = extension.modify_prequery_parts(recipe_parts) if self._select_from is None and len( recipe_parts['query'].selectable.froms ) != 1: raise BadRecipe( 'Recipes must use ingredients that all come from ' 'the same table. \nDetails on this recipe:\n{' '}'.format(str(self._cauldron)) ) for extension in self.recipe_extensions: recipe_parts = extension.modify_postquery_parts(recipe_parts) recipe_parts = run_hooks( recipe_parts, 'modify_query', self.dynamic_extensions ) # Apply limit on the outermost query # This happens after building the comparison recipe if self._limit and self._limit > 0: recipe_parts['query'] = recipe_parts['query'].limit(self._limit) if self._offset and self._offset > 0: recipe_parts['query'] = recipe_parts['query'].offset(self._offset) # Step 5: Clear the dirty flag, # Patch the query if there's a comparison query # cache results self._query = recipe_parts['query'] self.dirty = False return self._query
python
def query(self): """ Generates a query using the ingredients supplied by the recipe. :return: A SQLAlchemy query """ if len(self._cauldron.ingredients()) == 0: raise BadRecipe('No ingredients have been added to this recipe') if not self.dirty and self._query: return self._query # Step 1: Gather up global filters and user filters and # apply them as if they had been added to recipe().filters(...) for extension in self.recipe_extensions: extension.add_ingredients() # Step 2: Build the query (now that it has all the filters # and apply any blend recipes # Get the parts of the query from the cauldron # We don't need to regather order_bys recipe_parts = self._cauldron.brew_query_parts() recipe_parts['order_bys'] = self._prepare_order_bys() for extension in self.recipe_extensions: recipe_parts = extension.modify_recipe_parts(recipe_parts) # Start building the query query = self._session.query(*recipe_parts['columns']) if self._select_from is not None: query = query.select_from(self._select_from) recipe_parts['query'] = query \ .group_by(*recipe_parts['group_bys']) \ .order_by(*recipe_parts['order_bys']) \ .filter(*recipe_parts['filters']) if recipe_parts['havings']: for having in recipe_parts['havings']: recipe_parts['query'] = recipe_parts['query'].having(having) for extension in self.recipe_extensions: recipe_parts = extension.modify_prequery_parts(recipe_parts) if self._select_from is None and len( recipe_parts['query'].selectable.froms ) != 1: raise BadRecipe( 'Recipes must use ingredients that all come from ' 'the same table. \nDetails on this recipe:\n{' '}'.format(str(self._cauldron)) ) for extension in self.recipe_extensions: recipe_parts = extension.modify_postquery_parts(recipe_parts) recipe_parts = run_hooks( recipe_parts, 'modify_query', self.dynamic_extensions ) # Apply limit on the outermost query # This happens after building the comparison recipe if self._limit and self._limit > 0: recipe_parts['query'] = recipe_parts['query'].limit(self._limit) if self._offset and self._offset > 0: recipe_parts['query'] = recipe_parts['query'].offset(self._offset) # Step 5: Clear the dirty flag, # Patch the query if there's a comparison query # cache results self._query = recipe_parts['query'] self.dirty = False return self._query
[ "def", "query", "(", "self", ")", ":", "if", "len", "(", "self", ".", "_cauldron", ".", "ingredients", "(", ")", ")", "==", "0", ":", "raise", "BadRecipe", "(", "'No ingredients have been added to this recipe'", ")", "if", "not", "self", ".", "dirty", "and", "self", ".", "_query", ":", "return", "self", ".", "_query", "# Step 1: Gather up global filters and user filters and", "# apply them as if they had been added to recipe().filters(...)", "for", "extension", "in", "self", ".", "recipe_extensions", ":", "extension", ".", "add_ingredients", "(", ")", "# Step 2: Build the query (now that it has all the filters", "# and apply any blend recipes", "# Get the parts of the query from the cauldron", "# We don't need to regather order_bys", "recipe_parts", "=", "self", ".", "_cauldron", ".", "brew_query_parts", "(", ")", "recipe_parts", "[", "'order_bys'", "]", "=", "self", ".", "_prepare_order_bys", "(", ")", "for", "extension", "in", "self", ".", "recipe_extensions", ":", "recipe_parts", "=", "extension", ".", "modify_recipe_parts", "(", "recipe_parts", ")", "# Start building the query", "query", "=", "self", ".", "_session", ".", "query", "(", "*", "recipe_parts", "[", "'columns'", "]", ")", "if", "self", ".", "_select_from", "is", "not", "None", ":", "query", "=", "query", ".", "select_from", "(", "self", ".", "_select_from", ")", "recipe_parts", "[", "'query'", "]", "=", "query", ".", "group_by", "(", "*", "recipe_parts", "[", "'group_bys'", "]", ")", ".", "order_by", "(", "*", "recipe_parts", "[", "'order_bys'", "]", ")", ".", "filter", "(", "*", "recipe_parts", "[", "'filters'", "]", ")", "if", "recipe_parts", "[", "'havings'", "]", ":", "for", "having", "in", "recipe_parts", "[", "'havings'", "]", ":", "recipe_parts", "[", "'query'", "]", "=", "recipe_parts", "[", "'query'", "]", ".", "having", "(", "having", ")", "for", "extension", "in", "self", ".", "recipe_extensions", ":", "recipe_parts", "=", "extension", ".", "modify_prequery_parts", "(", "recipe_parts", ")", "if", "self", ".", "_select_from", "is", "None", "and", "len", "(", "recipe_parts", "[", "'query'", "]", ".", "selectable", ".", "froms", ")", "!=", "1", ":", "raise", "BadRecipe", "(", "'Recipes must use ingredients that all come from '", "'the same table. \\nDetails on this recipe:\\n{'", "'}'", ".", "format", "(", "str", "(", "self", ".", "_cauldron", ")", ")", ")", "for", "extension", "in", "self", ".", "recipe_extensions", ":", "recipe_parts", "=", "extension", ".", "modify_postquery_parts", "(", "recipe_parts", ")", "recipe_parts", "=", "run_hooks", "(", "recipe_parts", ",", "'modify_query'", ",", "self", ".", "dynamic_extensions", ")", "# Apply limit on the outermost query", "# This happens after building the comparison recipe", "if", "self", ".", "_limit", "and", "self", ".", "_limit", ">", "0", ":", "recipe_parts", "[", "'query'", "]", "=", "recipe_parts", "[", "'query'", "]", ".", "limit", "(", "self", ".", "_limit", ")", "if", "self", ".", "_offset", "and", "self", ".", "_offset", ">", "0", ":", "recipe_parts", "[", "'query'", "]", "=", "recipe_parts", "[", "'query'", "]", ".", "offset", "(", "self", ".", "_offset", ")", "# Step 5: Clear the dirty flag,", "# Patch the query if there's a comparison query", "# cache results", "self", ".", "_query", "=", "recipe_parts", "[", "'query'", "]", "self", ".", "dirty", "=", "False", "return", "self", ".", "_query" ]
Generates a query using the ingredients supplied by the recipe. :return: A SQLAlchemy query
[ "Generates", "a", "query", "using", "the", "ingredients", "supplied", "by", "the", "recipe", "." ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/core.py#L405-L479
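query() returns a plain SQLAlchemy query, so the prettyprintable_sql() helper shown earlier can render it for debugging; and because the dirty flag is cleared on success, a repeated call returns the cached object. The recipe itself is assumed built as in the earlier sketches:

q = recipe.query()
print(prettyprintable_sql(q))  # inspect the generated SQL

# Unchanged recipe -> the cached query object is reused.
assert recipe.query() is q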
juiceinc/recipe
recipe/core.py
Recipe.dirty
def dirty(self): """ The recipe is dirty if it is flagged dirty or any extensions are flagged dirty """ if self._dirty: return True else: for extension in self.recipe_extensions: if extension.dirty: return True return False
python
def dirty(self): """ The recipe is dirty if it is flagged dirty or any extensions are flagged dirty """ if self._dirty: return True else: for extension in self.recipe_extensions: if extension.dirty: return True return False
[ "def", "dirty", "(", "self", ")", ":", "if", "self", ".", "_dirty", ":", "return", "True", "else", ":", "for", "extension", "in", "self", ".", "recipe_extensions", ":", "if", "extension", ".", "dirty", ":", "return", "True", "return", "False" ]
The recipe is dirty if it is flagged dirty or any extensions are flagged dirty
[ "The", "recipe", "is", "dirty", "if", "it", "is", "flagged", "dirty", "or", "any", "extensions", "are", "flagged", "dirty" ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/core.py#L482-L491
juiceinc/recipe
recipe/core.py
Recipe.dirty
def dirty(self, value):
        """ If value is true, set the recipe's dirty flag. If false,
        clear the dirty flags on the recipe and all extensions """
        if value:
            self._dirty = True
        else:
            self._dirty = False
            for extension in self.recipe_extensions:
                extension.dirty = False
python
def dirty(self, value):
        """ If value is true, set the recipe's dirty flag. If false,
        clear the dirty flags on the recipe and all extensions """
        if value:
            self._dirty = True
        else:
            self._dirty = False
            for extension in self.recipe_extensions:
                extension.dirty = False
[ "def", "dirty", "(", "self", ",", "value", ")", ":", "if", "value", ":", "self", ".", "_dirty", "=", "True", "else", ":", "self", ".", "_dirty", "=", "False", "for", "extension", "in", "self", ".", "recipe_extensions", ":", "extension", ".", "dirty", "=", "False" ]
If value is true, set the recipe's dirty flag. If false, clear the dirty flags on the recipe and all extensions
[ "If", "dirty", "is", "true", "set", "the", "recipe", "to", "dirty", "flag", ".", "If", "false", "clear", "the", "recipe", "and", "all", "extension", "dirty", "flags" ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/core.py#L494-L502
juiceinc/recipe
recipe/core.py
Recipe.subquery
def subquery(self, name=None): """ The recipe's query as a subquery suitable for use in joins or other queries. """ query = self.query() return query.subquery(name=name)
python
def subquery(self, name=None): """ The recipe's query as a subquery suitable for use in joins or other queries. """ query = self.query() return query.subquery(name=name)
[ "def", "subquery", "(", "self", ",", "name", "=", "None", ")", ":", "query", "=", "self", ".", "query", "(", ")", "return", "query", ".", "subquery", "(", "name", "=", "name", ")" ]
The recipe's query as a subquery suitable for use in joins or other queries.
[ "The", "recipe", "s", "query", "as", "a", "subquery", "suitable", "for", "use", "in", "joins", "or", "other", "queries", "." ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/core.py#L519-L524
juiceinc/recipe
recipe/core.py
Recipe.as_table
def as_table(self, name=None): """ Return an alias to a table """ if name is None: name = self._id return alias(self.subquery(), name=name)
python
def as_table(self, name=None): """ Return an alias to a table """ if name is None: name = self._id return alias(self.subquery(), name=name)
[ "def", "as_table", "(", "self", ",", "name", "=", "None", ")", ":", "if", "name", "is", "None", ":", "name", "=", "self", ".", "_id", "return", "alias", "(", "self", ".", "subquery", "(", ")", ",", "name", "=", "name", ")" ]
Return an alias to a table
[ "Return", "an", "alias", "to", "a", "table" ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/core.py#L526-L531
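A sketch of joining a recipe's aliased results back to another table via as_table(); the regions table, its columns and the recipe's column names are assumptions:

import sqlalchemy as sa

summary = recipe.as_table(name='sales_summary')
stmt = sa.select([regions.c.manager, summary.c.sales]).select_from(
    regions.join(summary, regions.c.name == summary.c.region)
)  # SQLAlchemy 1.x select() style, matching the library's era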
juiceinc/recipe
recipe/core.py
Recipe.all
def all(self): """ Return a (potentially cached) list of result objects. """ starttime = fetchtime = enchanttime = time.time() fetched_from_cache = False if self.dirty or self.all_dirty: query = self.query() self._all = query.all() # If we're using a caching query and that query did not # save new values to cache, we got the cached results # This is not 100% accurate; it only reports if the caching query # attempts to save to cache not the internal state of the cache # and whether the cache save actually occurred. if not getattr(query, 'saved_to_cache', True): fetched_from_cache = True fetchtime = time.time() self._all = self._cauldron.enchant( self._all, cache_context=self.cache_context ) enchanttime = time.time() self.all_dirty = False else: # In this case we are using the object self._all as cache fetched_from_cache = True self.stats.set_stats( len(self._all), fetchtime - starttime, enchanttime - fetchtime, fetched_from_cache ) return self._all
python
def all(self): """ Return a (potentially cached) list of result objects. """ starttime = fetchtime = enchanttime = time.time() fetched_from_cache = False if self.dirty or self.all_dirty: query = self.query() self._all = query.all() # If we're using a caching query and that query did not # save new values to cache, we got the cached results # This is not 100% accurate; it only reports if the caching query # attempts to save to cache not the internal state of the cache # and whether the cache save actually occurred. if not getattr(query, 'saved_to_cache', True): fetched_from_cache = True fetchtime = time.time() self._all = self._cauldron.enchant( self._all, cache_context=self.cache_context ) enchanttime = time.time() self.all_dirty = False else: # In this case we are using the object self._all as cache fetched_from_cache = True self.stats.set_stats( len(self._all), fetchtime - starttime, enchanttime - fetchtime, fetched_from_cache ) return self._all
[ "def", "all", "(", "self", ")", ":", "starttime", "=", "fetchtime", "=", "enchanttime", "=", "time", ".", "time", "(", ")", "fetched_from_cache", "=", "False", "if", "self", ".", "dirty", "or", "self", ".", "all_dirty", ":", "query", "=", "self", ".", "query", "(", ")", "self", ".", "_all", "=", "query", ".", "all", "(", ")", "# If we're using a caching query and that query did not", "# save new values to cache, we got the cached results", "# This is not 100% accurate; it only reports if the caching query", "# attempts to save to cache not the internal state of the cache", "# and whether the cache save actually occurred.", "if", "not", "getattr", "(", "query", ",", "'saved_to_cache'", ",", "True", ")", ":", "fetched_from_cache", "=", "True", "fetchtime", "=", "time", ".", "time", "(", ")", "self", ".", "_all", "=", "self", ".", "_cauldron", ".", "enchant", "(", "self", ".", "_all", ",", "cache_context", "=", "self", ".", "cache_context", ")", "enchanttime", "=", "time", ".", "time", "(", ")", "self", ".", "all_dirty", "=", "False", "else", ":", "# In this case we are using the object self._all as cache", "fetched_from_cache", "=", "True", "self", ".", "stats", ".", "set_stats", "(", "len", "(", "self", ".", "_all", ")", ",", "fetchtime", "-", "starttime", ",", "enchanttime", "-", "fetchtime", ",", "fetched_from_cache", ")", "return", "self", ".", "_all" ]
Return a (potentially cached) list of result objects.
[ "Return", "a", "(", "potentially", "cached", ")", "list", "of", "result", "objects", "." ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/core.py#L533-L565
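Iterating the enchanted rows from all(); the attribute names on each row come from whichever ingredients were used, so region/sales here are assumptions. A second call on an unchanged recipe is served from the cached self._all:

for row in recipe.all():
    print(row.region, row.sales)  # assumed ingredient names

rows_again = recipe.all()  # no re-query; the stats above record a cache hit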
juiceinc/recipe
recipe/schemas.py
RecipeSchemas._validate_condition_keys
def _validate_condition_keys(self, field, value, error): """ Validates that all of the keys in one of the sets of keys are defined as keys of ``value``. """ if 'field' in value: operators = self.nonscalar_conditions + self.scalar_conditions matches = sum(1 for k in operators if k in value) if matches == 0: error(field, 'Must contain one of {}'.format(operators)) return False elif matches > 1: error( field, 'Must contain no more than one of {}'.format(operators) ) return False return True elif 'and' in value: for condition in value['and']: self._validate_condition_keys(field, condition, error) elif 'or' in value: for condition in value['or']: self._validate_condition_keys(field, condition, error) else: error(field, "Must contain field + operator keys, 'and', or 'or'.") return False
python
def _validate_condition_keys(self, field, value, error): """ Validates that all of the keys in one of the sets of keys are defined as keys of ``value``. """ if 'field' in value: operators = self.nonscalar_conditions + self.scalar_conditions matches = sum(1 for k in operators if k in value) if matches == 0: error(field, 'Must contain one of {}'.format(operators)) return False elif matches > 1: error( field, 'Must contain no more than one of {}'.format(operators) ) return False return True elif 'and' in value: for condition in value['and']: self._validate_condition_keys(field, condition, error) elif 'or' in value: for condition in value['or']: self._validate_condition_keys(field, condition, error) else: error(field, "Must contain field + operator keys, 'and', or 'or'.") return False
[ "def", "_validate_condition_keys", "(", "self", ",", "field", ",", "value", ",", "error", ")", ":", "if", "'field'", "in", "value", ":", "operators", "=", "self", ".", "nonscalar_conditions", "+", "self", ".", "scalar_conditions", "matches", "=", "sum", "(", "1", "for", "k", "in", "operators", "if", "k", "in", "value", ")", "if", "matches", "==", "0", ":", "error", "(", "field", ",", "'Must contain one of {}'", ".", "format", "(", "operators", ")", ")", "return", "False", "elif", "matches", ">", "1", ":", "error", "(", "field", ",", "'Must contain no more than one of {}'", ".", "format", "(", "operators", ")", ")", "return", "False", "return", "True", "elif", "'and'", "in", "value", ":", "for", "condition", "in", "value", "[", "'and'", "]", ":", "self", ".", "_validate_condition_keys", "(", "field", ",", "condition", ",", "error", ")", "elif", "'or'", "in", "value", ":", "for", "condition", "in", "value", "[", "'or'", "]", ":", "self", ".", "_validate_condition_keys", "(", "field", ",", "condition", ",", "error", ")", "else", ":", "error", "(", "field", ",", "\"Must contain field + operator keys, 'and', or 'or'.\"", ")", "return", "False" ]
Validates that all of the keys in one of the sets of keys are defined as keys of ``value``.
[ "Validates", "that", "all", "of", "the", "keys", "in", "one", "of", "the", "sets", "of", "keys", "are", "defined", "as", "keys", "of", "value", "." ]
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/schemas.py#L123-L149
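Condition shapes traced from the validator above; only the structural rules (exactly one operator alongside 'field', or an 'and'/'or' list) come from the code, and the specific operator names are assumptions about the schema's scalar/nonscalar condition sets:

valid_simple = {'field': 'age', 'gt': 21}                 # exactly one operator
valid_compound = {'and': [
    {'field': 'age', 'gt': 21},
    {'field': 'state', 'in': ['CA', 'NY']},
]}
invalid_none = {'field': 'age'}                           # zero operators -> error
invalid_two = {'field': 'age', 'gt': 1, 'lt': 9}          # two operators -> error
invalid_shape = {'between': [1, 9]}                       # no field/and/or -> error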
lacava/few
few/selection.py
SurvivalMixin.survival
def survival(self,parents,offspring,elite=None,elite_index=None,X=None,X_O=None,F=None,F_O=None): """routes to the survival method, returns survivors""" if self.sel == 'tournament': survivors, survivor_index = self.tournament(parents + offspring, self.tourn_size, num_selections = len(parents)) elif self.sel == 'lexicase': survivor_index = self.lexicase(np.vstack((F,F_O)), num_selections = len(parents), survival = True) survivors = [(parents+ offspring)[s] for s in survivor_index] elif self.sel == 'epsilon_lexicase': # survivors, survivor_index = self.epsilon_lexicase(parents + offspring, num_selections = len(parents), survival = True) if self.lex_size: sizes = [len(i.stack) for i in (parents + offspring)] survivor_index = self.epsilon_lexicase(np.vstack((F,F_O)), sizes, num_selections = F.shape[0], survival = True) survivors = [(parents+ offspring)[s] for s in survivor_index] else: survivor_index = self.epsilon_lexicase(np.vstack((F,F_O)), [], num_selections = F.shape[0], survival = True) survivors = [(parents+ offspring)[s] for s in survivor_index] elif self.sel == 'deterministic_crowding': survivors, survivor_index = self.deterministic_crowding(parents,offspring,X,X_O) elif self.sel == 'random': # pdb.set_trace() survivor_index = self.random_state.permutation(np.arange(2*len(parents)))[:len(parents)] survivors = [(parents + offspring)[s] for s in survivor_index] # elitism if self.elitism: if min([x.fitness for x in survivors]) > elite.fitness: # if the elite individual did not survive and elitism is on, replace worst individual with elite rep_index = np.argmax([x.fitness for x in survivors]) survivors[rep_index] = elite survivor_index[rep_index] = elite_index # return survivors return survivors,survivor_index
python
def survival(self,parents,offspring,elite=None,elite_index=None,X=None,X_O=None,F=None,F_O=None): """routes to the survival method, returns survivors""" if self.sel == 'tournament': survivors, survivor_index = self.tournament(parents + offspring, self.tourn_size, num_selections = len(parents)) elif self.sel == 'lexicase': survivor_index = self.lexicase(np.vstack((F,F_O)), num_selections = len(parents), survival = True) survivors = [(parents+ offspring)[s] for s in survivor_index] elif self.sel == 'epsilon_lexicase': # survivors, survivor_index = self.epsilon_lexicase(parents + offspring, num_selections = len(parents), survival = True) if self.lex_size: sizes = [len(i.stack) for i in (parents + offspring)] survivor_index = self.epsilon_lexicase(np.vstack((F,F_O)), sizes, num_selections = F.shape[0], survival = True) survivors = [(parents+ offspring)[s] for s in survivor_index] else: survivor_index = self.epsilon_lexicase(np.vstack((F,F_O)), [], num_selections = F.shape[0], survival = True) survivors = [(parents+ offspring)[s] for s in survivor_index] elif self.sel == 'deterministic_crowding': survivors, survivor_index = self.deterministic_crowding(parents,offspring,X,X_O) elif self.sel == 'random': # pdb.set_trace() survivor_index = self.random_state.permutation(np.arange(2*len(parents)))[:len(parents)] survivors = [(parents + offspring)[s] for s in survivor_index] # elitism if self.elitism: if min([x.fitness for x in survivors]) > elite.fitness: # if the elite individual did not survive and elitism is on, replace worst individual with elite rep_index = np.argmax([x.fitness for x in survivors]) survivors[rep_index] = elite survivor_index[rep_index] = elite_index # return survivors return survivors,survivor_index
[ "def", "survival", "(", "self", ",", "parents", ",", "offspring", ",", "elite", "=", "None", ",", "elite_index", "=", "None", ",", "X", "=", "None", ",", "X_O", "=", "None", ",", "F", "=", "None", ",", "F_O", "=", "None", ")", ":", "if", "self", ".", "sel", "==", "'tournament'", ":", "survivors", ",", "survivor_index", "=", "self", ".", "tournament", "(", "parents", "+", "offspring", ",", "self", ".", "tourn_size", ",", "num_selections", "=", "len", "(", "parents", ")", ")", "elif", "self", ".", "sel", "==", "'lexicase'", ":", "survivor_index", "=", "self", ".", "lexicase", "(", "np", ".", "vstack", "(", "(", "F", ",", "F_O", ")", ")", ",", "num_selections", "=", "len", "(", "parents", ")", ",", "survival", "=", "True", ")", "survivors", "=", "[", "(", "parents", "+", "offspring", ")", "[", "s", "]", "for", "s", "in", "survivor_index", "]", "elif", "self", ".", "sel", "==", "'epsilon_lexicase'", ":", "# survivors, survivor_index = self.epsilon_lexicase(parents + offspring, num_selections = len(parents), survival = True)", "if", "self", ".", "lex_size", ":", "sizes", "=", "[", "len", "(", "i", ".", "stack", ")", "for", "i", "in", "(", "parents", "+", "offspring", ")", "]", "survivor_index", "=", "self", ".", "epsilon_lexicase", "(", "np", ".", "vstack", "(", "(", "F", ",", "F_O", ")", ")", ",", "sizes", ",", "num_selections", "=", "F", ".", "shape", "[", "0", "]", ",", "survival", "=", "True", ")", "survivors", "=", "[", "(", "parents", "+", "offspring", ")", "[", "s", "]", "for", "s", "in", "survivor_index", "]", "else", ":", "survivor_index", "=", "self", ".", "epsilon_lexicase", "(", "np", ".", "vstack", "(", "(", "F", ",", "F_O", ")", ")", ",", "[", "]", ",", "num_selections", "=", "F", ".", "shape", "[", "0", "]", ",", "survival", "=", "True", ")", "survivors", "=", "[", "(", "parents", "+", "offspring", ")", "[", "s", "]", "for", "s", "in", "survivor_index", "]", "elif", "self", ".", "sel", "==", "'deterministic_crowding'", ":", "survivors", ",", "survivor_index", "=", "self", ".", "deterministic_crowding", "(", "parents", ",", "offspring", ",", "X", ",", "X_O", ")", "elif", "self", ".", "sel", "==", "'random'", ":", "# pdb.set_trace()", "survivor_index", "=", "self", ".", "random_state", ".", "permutation", "(", "np", ".", "arange", "(", "2", "*", "len", "(", "parents", ")", ")", ")", "[", ":", "len", "(", "parents", ")", "]", "survivors", "=", "[", "(", "parents", "+", "offspring", ")", "[", "s", "]", "for", "s", "in", "survivor_index", "]", "# elitism", "if", "self", ".", "elitism", ":", "if", "min", "(", "[", "x", ".", "fitness", "for", "x", "in", "survivors", "]", ")", ">", "elite", ".", "fitness", ":", "# if the elite individual did not survive and elitism is on, replace worst individual with elite", "rep_index", "=", "np", ".", "argmax", "(", "[", "x", ".", "fitness", "for", "x", "in", "survivors", "]", ")", "survivors", "[", "rep_index", "]", "=", "elite", "survivor_index", "[", "rep_index", "]", "=", "elite_index", "# return survivors", "return", "survivors", ",", "survivor_index" ]
routes to the survival method, returns survivors
[ "routes", "to", "the", "survival", "method", "returns", "survivors" ]
train
https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/selection.py#L19-L50
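A minimal standalone sketch of the elitism fallback at the end of survival, assuming fitness is minimized; the Ind namedtuple and the apply_elitism name are illustrative stand-ins, not part of few:

import numpy as np
from collections import namedtuple

# hypothetical stand-in for few's individual class; only fitness matters here
Ind = namedtuple('Ind', ['name', 'fitness'])

def apply_elitism(survivors, survivor_index, elite, elite_index):
    """If the elite (lowest-fitness) individual did not survive,
    overwrite the worst survivor with it."""
    if min(s.fitness for s in survivors) > elite.fitness:
        worst = int(np.argmax([s.fitness for s in survivors]))
        survivors[worst] = elite
        survivor_index[worst] = elite_index
    return survivors, survivor_index

survivors = [Ind('a', 0.9), Ind('b', 0.5)]
elite = Ind('elite', 0.1)  # fitter than everything that survived
print(apply_elitism(survivors, [3, 7], elite, 0))
# the worst survivor ('a') is replaced by the elite individual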
lacava/few
few/selection.py
SurvivalMixin.tournament
def tournament(self, individuals, tourn_size, num_selections=None):
    """conducts tournament selection of size tourn_size"""
    winners = []
    locs = []
    if num_selections is None:
        num_selections = len(individuals)
    for i in np.arange(num_selections):
        # sample pool with replacement
        pool_i = self.random_state.choice(len(individuals), size=tourn_size)
        pool = []
        for i in pool_i:
            pool.append(np.mean(individuals[i].fitness))
        # winner
        locs.append(pool_i[np.argmin(pool)])
        winners.append(copy.deepcopy(individuals[locs[-1]]))
    return winners, locs
python
def tournament(self, individuals, tourn_size, num_selections=None):
    """conducts tournament selection of size tourn_size"""
    winners = []
    locs = []
    if num_selections is None:
        num_selections = len(individuals)
    for i in np.arange(num_selections):
        # sample pool with replacement
        pool_i = self.random_state.choice(len(individuals), size=tourn_size)
        pool = []
        for i in pool_i:
            pool.append(np.mean(individuals[i].fitness))
        # winner
        locs.append(pool_i[np.argmin(pool)])
        winners.append(copy.deepcopy(individuals[locs[-1]]))
    return winners, locs
[ "def", "tournament", "(", "self", ",", "individuals", ",", "tourn_size", ",", "num_selections", "=", "None", ")", ":", "winners", "=", "[", "]", "locs", "=", "[", "]", "if", "num_selections", "is", "None", ":", "num_selections", "=", "len", "(", "individuals", ")", "for", "i", "in", "np", ".", "arange", "(", "num_selections", ")", ":", "# sample pool with replacement", "pool_i", "=", "self", ".", "random_state", ".", "choice", "(", "len", "(", "individuals", ")", ",", "size", "=", "tourn_size", ")", "pool", "=", "[", "]", "for", "i", "in", "pool_i", ":", "pool", ".", "append", "(", "np", ".", "mean", "(", "individuals", "[", "i", "]", ".", "fitness", ")", ")", "# winner", "locs", ".", "append", "(", "pool_i", "[", "np", ".", "argmin", "(", "pool", ")", "]", ")", "winners", ".", "append", "(", "copy", ".", "deepcopy", "(", "individuals", "[", "locs", "[", "-", "1", "]", "]", ")", ")", "return", "winners", ",", "locs" ]
conducts tournament selection of size tourn_size
[ "conducts", "tournament", "selection", "of", "size", "tourn_size" ]
train
https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/selection.py#L52-L69
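The same scheme as a self-contained sketch over a plain fitness list, assuming minimized fitness; tournament_indices is an illustrative name, not few's API:

import numpy as np

def tournament_indices(fitnesses, tourn_size, num_selections, rng):
    """Minimization tournament: sample tourn_size entrants with
    replacement, keep the index of the lowest fitness."""
    locs = []
    for _ in range(num_selections):
        pool = rng.choice(len(fitnesses), size=tourn_size)
        locs.append(pool[np.argmin(np.asarray(fitnesses)[pool])])
    return locs

rng = np.random.RandomState(42)
fitnesses = [3.0, 0.2, 1.5, 0.9]
print(tournament_indices(fitnesses, tourn_size=2, num_selections=4, rng=rng))
# low-fitness individuals (index 1 here) win most of their tournaments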
lacava/few
few/selection.py
SurvivalMixin.lexicase
def lexicase(self, F, num_selections=None, survival=False):
    """conducts lexicase selection for de-aggregated fitness vectors"""
    if num_selections is None:
        num_selections = F.shape[0]
    winners = []
    locs = []
    individual_locs = np.arange(F.shape[0])
    for i in np.arange(num_selections):
        can_locs = individual_locs
        cases = list(np.arange(F.shape[1]))
        self.random_state.shuffle(cases)
        # pdb.set_trace()
        while len(cases) > 0 and len(can_locs) > 1:
            # get best fitness for case among candidates
            best_val_for_case = np.min(F[can_locs, cases[0]])
            # filter individuals without an elite fitness on this case
            can_locs = [l for l in can_locs
                        if F[l, cases[0]] <= best_val_for_case]
            cases.pop(0)
        choice = self.random_state.randint(len(can_locs))
        locs.append(can_locs[choice])
        if survival:  # filter out winners from remaining selection pool
            individual_locs = [i for i in individual_locs
                               if i != can_locs[choice]]
    while len(locs) < num_selections:
        locs.append(individual_locs[0])
    return locs
python
def lexicase(self, F, num_selections=None, survival=False):
    """conducts lexicase selection for de-aggregated fitness vectors"""
    if num_selections is None:
        num_selections = F.shape[0]
    winners = []
    locs = []
    individual_locs = np.arange(F.shape[0])
    for i in np.arange(num_selections):
        can_locs = individual_locs
        cases = list(np.arange(F.shape[1]))
        self.random_state.shuffle(cases)
        # pdb.set_trace()
        while len(cases) > 0 and len(can_locs) > 1:
            # get best fitness for case among candidates
            best_val_for_case = np.min(F[can_locs, cases[0]])
            # filter individuals without an elite fitness on this case
            can_locs = [l for l in can_locs
                        if F[l, cases[0]] <= best_val_for_case]
            cases.pop(0)
        choice = self.random_state.randint(len(can_locs))
        locs.append(can_locs[choice])
        if survival:  # filter out winners from remaining selection pool
            individual_locs = [i for i in individual_locs
                               if i != can_locs[choice]]
    while len(locs) < num_selections:
        locs.append(individual_locs[0])
    return locs
[ "def", "lexicase", "(", "self", ",", "F", ",", "num_selections", "=", "None", ",", "survival", "=", "False", ")", ":", "if", "num_selections", "is", "None", ":", "num_selections", "=", "F", ".", "shape", "[", "0", "]", "winners", "=", "[", "]", "locs", "=", "[", "]", "individual_locs", "=", "np", ".", "arange", "(", "F", ".", "shape", "[", "0", "]", ")", "for", "i", "in", "np", ".", "arange", "(", "num_selections", ")", ":", "can_locs", "=", "individual_locs", "cases", "=", "list", "(", "np", ".", "arange", "(", "F", ".", "shape", "[", "1", "]", ")", ")", "self", ".", "random_state", ".", "shuffle", "(", "cases", ")", "# pdb.set_trace()", "while", "len", "(", "cases", ")", ">", "0", "and", "len", "(", "can_locs", ")", ">", "1", ":", "# get best fitness for case among candidates", "best_val_for_case", "=", "np", ".", "min", "(", "F", "[", "can_locs", ",", "cases", "[", "0", "]", "]", ")", "# filter individuals without an elite fitness on this case", "can_locs", "=", "[", "l", "for", "l", "in", "can_locs", "if", "F", "[", "l", ",", "cases", "[", "0", "]", "]", "<=", "best_val_for_case", "]", "cases", ".", "pop", "(", "0", ")", "choice", "=", "self", ".", "random_state", ".", "randint", "(", "len", "(", "can_locs", ")", ")", "locs", ".", "append", "(", "can_locs", "[", "choice", "]", ")", "if", "survival", ":", "# filter out winners from remaining selection pool", "individual_locs", "=", "[", "i", "for", "i", "in", "individual_locs", "if", "i", "!=", "can_locs", "[", "choice", "]", "]", "while", "len", "(", "locs", ")", "<", "num_selections", ":", "locs", ".", "append", "(", "individual_locs", "[", "0", "]", ")", "return", "locs" ]
conducts lexicase selection for de-aggregated fitness vectors
[ "conducts", "lexicase", "selection", "for", "de", "-", "aggregated", "fitness", "vectors" ]
train
https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/selection.py#L71-L100
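A self-contained sketch of a single lexicase selection event on a small fitness matrix (lexicase_one is an illustrative helper, not few's API; lower error is better):

import numpy as np

def lexicase_one(F, rng):
    """Select one index from fitness matrix F (rows: individuals,
    cols: cases) by plain lexicase selection."""
    candidates = list(range(F.shape[0]))
    cases = list(rng.permutation(F.shape[1]))
    while cases and len(candidates) > 1:
        case = cases.pop(0)
        best = min(F[c, case] for c in candidates)
        # keep only candidates that are elite on this case
        candidates = [c for c in candidates if F[c, case] <= best]
    return candidates[rng.randint(len(candidates))]

rng = np.random.RandomState(0)
F = np.array([[0.0, 9.0],   # elite on case 0 only
              [9.0, 0.0],   # elite on case 1 only
              [5.0, 5.0]])  # elite on neither
print([lexicase_one(F, rng) for _ in range(5)])
# only rows 0 and 1 are ever selected; row 2 never survives the first case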
lacava/few
few/selection.py
SurvivalMixin.epsilon_lexicase
def epsilon_lexicase(self, F, sizes, num_selections=None, survival=False):
    """conducts epsilon lexicase selection for de-aggregated fitness vectors"""
    # pdb.set_trace()
    if num_selections is None:
        num_selections = F.shape[0]
    if self.c:  # use c library
        # define c types
        locs = np.empty(num_selections, dtype='int32', order='F')
        # self.lib.epsilon_lexicase(F,F.shape[0],F.shape[1],num_selections,locs)
        if self.lex_size:
            ep_lex(F, F.shape[0], F.shape[1], num_selections, locs,
                   self.lex_size, np.array(sizes))
        else:
            ep_lex(F, F.shape[0], F.shape[1], num_selections, locs,
                   self.lex_size, np.array([]))
        return locs
    else:  # use python version
        locs = []
        individual_locs = np.arange(F.shape[0])
        # calculate epsilon thresholds based on median absolute deviation (MAD)
        mad_for_case = np.array([self.mad(f) for f in F.transpose()])
        for i in np.arange(num_selections):
            can_locs = individual_locs
            cases = list(np.arange(F.shape[1]))
            self.random_state.shuffle(cases)
            # pdb.set_trace()
            while len(cases) > 0 and len(can_locs) > 1:
                # get best fitness for case among candidates
                best_val_for_case = np.min(F[can_locs, cases[0]])
                # filter individuals without an elite fitness on this case
                can_locs = [l for l in can_locs if F[l, cases[0]] <=
                            best_val_for_case + mad_for_case[cases[0]]]
                cases.pop(0)
            choice = self.random_state.randint(len(can_locs))
            locs.append(can_locs[choice])
            if survival:  # filter out winners from remaining selection pool
                individual_locs = [i for i in individual_locs
                                   if i != can_locs[choice]]
        while len(locs) < num_selections:
            locs.append(individual_locs[0])
        return locs
python
def epsilon_lexicase(self, F, sizes, num_selections=None, survival=False):
    """conducts epsilon lexicase selection for de-aggregated fitness vectors"""
    # pdb.set_trace()
    if num_selections is None:
        num_selections = F.shape[0]
    if self.c:  # use c library
        # define c types
        locs = np.empty(num_selections, dtype='int32', order='F')
        # self.lib.epsilon_lexicase(F,F.shape[0],F.shape[1],num_selections,locs)
        if self.lex_size:
            ep_lex(F, F.shape[0], F.shape[1], num_selections, locs,
                   self.lex_size, np.array(sizes))
        else:
            ep_lex(F, F.shape[0], F.shape[1], num_selections, locs,
                   self.lex_size, np.array([]))
        return locs
    else:  # use python version
        locs = []
        individual_locs = np.arange(F.shape[0])
        # calculate epsilon thresholds based on median absolute deviation (MAD)
        mad_for_case = np.array([self.mad(f) for f in F.transpose()])
        for i in np.arange(num_selections):
            can_locs = individual_locs
            cases = list(np.arange(F.shape[1]))
            self.random_state.shuffle(cases)
            # pdb.set_trace()
            while len(cases) > 0 and len(can_locs) > 1:
                # get best fitness for case among candidates
                best_val_for_case = np.min(F[can_locs, cases[0]])
                # filter individuals without an elite fitness on this case
                can_locs = [l for l in can_locs if F[l, cases[0]] <=
                            best_val_for_case + mad_for_case[cases[0]]]
                cases.pop(0)
            choice = self.random_state.randint(len(can_locs))
            locs.append(can_locs[choice])
            if survival:  # filter out winners from remaining selection pool
                individual_locs = [i for i in individual_locs
                                   if i != can_locs[choice]]
        while len(locs) < num_selections:
            locs.append(individual_locs[0])
        return locs
[ "def", "epsilon_lexicase", "(", "self", ",", "F", ",", "sizes", ",", "num_selections", "=", "None", ",", "survival", "=", "False", ")", ":", "# pdb.set_trace()", "if", "num_selections", "is", "None", ":", "num_selections", "=", "F", ".", "shape", "[", "0", "]", "if", "self", ".", "c", ":", "# use c library", "# define c types", "locs", "=", "np", ".", "empty", "(", "num_selections", ",", "dtype", "=", "'int32'", ",", "order", "=", "'F'", ")", "# self.lib.epsilon_lexicase(F,F.shape[0],F.shape[1],num_selections,locs)", "if", "self", ".", "lex_size", ":", "ep_lex", "(", "F", ",", "F", ".", "shape", "[", "0", "]", ",", "F", ".", "shape", "[", "1", "]", ",", "num_selections", ",", "locs", ",", "self", ".", "lex_size", ",", "np", ".", "array", "(", "sizes", ")", ")", "else", ":", "ep_lex", "(", "F", ",", "F", ".", "shape", "[", "0", "]", ",", "F", ".", "shape", "[", "1", "]", ",", "num_selections", ",", "locs", ",", "self", ".", "lex_size", ",", "np", ".", "array", "(", "[", "]", ")", ")", "return", "locs", "else", ":", "# use python version", "locs", "=", "[", "]", "individual_locs", "=", "np", ".", "arange", "(", "F", ".", "shape", "[", "0", "]", ")", "# calculate epsilon thresholds based on median absolute deviation (MAD)", "mad_for_case", "=", "np", ".", "array", "(", "[", "self", ".", "mad", "(", "f", ")", "for", "f", "in", "F", ".", "transpose", "(", ")", "]", ")", "for", "i", "in", "np", ".", "arange", "(", "num_selections", ")", ":", "can_locs", "=", "individual_locs", "cases", "=", "list", "(", "np", ".", "arange", "(", "F", ".", "shape", "[", "1", "]", ")", ")", "self", ".", "random_state", ".", "shuffle", "(", "cases", ")", "# pdb.set_trace()", "while", "len", "(", "cases", ")", ">", "0", "and", "len", "(", "can_locs", ")", ">", "1", ":", "# get best fitness for case among candidates", "best_val_for_case", "=", "np", ".", "min", "(", "F", "[", "can_locs", ",", "cases", "[", "0", "]", "]", ")", "# filter individuals without an elite fitness on this case", "can_locs", "=", "[", "l", "for", "l", "in", "can_locs", "if", "F", "[", "l", ",", "cases", "[", "0", "]", "]", "<=", "best_val_for_case", "+", "mad_for_case", "[", "cases", "[", "0", "]", "]", "]", "cases", ".", "pop", "(", "0", ")", "choice", "=", "self", ".", "random_state", ".", "randint", "(", "len", "(", "can_locs", ")", ")", "locs", ".", "append", "(", "can_locs", "[", "choice", "]", ")", "if", "survival", ":", "# filter out winners from remaining selection pool", "individual_locs", "=", "[", "i", "for", "i", "in", "individual_locs", "if", "i", "!=", "can_locs", "[", "choice", "]", "]", "while", "len", "(", "locs", ")", "<", "num_selections", ":", "locs", ".", "append", "(", "individual_locs", "[", "0", "]", ")", "return", "locs" ]
conducts epsilon lexicase selection for de-aggregated fitness vectors
[ "conducts", "epsilon", "lexicase", "selection", "for", "de", "-", "aggregated", "fitness", "vectors" ]
train
https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/selection.py#L128-L170
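The pure-Python branch above, condensed into a standalone sketch that shows where the MAD-based epsilon enters the case filter; epsilon_lexicase_one is an illustrative name, and as in the source the thresholds are computed once over the whole matrix:

import numpy as np

def epsilon_lexicase_one(F, rng):
    """Like lexicase, but an individual survives a case if it is within
    the case's median absolute deviation (MAD) of the elite error."""
    mad = lambda x: np.median(np.abs(x - np.median(x)))
    eps = np.array([mad(F[:, j]) for j in range(F.shape[1])])
    candidates = list(range(F.shape[0]))
    for case in rng.permutation(F.shape[1]):
        if len(candidates) == 1:
            break
        best = min(F[c, case] for c in candidates)
        candidates = [c for c in candidates if F[c, case] <= best + eps[case]]
    return candidates[rng.randint(len(candidates))]

rng = np.random.RandomState(1)
F = np.array([[0.00, 0.50],
              [0.01, 0.60],   # near-elite on case 0: survives under epsilon
              [0.90, 0.55]])
print(epsilon_lexicase_one(F, rng))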
lacava/few
few/selection.py
SurvivalMixin.mad
def mad(self, x, axis=None):
    """median absolute deviation statistic"""
    return np.median(np.abs(x - np.median(x, axis)), axis)
python
def mad(self, x, axis=None):
    """median absolute deviation statistic"""
    return np.median(np.abs(x - np.median(x, axis)), axis)
[ "def", "mad", "(", "self", ",", "x", ",", "axis", "=", "None", ")", ":", "return", "np", ".", "median", "(", "np", ".", "abs", "(", "x", "-", "np", ".", "median", "(", "x", ",", "axis", ")", ")", ",", "axis", ")" ]
median absolute deviation statistic
[ "median", "absolute", "deviation", "statistic" ]
train
https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/selection.py#L173-L175
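A quick numeric check of the statistic, showing why MAD makes a robust epsilon threshold: a single large outlier barely moves it, while it dominates the standard deviation:

import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0, 100.0])
# median is 3; absolute deviations are [2, 1, 0, 1, 97]; their median is 1
mad = np.median(np.abs(x - np.median(x)))
print(mad)          # 1.0 -- the outlier barely moves it
print(np.std(x))    # ~39  -- the standard deviation is dominated by it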
lacava/few
few/selection.py
SurvivalMixin.deterministic_crowding
def deterministic_crowding(self, parents, offspring, X_parents, X_offspring):
    """deterministic crowding implementation (for non-steady state).
    offspring compete against the parent they are most similar to, here
    defined as the parent they are most correlated with. the offspring
    only replace their parent if they are more fit.
    """
    # get children locations produced from crossover
    cross_children = [i for i, o in enumerate(offspring)
                      if len(o.parentid) > 1]
    # order offspring so that they are lined up with their most similar parent
    for c1, c2 in zip(cross_children[::2], cross_children[1::2]):
        # get parent locations
        p_loc = [j for j, p in enumerate(parents)
                 if p.id in offspring[c1].parentid]
        if len(p_loc) != 2:
            continue
        # if child is more correlated with its non-root parent
        if r2_score(X_parents[p_loc[0]], X_offspring[c1]) + \
           r2_score(X_parents[p_loc[1]], X_offspring[c2]) < \
           r2_score(X_parents[p_loc[0]], X_offspring[c2]) + \
           r2_score(X_parents[p_loc[1]], X_offspring[c1]):
            # swap offspring
            offspring[c1], offspring[c2] = offspring[c2], offspring[c1]
    survivors = []
    survivor_index = []
    for i, (p, o) in enumerate(zip(parents, offspring)):
        if p.fitness >= o.fitness:
            survivors.append(copy.deepcopy(p))
            survivor_index.append(i)
        else:
            survivors.append(copy.deepcopy(o))
            survivor_index.append(i + len(parents))
    # return survivors along with their indices
    return survivors, survivor_index
python
def deterministic_crowding(self, parents, offspring, X_parents, X_offspring):
    """deterministic crowding implementation (for non-steady state).
    offspring compete against the parent they are most similar to, here
    defined as the parent they are most correlated with. the offspring
    only replace their parent if they are more fit.
    """
    # get children locations produced from crossover
    cross_children = [i for i, o in enumerate(offspring)
                      if len(o.parentid) > 1]
    # order offspring so that they are lined up with their most similar parent
    for c1, c2 in zip(cross_children[::2], cross_children[1::2]):
        # get parent locations
        p_loc = [j for j, p in enumerate(parents)
                 if p.id in offspring[c1].parentid]
        if len(p_loc) != 2:
            continue
        # if child is more correlated with its non-root parent
        if r2_score(X_parents[p_loc[0]], X_offspring[c1]) + \
           r2_score(X_parents[p_loc[1]], X_offspring[c2]) < \
           r2_score(X_parents[p_loc[0]], X_offspring[c2]) + \
           r2_score(X_parents[p_loc[1]], X_offspring[c1]):
            # swap offspring
            offspring[c1], offspring[c2] = offspring[c2], offspring[c1]
    survivors = []
    survivor_index = []
    for i, (p, o) in enumerate(zip(parents, offspring)):
        if p.fitness >= o.fitness:
            survivors.append(copy.deepcopy(p))
            survivor_index.append(i)
        else:
            survivors.append(copy.deepcopy(o))
            survivor_index.append(i + len(parents))
    # return survivors along with their indices
    return survivors, survivor_index
[ "def", "deterministic_crowding", "(", "self", ",", "parents", ",", "offspring", ",", "X_parents", ",", "X_offspring", ")", ":", "# get children locations produced from crossover", "cross_children", "=", "[", "i", "for", "i", ",", "o", "in", "enumerate", "(", "offspring", ")", "if", "len", "(", "o", ".", "parentid", ")", ">", "1", "]", "# order offspring so that they are lined up with their most similar parent", "for", "c1", ",", "c2", "in", "zip", "(", "cross_children", "[", ":", ":", "2", "]", ",", "cross_children", "[", "1", ":", ":", "2", "]", ")", ":", "# get parent locations", "p_loc", "=", "[", "j", "for", "j", ",", "p", "in", "enumerate", "(", "parents", ")", "if", "p", ".", "id", "in", "offspring", "[", "c1", "]", ".", "parentid", "]", "if", "len", "(", "p_loc", ")", "!=", "2", ":", "continue", "# if child is more correlated with its non-root parent", "if", "r2_score", "(", "X_parents", "[", "p_loc", "[", "0", "]", "]", ",", "X_offspring", "[", "c1", "]", ")", "+", "r2_score", "(", "X_parents", "[", "p_loc", "[", "1", "]", "]", ",", "X_offspring", "[", "c2", "]", ")", "<", "r2_score", "(", "X_parents", "[", "p_loc", "[", "0", "]", "]", ",", "X_offspring", "[", "c2", "]", ")", "+", "r2_score", "(", "X_parents", "[", "p_loc", "[", "1", "]", "]", ",", "X_offspring", "[", "c1", "]", ")", ":", "# swap offspring", "offspring", "[", "c1", "]", ",", "offspring", "[", "c2", "]", "=", "offspring", "[", "c2", "]", ",", "offspring", "[", "c1", "]", "survivors", "=", "[", "]", "survivor_index", "=", "[", "]", "for", "i", ",", "(", "p", ",", "o", ")", "in", "enumerate", "(", "zip", "(", "parents", ",", "offspring", ")", ")", ":", "if", "p", ".", "fitness", ">=", "o", ".", "fitness", ":", "survivors", ".", "append", "(", "copy", ".", "deepcopy", "(", "p", ")", ")", "survivor_index", ".", "append", "(", "i", ")", "else", ":", "survivors", ".", "append", "(", "copy", ".", "deepcopy", "(", "o", ")", ")", "survivor_index", ".", "append", "(", "i", "+", "len", "(", "parents", ")", ")", "# return survivors along with their indices", "return", "survivors", ",", "survivor_index" ]
deterministic crowding implementation (for non-steady state). offspring compete against the parent they are most similar to, here defined as the parent they are most correlated with. the offspring only replace their parent if they are more fit.
[ "deterministic", "crowding", "implementation", "(", "for", "non", "-", "steady", "state", ")", ".", "offspring", "compete", "against", "the", "parent", "they", "are", "most", "similar", "to", "here", "defined", "as", "the", "parent", "they", "are", "most", "correlated", "with", ".", "the", "offspring", "only", "replace", "their", "parent", "if", "they", "are", "more", "fit", "." ]
train
https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/selection.py#L177-L208
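A simplified, standalone sketch of the slot-wise replacement step, written with an explicit lower-is-better fitness convention; the Toy class and crowding_survival helper are hypothetical, and the correlation-based pairing of offspring to parents is omitted:

import copy

class Toy:
    # hypothetical minimal individual; fitness is minimized in this sketch
    def __init__(self, name, fitness):
        self.name, self.fitness = name, fitness

def crowding_survival(parents, offspring):
    """Slot-wise competition: each offspring only ever replaces the
    parent it is paired with, which preserves population diversity."""
    survivors, survivor_index = [], []
    for i, (p, o) in enumerate(zip(parents, offspring)):
        if p.fitness <= o.fitness:          # parent is at least as fit
            survivors.append(copy.deepcopy(p))
            survivor_index.append(i)
        else:                               # offspring improves on parent
            survivors.append(copy.deepcopy(o))
            survivor_index.append(i + len(parents))
    return survivors, survivor_index

parents = [Toy('p0', 0.4), Toy('p1', 0.9)]
offspring = [Toy('o0', 0.7), Toy('o1', 0.2)]
winners, idx = crowding_survival(parents, offspring)
print([w.name for w in winners], idx)   # ['p0', 'o1'] [0, 3]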
lacava/few
few/variation.py
VariationMixin.variation
def variation(self, parents):
    """performs variation operators on parents."""
    # downselect to features that are important
    if (self.valid(parents) and self.ml_type != 'SVC'
            and self.ml_type != 'SVR'):
        # this is needed because svm has a bug that throws valueerror on
        # attribute check
        if hasattr(self.pipeline.named_steps['ml'], 'coef_'):
            # for l1 regularization, filter individuals with 0 coefficients
            if self.weight_parents:
                weights = self.pipeline.named_steps['ml'].coef_
                if len(weights.shape) > 1:  # handle multi-coefficient models
                    weights = [np.mean(abs(c)) for c in weights.transpose()]
                # softmax transformation of the weights
                weights = np.exp(weights) / np.sum(np.exp(weights))
                offspring = copy.deepcopy(
                    list(self.random_state.choice(self.valid(parents),
                                                  self.population_size,
                                                  p=weights)))
            else:
                offspring = copy.deepcopy(list(
                    x for i, x in zip(self.pipeline.named_steps['ml'].coef_,
                                      self.valid(parents))
                    if (i != 0).any()))
        elif hasattr(self.pipeline.named_steps['ml'], 'feature_importances_'):
            # for tree methods, filter out individuals with 0 feature importance
            if self.weight_parents:
                weights = self.pipeline.named_steps['ml'].feature_importances_
                # softmax transformation of the weights
                weights = np.exp(weights) / np.sum(np.exp(weights))
                offspring = copy.deepcopy(list(
                    self.random_state.choice(self.valid(parents),
                                             self.population_size,
                                             p=weights)))
            else:
                offspring = copy.deepcopy(list(
                    x for i, x in zip(
                        self.pipeline.named_steps['ml'].feature_importances_,
                        self.valid(parents))
                    if i != 0))
        else:
            offspring = copy.deepcopy(self.valid(parents))
    else:
        offspring = copy.deepcopy(self.valid(parents))
    if self.elitism:  # keep a copy of the elite individual
        elite_index = np.argmin([x.fitness for x in parents])
        elite = copy.deepcopy(parents[elite_index])
    # Apply crossover and mutation on the offspring
    if self.verbosity > 2:
        print("variation...")
    for child1, child2 in it.zip_longest(offspring[::2], offspring[1::2],
                                         fillvalue=None):
        if self.random_state.rand() < self.crossover_rate and child2 != None:
            # crossover
            self.cross(child1.stack, child2.stack, self.max_depth)
            # update ids
            child1.parentid = [child1.id, child2.id]
            child1.id = uuid.uuid4()
            child2.parentid = [child1.id, child2.id]
            child2.id = uuid.uuid4()
            # set default fitness
            child1.fitness = -1
            child2.fitness = -1
        elif child2 == None:
            # single mutation
            self.mutate(child1.stack, self.func_set, self.term_set)
            # update ids
            child1.parentid = [child1.id]
            child1.id = uuid.uuid4()
            # set default fitness
            child1.fitness = -1
        else:
            # double mutation
            self.mutate(child1.stack, self.func_set, self.term_set)
            self.mutate(child2.stack, self.func_set, self.term_set)
            # update ids
            child1.parentid = [child1.id]
            child1.id = uuid.uuid4()
            child2.parentid = [child2.id]
            child2.id = uuid.uuid4()
            # set default fitness
            child1.fitness = -1
            child2.fitness = -1
    while len(offspring) < self.population_size:
        # make new offspring to replace the invalid ones
        offspring.append(Ind())
        self.make_program(offspring[-1].stack, self.func_set, self.term_set,
                          self.random_state.randint(self.min_depth,
                                                    self.max_depth + 1),
                          self.otype)
        offspring[-1].stack = list(reversed(offspring[-1].stack))
    return offspring, elite, elite_index
python
def variation(self, parents):
    """performs variation operators on parents."""
    # downselect to features that are important
    if (self.valid(parents) and self.ml_type != 'SVC'
            and self.ml_type != 'SVR'):
        # this is needed because svm has a bug that throws valueerror on
        # attribute check
        if hasattr(self.pipeline.named_steps['ml'], 'coef_'):
            # for l1 regularization, filter individuals with 0 coefficients
            if self.weight_parents:
                weights = self.pipeline.named_steps['ml'].coef_
                if len(weights.shape) > 1:  # handle multi-coefficient models
                    weights = [np.mean(abs(c)) for c in weights.transpose()]
                # softmax transformation of the weights
                weights = np.exp(weights) / np.sum(np.exp(weights))
                offspring = copy.deepcopy(
                    list(self.random_state.choice(self.valid(parents),
                                                  self.population_size,
                                                  p=weights)))
            else:
                offspring = copy.deepcopy(list(
                    x for i, x in zip(self.pipeline.named_steps['ml'].coef_,
                                      self.valid(parents))
                    if (i != 0).any()))
        elif hasattr(self.pipeline.named_steps['ml'], 'feature_importances_'):
            # for tree methods, filter out individuals with 0 feature importance
            if self.weight_parents:
                weights = self.pipeline.named_steps['ml'].feature_importances_
                # softmax transformation of the weights
                weights = np.exp(weights) / np.sum(np.exp(weights))
                offspring = copy.deepcopy(list(
                    self.random_state.choice(self.valid(parents),
                                             self.population_size,
                                             p=weights)))
            else:
                offspring = copy.deepcopy(list(
                    x for i, x in zip(
                        self.pipeline.named_steps['ml'].feature_importances_,
                        self.valid(parents))
                    if i != 0))
        else:
            offspring = copy.deepcopy(self.valid(parents))
    else:
        offspring = copy.deepcopy(self.valid(parents))
    if self.elitism:  # keep a copy of the elite individual
        elite_index = np.argmin([x.fitness for x in parents])
        elite = copy.deepcopy(parents[elite_index])
    # Apply crossover and mutation on the offspring
    if self.verbosity > 2:
        print("variation...")
    for child1, child2 in it.zip_longest(offspring[::2], offspring[1::2],
                                         fillvalue=None):
        if self.random_state.rand() < self.crossover_rate and child2 != None:
            # crossover
            self.cross(child1.stack, child2.stack, self.max_depth)
            # update ids
            child1.parentid = [child1.id, child2.id]
            child1.id = uuid.uuid4()
            child2.parentid = [child1.id, child2.id]
            child2.id = uuid.uuid4()
            # set default fitness
            child1.fitness = -1
            child2.fitness = -1
        elif child2 == None:
            # single mutation
            self.mutate(child1.stack, self.func_set, self.term_set)
            # update ids
            child1.parentid = [child1.id]
            child1.id = uuid.uuid4()
            # set default fitness
            child1.fitness = -1
        else:
            # double mutation
            self.mutate(child1.stack, self.func_set, self.term_set)
            self.mutate(child2.stack, self.func_set, self.term_set)
            # update ids
            child1.parentid = [child1.id]
            child1.id = uuid.uuid4()
            child2.parentid = [child2.id]
            child2.id = uuid.uuid4()
            # set default fitness
            child1.fitness = -1
            child2.fitness = -1
    while len(offspring) < self.population_size:
        # make new offspring to replace the invalid ones
        offspring.append(Ind())
        self.make_program(offspring[-1].stack, self.func_set, self.term_set,
                          self.random_state.randint(self.min_depth,
                                                    self.max_depth + 1),
                          self.otype)
        offspring[-1].stack = list(reversed(offspring[-1].stack))
    return offspring, elite, elite_index
[ "def", "variation", "(", "self", ",", "parents", ")", ":", "# downselect to features that are important", "if", "(", "self", ".", "valid", "(", "parents", ")", "and", "self", ".", "ml_type", "!=", "'SVC'", "and", "self", ".", "ml_type", "!=", "'SVR'", ")", ":", "# this is needed because svm has a bug that throws valueerror on", "# attribute check", "if", "hasattr", "(", "self", ".", "pipeline", ".", "named_steps", "[", "'ml'", "]", ",", "'coef_'", ")", ":", "# for l1 regularization, filter individuals with 0 coefficients", "if", "self", ".", "weight_parents", ":", "weights", "=", "self", ".", "pipeline", ".", "named_steps", "[", "'ml'", "]", ".", "coef_", "if", "len", "(", "weights", ".", "shape", ")", ">", "1", ":", "# handle multi-coefficient models", "weights", "=", "[", "np", ".", "mean", "(", "abs", "(", "c", ")", ")", "for", "c", "in", "weights", ".", "transpose", "(", ")", "]", "# softmax transformation of the weights", "weights", "=", "np", ".", "exp", "(", "weights", ")", "/", "np", ".", "sum", "(", "np", ".", "exp", "(", "weights", ")", ")", "offspring", "=", "copy", ".", "deepcopy", "(", "list", "(", "self", ".", "random_state", ".", "choice", "(", "self", ".", "valid", "(", "parents", ")", ",", "self", ".", "population_size", ",", "p", "=", "weights", ")", ")", ")", "else", ":", "offspring", "=", "copy", ".", "deepcopy", "(", "list", "(", "x", "for", "i", ",", "x", "in", "zip", "(", "self", ".", "pipeline", ".", "named_steps", "[", "'ml'", "]", ".", "coef_", ",", "self", ".", "valid", "(", "parents", ")", ")", "if", "(", "i", "!=", "0", ")", ".", "any", "(", ")", ")", ")", "elif", "hasattr", "(", "self", ".", "pipeline", ".", "named_steps", "[", "'ml'", "]", ",", "'feature_importances_'", ")", ":", "# for tree methods, filter our individuals with 0 feature importance", "if", "self", ".", "weight_parents", ":", "weights", "=", "self", ".", "pipeline", ".", "named_steps", "[", "'ml'", "]", ".", "feature_importances_", "# softmax transformation of the weights", "weights", "=", "np", ".", "exp", "(", "weights", ")", "/", "np", ".", "sum", "(", "np", ".", "exp", "(", "weights", ")", ")", "offspring", "=", "copy", ".", "deepcopy", "(", "list", "(", "self", ".", "random_state", ".", "choice", "(", "self", ".", "valid", "(", "parents", ")", ",", "self", ".", "population_size", ",", "p", "=", "weights", ")", ")", ")", "else", ":", "offspring", "=", "copy", ".", "deepcopy", "(", "list", "(", "x", "for", "i", ",", "x", "in", "zip", "(", "self", ".", "pipeline", ".", "named_steps", "[", "'ml'", "]", ".", "feature_importances_", ",", "self", ".", "valid", "(", "parents", ")", ")", "if", "i", "!=", "0", ")", ")", "else", ":", "offspring", "=", "copy", ".", "deepcopy", "(", "self", ".", "valid", "(", "parents", ")", ")", "else", ":", "offspring", "=", "copy", ".", "deepcopy", "(", "self", ".", "valid", "(", "parents", ")", ")", "if", "self", ".", "elitism", ":", "# keep a copy of the elite individual", "elite_index", "=", "np", ".", "argmin", "(", "[", "x", ".", "fitness", "for", "x", "in", "parents", "]", ")", "elite", "=", "copy", ".", "deepcopy", "(", "parents", "[", "elite_index", "]", ")", "# Apply crossover and mutation on the offspring", "if", "self", ".", "verbosity", ">", "2", ":", "print", "(", "\"variation...\"", ")", "for", "child1", ",", "child2", "in", "it", ".", "zip_longest", "(", "offspring", "[", ":", ":", "2", "]", ",", "offspring", "[", "1", ":", ":", "2", "]", ",", "fillvalue", "=", "None", ")", ":", "if", "self", ".", "random_state", ".", "rand", "(", ")", "<", "self", 
".", "crossover_rate", "and", "child2", "!=", "None", ":", "# crossover", "self", ".", "cross", "(", "child1", ".", "stack", ",", "child2", ".", "stack", ",", "self", ".", "max_depth", ")", "# update ids", "child1", ".", "parentid", "=", "[", "child1", ".", "id", ",", "child2", ".", "id", "]", "child1", ".", "id", "=", "uuid", ".", "uuid4", "(", ")", "child2", ".", "parentid", "=", "[", "child1", ".", "id", ",", "child2", ".", "id", "]", "child2", ".", "id", "=", "uuid", ".", "uuid4", "(", ")", "# set default fitness", "child1", ".", "fitness", "=", "-", "1", "child2", ".", "fitness", "=", "-", "1", "elif", "child2", "==", "None", ":", "# single mutation", "self", ".", "mutate", "(", "child1", ".", "stack", ",", "self", ".", "func_set", ",", "self", ".", "term_set", ")", "# update ids", "child1", ".", "parentid", "=", "[", "child1", ".", "id", "]", "child1", ".", "id", "=", "uuid", ".", "uuid4", "(", ")", "# set default fitness", "child1", ".", "fitness", "=", "-", "1", "else", ":", "#double mutation", "self", ".", "mutate", "(", "child1", ".", "stack", ",", "self", ".", "func_set", ",", "self", ".", "term_set", ")", "self", ".", "mutate", "(", "child2", ".", "stack", ",", "self", ".", "func_set", ",", "self", ".", "term_set", ")", "# update ids", "child1", ".", "parentid", "=", "[", "child1", ".", "id", "]", "child1", ".", "id", "=", "uuid", ".", "uuid4", "(", ")", "child2", ".", "parentid", "=", "[", "child2", ".", "id", "]", "child2", ".", "id", "=", "uuid", ".", "uuid4", "(", ")", "# set default fitness", "child1", ".", "fitness", "=", "-", "1", "child2", ".", "fitness", "=", "-", "1", "while", "len", "(", "offspring", ")", "<", "self", ".", "population_size", ":", "#make new offspring to replace the invalid ones", "offspring", ".", "append", "(", "Ind", "(", ")", ")", "self", ".", "make_program", "(", "offspring", "[", "-", "1", "]", ".", "stack", ",", "self", ".", "func_set", ",", "self", ".", "term_set", ",", "self", ".", "random_state", ".", "randint", "(", "self", ".", "min_depth", ",", "self", ".", "max_depth", "+", "1", ")", ",", "self", ".", "otype", ")", "offspring", "[", "-", "1", "]", ".", "stack", "=", "list", "(", "reversed", "(", "offspring", "[", "-", "1", "]", ".", "stack", ")", ")", "return", "offspring", ",", "elite", ",", "elite_index" ]
performs variation operators on parents.
[ "performs", "variation", "operators", "on", "parents", "." ]
train
https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/variation.py#L19-L109
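The softmax parent weighting used above, isolated as a runnable sketch; softmax_weights is an illustrative name, and importances stands in for the pipeline's coefficients or feature importances:

import numpy as np

def softmax_weights(raw):
    """Softmax transform that turns per-feature scores into a
    parent-sampling probability distribution."""
    w = np.exp(np.asarray(raw, dtype=float))
    return w / np.sum(w)

rng = np.random.RandomState(7)
importances = [0.0, 0.1, 2.0]          # e.g. per-feature importances
p = softmax_weights(importances)
print(p.round(3))                      # heavily favors the third parent
print(rng.choice(3, size=10, p=p))     # sampled parent indices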
lacava/few
few/variation.py
VariationMixin.cross
def cross(self, p_i, p_j, max_depth=2):
    """subtree-like swap crossover between programs p_i and p_j."""
    # only choose crossover points for out_types available in both programs
    # pdb.set_trace()
    # determine possible out_types
    types_p_i = [t for t in [p.out_type for p in p_i]]
    types_p_j = [t for t in [p.out_type for p in p_j]]
    types = set(types_p_i).intersection(types_p_j)
    # grab subtree of p_i
    p_i_sub = [i for i, n in enumerate(p_i) if n.out_type in types]
    x_i_end = self.random_state.choice(p_i_sub)
    x_i_begin = x_i_end
    arity_sum = p_i[x_i_end].arity[p_i[x_i_end].in_type]
    # print("x_i_end:",x_i_end)
    # i = 0
    while (arity_sum > 0):  # and i < 1000:
        if x_i_begin == 0:
            print("arity_sum:", arity_sum, "x_i_begin:", x_i_begin, "x_i_end:", x_i_end)
        x_i_begin -= 1
        arity_sum += p_i[x_i_begin].arity[p_i[x_i_begin].in_type] - 1
        # i += 1
    # if i == 1000:
    # print("in variation")
    # pdb.set_trace()
    # grab subtree of p_j with matching out_type to p_i[x_i_end]
    p_j_sub = [i for i, n in enumerate(p_j)
               if n.out_type == p_i[x_i_end].out_type]
    x_j_end = self.random_state.choice(p_j_sub)
    x_j_begin = x_j_end
    arity_sum = p_j[x_j_end].arity[p_j[x_j_end].in_type]
    # i = 0
    while (arity_sum > 0):  # and i < 1000:
        if x_j_begin == 0:
            print("arity_sum:", arity_sum, "x_j_begin:", x_j_begin, "x_j_end:", x_j_end)
            print("p_j:", p_j)
        x_j_begin -= 1
        arity_sum += p_j[x_j_begin].arity[p_j[x_j_begin].in_type] - 1
        # i += 1
    # if i == 1000:
    # print("in variation")
    # pdb.set_trace()
    # swap subtrees
    tmpi = p_i[:]
    tmpj = p_j[:]
    tmpi[x_i_begin:x_i_end+1:], tmpj[x_j_begin:x_j_end+1:] = \
        tmpj[x_j_begin:x_j_end+1:], tmpi[x_i_begin:x_i_end+1:]
    if not self.is_valid_program(p_i) or not self.is_valid_program(p_j):
        # pdb.set_trace()
        print("parent 1:", p_i, "x_i_begin:", x_i_begin, "x_i_end:", x_i_end)
        print("parent 2:", p_j, "x_j_begin:", x_j_begin, "x_j_end:", x_j_end)
        print("child 1:", tmpi)
        print("child 2:", tmpj)
        raise ValueError('Crossover produced an invalid program.')
    # size check, then assignment
    if len(tmpi) <= 2**max_depth - 1:
        p_i[:] = tmpi
    if len(tmpj) <= 2**max_depth - 1:
        p_j[:] = tmpj
python
def cross(self, p_i, p_j, max_depth=2):
    """subtree-like swap crossover between programs p_i and p_j."""
    # only choose crossover points for out_types available in both programs
    # pdb.set_trace()
    # determine possible out_types
    types_p_i = [t for t in [p.out_type for p in p_i]]
    types_p_j = [t for t in [p.out_type for p in p_j]]
    types = set(types_p_i).intersection(types_p_j)
    # grab subtree of p_i
    p_i_sub = [i for i, n in enumerate(p_i) if n.out_type in types]
    x_i_end = self.random_state.choice(p_i_sub)
    x_i_begin = x_i_end
    arity_sum = p_i[x_i_end].arity[p_i[x_i_end].in_type]
    # print("x_i_end:",x_i_end)
    # i = 0
    while (arity_sum > 0):  # and i < 1000:
        if x_i_begin == 0:
            print("arity_sum:", arity_sum, "x_i_begin:", x_i_begin, "x_i_end:", x_i_end)
        x_i_begin -= 1
        arity_sum += p_i[x_i_begin].arity[p_i[x_i_begin].in_type] - 1
        # i += 1
    # if i == 1000:
    # print("in variation")
    # pdb.set_trace()
    # grab subtree of p_j with matching out_type to p_i[x_i_end]
    p_j_sub = [i for i, n in enumerate(p_j)
               if n.out_type == p_i[x_i_end].out_type]
    x_j_end = self.random_state.choice(p_j_sub)
    x_j_begin = x_j_end
    arity_sum = p_j[x_j_end].arity[p_j[x_j_end].in_type]
    # i = 0
    while (arity_sum > 0):  # and i < 1000:
        if x_j_begin == 0:
            print("arity_sum:", arity_sum, "x_j_begin:", x_j_begin, "x_j_end:", x_j_end)
            print("p_j:", p_j)
        x_j_begin -= 1
        arity_sum += p_j[x_j_begin].arity[p_j[x_j_begin].in_type] - 1
        # i += 1
    # if i == 1000:
    # print("in variation")
    # pdb.set_trace()
    # swap subtrees
    tmpi = p_i[:]
    tmpj = p_j[:]
    tmpi[x_i_begin:x_i_end+1:], tmpj[x_j_begin:x_j_end+1:] = \
        tmpj[x_j_begin:x_j_end+1:], tmpi[x_i_begin:x_i_end+1:]
    if not self.is_valid_program(p_i) or not self.is_valid_program(p_j):
        # pdb.set_trace()
        print("parent 1:", p_i, "x_i_begin:", x_i_begin, "x_i_end:", x_i_end)
        print("parent 2:", p_j, "x_j_begin:", x_j_begin, "x_j_end:", x_j_end)
        print("child 1:", tmpi)
        print("child 2:", tmpj)
        raise ValueError('Crossover produced an invalid program.')
    # size check, then assignment
    if len(tmpi) <= 2**max_depth - 1:
        p_i[:] = tmpi
    if len(tmpj) <= 2**max_depth - 1:
        p_j[:] = tmpj
[ "def", "cross", "(", "self", ",", "p_i", ",", "p_j", ",", "max_depth", "=", "2", ")", ":", "# only choose crossover points for out_types available in both programs", "# pdb.set_trace()", "# determine possible outttypes", "types_p_i", "=", "[", "t", "for", "t", "in", "[", "p", ".", "out_type", "for", "p", "in", "p_i", "]", "]", "types_p_j", "=", "[", "t", "for", "t", "in", "[", "p", ".", "out_type", "for", "p", "in", "p_j", "]", "]", "types", "=", "set", "(", "types_p_i", ")", ".", "intersection", "(", "types_p_j", ")", "# grab subtree of p_i", "p_i_sub", "=", "[", "i", "for", "i", ",", "n", "in", "enumerate", "(", "p_i", ")", "if", "n", ".", "out_type", "in", "types", "]", "x_i_end", "=", "self", ".", "random_state", ".", "choice", "(", "p_i_sub", ")", "x_i_begin", "=", "x_i_end", "arity_sum", "=", "p_i", "[", "x_i_end", "]", ".", "arity", "[", "p_i", "[", "x_i_end", "]", ".", "in_type", "]", "# print(\"x_i_end:\",x_i_end)", "# i = 0", "while", "(", "arity_sum", ">", "0", ")", ":", "#and i < 1000:", "if", "x_i_begin", "==", "0", ":", "print", "(", "\"arity_sum:\"", ",", "arity_sum", ",", "\"x_i_begin:\"", ",", "x_i_begin", ",", "\"x_i_end:\"", ",", "x_i_end", ")", "x_i_begin", "-=", "1", "arity_sum", "+=", "p_i", "[", "x_i_begin", "]", ".", "arity", "[", "p_i", "[", "x_i_begin", "]", ".", "in_type", "]", "-", "1", "# i += 1", "# if i == 1000:", "# print(\"in variation\")", "# pdb.set_trace()", "# grab subtree of p_j with matching out_type to p_i[x_i_end]", "p_j_sub", "=", "[", "i", "for", "i", ",", "n", "in", "enumerate", "(", "p_j", ")", "if", "n", ".", "out_type", "==", "p_i", "[", "x_i_end", "]", ".", "out_type", "]", "x_j_end", "=", "self", ".", "random_state", ".", "choice", "(", "p_j_sub", ")", "x_j_begin", "=", "x_j_end", "arity_sum", "=", "p_j", "[", "x_j_end", "]", ".", "arity", "[", "p_j", "[", "x_j_end", "]", ".", "in_type", "]", "# i = 0", "while", "(", "arity_sum", ">", "0", ")", ":", "#and i < 1000:", "if", "x_j_begin", "==", "0", ":", "print", "(", "\"arity_sum:\"", ",", "arity_sum", ",", "\"x_j_begin:\"", ",", "x_j_begin", ",", "\"x_j_end:\"", ",", "x_j_end", ")", "print", "(", "\"p_j:\"", ",", "p_j", ")", "x_j_begin", "-=", "1", "arity_sum", "+=", "p_j", "[", "x_j_begin", "]", ".", "arity", "[", "p_j", "[", "x_j_begin", "]", ".", "in_type", "]", "-", "1", "# i += 1", "# if i == 1000:", "# print(\"in variation\")", "# pdb.set_trace()", "#swap subtrees", "tmpi", "=", "p_i", "[", ":", "]", "tmpj", "=", "p_j", "[", ":", "]", "tmpi", "[", "x_i_begin", ":", "x_i_end", "+", "1", ":", "]", ",", "tmpj", "[", "x_j_begin", ":", "x_j_end", "+", "1", ":", "]", "=", "tmpj", "[", "x_j_begin", ":", "x_j_end", "+", "1", ":", "]", ",", "tmpi", "[", "x_i_begin", ":", "x_i_end", "+", "1", ":", "]", "if", "not", "self", ".", "is_valid_program", "(", "p_i", ")", "or", "not", "self", ".", "is_valid_program", "(", "p_j", ")", ":", "# pdb.set_trace()", "print", "(", "\"parent 1:\"", ",", "p_i", ",", "\"x_i_begin:\"", ",", "x_i_begin", ",", "\"x_i_end:\"", ",", "x_i_end", ")", "print", "(", "\"parent 2:\"", ",", "p_j", ",", "\"x_j_begin:\"", ",", "x_j_begin", ",", "\"x_j_end:\"", ",", "x_j_end", ")", "print", "(", "\"child 1:\"", ",", "tmpi", ")", "print", "(", "\"child 2:\"", ",", "tmpj", ")", "raise", "ValueError", "(", "'Crossover produced an invalid program.'", ")", "# size check, then assignment", "if", "len", "(", "tmpi", ")", "<=", "2", "**", "max_depth", "-", "1", ":", "p_i", "[", ":", "]", "=", "tmpi", "if", "len", "(", "tmpj", ")", "<=", "2", "**", "max_depth", "-", "1", ":", "p_j", 
"[", ":", "]", "=", "tmpj" ]
subtree-like swap crossover between programs p_i and p_j.
[ "subtree", "-", "like", "swap", "crossover", "between", "programs", "p_i", "and", "p_j", "." ]
train
https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/variation.py#L111-L171
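A standalone sketch of the arity walk cross uses to find where a subtree begins in a stack (postfix) program; subtree_begin is an illustrative name, assuming each node's effective arity is known:

def subtree_begin(arities, end):
    """Given postfix/stack arities and the index of a subtree's root,
    walk left until the subtree's argument demand is satisfied."""
    begin, need = end, arities[end]
    while need > 0:
        begin -= 1
        need += arities[begin] - 1   # node supplies 1 value, consumes arity
    return begin

# program [x, y, +, z, *] in stack order: (x + y) * z
arities = [0, 0, 2, 0, 2]
print(subtree_begin(arities, 2))   # 0 -> the '+' subtree spans [0, 2]
print(subtree_begin(arities, 4))   # 0 -> the '*' root spans the whole program
print(subtree_begin(arities, 3))   # 3 -> the terminal 'z' is its own subtree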