code
stringlengths 1
18.2k
|
|---|
def convert_numpy_type(cls, dtype):
    """Map a numpy dtype to the corresponding Column datatype.

    Only the common dtypes are handled; anything else raises TypeError.
    Implemented as a plain function so callers stay decoupled from numpy.
    """
    dtype_map = {
        'int64': cls.DATATYPE_INTEGER64,
        'float64': cls.DATATYPE_FLOAT,
        # Hack. Pandas makes strings into object.
        'object': cls.DATATYPE_TEXT,
    }
    converted = dtype_map.get(dtype.name)
    if not converted:
        raise TypeError(
            "Failed to convert numpy type: '{}' ".format(
                dtype.name))
    return converted
|
def dict(self):
    """A dict that holds key/values for all of the properties in the object.

    :return: dict of mapped attribute name -> value, plus ``schema_type``
        and any extra keys copied up from ``self.data``.
    """
    # Collect all mapped SQLAlchemy attributes, skipping relationship and
    # bookkeeping fields that should not appear in the flat representation.
    d = {p.key: getattr(self, p.key) for p in self.__mapper__.attrs
         if p.key not in ('table', 'stats', '_codes', 'data')}
    if not d:
        # No mapped attributes at all: surface the raw instance dict to
        # aid debugging.
        raise Exception(self.__dict__)
    d['schema_type'] = self.schema_type
    if self.data:
        # Copy data fields into top level dict, but don't overwrite existing values.
        for k, v in six.iteritems(self.data):
            if k not in d and k not in ('table', 'stats', '_codes', 'data'):
                d[k] = v
    return d
|
def nonull_dict(self):
    """Like dict, but does not hold any null values.

    :return: dict with falsy values and the '_codes' key filtered out.
    """
    filtered = {}
    for key, value in six.iteritems(self.dict):
        if value and key != '_codes':
            filtered[key] = value
    return filtered
|
def mangle_name(name):
    """Mangle a column name to a standard form, removing illegal characters.

    Non-word characters are replaced with underscores, runs of underscores
    are collapsed, the result is lowercased and trailing underscores are
    stripped.

    :param name: the raw column name (must be a string)
    :return: the mangled name
    :raises TypeError: if ``name`` is not a string-like object
    """
    import re
    try:
        # Raw strings: '[^\w_]' as a plain literal is an invalid escape
        # sequence on Python 3 (DeprecationWarning, SyntaxWarning in 3.12+).
        return re.sub(r'_+', '_', re.sub(r'[^\w_]', '_', name).lower()).rstrip('_')
    except TypeError:
        raise TypeError(
            'Trying to mangle name with invalid type of: ' + str(type(name)))
|
def reverse_code_map(self):
    """Return a map from a code (usually a string) to the shorter numeric value."""
    mapping = {}
    for code in self.codes:
        mapping[code.value] = code.ikey if code.ikey else code.key
    return mapping
|
def expanded_transform(self):
    """Expands the transform string into segments.

    :return: list of segment dicts; the first segment carries this column's
        value-type class as its datatype, and every segment references this
        column.
    """
    segments = self._expand_transform(self.transform)
    if segments:
        # The first segment performs the datatype conversion for the column.
        segments[0]['datatype'] = self.valuetype_class
        for s in segments:
            s['column'] = self
    else:
        # No transform: synthesize a single pass-through segment.
        segments = [self.make_xform_seg(datatype=self.valuetype_class, column=self)]
    # If we want to add the find datatype cast to a transform.
    #segments.append(self.make_xform_seg(transforms=["cast_"+self.datatype], column=self))
    return segments
|
def before_insert(mapper, conn, target): """event.listen method for Sqlalchemy to set the seqience_id for this object and create an ObjectNumber value for the id_""" # from identity import ObjectNumber # assert not target.fk_vid or not ObjectNumber.parse(target.fk_vid).revision if target.sequence_id is None: from ambry.orm.exc import DatabaseError raise DatabaseError('Must have sequence_id before insertion') # Check that the id column is always sequence id 1 assert (target.name == 'id') == (target.sequence_id == 1), (target.name, target.sequence_id) Column.before_update(mapper, conn, target)
|
def before_update(mapper, conn, target):
    """Set the column id number based on the table number and the sequence id
    for the column.

    :param mapper: SQLAlchemy mapper (unused here, required by the event API)
    :param conn: database connection (unused here, required by the event API)
    :param target: the Column instance being flushed
    """
    # A column must declare either a datatype or a valuetype before flush.
    assert target.datatype or target.valuetype
    target.name = Column.mangle_name(target.name)
    Column.update_number(target)
|
def spawnProcess(processProtocol, executable, args=(), env=None, path=None,
                 uid=None, gid=None, usePTY=0, packages=()):
    """Launch a process with a particular Python environment.

    All arguments as to reactor.spawnProcess(), except for the addition of
    an optional packages iterable. This should be of strings naming packages
    the subprocess is to be able to import.

    :param env: mapping of environment variables; ``None`` (the default)
        means an empty environment. The old ``env={}`` default was a shared
        mutable object; ``None`` avoids cross-call aliasing.
    """
    # Copy so the caller's mapping is never mutated.
    env = {} if env is None else dict(env)
    pythonpath = []
    for pkg in packages:
        p = os.path.split(imp.find_module(pkg)[1])[0]
        if p.startswith(os.path.join(sys.prefix, 'lib')):
            # Anything under the installation prefix is importable already.
            continue
        pythonpath.append(p)
    pythonpath = list(set(pythonpath))
    pythonpath.extend(env.get('PYTHONPATH', '').split(os.pathsep))
    env['PYTHONPATH'] = os.pathsep.join(pythonpath)
    return reactor.spawnProcess(processProtocol, executable, args, env,
                                path, uid, gid, usePTY)
|
def spawnPythonProcess(processProtocol, args=(), env=None, path=None,
                       uid=None, gid=None, usePTY=0, packages=()):
    """Launch a Python process.

    All arguments as to spawnProcess(), except the executable argument is
    omitted (sys.executable is used).

    :param env: mapping of environment variables; ``None`` (the default)
        means an empty environment. The old ``env={}`` default was a shared
        mutable object; ``None`` avoids cross-call aliasing.
    """
    if env is None:
        env = {}
    return spawnProcess(processProtocol, sys.executable, args, env,
                        path, uid, gid, usePTY, packages)
|
def _runable_for_event(f, tag, stage): """Loot at the event property for a function to see if it should be run at this stage. """ if not hasattr(f, '__ambry_event__'): return False f_tag, f_stage = f.__ambry_event__ if stage is None: stage = 0 if tag != f_tag or stage != f_stage: return False return True
|
def load_obj_from_path(import_path, prefix=None, ld=None):
    """Import a python object from an import path.

    `import_path` - a python import path. For instance:
        mypackage.module.func or mypackage.module.class
    `prefix` (str) - a value to prepend to the import path if it isn't
        already there. For instance:
            load_obj_from_path('module.func', prefix='mypackage')
        is the same as load_obj_from_path('mypackage.module.func')
    `ld` (dict) key:value data to pass to the logger if an error occurs.
        Defaults to an empty dict (the old ``ld=dict()`` default was a
        shared mutable object).
    """
    if ld is None:
        ld = {}
    if prefix and not import_path.startswith(prefix):
        import_path = '.'.join([prefix, import_path])
    log.debug(
        'attempting to load a python object from an import path',
        extra=dict(import_path=import_path, **ld))
    try:
        # yay, we found a module. return it
        return importlib.import_module(import_path)
    except ImportError:
        # Not importable as a module; fall through and treat the last path
        # component as an attribute of the containing module. (The old bare
        # `except:` also swallowed unrelated errors such as SyntaxError
        # raised while importing the module; those now propagate.)
        pass
    # try to extract an object from a module
    try:
        path, obj_name = import_path.rsplit('.', 1)
    except ValueError:
        log_raise(
            ("import path needs at least 1 period in your import path."
             " An example import path is something like: module.obj"),
            dict(import_path=import_path, **ld),
            InvalidImportPath)
    try:
        mod = importlib.import_module(path)
    except ImportError:
        # Maybe the prefix was doubled up; retry without it.
        newpath = path.replace(prefix, '', 1).lstrip('.')
        log.debug(
            "Could not load import path. Trying a different one",
            extra=dict(oldpath=path, newpath=newpath))
        path = newpath
        mod = importlib.import_module(path)
    try:
        obj = getattr(mod, obj_name)
    except AttributeError:
        log_raise(
            ("object does not exist in given module."
             " Your import path is not"
             " properly defined because the given `obj_name` does not exist"),
            dict(import_path=path, obj_name=obj_name, **ld),
            InvalidImportPath)
    return obj
|
def artifact_quality(self, artifact_quality):
    """
    Sets the artifact_quality of this ArtifactRest.

    :param artifact_quality: The artifact_quality of this ArtifactRest.
    :type: str
    :raises ValueError: if the value is not one of the allowed qualities
    """
    allowed_values = [
        "NEW", "VERIFIED", "TESTED", "DEPRECATED",
        "BLACKLISTED", "DELETED", "TEMPORARY",
    ]
    if artifact_quality in allowed_values:
        self._artifact_quality = artifact_quality
    else:
        raise ValueError(
            "Invalid value for `artifact_quality` ({0}), must be one of {1}"
            .format(artifact_quality, allowed_values)
        )
|
def _get_sqlite_columns(connection, table): """ Returns list of tuple containg columns of the table. Args: connection: sqlalchemy connection to sqlite database. table (str): name of the table Returns: list of (name, datatype, position): where name is column name, datatype is python type of the column, position is ordinal position of the column. """ # TODO: Move to the sqlite wrapper. # TODO: Consider sqlalchemy mapping. SQL_TO_PYTHON_TYPES = { 'INT': int, 'INTEGER': int, 'TINYINT': int, 'SMALLINT': int, 'MEDIUMINT': int, 'BIGINT': int, 'UNSIGNED BIG INT': int, 'INT': int, 'INT8': int, 'NUMERIC': float, 'REAL': float, 'FLOAT': float, 'DOUBLE': float, 'BOOLEAN': bool, 'CHARACTER': str, 'VARCHAR': str, 'TEXT': str } query = 'PRAGMA table_info(\'{}\');' result = connection.execute(query.format(table)) ret = [] for row in result: position = row[0] + 1 name = row[1] datatype =
|
row[2] try: datatype = SQL_TO_PYTHON_TYPES[datatype] except KeyError: raise Exception( 'Do not know how to convert {} sql datatype to python data type.' .format(datatype)) ret.append((name, datatype, position)) return ret
|
def partition(self):
    """For partition urltypes, return the partition specified by the ref."""
    if self.urltype == 'partition':
        return self._bundle.library.partition(self.url)
    return None
|
def datafile(self):
    """Return an MPR datafile from the /ingest directory of the build filesystem."""
    from ambry_sources import MPRowsFile
    if self._datafile is not None:
        return self._datafile
    if self.urltype == 'partition':
        self._datafile = self.partition.datafile
    else:
        self._datafile = MPRowsFile(self._bundle.build_ingest_fs, self.name)
    return self._datafile
|
def spec(self):
    """Return a SourceSpec to describe this source."""
    from ambry_sources.sources import SourceSpec
    kwargs = self.dict
    # Will get the URL twice; once as ref and once as URL, but the ref is ignored
    kwargs['url'] = self.url
    return SourceSpec(**kwargs)
|
def account(self):
    """Return an account record, based on the host in the url."""
    from ambry.util import parse_url_to_dict
    url_parts = parse_url_to_dict(self.url)
    return self._bundle.library.account(url_parts['netloc'])
|
def update_table(self, unknown_type='str'):
    """Update the source table from the datafile.

    For partition sources the columns are copied from the partition's table;
    otherwise the columns come from the ingested datafile's reader, with
    duplicate header names disambiguated by appending the column position.

    :param unknown_type: datatype to substitute when the intuiter could not
        resolve a column's type.
    """
    from ambry_sources.intuit import TypeIntuiter
    st = self.source_table
    if self.reftype == 'partition':
        # Copy the column layout straight from the referenced partition.
        for c in self.partition.table.columns:
            st.add_column(c.sequence_id, source_header=c.name, dest_header=c.name, datatype=c.datatype, description = c.description)
    elif self.datafile.exists:
        with self.datafile.reader as r:
            names = set()
            for col in r.columns:
                name = col['name']
                if name in names:
                    # Handle duplicate names.
                    name = name+"_"+str(col['pos'])
                names.add(name)
                c = st.column(name)
                # NOTE(review): `dt` is computed here but the calls below use
                # col['resolved_type'] directly -- possibly a latent bug;
                # confirm intent before changing.
                dt = col['resolved_type'] if col['resolved_type'] != 'unknown' else unknown_type
                if c:
                    # Column already exists: widen its type if needed.
                    c.datatype = TypeIntuiter.promote_type(c.datatype, col['resolved_type'])
                else:
                    c = st.add_column(col['pos'], source_header=name, dest_header=name, datatype=col['resolved_type'], description=col['description'], has_codes=col['has_codes'])
|
def update_spec(self):
    """Update the source specification with information from the row intuiter,
    but only if the spec values are not already set.
    """
    if not self.datafile.exists:
        return
    with self.datafile.reader as r:
        info = r.info
        self.header_lines = info['header_rows']
        self.comment_lines = info['comment_rows']
        self.start_line = info['data_start_row']
        self.end_line = info['data_end_row']
|
def dict(self):
    """A dict that holds key/values for all of the properties in the object.

    :return: OrderedDict of property name -> value, excluding internal and
        relationship keys.
    """
    skip = frozenset(('_source_table', '_dest_table', 'd_vid', 't_vid',
                      'st_id', 'dataset', 'hash', 'process_records'))
    out = OrderedDict()
    for prop in self.properties:
        if prop not in skip:
            out[prop] = getattr(self, prop)
    return out
|
def get_runconfig(path=None, root=None, db=None):
    """Load the main configuration files and accounts file.

    Deprecated. Use load().

    :param path: optional explicit path to the config directory
    :param root: optional override for config.library.filesystem_root
    :param db: optional override for config.library.database
    :return: the loaded configuration
    """
    return load(path, root=root, db=db)
|
def load(path=None, root=None, db=None, load_user=True):
    "Load all of the config files. "
    config = load_config(path, load_user=load_user)

    # The external remotes file overwrites the main config.
    remotes = load_remotes(path, load_user=load_user)
    if remotes:
        if 'remotes' not in config:
            config.remotes = AttrDict()
        for key, value in remotes.remotes.items():
            config.remotes[key] = value

    # The external accounts file overwrites the main config.
    accounts = load_accounts(path, load_user=load_user)
    if accounts:
        if 'accounts' not in config:
            config.accounts = AttrDict()
        for key, value in accounts.accounts.items():
            config.accounts[key] = value

    update_config(config)

    # Explicit argument overrides.
    if root:
        config.library.filesystem_root = root
    if db:
        config.library.database = db

    return config
|
def find_config_file(file_name, extra_path=None, load_user=True):
    """
    Find a configuration file in one of these directories, tried in this order:

    - A path provided as an argument
    - A path specified by the AMBRY_CONFIG environmenal variable
    - ambry in a path specified by the VIRTUAL_ENV environmental variable
    - ~/ambry
    - /etc/ambry

    :param file_name:
    :param extra_path:
    :param load_user:
    :return:
    """
    paths = []
    if extra_path is not None:
        paths.append(extra_path)

    env_config = os.getenv(ENVAR.CONFIG)
    if env_config:
        paths.append(env_config)

    virt = os.getenv(ENVAR.VIRT)
    if virt:
        paths.append(os.path.join(virt, USER_DIR))

    if load_user:
        paths.append(os.path.expanduser('~/' + USER_DIR))

    paths.append(ROOT_DIR)

    for directory in paths:
        candidate = os.path.join(directory, file_name)
        if os.path.isdir(directory) and os.path.exists(candidate):
            return candidate

    raise ConfigurationError(
        "Failed to find configuration file '{}'. Looked for : {} ".format(file_name, paths))
|
def load_accounts(extra_path=None, load_user=True):
    """Load the yaml account files.

    :param extra_path: extra directory to search for the accounts file
    :param load_user: if True, also search the user's config directory
    :return: An `AttrDict`, or None when no accounts file exists
    """
    from os.path import getmtime

    try:
        accts_file = find_config_file(ACCOUNTS_FILE, extra_path=extra_path, load_user=load_user)
    except ConfigurationError:
        accts_file = None

    if accts_file is not None and os.path.exists(accts_file):
        config = AttrDict()
        config.update_yaml(accts_file)
        if 'accounts' not in config:
            # BUG FIX: this previously initialized config.remotes (a
            # copy/paste from load_remotes), leaving config.accounts unset
            # so the next statement raised.
            config.accounts = AttrDict()
        config.accounts.loaded = [accts_file, getmtime(accts_file)]
        return config
    else:
        return None
|
def load_remotes(extra_path=None, load_user=True):
    """Load the YAML remotes file, which sort of combines the Accounts file
    with part of the remotes sections from the main config.

    :return: An `AttrDict`, or None when no remotes file exists
    """
    from os.path import getmtime

    try:
        remotes_file = find_config_file(REMOTES_FILE, extra_path=extra_path, load_user=load_user)
    except ConfigurationError:
        remotes_file = None

    if remotes_file is None or not os.path.exists(remotes_file):
        return None

    config = AttrDict()
    config.update_yaml(remotes_file)
    if 'remotes' not in config:
        config.remotes = AttrDict()
    config.remotes.loaded = [remotes_file, getmtime(remotes_file)]
    return config
|
def load_config(path=None, load_user=True):
    """
    Load configuration information from a config directory. Tries directories in this order:

    - A path provided as an argument
    - A path specified by the AMBRY_CONFIG environmenal variable
    - ambry in a path specified by the VIRTUAL_ENV environmental variable
    - /etc/ambry
    - ~/ambry

    :param path: An iterable of additional paths to load.
    :param load_user: if True, also search the user's config directory.
    :return: An `AttrDict` of configuration information
    """
    from os.path import getmtime

    config = AttrDict()

    config_file = find_config_file(CONFIG_FILE, extra_path=path or ROOT_DIR, load_user=load_user)

    if os.path.exists(config_file):
        config.update_yaml(config_file)
        config.loaded = [config_file, getmtime(config_file)]
    else:
        # Probably never get here, since the find_config_dir would have thrown a ConfigurationError
        config = AttrDict()
        config.loaded = [None, 0]

    return config
|
def update_config(config, use_environ=True):
    """Update the configuration from environmental variables.

    Updates:
    - config.library.database from the AMBRY_DB environmental variable.
    - config.library.filesystem_root from the AMBRY_ROOT environmental variable.
    - config.accounts.password from the AMBRY_PASSWORD environmental variable.

    :param config: An `attrDict` of configuration information.
    :raises ConfigurationError: if a required configuration item is missing.
    """
    from ambry.util import select_from_url

    # Ensure the top-level sections exist before touching their members.
    try:
        _ = config.library
    except KeyError:
        config.library = AttrDict()
    try:
        _ = config.filesystem
    except KeyError:
        config.filesystem = AttrDict()
    try:
        _ = config.accounts
    except KeyError:
        config.accounts = AttrDict()
    if not config.accounts.get('loaded'):
        config.accounts.loaded = [None, 0]
    try:
        _ = config.accounts.password
    except KeyError:
        config.accounts.password = None
    try:
        _ = config.remotes
    except KeyError:
        config.remotes = AttrDict()  # Default empty
    if not config.remotes.get('loaded'):
        config.remotes.loaded = [None, 0]

    # Environment variables take precedence over the file values.
    if use_environ:
        if os.getenv(ENVAR.DB):
            config.library.database = os.getenv(ENVAR.DB)
        if os.getenv(ENVAR.ROOT):
            config.library.filesystem_root = os.getenv(ENVAR.ROOT)
        if os.getenv(ENVAR.PASSWORD):
            config.accounts.password = os.getenv(ENVAR.PASSWORD)

    # Move any remotes that were configured under the library to the remotes section
    try:
        for k, v in config.library.remotes.items():
            config.remotes[k] = {'url': v}
        del config.library['remotes']
    except KeyError as e:
        pass

    # Then move any of the account entries that are linked to remotes into the remotes.
    try:
        for k, v in config.remotes.items():
            if 'url' in v:
                host = select_from_url(v['url'], 'netloc')
                if host in config.accounts:
                    config.remotes[k].update(config.accounts[host])
                    del config.accounts[host]
    except KeyError:
        pass

    # Set a default for the library database
    try:
        _ = config.library.database
    except KeyError:
        config.library.database = 'sqlite:///{root}/library.db'

    # Raise exceptions on missing items
    checks = [
        'config.library.filesystem_root',
    ]
    for check in checks:
        try:
            # The check strings are attribute paths on the local `config`,
            # evaluated so a missing item raises KeyError.
            _ = eval(check)
        except KeyError:
            raise ConfigurationError("Configuration is missing '{}'; loaded from {} "
                                     .format(check, config.loaded[0]))

    _, config.library.database = normalize_dsn_or_dict(config.library.database)

    # Fill in filesystem defaults without overwriting configured values.
    for k, v in filesystem_defaults.items():
        if k not in config.filesystem:
            config.filesystem[k] = v

    # The overall modification time is the newest of the three source files.
    config.modtime = max(config.loaded[1], config.remotes.loaded[1], config.accounts.loaded[1])
|
def normalize_dsn_or_dict(d):
    """Clean up a database DSN, or dict version of a DSN, returning both the
    cleaned DSN and dict version.

    :param d: either a DSN string or a dict/AttrDict describing a database
    :return: (config dict, dsn string)
    :raises ConfigurationError: for unsupported input types or malformed
        sqlite DSNs
    """
    if isinstance(d, dict):
        try:
            # Convert from an AttrDict to a real dict
            d = d.to_dict()
        except AttributeError:
            pass  # Already a real dict
        config = d
        dsn = None
    elif isinstance(d, string_types):
        config = None
        dsn = d
    else:
        raise ConfigurationError("Can't deal with database config '{}' type '{}' ".format(d, type(d)))

    if dsn:
        # Parse a DSN string into its dict form.
        if dsn.startswith('sqlite') or dsn.startswith('spatialite'):
            driver, path = dsn.split(':', 1)
            # The first two characters after the scheme must be '//'.
            slashes, path = path[:2], path[2:]
            if slashes != '//':
                raise ConfigurationError("Sqlite DSNs must start with at least 2 slashes")
            if len(path) == 1 and path[0] == '/':
                raise ConfigurationError("Sqlite DSNs can't have only 3 slashes in path")
            if len(path) > 1 and path[0] != '/':
                raise ConfigurationError("Sqlite DSNs with a path must have 3 or 4 slashes.")
            path = path[1:]
            config = dict(
                server=None,
                username=None,
                password=None,
                driver=driver,
                dbname=path)
        else:
            d = parse_url_to_dict(dsn)
            config = dict(
                server=d['hostname'],
                dbname=d['path'].strip('/'),
                driver=d['scheme'],
                password=d.get('password', None),
                username=d.get('username', None))
    else:
        # Build a DSN string from the dict form.
        up = d.get('username', '') or ''
        if d.get('password'):
            up += ':' + d.get('password', '')
        if up:
            up += '@'
        if up and not d.get('server'):
            raise ConfigurationError("Can't construct a DSN with a username or password without a hostname")
        host_part = up + d.get('server', '') if d.get('server') else ''
        if d.get('dbname', False):
            path_part = '/' + d.get('dbname')
            # if d['driver'] in ('sqlite3', 'sqlite', 'spatialite'):
            #     path_part = '/' + path_part
        else:
            path_part = ''
            # w/ no dbname, Sqlite should use memory, which required 2 slash. Rel dir is 3, abs dir is 4
        dsn = '{}://{}{}'.format(d['driver'], host_part, path_part)

    return config, dsn
|
def create_new_product_version(self, **kwargs):
    """
    Create a new ProductVersion for a Product.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.create_new_product_version(callback=callback_function)

    :param callback function: The callback function for asynchronous request. (optional)
    :param ProductVersionRest body:
    :return: ProductVersionSingleton
             If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        # Asynchronous path: the delegate returns the request thread.
        return self.create_new_product_version_with_http_info(**kwargs)
    # Synchronous path: return the response data.
    data = self.create_new_product_version_with_http_info(**kwargs)
    return data
|
def get_build_configuration_sets(self, id, **kwargs):
    """
    Gets build configuration sets associated with a product version.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_build_configuration_sets(id, callback=callback_function)

    :param callback function: The callback function for asynchronous request. (optional)
    :param int id: Product Version id (required)
    :param int page_index: Page Index
    :param int page_size: Pagination size
    :param str sort: Sorting RSQL
    :param str q: RSQL Query
    :return: BuildConfigurationSetPage
             If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        # Asynchronous path: the delegate returns the request thread.
        return self.get_build_configuration_sets_with_http_info(id, **kwargs)
    # Synchronous path: return the response data.
    data = self.get_build_configuration_sets_with_http_info(id, **kwargs)
    return data
|
def update_build_configuration_sets(self, id, **kwargs):
    """
    Update build configuration sets for a product version.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.update_build_configuration_sets(id, callback=callback_function)

    :param callback function: The callback function for asynchronous request. (optional)
    :param int id: Product Version id (required)
    :param list[BuildConfigurationSetRest] body:
    :return: None
             If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        # Asynchronous path: the delegate returns the request thread.
        return self.update_build_configuration_sets_with_http_info(id, **kwargs)
    # Synchronous path: return the response data.
    data = self.update_build_configuration_sets_with_http_info(id, **kwargs)
    return data
|
def execute_command(cmd, execute, echo=True):
    """Execute a command in shell or just print it if execute is False."""
    if not execute:
        # Dry-run: show the command and report success.
        print(cmd)
        return 0
    if echo:
        print("Executing: " + cmd)
    return os.system(cmd)
|
def set_log_level(level):
    """Sets the desired log level.

    Configures the root logger with a console handler and a file handler
    ('builder.log') at the requested level. For any level other than
    'debug-all', a few chatty third-party packages are quieted to WARNING.

    :param level: one of 'debug-all', 'debug', 'info', 'warning', 'error',
        'critical' (case-insensitive); anything else falls back to DEBUG
        and logs a warning.
    """
    lLevel = level.lower()
    unrecognized = False
    if (lLevel == 'debug-all'):
        loglevel = logging.DEBUG
    elif (lLevel == 'debug'):
        loglevel = logging.DEBUG
    elif (lLevel == 'info'):
        loglevel = logging.INFO
    elif (lLevel == 'warning'):
        loglevel = logging.WARNING
    elif (lLevel == 'error'):
        loglevel = logging.ERROR
    elif (lLevel == 'critical'):
        loglevel = logging.CRITICAL
    else:
        # Unknown level string: default to DEBUG and warn the user below.
        loglevel = logging.DEBUG
        unrecognized = True

    formatter = logging.Formatter('%(asctime)s %(levelname)s %(filename)s:%(lineno)d/%(funcName)s: %(message)s')

    console = logging.StreamHandler()
    console.setLevel(loglevel)
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)
    logging.getLogger('').setLevel(loglevel)
    #logging.basicConfig(format='%(asctime)s %(levelname)s %(filename)s:%(lineno)d/%(funcName)s: %(message)s', level=loglevel)

    if lLevel != 'debug-all':
        # lower the loglevel for enumerated packages to avoid unwanted messages
        packagesWarning = ["requests.packages.urllib3", "urllib3", "requests_kerberos", "jenkinsapi"]
        for package in packagesWarning:
            logging.debug("Setting loglevel for %s to WARNING.", package)
            logger = logging.getLogger(package)
            logger.setLevel(logging.WARNING)

    if unrecognized:
        logging.warning('Unrecognized log level: %s Log level set to debug', level)

    #TODO ref: use external log config
    fh = logging.FileHandler('builder.log')
    fh.setLevel(loglevel)
    fh.setFormatter(formatter)
    logging.getLogger('').addHandler(fh)
|
def parse_conf_args(argv):
    """Parse command line options into {section: (option, key)} which can be
    used for overlaying on top of config.

    :param argv: list of argumets to be parsed
    :return: Dictionary in the following format: {section: (option, key)}"""
    parsed = {}
    for rarg in argv:
        if not re.match("^--.*", rarg):
            raise Exception(
                "Command option '%s' not recognized." % rarg)
        arg = rarg.replace('--', '', 1)
        # Split once on '=' into key and value.
        fsplit = arg.split('=', 1)
        if len(fsplit) != 2:
            raise Exception(
                "Command option '%s' not recognized." % rarg)
        rkey, value = fsplit
        # Split the key once on '.' into section and option.
        ssplit = rkey.split('.', 1)
        if len(ssplit) != 2 or not ssplit[1]:
            raise Exception(
                "Command option '%s' not recognized." % rarg)
        section, option = ssplit
        parsed[section] = (option, value)
    return parsed
|
def required(field):
    """Decorator that checks if return value is set, if not, raises exception."""
    def wrap(f):
        def wrappedf(*args):
            result = f(*args)
            if result is not None and result != "":
                return result
            # Empty or missing value: the config option is mandatory.
            raise Exception(
                "Config option '%s' is required." % field)
        return wrappedf
    return wrap
|
def split_unescape(s, delim, escape='\\', unescape=True):
    """
    >>> split_unescape('foo,bar', ',')
    ['foo', 'bar']
    >>> split_unescape('foo$,bar', ',', '$')
    ['foo,bar']
    >>> split_unescape('foo$$,bar', ',', '$', unescape=True)
    ['foo$', 'bar']
    >>> split_unescape('foo$$,bar', ',', '$', unescape=False)
    ['foo$$', 'bar']
    >>> split_unescape('foo$', ',', '$', unescape=True)
    ['foo$']
    """
    pieces = []
    buf = []
    chars = iter(s)
    for ch in chars:
        if ch == escape:
            try:
                nxt = next(chars)
            except StopIteration:
                # Trailing escape with nothing after it: keep the escape
                # character itself when unescaping.
                if unescape:
                    buf.append(escape)
            else:
                # The next character is escaped; keep the escape char only
                # when not unescaping.
                if not unescape:
                    buf.append(escape)
                buf.append(nxt)
        elif ch == delim:
            # split! (add current to the list and reset it)
            pieces.append(''.join(buf))
            buf = []
        else:
            buf.append(ch)
    pieces.append(''.join(buf))
    return pieces
|
def render(self, template_name, variables=None):
    """Render a template with the passed variables."""
    vars_map = {} if variables is None else variables
    template = self._engine.get_template(template_name)
    return template.render(**vars_map)
|
def render_source(self, source, variables=None):
    """Render a source with the passed variables."""
    vars_map = {} if variables is None else variables
    template = self._engine.from_string(source)
    return template.render(**vars_map)
|
def construct_re(url_template, match_whole_str=False, converters=None,
                 default_converter='string', anonymous=False):
    '''
    url_template - str or unicode representing template

    Constructed pattern expects urlencoded string!

    returns (compiled re pattern,
             dict {url param name: [converter name, converter args (str)]},
             list of (variable name, converter name, converter args name))

    If anonymous=True is set, regexp will be compiled without names of variables.
    This is handy for example, if you want to dump an url map to JSON.
    '''
    # needed for reverse url building (or not needed?)
    builder_params = []
    # found url params and their converters
    url_params = {}
    result = r'^'
    parts = _split_pattern.split(url_template)
    for i, part in enumerate(parts):
        is_url_pattern = _static_url_pattern.match(part)
        if is_url_pattern:
            #NOTE: right order:
            # - make part str if it was unicode
            # - urlquote part
            # - escape all specific for re chars in part
            result += re.escape(urlquote(part))
            builder_params.append(part)
            continue
        is_converter = _converter_pattern.match(part)
        if is_converter:
            groups = is_converter.groupdict()
            # Fall back to the default converter when none is named.
            converter_name = groups['converter'] or default_converter
            conv_object = init_converter(converters[converter_name], groups['args'])
            variable = groups['variable']
            builder_params.append((variable, conv_object))
            url_params[variable] = conv_object
            if anonymous:
                # No named groups: pattern is serializable (e.g. to JSON).
                result += conv_object.regex
            else:
                result += '(?P<{}>{})'.format(variable, conv_object.regex)
            continue
        # Neither static text nor a converter expression: bad template.
        raise ValueError('Incorrect url template {!r}'.format(url_template))
    if match_whole_str:
        result += '$'
    return re.compile(result), url_params, builder_params
|
def match(self, path, **kw):
    '''
    path - str (urlencoded)

    Returns (matched string, converted kwargs) on success, or (None, {})
    when the pattern does not match or a parameter fails conversion.
    '''
    m = self._pattern.match(path)
    if m:
        kwargs = m.groupdict()
        # convert params
        for url_arg_name, value_urlencoded in kwargs.items():
            conv_obj = self._url_params[url_arg_name]
            unicode_value = unquote(value_urlencoded)
            if isinstance(unicode_value, six.binary_type):
                # XXX ?? (Python 2: unquote may return bytes; decode them.)
                unicode_value = unicode_value.decode('utf-8', 'replace')
            try:
                kwargs[url_arg_name] = conv_obj.to_python(unicode_value, **kw)
            except ConvertError as err:
                # Conversion failure means the whole path does not match.
                logger.debug('ConvertError in parameter "%s" '
                             'by %r, value "%s"',
                             url_arg_name, err.converter.__class__, err.value)
                return None, {}
        return m.group(), kwargs
    return None, {}
|
def export(bundle, force=False, force_restricted=False):
    """Exports bundle to ckan instance.

    Args:
        bundle (ambry.bundle.Bundle):
        force (bool, optional): if True, ignore existance error and continue to export.
        force_restricted (bool, optional): if True, then export restricted bundles as
            private (for debugging purposes).

    Raises:
        EnvironmentError: if ckan credentials are missing or invalid.
        UnpublishedAccessError: if dataset has unpublished access - one from
            ('internal', 'test', 'controlled', 'restricted', 'census').
    """
    if not ckan:
        raise EnvironmentError(MISSING_CREDENTIALS_MSG)

    # publish dataset.
    try:
        ckan.action.package_create(**_convert_bundle(bundle))
    except ckanapi.ValidationError:
        if force:
            logger.warning(
                '{} dataset already exported, but new export forced. Continue to export dataset stuff.'
                .format(bundle.dataset))
        else:
            raise

    # set permissions.
    access = bundle.dataset.config.metadata.about.access
    if access == 'restricted' and force_restricted:
        access = 'private'
    assert access, 'CKAN publishing requires access level.'
    if access in ('internal', 'controlled', 'restricted', 'census'):
        # Never publish dataset with such access.
        raise UnpublishedAccessError(
            '{} dataset can not be published because of {} access.'
            .format(bundle.dataset.vid, bundle.dataset.config.metadata.about.access))
    elif access == 'public':
        # The default permission of the CKAN allows to edit and create dataset without logging in. But
        # admin of the certain CKAN instance can change default permissions.
        # http://docs.ckan.org/en/ckan-1.7/authorization.html#anonymous-edit-mode
        user_roles = [
            {'user': 'visitor', 'domain_object': bundle.dataset.vid.lower(), 'roles': ['editor']},
            {'user': 'logged_in', 'domain_object': bundle.dataset.vid.lower(), 'roles': ['editor']},
        ]
    elif access == 'registered':
        # Anonymous has no access, logged in users can read/edit.
        # http://docs.ckan.org/en/ckan-1.7/authorization.html#logged-in-edit-mode
        user_roles = [
            {'user': 'visitor', 'domain_object': bundle.dataset.vid.lower(), 'roles': []},
            {'user': 'logged_in', 'domain_object': bundle.dataset.vid.lower(), 'roles': ['editor']}
        ]
    elif access in ('private', 'licensed', 'test'):
        # Organization users can read/edit
        # http://docs.ckan.org/en/ckan-1.7/authorization.html#publisher-mode
        # disable access for anonymous and logged_in
        user_roles = [
            {'user': 'visitor', 'domain_object': bundle.dataset.vid.lower(), 'roles': []},
            {'user': 'logged_in', 'domain_object': bundle.dataset.vid.lower(), 'roles': []}
        ]
        # Grant editor to every member of the configured organization.
        organization_users = ckan.action.organization_show(id=CKAN_CONFIG.organization)['users']
        for user in organization_users:
            user_roles.append({
                'user': user['id'], 'domain_object': bundle.dataset.vid.lower(), 'roles': ['editor']}),
    for role in user_roles:
        # http://docs.ckan.org/en/ckan-2.4.1/api/#ckan.logic.action.update.user_role_update
        ckan.action.user_role_update(**role)
    # TODO: Using bulk update gives http500 error. Try later with new version.
    # http://docs.ckan.org/en/ckan-2.4.1/api/#ckan.logic.action.update.user_role_bulk_update - the same
    # ckan.action.user_role_bulk_update(user_roles=user_roles)

    # publish partitions
    for partition in bundle.partitions:
        ckan.action.resource_create(**_convert_partition(partition))

    # publish schema.csv
    ckan.action.resource_create(**_convert_schema(bundle))

    # publish external documentation
    for name, external in six.iteritems(bundle.dataset.config.metadata.external_documentation):
        ckan.action.resource_create(**_convert_external(bundle, name, external))
|
def is_exported(bundle):
    """Returns True if dataset is already exported to CKAN. Otherwise returns False."""
    if not ckan:
        raise EnvironmentError(MISSING_CREDENTIALS_MSG)
    query = {'q': 'name:{}'.format(bundle.dataset.vid.lower())}
    response = ckan.action.package_search(**query)
    return len(response['results']) > 0
|
def _convert_bundle(bundle):
    """Convert an ambry bundle to a dict ready to send to the CKAN API.

    Args:
        bundle (ambry.bundle.Bundle): bundle to convert.

    Returns:
        dict: payload for CKAN dataset creation. See
            http://docs.ckan.org/en/latest/api/#ckan.logic.action.create.package_create
    """
    meta = bundle.dataset.config.metadata  # shortcut for metadata

    # Use the bundle's documentation.md (if present) as the dataset notes.
    notes = ''
    for f in bundle.dataset.files:
        if f.path.endswith('documentation.md'):
            contents = f.unpacked_contents
            if isinstance(contents, six.binary_type):
                contents = contents.decode('utf-8')
            notes = json.dumps(contents)
            break

    return {
        'name': bundle.dataset.vid.lower(),
        'title': meta.about.title,
        'author': meta.contacts.wrangler.name,
        'author_email': meta.contacts.wrangler.email,
        'maintainer': meta.contacts.maintainer.name,
        'maintainer_email': meta.contacts.maintainer.email,
        'license_id': '',
        'notes': notes,
        'url': meta.identity.source,
        'version': bundle.dataset.version,
        'state': 'active',
        'owner_org': CKAN_CONFIG['organization'],
    }
|
def _convert_partition(partition):
    """Convert a partition to a resource dict ready to save to CKAN."""
    # http://docs.ckan.org/en/latest/api/#ckan.logic.action.create.resource_create
    # Serialize the partition rows into an in-memory CSV file.
    csvfile = six.StringIO()
    writer = unicodecsv.writer(csvfile)
    headers = partition.datafile.headers
    if headers:
        writer.writerow(headers)
    # NOTE(review): assumes every partition row exposes all header keys —
    # confirm against partition.datafile semantics.
    for row in partition:
        writer.writerow([row[h] for h in headers])
    csvfile.seek(0)

    return {
        'package_id': partition.dataset.vid.lower(),
        'url': 'http://example.com',
        'revision_id': '',
        'description': partition.description or '',
        'format': 'text/csv',
        'hash': '',
        'name': partition.name,
        'resource_type': '',
        'mimetype': 'text/csv',
        'mimetype_inner': '',
        'webstore_url': '',
        'cache_url': '',
        'upload': csvfile,
    }
|
def _convert_schema(bundle):
    """Convert the schema of the dataset to a resource dict ready to save to CKAN."""
    # http://docs.ckan.org/en/latest/api/#ckan.logic.action.create.resource_create
    # Find the schema.csv build file and wrap its contents in a file-like object.
    schema_csv = None
    for f in bundle.dataset.files:
        if f.path.endswith('schema.csv'):
            contents = f.unpacked_contents
            if isinstance(contents, six.binary_type):
                contents = contents.decode('utf-8')
            schema_csv = six.StringIO(contents)
            schema_csv.seek(0)
            break

    # NOTE(review): when no schema.csv exists, 'upload' is None — confirm CKAN
    # accepts that or whether the caller guarantees the file's presence.
    return {
        'package_id': bundle.dataset.vid.lower(),
        'url': 'http://example.com',
        'revision_id': '',
        'description': 'Schema of the dataset tables.',
        'format': 'text/csv',
        'hash': '',
        'name': 'schema',
        'upload': schema_csv,
    }
|
def _convert_external(bundle, name, external): """ Converts external documentation to resource dict ready to save to CKAN. """ # http://docs.ckan.org/en/latest/api/#ckan.logic.action.create.resource_create ret = { 'package_id': bundle.dataset.vid.lower(), 'url': external.url, 'description': external.description, 'name': name, } return ret
|
def create_build_configuration_process(repository, revision, **kwargs):
    """
    Create a new BuildConfiguration. BuildConfigurations represent the settings
    and configuration required to run a build of a specific version of the
    associated Project's source code. If a ProductVersion ID is provided, the
    BuildConfiguration will have access to artifacts which were produced for
    that version, but may not have been released yet.

    :return BPM Task ID of the new BuildConfiguration creation
    """
    # Default the optional collection arguments to empty lists.
    kwargs["dependency_ids"] = kwargs.get("dependency_ids") or []
    kwargs["build_configuration_set_ids"] = kwargs.get("build_configuration_set_ids") or []

    generic_parameters = kwargs.get("generic_parameters")
    if generic_parameters:
        # Generic parameters arrive as a literal dict string; parse safely.
        kwargs["generic_parameters"] = ast.literal_eval(generic_parameters)

    # Resolve the project / environment objects from their ids when absent.
    if not kwargs.get("project"):
        kwargs["project"] = pnc_api.projects.get_specific(kwargs.get("project_id")).content
    if not kwargs.get("environment"):
        kwargs["environment"] = pnc_api.environments.get_specific(
            kwargs.get("build_environment_id")).content

    build_configuration = create_build_conf_object(scm_revision=revision, **kwargs)

    repo_creation = swagger_client.RepositoryCreationUrlAutoRest()
    repo_creation.scm_url = repository
    repo_creation.build_configuration_rest = build_configuration

    response = utils.checked_api_call(
        pnc_api.bpm, 'start_r_creation_task_with_single_url', body=repo_creation)
    if response:
        return response
|
def create_repository_configuration(repository, no_sync=False):
    """
    Create a new RepositoryConfiguration. If the provided repository URL is for
    an external repository, it is cloned into an internal one.

    :return BPM Task ID of the new RepositoryConfiguration creation
    """
    repo = create_repository_configuration_raw(repository, no_sync)
    return utils.format_json(repo) if repo else None
|
def wait_for_repo_creation(task_id, retry=30):
    """Poll until the repository-creation BPM task finishes.

    :param task_id: id of the BPM task to poll
    :param retry: maximum number of polling attempts (10 seconds apart)
    :return: True on success, False on error or timeout
    """
    success_event_types = ("RC_CREATION_SUCCESS",)
    error_event_types = ("RC_REPO_CREATION_ERROR", "RC_REPO_CLONE_ERROR", "RC_CREATION_ERROR")
    while retry > 0:
        bpm_task = get_bpm_task_by_id(task_id)
        if contains_event_type(bpm_task.content.events, success_event_types):
            break
        if contains_event_type(bpm_task.content.events, error_event_types):
            logging.error("Creation of Repository Configuration failed")
            logging.error(bpm_task.content)
            return False
        # Lazy %-style args: the message is only built if this level is logged.
        logging.info("Waiting until Repository Configuration creation task %s finishes.", task_id)
        time.sleep(10)
        retry -= 1
    # retry is still > 0 iff we broke out of the loop on success.
    return retry > 0
|
def count(self):
    '''A count based on `count_field` and `format_args`.'''
    args = self.format_args
    # The count is unavailable when there are no args at all, or when the
    # args dict lacks the configured count field.
    missing = args is None or (isinstance(args, dict) and self.count_field not in args)
    if missing:
        raise TypeError("count is required")
    if isinstance(args, dict):
        return args[self.count_field]
    return args
|
def generate_sources_zip(milestone_id=None, output=None):
    """Generate a sources archive for the given milestone id.

    :return: 1 when input validation fails, otherwise None.
    """
    if is_input_valid(milestone_id, output):
        create_work_dir(output)
        download_sources_artifacts(milestone_id, output)
        create_zip(output)
        return None
    logging.error("invalid input")
    return 1
|
def get_repository_configuration(id):
    """Retrieve a specific RepositoryConfiguration."""
    response = utils.checked_api_call(pnc_api.repositories, 'get_specific', id=id)
    return response.content if response else None
|
def update_repository_configuration(id, external_repository=None, prebuild_sync=None):
    """Update an existing RepositoryConfiguration with new information."""
    rc = pnc_api.repositories.get_specific(id=id).content

    if external_repository is None:
        # No new URL supplied: keep the current one for the validation below.
        external_repository = rc.external_url
    else:
        rc.external_url = external_repository

    if prebuild_sync is not None:
        rc.pre_build_sync_enabled = prebuild_sync

    # Prebuild sync only makes sense when an external repository is set.
    if prebuild_sync and not external_repository:
        logging.error("You cannot enable prebuild sync without external repository")
        return None

    response = utils.checked_api_call(pnc_api.repositories, 'update', id=id, body=rc)
    return response.content if response else None
|
def list_repository_configurations(page_size=200, page_index=0, sort="", q=""):
    """List all RepositoryConfigurations."""
    response = utils.checked_api_call(
        pnc_api.repositories, 'get_all',
        page_size=page_size, page_index=page_index, sort=sort, q=q)
    return utils.format_json_list(response.content) if response else None
|
def search_repository_configuration(url, page_size=10, page_index=0, sort=""):
    """Search for Repository Configurations based on internal or external url."""
    content = search_repository_configuration_raw(url, page_size, page_index, sort)
    return utils.format_json_list(content) if content else None
|
def search_repository_configuration_raw(url, page_size=10, page_index=0, sort=""):
    """Search for Repository Configurations based on internal or external url."""
    response = utils.checked_api_call(
        pnc_api.repositories, 'search',
        page_size=page_size, page_index=page_index, sort=sort, search=url)
    return response.content if response else None
|
def match_repository_configuration(url, page_size=10, page_index=0, sort=""):
    """Search for Repository Configurations with an exact internal/external url match."""
    content = match_repository_configuration_raw(url, page_size, page_index, sort)
    return utils.format_json_list(content) if content else None
|
def render(self):
    '''Proxy method to the form environment's template render method.'''
    env = self.env
    return env.template.render(self.template, form=self)
|
def accept(self, data):
    '''
    Try to accept MultiDict-like object and return if it is valid.
    '''
    self.raw_data = MultiDict(data)
    self.errors = {}
    for field in self.fields:
        if field.writable:
            # Writable fields convert their raw input into python values.
            self.python_data.update(field.accept())
        else:
            # readonly field: push the current python values back out into
            # raw_data so the form round-trips consistently.
            for name in field.field_names:
                subfield = self.get_field(name)
                value = self.python_data[subfield.name]
                subfield.set_raw_value(self.raw_data, subfield.from_python(value))
    return self.is_valid
|
def get_data(self, compact=True):
    '''
    Returns data representing current state of the form. While Form.raw_data
    may contain alien fields and invalid data, this method returns only valid
    fields that belong to this form only. It's designed to pass somewhere
    current state of the form (as query string or by other means).
    '''
    data = MultiDict()
    for field in self.fields:
        # Convert each python value back to its raw representation.
        field.set_raw_value(data, field.from_python(self.python_data[field.name]))
    if compact:
        # Drop pairs whose value is empty.
        data = MultiDict([pair for pair in data.items() if pair[1]])
    return data
|
def add_configuration(self, id, **kwargs):
    """Adds a configuration to the specified Build Configuration Set.

    Synchronous by default; pass a `callback` keyword to make the request
    asynchronous, in which case the request thread is returned.

    :param callback function: callback for asynchronous request (optional)
    :param int id: Build Configuration Set id (required)
    :param BuildConfigurationRest body:
    :return: None, or the request thread when called asynchronously
    """
    # The helper returns the request thread when a callback is set and the
    # response data otherwise, so both paths reduce to one call.
    kwargs['_return_http_data_only'] = True
    return self.add_configuration_with_http_info(id, **kwargs)
|
def build(self, id, **kwargs):
    """Builds the Configurations for the Specified Set.

    Synchronous by default; pass a `callback` keyword to make the request
    asynchronous, in which case the request thread is returned.

    :param callback function: callback for asynchronous request (optional)
    :param int id: Build Configuration Set id (required)
    :param str callback_url: Optional Callback URL
    :param bool temporary_build: Is it a temporary build or a standard build?
    :param bool force_rebuild: DEPRECATED: Use RebuildMode.
    :param bool timestamp_alignment: Add a timestamp during alignment? Valid only for temporary builds.
    :param str rebuild_mode: FORCE (always rebuild all configurations in the set),
        EXPLICIT_DEPENDENCY_CHECK (check user-defined dependencies),
        IMPLICIT_DEPENDENCY_CHECK (check captured dependencies)
    :return: BuildConfigSetRecordSingleton, or the request thread when asynchronous
    """
    # The helper returns the request thread when a callback is set and the
    # response data otherwise, so both paths reduce to one call.
    kwargs['_return_http_data_only'] = True
    return self.build_with_http_info(id, **kwargs)
|
def build_versioned(self, id, **kwargs):
    """Builds the configurations for the Specified Set, optionally pinning exact BC revisions.

    Synchronous by default; pass a `callback` keyword to make the request
    asynchronous, in which case the request thread is returned.

    :param callback function: callback for asynchronous request (optional)
    :param int id: Build Configuration Set id (required)
    :param str callback_url: Optional Callback URL
    :param bool temporary_build: Is it a temporary build or a standard build?
    :param bool force_rebuild: DEPRECATED: Use RebuildMode.
    :param bool timestamp_alignment: Add a timestamp during alignment? Valid only for temporary builds.
    :param BuildConfigurationSetWithAuditedBCsRest body:
    :param str rebuild_mode: FORCE (always rebuild all configurations in the set),
        EXPLICIT_DEPENDENCY_CHECK (check user-defined dependencies),
        IMPLICIT_DEPENDENCY_CHECK (check captured dependencies)
    :return: BuildConfigSetRecordSingleton, or the request thread when asynchronous
    """
    # The helper returns the request thread when a callback is set and the
    # response data otherwise, so both paths reduce to one call.
    kwargs['_return_http_data_only'] = True
    return self.build_versioned_with_http_info(id, **kwargs)
|
def delete_specific(self, id, **kwargs):
    """Removes a specific Build Configuration Set.

    Synchronous by default; pass a `callback` keyword to make the request
    asynchronous, in which case the request thread is returned.

    :param callback function: callback for asynchronous request (optional)
    :param int id: Build Configuration Set id (required)
    :return: None, or the request thread when called asynchronously
    """
    # The helper returns the request thread when a callback is set and the
    # response data otherwise, so both paths reduce to one call.
    kwargs['_return_http_data_only'] = True
    return self.delete_specific_with_http_info(id, **kwargs)
|
def get_all_build_config_set_records(self, id, **kwargs):
    """Get all build config set execution records for this build config set.

    Returns an empty list if none are found. Synchronous by default; pass a
    `callback` keyword to make the request asynchronous, in which case the
    request thread is returned.

    :param callback function: callback for asynchronous request (optional)
    :param int id: Build config set id (required)
    :param int page_index: Page Index
    :param int page_size: Pagination size
    :param str sort: Sorting RSQL
    :param str q: RSQL Query
    :return: BuildConfigurationSetRecordPage, or the request thread when asynchronous
    """
    # The helper returns the request thread when a callback is set and the
    # response data otherwise, so both paths reduce to one call.
    kwargs['_return_http_data_only'] = True
    return self.get_all_build_config_set_records_with_http_info(id, **kwargs)
|
def get_build_records(self, id, **kwargs):
    """Gets all build records associated with the contained build configurations.

    Synchronous by default; pass a `callback` keyword to make the request
    asynchronous, in which case the request thread is returned.

    :param callback function: callback for asynchronous request (optional)
    :param int id: Build configuration set id (required)
    :param int page_index: Page Index
    :param int page_size: Pagination size
    :param str sort: Sorting RSQL
    :param str q: RSQL Query
    :return: BuildRecordPage, or the request thread when asynchronous
    """
    # The helper returns the request thread when a callback is set and the
    # response data otherwise, so both paths reduce to one call.
    kwargs['_return_http_data_only'] = True
    return self.get_build_records_with_http_info(id, **kwargs)
|
def get_configurations(self, id, **kwargs):
    """Gets the Configurations for the Specified Set.

    Synchronous by default; pass a `callback` keyword to make the request
    asynchronous, in which case the request thread is returned.

    :param callback function: callback for asynchronous request (optional)
    :param int id: Build Configuration Set id (required)
    :param int page_index: Page Index
    :param int page_size: Pagination size
    :param str sort: Sorting RSQL
    :param str q: RSQL Query
    :return: BuildConfigurationPage, or the request thread when asynchronous
    """
    # The helper returns the request thread when a callback is set and the
    # response data otherwise, so both paths reduce to one call.
    kwargs['_return_http_data_only'] = True
    return self.get_configurations_with_http_info(id, **kwargs)
|
def remove_configuration(self, id, config_id, **kwargs):
    """Removes a configuration from the specified config set.

    Synchronous by default; pass a `callback` keyword to make the request
    asynchronous, in which case the request thread is returned.

    :param callback function: callback for asynchronous request (optional)
    :param int id: Build configuration set id (required)
    :param int config_id: Build configuration id (required)
    :return: None, or the request thread when called asynchronously
    """
    # The helper returns the request thread when a callback is set and the
    # response data otherwise, so both paths reduce to one call.
    kwargs['_return_http_data_only'] = True
    return self.remove_configuration_with_http_info(id, config_id, **kwargs)
|
def update_configurations(self, id, **kwargs):
    """Updates the configurations of the specified Build Configuration Set.

    Synchronous by default; pass a `callback` keyword to make the request
    asynchronous, in which case the request thread is returned.

    :param callback function: callback for asynchronous request (optional)
    :param int id: Build Configuration Set Id (required)
    :param list[BuildConfigurationRest] body:
    :return: None, or the request thread when called asynchronously
    """
    # The helper returns the request thread when a callback is set and the
    # response data otherwise, so both paths reduce to one call.
    kwargs['_return_http_data_only'] = True
    return self.update_configurations_with_http_info(id, **kwargs)
|
def pluginPackagePaths(name):
    """
    Return a list of additional directories which should be searched for
    modules to be included as part of the named plugin package.

    @type name: C{str}
    @param name: The fully-qualified Python name of a plugin package, eg
        C{'twisted.plugins'}.

    @rtype: C{list} of C{str}
    @return: The absolute paths to other directories which may contain plugin
        modules for the named plugin package.
    """
    package = name.split('.')
    # The result may include directories which do not exist; removing them
    # here might be preferable to letting them be searched later on.
    #
    # Only '__init__.py' marks a directory as a real package (and excludes it
    # from this list), so a master plugin package with another kind of
    # __init__ (eg, __init__.pyc) would incorrectly be treated as a
    # supplementary plugin directory.
    paths = []
    for entry in sys.path:
        marker = os.path.join(entry, *package + ['__init__.py'])
        if not os.path.exists(marker):
            paths.append(os.path.abspath(os.path.join(entry, *package)))
    return paths
|
def storage_method(func):
    '''Calls decorated method with VersionedStorage as self'''
    def wrapper(self, *args, **kwargs):
        # Redirect the call so `func` receives the root storage as `self`.
        return func(self._root_storage, *args, **kwargs)
    return wrapper
|
def render(self, template_name, **kw):
    'Interface method called from `Template.render`'
    template = self.env.get_template(template_name)
    return template.render(**kw)
|
def cancel_all_builds_in_group(self, id, **kwargs):
    """Cancel all builds running in the build group.

    Synchronous by default; pass a `callback` keyword to make the request
    asynchronous, in which case the request thread is returned.

    :param callback function: callback for asynchronous request (optional)
    :param int id: Build Configuration Set id (required)
    :return: None, or the request thread when called asynchronously
    """
    # The helper returns the request thread when a callback is set and the
    # response data otherwise, so both paths reduce to one call.
    kwargs['_return_http_data_only'] = True
    return self.cancel_all_builds_in_group_with_http_info(id, **kwargs)
|
def get_all_for_bc(self, id, **kwargs):
    """Gets running Build Records for a specific Build Configuration.

    Synchronous by default; pass a `callback` keyword to make the request
    asynchronous, in which case the request thread is returned.

    :param callback function: callback for asynchronous request (optional)
    :param int id: Build Configuration id (required)
    :param int page_index: Page Index
    :param int page_size: Pagination size
    :param str search: This endpoint does not support queries; fulltext search
        over predefined fields (record id, configuration name) is performed
        using this argument. Empty string leaves all data unfiltered.
    :return: BuildRecordPage, or the request thread when asynchronous
    """
    # The helper returns the request thread when a callback is set and the
    # response data otherwise, so both paths reduce to one call.
    kwargs['_return_http_data_only'] = True
    return self.get_all_for_bc_with_http_info(id, **kwargs)
|
def get_all_for_bc_set_record(self, id, **kwargs):
    """Gets running Build Records for a specific Build Configuration Set Record.

    Synchronous by default; pass a `callback` keyword to make the request
    asynchronous, in which case the request thread is returned.

    :param callback function: callback for asynchronous request (optional)
    :param int id: Build Configuration Set id (required)
    :param int page_index: Page Index
    :param int page_size: Pagination size
    :param str search: This endpoint does not support queries; fulltext search
        over predefined fields (record id, configuration name) is performed
        using this argument. Empty string leaves all data unfiltered.
    :return: BuildRecordPage, or the request thread when asynchronous
    """
    # The helper returns the request thread when a callback is set and the
    # response data otherwise, so both paths reduce to one call.
    kwargs['_return_http_data_only'] = True
    return self.get_all_for_bc_set_record_with_http_info(id, **kwargs)
|
def dictmapper(typename, mapping):
    """
    A factory to create `namedtuple`-like classes from a
    field-to-dict-path mapping::

        Person = dictmapper({'person':('person','name')})
        example_dict = {'person':{'name':'John'}}
        john = Person(example_dict)
        assert john.name == 'John'

    If a function is specified as a mapping value instead of a dict "path",
    it will be run with the backing dict as its first argument.
    """
    def init(self, d, *args, **kwargs):
        """
        Initialize `dictmapper` classes with a dict to back getters.
        """
        self._original_dict = d

    def getter_from_dict_path(path):
        # Reject empty paths up front; callables are validated per-call.
        if not callable(path) and len(path) < 1:
            raise ValueError('Dict paths should be iterables with at least one'
                             ' key or callable objects that take one argument.')

        def getter(self):
            cur_dict = self._original_dict
            if callable(path):
                return path(cur_dict)
            return dict_value_from_path(cur_dict, path)
        return getter

    # BUGFIX: `mapping.iteritems()` is Python-2 only; `.items()` works on
    # both Python 2 and 3.
    prop_mapping = dict((k, property(getter_from_dict_path(v)))
                        for k, v in mapping.items())
    prop_mapping['__init__'] = init
    return type(typename, (), prop_mapping)
|
def ctor_args(self):
    """Return arguments for constructing a copy"""
    # Mirror the private attributes back to their constructor keyword names.
    return {
        'config': self._config,
        'search': self._search,
        'echo': self._echo,
        'read_only': self.read_only,
    }
|
def sync_config(self, force=False):
    """Sync the file config into the library proxy data in the root dataset"""
    from ambry.library.config import LibraryConfigSyncProxy

    LibraryConfigSyncProxy(self).sync(force=force)
|
def init_debug(self):
    """Initialize debugging features: install a USR2 handler that logs a stack trace."""
    import signal

    def debug_trace(sig, frame):
        """Log the current stack of the running process when the signal arrives."""
        self.log('Trace signal received')
        self.log(''.join(traceback.format_stack(frame)))

    signal.signal(signal.SIGUSR2, debug_trace)
|
def resolve_object_number(self, ref):
    """Resolve a variety of object numbers to a dataset number"""
    # Parse string references; pass ObjectNumber instances through.
    on = ref if isinstance(ref, ObjectNumber) else ObjectNumber.parse(ref)
    return on.as_dataset
|
def dataset(self, ref, load_all=False, exception=True):
    """Look up a dataset by reference; delegates to the underlying database."""
    db = self.database
    return db.dataset(ref, load_all=load_all, exception=exception)
|
def new_bundle(self, assignment_class=None, **kwargs):
    """
    Create a new bundle, with the same arguments as creating a new dataset

    :param assignment_class: String. assignment class to use for fetching a number,
        if one is not specified in kwargs
    :param kwargs:
    :return: the new Bundle, in state NEW
    """
    # Fetch a fresh object number when no usable id was supplied, or when an
    # explicit assignment class forces re-numbering.
    if not ('id' in kwargs and bool(kwargs['id'])) or assignment_class is not None:
        kwargs['id'] = self.number(assignment_class)

    ds = self._db.new_dataset(**kwargs)
    self._db.commit()

    b = self.bundle(ds.vid)
    b.state = Bundle.STATES.NEW
    b.set_last_access(Bundle.STATES.NEW)

    # Point the bundle at its source and build filesystem locations.
    b.set_file_system(source_url=self._fs.source(b.identity.source_path),
                      build_url=self._fs.build(b.identity.source_path))

    # Seed the metadata build-source file with defaults, then round-trip it
    # through the object model so file contents and records agree.
    bs_meta = b.build_source_files.file(File.BSFILE.META)
    bs_meta.set_defaults()
    bs_meta.record_to_objects()
    bs_meta.objects_to_record()

    b.commit()
    self._db.commit()
    return b
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.