def execute(self):
    """Convert the notebook to a python script and execute it, returning the local context as a dict"""
    from nbformat import read
    from nbconvert.exporters import export_script
    from cStringIO import StringIO

    notebook = read(StringIO(self.record.unpacked_contents), 4)

    script, resources = export_script(notebook)

    env_dict = {}

    exec(compile(script.replace('# coding: utf-8', ''), 'script', 'exec'), env_dict)

    return env_dict
def record_to_fs(self):
    """Create a filesystem file from a File"""
    fr = self.record

    if fr.contents:
        with self._fs.open(self.file_name, 'w', encoding='utf-8') as f:
            self.record_to_fh(f)
def import_module(self, module_path='ambry.build', **kwargs):
    """Import the contents of the file into the ambry.build module

    :param kwargs: items to add to the module globals
    :return:
    """
    from fs.errors import NoSysPathError

    if module_path in sys.modules:
        module = sys.modules[module_path]
    else:
        module = imp.new_module(module_path)
        sys.modules[module_path] = module

    bf = self.record

    if not bf.contents:
        return module

    module.__dict__.update(**kwargs)

    try:
        abs_path = self._fs.getsyspath(self.file_name)
    except NoSysPathError:
        abs_path = '<string>'

    import re
    if re.search(r'-\*-\s+coding:', bf.contents):
        # Has encoding, so don't decode
        contents = bf.contents
    else:
        contents = bf.unpacked_contents  # Assumes utf-8

    exec(compile(contents, abs_path, 'exec'), module.__dict__)

    return module
def import_bundle(self):
    """Add the filesystem to the Python sys path with an import hook, then import
    the file as Python"""
    from fs.errors import NoSysPathError

    try:
        import ambry.build
        module = sys.modules['ambry.build']
    except ImportError:
        module = imp.new_module('ambry.build')
        sys.modules['ambry.build'] = module

    bf = self.record

    if not bf.has_contents:
        from ambry.bundle import Bundle
        return Bundle

    try:
        abs_path = self._fs.getsyspath(self.file_name)
    except NoSysPathError:
        abs_path = '<string>'

    exec(compile(bf.contents, abs_path, 'exec'), module.__dict__)

    return module.Bundle
def import_lib(self):
    """Import the lib.py file into the bundle module"""
    try:
        import ambry.build
        module = sys.modules['ambry.build']
    except ImportError:
        module = imp.new_module('ambry.build')
        sys.modules['ambry.build'] = module

    bf = self.record

    if not bf.has_contents:
        return

    try:
        exec(compile(bf.contents, self.path, 'exec'), module.__dict__)
    except Exception:
        self._bundle.error("Failed to load code from {}".format(self.path))
        raise

    # print(self.file_const, bundle.__dict__.keys())
    # print(bf.contents)

    return module
def record_to_objects(self):
    """Create config records to match the file metadata"""
    from ambry.orm.exc import NotFoundError

    fr = self.record

    contents = fr.unpacked_contents

    if not contents:
        return

    # Zip transposes an array when in the form of a list of lists, so this transposes so
    # each row starts with the heading and the rest of the row are the values
    # for that row. The bool and filter return false when none of the values
    # are non-empty. Then zip again to transpose to original form.
    non_empty_rows = drop_empty(contents)

    s = self._dataset._database.session

    for i, row in enumerate(non_empty_rows):
        if i == 0:
            header = row
        else:
            d = dict(six.moves.zip(header, row))

            if 'widths' in d:
                del d['widths']  # Obsolete column in old spreadsheets.

            if 'table' in d:
                d['dest_table_name'] = d['table']
                del d['table']

            if 'order' in d:
                d['stage'] = d['order']
                del d['order']

            if 'dest_table' in d:
                d['dest_table_name'] = d['dest_table']
                del d['dest_table']

            if 'source_table' in d:
                d['source_table_name'] = d['source_table']
                del d['source_table']

            d['d_vid'] = self._dataset.vid
            d['state'] = 'synced'

            try:
                ds = self._dataset.source_file(str(d['name']))
                ds.update(**d)
            except NotFoundError:
                name = d['name']
                del d['name']
                try:
                    ds = self._dataset.new_source(name, **d)
                except:
                    print(name, d)
                    import pprint
                    pprint.pprint(d)
                    raise
            except:  # Odd error with 'none' in keys for d
                print('!!!', header)
                print('!!!', row)
                raise

            s.merge(ds)

    self._dataset._database.commit()
def record_to_objects(self):
    """Create config records to match the file metadata"""
    from ambry.orm import Column, Table, Dataset

    def _clean_int(i):
        if i is None:
            return None
        elif isinstance(i, int):
            return i
        elif isinstance(i, string_types):
            if len(i) == 0:
                return None
            return int(i.strip())

    bsfile = self.record

    contents = bsfile.unpacked_contents

    if not contents:
        return

    line_no = 1  # Accounts for file header. Data starts on line 2

    errors = []
    warnings = []

    extant_tables = {t.name: t for t in self._dataset.tables}

    old_types_map = {
        'varchar': Column.DATATYPE_STR,
        'integer': Column.DATATYPE_INTEGER,
        'real': Column.DATATYPE_FLOAT,
    }

    def run_progress_f(line_no):
        self._bundle.log('Loading tables from file. Line #{}'.format(line_no))

    from ambry.bundle.process import CallInterval
    run_progress_f = CallInterval(run_progress_f, 10)

    table_number = self._dataset._database.next_sequence_id(Dataset, self._dataset.vid, Table)

    for row in bsfile.dict_row_reader:
        line_no += 1

        run_progress_f(line_no)

        # Skip blank lines
        if not row.get('column', False) and not row.get('table', False):
            continue

        if not row.get('column', False):
            raise ConfigurationError('Row error: no column on line {}'.format(line_no))

        if not row.get('table', False):
            raise ConfigurationError('Row error: no table on line {}'.format(line_no))

        if not row.get('datatype', False) and not row.get('valuetype', False):
            raise ConfigurationError('Row error: no type on line {}'.format(line_no))

        value_type = row.get('valuetype', '').strip() if row.get('valuetype', False) else None
        data_type = row.get('datatype', '').strip() if row.get('datatype', False) else None

        def resolve_data_type(value_type):
            from ambry.valuetype import resolve_value_type

            vt_class = resolve_value_type(value_type)

            if not vt_class:
                raise ConfigurationError("Row error: unknown valuetype '{}'".format(value_type))

            return vt_class.python_type().__name__

        # If we have a value type field, and not the datatype,
        # the value type is as specified, and the data type is derived from it.
        if value_type and not data_type:
            data_type = resolve_data_type(value_type)
        elif data_type and not value_type:
            value_type = data_type
            data_type = resolve_data_type(value_type)

        # There are still some old data types hanging around
        data_type = old_types_map.get(data_type.lower(), data_type)

        table_name = row['table']

        try:
            table = extant_tables[table_name]
        except KeyError:
            table = self._dataset.new_table(
                table_name,
                sequence_id=table_number,
                description=row.get('description') if row['column'] == 'id' else ''
            )
            table_number += 1
            extant_tables[table_name] = table

        data = {k.replace('d_', '', 1): v for k, v in list(row.items())
                if k and k.startswith('d_') and v}

        if row['column'] == 'id':
            table.data.update(data)
            data = {}

        table.add_column(
            row['column'],
            fk_vid=row['is_fk'] if row.get('is_fk', False) else None,
            description=(row.get('description', '') or '').strip(),
            datatype=data_type,
            valuetype=value_type,
            parent=row.get('parent'),
            proto_vid=row.get('proto_vid'),
            size=_clean_int(row.get('size', None)),
            width=_clean_int(row.get('width', None)),
            data=data,
            keywords=row.get('keywords'),
            measure=row.get('measure'),
            transform=row.get('transform'),
            derivedfrom=row.get('derivedfrom'),
            units=row.get('units', None),
            universe=row.get('universe'),
            update_existing=True)

    self._dataset.t_sequence_id = table_number

    return warnings, errors
def record_to_objects(self):
    """Write from the stored file data to the source records"""
    from ambry.orm import SourceTable

    bsfile = self.record

    failures = set()

    # Clear out all of the columns from existing tables. We don't clear out the
    # tables, since they may be referenced by sources
    for row in bsfile.dict_row_reader:
        st = self._dataset.source_table(row['table'])

        if st:
            st.columns[:] = []

    self._dataset.commit()

    for row in bsfile.dict_row_reader:
        st = self._dataset.source_table(row['table'])

        if not st:
            st = self._dataset.new_source_table(row['table'])
            # table_number += 1

        if 'datatype' not in row:
            row['datatype'] = 'unknown'

        del row['table']

        st.add_column(**row)  # Create or update

    if failures:
        raise ConfigurationError('Failed to load source schema, missing sources: {} '.format(failures))

    self._dataset.commit()
def execute(self):
    """Executes all sql statements from bundle.sql."""
    from ambry.mprlib import execute_sql
    execute_sql(self._bundle.library, self.record_content)
def list_records(self, file_const=None):
    """Iterate through the file records"""
    for r in self._dataset.files:
        if file_const and r.minor_type != file_const:
            continue

        yield self.instance_from_name(r.path)
def record_to_objects(self, preference=None):
    """Create objects from files, or merge the files into the objects."""
    from ambry.orm.file import File

    for f in self.list_records():
        pref = preference if preference else f.record.preference

        if pref == File.PREFERENCE.FILE:
            self._bundle.logger.debug('  Cleaning objects for file {}'.format(f.path))
            f.clean_objects()

        if pref in (File.PREFERENCE.FILE, File.PREFERENCE.MERGE):
            self._bundle.logger.debug('  rto {}'.format(f.path))
            f.record_to_objects()
def objects_to_record(self, preference=None):
    """Create file records from objects."""
    from ambry.orm.file import File

    raise NotImplementedError("Still uses obsolete file_info_map")

    for file_const, (file_name, clz) in iteritems(file_info_map):
        f = self.file(file_const)

        pref = preference if preference else f.record.preference

        if pref in (File.PREFERENCE.MERGE, File.PREFERENCE.OBJECT):
            self._bundle.logger.debug('  otr {}'.format(file_const))
            f.objects_to_record()
def set_defaults(self):
    """Add default content to any file record that is empty"""
    for const_name, c in file_classes.items():
        if c.multiplicity == '1':
            f = self.file(const_name)

            if not f.record.unpacked_contents:
                f.setcontent(f.default)
def run(host='127.0.0.1', port=8000):
    """Run web server."""
    print("Server running on {}:{}".format(host, port))
    app_router = Router()
    server = make_server(host, port, app_router)
    server.serve_forever()
def main(args=None):
    """Create a private key and a certificate and write them to a file."""
    if args is None:
        args = sys.argv[1:]

    o = Options()
    try:
        o.parseOptions(args)
    except usage.UsageError as e:
        raise SystemExit(str(e))
    else:
        return createSSLCertificate(o)
def update(self, f):
    """Copy another file's properties into this one."""
    for p in self.__mapper__.attrs:
        if p.key == 'oid':
            continue
        try:
            setattr(self, p.key, getattr(f, p.key))
        except AttributeError:
            # The dict() method copies data property values into the main dict,
            # and these don't have associated class properties.
            continue
def resetTimeout(self):
    """Reset the timeout count down"""
    if self.__timeoutCall is not None and self.timeOut is not None:
        self.__timeoutCall.reset(self.timeOut)
def setTimeout(self, period):
    """Change the timeout period

    @type period: C{int} or C{NoneType}
    @param period: The period, in seconds, to change the timeout to, or
        C{None} to disable the timeout.
    """
    prev = self.timeOut
    self.timeOut = period

    if self.__timeoutCall is not None:
        if period is None:
            self.__timeoutCall.cancel()
            self.__timeoutCall = None
        else:
            self.__timeoutCall.reset(period)
    elif period is not None:
        self.__timeoutCall = self.callLater(period, self.__timedOut)

    return prev
def _load_controllers(self):
    """Load all controllers from folder 'controllers'.
    Ignore files with leading underscore (for example: controllers/_blogs.py)
    """
    for file_name in os.listdir(os.path.join(self._project_dir, 'controllers')):
        # ignore disabled controllers
        if not file_name.startswith('_'):
            module_name = file_name.split('.', 1)[0]
            module_path = "controllers.{}".format(module_name)
            module = import_module(module_path)

            # transform 'blog_articles' file name to 'BlogArticles' class
            controller_class_name = module_name.title().replace('_', '')
            controller_class = getattr(module, controller_class_name)
            controller = controller_class()

            for action_name in dir(controller):
                action = getattr(controller, action_name)
                if action_name.startswith('_') or not callable(action):
                    continue
                url_path = "/".join([module_name, action_name])
                self._controllers[url_path] = action

    return self._controllers
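For illustration, a minimal controller module that this loader could pick up, assuming the conventions above; the module name blog_articles and its actions are hypothetical, chosen only to show the blog_articles -> BlogArticles -> 'blog_articles/index' mapping.

# controllers/blog_articles.py -- hypothetical module used only to illustrate
# the naming convention assumed by _load_controllers.

class BlogArticles(object):
    """Loaded because the file name 'blog_articles' title-cases to 'BlogArticles'."""

    def index(self):
        # Registered under the URL path 'blog_articles/index'.
        return "article list"

    def _helper(self):
        # Skipped: names starting with '_' are not registered as actions.
        pass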
def _init_view(self):
    """Initialize View with project settings."""
    views_engine = get_config('rails.views.engine', 'jinja')
    templates_dir = os.path.join(self._project_dir, "views", "templates")
    self._view = View(views_engine, templates_dir)
def get_action_handler(self, controller_name, action_name):
    """Return the action of a controller as a callable.

    If the requested action isn't found, fall back to the 'not_found' action of the
    requested controller, or of the Index controller.
    """
    try_actions = [
        controller_name + '/' + action_name,
        controller_name + '/not_found',
        # call Index controller to catch all unhandled pages
        'index/not_found'
    ]

    # search first appropriate action handler
    for path in try_actions:
        if path in self._controllers:
            return self._controllers[path]

    return None
def command_start(self, daemonize=False):
    '''Start a server::

        ./manage.py flup:start [--daemonize]
    '''
    if daemonize:
        safe_makedirs(self.logfile, self.pidfile)

    flup_fastcgi(self.app, bind=self.bind, pidfile=self.pidfile,
                 logfile=self.logfile, daemonize=daemonize,
                 cwd=self.cwd, umask=self.umask,
                 **self.fastcgi_params)
def command_stop(self):
    '''Stop a server::

        ./manage.py flup:stop
    '''
    if self.pidfile:
        if not os.path.exists(self.pidfile):
            sys.exit("Pidfile {!r} doesn't exist".format(self.pidfile))

        with open(self.pidfile) as pidfile:
            pid = int(pidfile.read())

        for sig in [signal.SIGINT, signal.SIGTERM, signal.SIGKILL]:
            try:
                if terminate(pid, sig, 3):
                    os.remove(self.pidfile)
                    # NOTE: we are not performing sys.exit here,
                    # otherwise restart command will not work
                    return
            except OSError as exc:
                if exc.errno != errno.ESRCH:
                    raise
                elif sig == signal.SIGINT:
                    sys.exit('Not running')

    sys.exit('No pidfile provided')
def _preprocess_sqlite_view(asql_query, library, backend, connection):
    """ Finds view or materialized view in the asql query and converts it to create table/insert rows.

    Note:
        Assumes virtual tables for all partitions are already created.

    Args:
        asql_query (str): asql query
        library (ambry.Library):
        backend (SQLiteBackend):
        connection (apsw.Connection):

    Returns:
        str: valid sql query containing create table and insert into queries if asql_query
            contains 'create materialized view'. If asql_query does not contain
            'create materialized view' returns asql_query as is.
    """
    new_query = None

    if 'create materialized view' in asql_query.lower() or 'create view' in asql_query.lower():
        logger.debug(
            '_preprocess_sqlite_view: materialized view found.\n    asql query: {}'
            .format(asql_query))

        view = parse_view(asql_query)

        tablename = view.name.replace('-', '_').lower().replace('.', '_')

        create_query_columns = {}
        for column in view.columns:
            create_query_columns[column.name] = column.alias

        ref_to_partition_map = {}  # key is ref found in the query, value is Partition instance.
        alias_to_partition_map = {}  # key is alias of ref found in the query, value is Partition instance.

        # collect sources from select statement of the view.
        for source in view.sources:
            partition = library.partition(source.name)
            ref_to_partition_map[source.name] = partition

            if source.alias:
                alias_to_partition_map[source.alias] = partition

        # collect sources from joins of the view.
        for join in view.joins:
            partition = library.partition(join.source.name)
            ref_to_partition_map[join.source.name] = partition

            if join.source.alias:
                alias_to_partition_map[join.source.alias] = partition

        # collect and convert columns.
        TYPE_MAP = {
            'int': 'INTEGER',
            'float': 'REAL',
            six.binary_type.__name__: 'TEXT',
            six.text_type.__name__: 'TEXT',
            'date': 'DATE',
            'datetime': 'TIMESTAMP WITHOUT TIME ZONE'
        }

        column_types = []
        column_names = []

        for column in view.columns:
            if '.' in column.name:
                source_alias, column_name = column.name.split('.')
            else:
                # TODO: Test that case.
                source_alias = None
                column_name = column.name

            # find column specification in the mpr file.
            if source_alias:
                partition = alias_to_partition_map[source_alias]
                for part_column in partition.datafile.reader.columns:
                    if part_column['name'] == column_name:
                        sqlite_type = TYPE_MAP.get(part_column['type'])
                        if not sqlite_type:
                            raise Exception(
                                'Do not know how to convert {} to sql column.'
                                .format(column['type']))

                        column_types.append(
                            '    {} {}'
                            .format(column.alias if column.alias else column.name, sqlite_type))
                        column_names.append(column.alias if column.alias else column.name)

        column_types_str = ',\n'.join(column_types)
        column_names_str = ', '.join(column_names)

        create_query = 'CREATE TABLE IF NOT EXISTS {}(\n{});'.format(tablename, column_types_str)

        # drop 'create materialized view' part
        _, select_part = asql_query.split(view.name)
        select_part = select_part.strip()
        assert select_part.lower().startswith('as')

        # drop 'as' keyword
        select_part = select_part.strip()[2:].strip()
        assert select_part.lower().strip().startswith('select')

        # Create query to copy data from mpr to just created table.
        copy_query = 'INSERT INTO {table}(\n{columns})\n  {select}'.format(
            table=tablename, columns=column_names_str, select=select_part)

        if not copy_query.strip().lower().endswith(';'):
            copy_query = copy_query + ';'

        new_query = '{}\n\n{}'.format(create_query, copy_query)

        logger.debug(
            '_preprocess_sqlite_view: preprocess finished.\n    asql query: {}\n\n    new query: {}'
            .format(asql_query, new_query))

    return new_query or asql_query
def _preprocess_sqlite_index(asql_query, library, backend, connection):
    """ Creates materialized view for each indexed partition found in the query.

    Args:
        asql_query (str): asql query
        library (ambry.Library):
        backend (SQLiteBackend):
        connection (apsw.Connection):

    Returns:
        str: converted asql if it contains index query. If not, returns asql_query as is.
    """
    new_query = None

    if asql_query.strip().lower().startswith('index'):
        logger.debug(
            '_preprocess_index: create index query found.\n    asql query: {}'
            .format(asql_query))

        index = parse_index(asql_query)

        partition = library.partition(index.source)
        table = backend.install(connection, partition, materialize=True)

        index_name = '{}_{}_ind'.format(partition.vid, '_'.join(index.columns))
        new_query = 'CREATE INDEX IF NOT EXISTS {index} ON {table} ({columns});'.format(
            index=index_name, table=table, columns=','.join(index.columns))

        logger.debug(
            '_preprocess_index: preprocess finished.\n    asql query: {}\n    new query: {}'
            .format(asql_query, new_query))

    return new_query or asql_query
def install(self, connection, partition, table_name=None, index_columns=None,
            materialize=False, logger=None):
    """ Creates a virtual table or read-only table for the partition.

    Args:
        ref (str): id, vid, name or versioned name of the partition.
        materialize (boolean): if True, create read-only table. If False create virtual table.

    Returns:
        str: name of the created table.
    """
    virtual_table = partition.vid
    table = partition.vid if not table_name else table_name

    if self._relation_exists(connection, table):
        if logger:
            logger.debug("Skipping '{}'; already installed".format(table))
        return
    else:
        if logger:
            logger.info("Installing '{}'".format(table))

    partition.localize()

    virtual_table = partition.vid + '_vt'

    self._add_partition(connection, partition)

    if materialize:
        if self._relation_exists(connection, table):
            debug_logger.debug(
                'Materialized table of the partition already exists.\n    partition: {}, table: {}'
                .format(partition.name, table))
        else:
            cursor = connection.cursor()

            # create table
            create_query = self.__class__._get_create_query(partition, table)

            debug_logger.debug(
                'Creating new materialized view for partition mpr.'
                '\n    partition: {}, view: {}, query: {}'
                .format(partition.name, table, create_query))

            cursor.execute(create_query)

            # populate just created table with data from virtual table.
            copy_query = '''INSERT INTO {} SELECT * FROM {};'''.format(table, virtual_table)

            debug_logger.debug(
                'Populating sqlite table with rows from partition mpr.'
                '\n    partition: {}, view: {}, query: {}'
                .format(partition.name, table, copy_query))

            cursor.execute(copy_query)
            cursor.close()
    else:
        cursor = connection.cursor()

        view_q = "CREATE VIEW IF NOT EXISTS {} AS SELECT * FROM {} ".format(partition.vid, virtual_table)
        cursor.execute(view_q)
        cursor.close()

    if index_columns is not None:
        self.index(connection, table, index_columns)

    return table
def index(self, connection, partition, columns):
    """ Create an index on the columns.

    Args:
        connection (apsw.Connection): connection to sqlite database who stores mpr table or view.
        partition (orm.Partition):
        columns (list of str):
    """
    import hashlib

    query_tmpl = '''
        CREATE INDEX IF NOT EXISTS {index_name} ON {table_name} ({columns});
    '''

    if not isinstance(columns, (list, tuple)):
        columns = [columns]

    col_list = ','.join('"{}"'.format(col) for col in columns)
    col_hash = hashlib.md5(col_list).hexdigest()

    try:
        table_name = partition.vid
    except AttributeError:
        table_name = partition  # Its really a table name

    query = query_tmpl.format(
        index_name='{}_{}_i'.format(table_name, col_hash),
        table_name=table_name,
        columns=col_list)

    logger.debug('Creating sqlite index: query: {}'.format(query))

    cursor = connection.cursor()
    cursor.execute(query)
def close(self):
    """ Closes connection to sqlite database. """
    if getattr(self, '_connection', None):
        logger.debug('Closing sqlite connection.')
        self._connection.close()
        self._connection = None
def _get_mpr_view(self, connection, table):
    """ Finds and returns view name in the sqlite db represented by given connection.

    Args:
        connection: connection to sqlite db where to look for partition table.
        table (orm.Table):

    Raises:
        MissingViewError: if database does not have partition table.

    Returns:
        str: database table storing partition data.
    """
    logger.debug(
        'Looking for view of the table.\n    table: {}'.format(table.vid))

    view = self.get_view_name(table)
    view_exists = self._relation_exists(connection, view)

    if view_exists:
        logger.debug(
            'View of the table exists.\n    table: {}, view: {}'
            .format(table.vid, view))
        return view

    raise MissingViewError('sqlite database does not have view for {} table.'
                           .format(table.vid))
def _get_mpr_table(self, connection, partition):
    """ Returns name of the sqlite table who stores mpr data.

    Args:
        connection (apsw.Connection): connection to sqlite database who stores mpr data.
        partition (orm.Partition):

    Returns:
        str:

    Raises:
        MissingTableError: if partition table not found in the db.
    """
    # TODO: This is the first candidate for optimization. Add field to partition
    # with table name and update it while table creation.

    # Optimized version.
    #
    # return partition.mpr_table or raise exception

    # Not optimized version.
    #
    # first check either partition has readonly table.
    virtual_table = partition.vid
    table = '{}_v'.format(virtual_table)

    logger.debug(
        'Looking for materialized table of the partition.\n    partition: {}'.format(partition.name))
    table_exists = self._relation_exists(connection, table)

    if table_exists:
        logger.debug(
            'Materialized table of the partition found.\n    partition: {}, table: {}'
            .format(partition.name, table))
        return table

    # now check for virtual table
    logger.debug(
        'Looking for a virtual table of the partition.\n    partition: {}'.format(partition.name))
    virtual_exists = self._relation_exists(connection, virtual_table)

    if virtual_exists:
        logger.debug(
            'Virtual table of the partition found.\n    partition: {}, table: {}'
            .format(partition.name, table))
        return virtual_table

    raise MissingTableError('sqlite database does not have table for mpr of {} partition.'
                            .format(partition.vid))
def _relation_exists(self, connection, relation):
    """ Returns True if relation (table or view) exists in the sqlite db. Otherwise returns False.

    Args:
        connection (apsw.Connection): connection to sqlite database who stores mpr data.
        relation (str): name of the table or view to look for.

    Returns:
        boolean: True if relation exists, False otherwise.
    """
    query = 'SELECT 1 FROM sqlite_master WHERE (type=\'table\' OR type=\'view\') AND name=?;'

    cursor = connection.cursor()
    cursor.execute(query, [relation])
    result = cursor.fetchall()

    return result == [(1,)]
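A small standalone check of the same sqlite_master query, using the stdlib sqlite3 module rather than apsw; the 'demo' table name is made up for the example.

# Self-contained sketch of the existence test above, against an in-memory database.
import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE demo (id INTEGER)')

cur = conn.execute(
    "SELECT 1 FROM sqlite_master WHERE (type='table' OR type='view') AND name=?", ['demo'])
print(cur.fetchall() == [(1,)])  # True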
def _get_create_query(partition, tablename, include=None):
    """ Creates and returns `CREATE TABLE ...` sql statement for given mprows.

    Args:
        partition (orm.Partition):
        tablename (str): name of the table in the return create query.
        include (list of str, optional): list of columns to include to query.

    Returns:
        str: create table query.
    """
    TYPE_MAP = {
        'int': 'INTEGER',
        'float': 'REAL',
        six.binary_type.__name__: 'TEXT',
        six.text_type.__name__: 'TEXT',
        'date': 'DATE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE'
    }

    columns_types = []

    if not include:
        include = []

    for column in sorted(partition.datafile.reader.columns, key=lambda x: x['pos']):
        if include and column['name'] not in include:
            continue

        sqlite_type = TYPE_MAP.get(column['type'])
        if not sqlite_type:
            raise Exception('Do not know how to convert {} to sql column.'.format(column['type']))

        columns_types.append('    "{}" {}'.format(column['name'], sqlite_type))

    columns_types_str = ',\n'.join(columns_types)

    query = 'CREATE TABLE IF NOT EXISTS {}(\n{})'.format(tablename, columns_types_str)

    return query
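As a rough sketch of the statement shape this builds, here is the same TYPE_MAP-driven construction run over a hand-made column list standing in for partition.datafile.reader.columns; the column names and types are invented for the example.

# Minimal, self-contained sketch of the CREATE TABLE query shape.
TYPE_MAP = {'int': 'INTEGER', 'float': 'REAL', 'str': 'TEXT', 'date': 'DATE'}

columns = [
    {'pos': 0, 'name': 'id', 'type': 'int'},
    {'pos': 1, 'name': 'name', 'type': 'str'},
    {'pos': 2, 'name': 'weight', 'type': 'float'},
]

column_defs = ',\n'.join(
    '    "{}" {}'.format(c['name'], TYPE_MAP[c['type']])
    for c in sorted(columns, key=lambda c: c['pos']))

print('CREATE TABLE IF NOT EXISTS p1_v(\n{})'.format(column_defs))
# CREATE TABLE IF NOT EXISTS p1_v(
#     "id" INTEGER,
#     "name" TEXT,
#     "weight" REAL)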
def _get_connection(self):
    """ Returns connection to sqlite db.

    Returns:
        connection to the sqlite db who stores mpr data.
    """
    if getattr(self, '_connection', None):
        logger.debug('Connection to sqlite db already exists. Using existing one.')
    else:
        dsn = self._dsn
        if dsn == 'sqlite://':
            dsn = ':memory:'
        else:
            dsn = dsn.replace('sqlite:///', '')

        logger.debug(
            'Creating new apsw connection.\n    dsn: {}, config_dsn: {}'
            .format(dsn, self._dsn))

        self._connection = apsw.Connection(dsn)

    return self._connection
def _add_partition(self, connection, partition):
    """ Creates sqlite virtual table for mpr file of the given partition.

    Args:
        connection: connection to the sqlite db who stores mpr data.
        partition (orm.Partition):
    """
    logger.debug('Creating virtual table for partition.\n    partition: {}'.format(partition.name))
    sqlite_med.add_partition(connection, partition.datafile, partition.vid + '_vt')
def _execute(self, connection, query, fetch=True):
    """ Executes given query using given connection.

    Args:
        connection (apsw.Connection): connection to the sqlite db who stores mpr data.
        query (str): sql query
        fetch (boolean, optional): if True, fetch query result and return it. If False, do not fetch.

    Returns:
        iterable with query result.
    """
    cursor = connection.cursor()

    try:
        cursor.execute(query)
    except Exception as e:
        from ambry.mprlib.exceptions import BadSQLError
        raise BadSQLError("Failed to execute query: {}; {}".format(query, e))

    if fetch:
        return cursor.fetchall()
    else:
        return cursor
def _log_disconnect(self):
    """ Decrement connection count """
    if self.logged:
        self.server.stats.connectionClosed()
        self.logged = False
def enable_cache(self):
    """ Enable client-side caching for the current request """
    self.set_header('Cache-Control', 'max-age=%d, public' % self.CACHE_TIME)
    now = datetime.datetime.now()
    expires = now + datetime.timedelta(seconds=self.CACHE_TIME)
    self.set_header('Expires', expires.strftime('%a, %d %b %Y %H:%M:%S'))
    self.set_header('access-control-max-age', self.CACHE_TIME)
def list_milestones(page_size=200, page_index=0, q="", sort=""):
    """List all ProductMilestones"""
    data = list_milestones_raw(page_size, page_index, sort, q)
    if data:
        return utils.format_json_list(data)
def update_milestone(id, **kwargs):
    """Update a ProductMilestone"""
    data = update_milestone_raw(id, **kwargs)
    if data:
        return utils.format_json(data)
def close_milestone(id, **kwargs):
    """Close a milestone. This triggers its release process.

    The user can optionally specify the release-date, otherwise today's date is used.

    If the wait parameter is specified and set to True, upon closing the milestone,
    we'll periodically check that the release being processed is done.

    Required:
    - id: int
    Optional:
    - wait key: bool
    """
    data = close_milestone_raw(id, **kwargs)
    if data:
        return utils.format_json(data)
def init_app(self, app):
    """ Find and configure the user database from specified file """
    app.config.setdefault('FLASK_AUTH_ALL', False)
    app.config.setdefault('FLASK_AUTH_REALM', 'Login Required')
    # Default set to bad file to trigger IOError
    app.config.setdefault('FLASK_HTPASSWD_PATH', '/^^^/^^^')

    # Load up user database
    try:
        self.load_users(app)
    except IOError:
        log.critical(
            'No htpasswd file loaded, please set `FLASK_HTPASSWD`'
            'or `FLASK_HTPASSWD_PATH` environment variable to a '
            'valid apache htpasswd file.'
        )

    # Allow requiring auth for entire app, with pre request method
    @app.before_request
    def require_auth():  # pylint: disable=unused-variable
        """Pre request processing for enabling full app authentication."""
        if not current_app.config['FLASK_AUTH_ALL']:
            return
        is_valid, user = self.authenticate()
        if not is_valid:
            return self.auth_failed()
        g.user = user
def check_basic_auth(self, username, password):
    """
    This function is called to check if a username /
    password combination is valid via the htpasswd file.
    """
    valid = self.users.check_password(username, password)
    if not valid:
        log.warning('Invalid login from %s', username)
        valid = False
    return (valid, username)
def get_hashhash(self, username):
    """ Generate a digest of the htpasswd hash """
    return hashlib.sha256(
        self.users.get_hash(username)
    ).hexdigest()
def generate_token(self, username):
    """Assumes user exists in htpasswd file.

    Return the token for the given user by signing a token of
    the username and a hash of the htpasswd string.
    """
    serializer = self.get_signature()
    return serializer.dumps(
        {
            'username': username,
            'hashhash': self.get_hashhash(username)
        }
    ).decode('UTF-8')
def check_token_auth(self, token):
    """
    Check to see who this is and if their token gets
    them into the system.
    """
    serializer = self.get_signature()
    try:
        data = serializer.loads(token)
    except BadSignature:
        log.warning('Received bad token signature')
        return False, None

    if data['username'] not in self.users.users():
        log.warning(
            'Token auth signed message, but invalid user %s',
            data['username']
        )
        return False, None

    if data['hashhash'] != self.get_hashhash(data['username']):
        log.warning(
            'Token and password do not match, %s '
            'needs to regenerate token',
            data['username']
        )
        return False, None

    return True, data['username']
def authenticate(self):
    """Authenticate user by any means and return either true or false.

    Returns:
        tuple (is_valid, username): True is valid user, False if not
    """
    basic_auth = request.authorization
    is_valid = False
    user = None

    if basic_auth:
        is_valid, user = self.check_basic_auth(
            basic_auth.username, basic_auth.password
        )
    else:
        # Try token auth
        token = request.headers.get('Authorization', None)
        param_token = request.args.get('access_token')
        if token or param_token:
            if token:
                # slice the 'token ' piece of the header (following
                # github style):
                token = token[6:]
            else:
                # Grab it from query dict instead
                token = param_token
            log.debug('Received token: %s', token)

            is_valid, user = self.check_token_auth(token)

    return (is_valid, user)
def required(self, func):
    """ Decorator function with basic and token authentication handler """
    @wraps(func)
    def decorated(*args, **kwargs):
        """Actual wrapper to run the auth checks."""
        is_valid, user = self.authenticate()
        if not is_valid:
            return self.auth_failed()
        kwargs['user'] = user
        return func(*args, **kwargs)
    return decorated
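A minimal usage sketch of the decorator pattern above, with a stand-in auth object so the example runs on its own; the FakeAuth class and the /secret route are hypothetical and not part of the library.

from functools import wraps
from flask import Flask

app = Flask(__name__)

class FakeAuth(object):
    """Stand-in with the same required() shape, so the wiring below runs."""

    def authenticate(self):
        return True, 'demo-user'  # pretend auth always succeeds

    def auth_failed(self):
        return ('Unauthorized', 401)

    def required(self, func):
        @wraps(func)
        def decorated(*args, **kwargs):
            is_valid, user = self.authenticate()
            if not is_valid:
                return self.auth_failed()
            kwargs['user'] = user
            return func(*args, **kwargs)
        return decorated

auth = FakeAuth()

@app.route('/secret')
@auth.required
def secret(user):
    # The decorator injects the authenticated username as the 'user' kwarg.
    return 'Hello, {}'.format(user)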
def _dict(self, with_name=True):
    """Returns the identity as a dict. Values that are empty are removed."""
    d = dict([(k, getattr(self, k)) for k, _, _ in self.name_parts])

    if with_name:
        d['name'] = self.name
        try:
            d['vname'] = self.vname
        except ValueError:
            pass

    return self.clear_dict(d)
def source_path(self):
    """The name in a form suitable for use in a filesystem. Excludes the revision."""
    # Need to do this to ensure the function produces the
    # bundle path when called from subclasses
    names = [k for k, _, _ in self._name_parts]

    parts = [self.source]

    if self.bspace:
        parts.append(self.bspace)

    parts.append(
        self._path_join(names=names, excludes=['source', 'version', 'bspace'],
                        sep=self.NAME_PART_SEP))

    return os.path.join(*parts)
def cache_key(self):
    """The name in a form suitable for use as a cache-key"""
    try:
        return self.path
    except TypeError:
        raise TypeError("self.path is invalid: '{}', '{}'".format(str(self.path), type(self.path)))
def ver(self, revision):
    """Clone and change the version."""
    c = self.clone()
    c.version = self._parse_version(revision)
    return c
def as_partition(self, **kwargs):
    """Return a PartitionName based on this name."""
    return PartitionName(**dict(list(self.dict.items()) + list(kwargs.items())))
def promote(self, name):
    """Promote to a PartitionName by combining with a bundle Name."""
    return PartitionName(**dict(list(name.dict.items()) + list(self.dict.items())))
def path(self):
    """The path of the bundle source. Includes the revision."""
    # Need to do this to ensure the function produces the
    # bundle path when called from subclasses
    names = [k for k, _, _ in Name._name_parts]

    return os.path.join(
        self.source,
        self._path_join(names=names, excludes=['source', 'format'], sep=self.NAME_PART_SEP),
        *self._local_parts()
    )
def sub_path(self):
    """The path of the partition source, excluding the bundle path parts. Includes the revision."""
    try:
        return os.path.join(*(self._local_parts()))
    except TypeError as e:
        raise TypeError(
            "Path failed for partition {} : {}".format(self.name, e.message))
def partital_dict(self, with_name=True):
    """Returns the name as a dict, but with only the items that are particular to a PartitionName."""
    d = self._dict(with_name=False)

    d = {k: d.get(k) for k, _, _ in PartialPartitionName._name_parts if d.get(k, False)}

    if 'format' in d and d['format'] == Name.DEFAULT_FORMAT:
        del d['format']

    d['name'] = self.name

    return d
def _dict(self, with_name=True):
    """Returns the identity as a dict. Values that are empty are removed."""
    d = dict([(k, getattr(self, k)) for k, _, _ in self.name_parts])
    return self.clear_dict(d)
def with_none(self):
    """Convert the NameQuery.NONE to None.

    This is needed because on the kwargs list, a None value means the field is not
    specified, which equates to ANY. The _find_orm() routine, however, is easier to
    write if the NONE value is actually None.

    Returns a clone of the origin, with NONE converted to None.
    """
    n = self.clone()

    for k, _, _ in n.name_parts:
        if getattr(n, k) == n.NONE:
            delattr(n, k)

    n.use_clear_dict = False

    return n
def name_parts(self):
    """Works with PartialNameMixin.clear_dict to set NONE and ANY values."""
    default = PartialMixin.ANY

    np = ([(k, default, True) for k, _, _ in super(NameQuery, self).name_parts] +
          [(k, default, True) for k, _, _ in Name._generated_names])

    return np
def name_parts(self):
    """Works with PartialNameMixin.clear_dict to set NONE and ANY values."""
    default = PartialMixin.ANY

    return ([(k, default, True) for k, _, _ in PartitionName._name_parts] +
            [(k, default, True) for k, _, _ in Name._generated_names])
def parse(cls, on_str, force_type=None):  # @ReservedAssignment
    """Parse a string into one of the object number classes."""
    on_str_orig = on_str

    if on_str is None:
        return None

    if not on_str:
        raise NotObjectNumberError("Got null input")

    if not isinstance(on_str, string_types):
        raise NotObjectNumberError("Must be a string. Got a {} ".format(type(on_str)))

    # if isinstance(on_str, unicode):
    #     dataset = on_str.encode('ascii')

    if force_type:
        type_ = force_type
    else:
        type_ = on_str[0]
        on_str = on_str[1:]

    if type_ not in list(cls.NDS_LENGTH.keys()):
        raise NotObjectNumberError("Unknown type character '{}' for '{}'".format(type_, on_str_orig))

    ds_length = len(on_str) - cls.NDS_LENGTH[type_]

    if ds_length not in cls.DATASET_LENGTHS:
        raise NotObjectNumberError(
            "Dataset string '{}' has an unfamiliar length: {}".format(on_str_orig, ds_length))

    ds_lengths = cls.DATASET_LENGTHS[ds_length]

    assignment_class = ds_lengths[2]

    try:
        dataset = int(ObjectNumber.base62_decode(on_str[0:ds_lengths[0]]))

        if ds_lengths[1]:
            i = len(on_str) - ds_lengths[1]
            revision = int(ObjectNumber.base62_decode(on_str[i:]))
            on_str = on_str[0:i]  # remove the revision
        else:
            revision = None

        on_str = on_str[ds_lengths[0]:]

        if type_ == cls.TYPE.DATASET:
            return DatasetNumber(dataset, revision=revision, assignment_class=assignment_class)

        elif type_ == cls.TYPE.TABLE:
            table = int(ObjectNumber.base62_decode(on_str))
            return TableNumber(
                DatasetNumber(dataset, assignment_class=assignment_class), table, revision=revision)

        elif type_ == cls.TYPE.PARTITION:
            partition = int(ObjectNumber.base62_decode(on_str))
            return PartitionNumber(
                DatasetNumber(dataset, assignment_class=assignment_class), partition, revision=revision)

        elif type_ == cls.TYPE.COLUMN:
            table = int(ObjectNumber.base62_decode(on_str[0:cls.DLEN.TABLE]))
            column = int(ObjectNumber.base62_decode(on_str[cls.DLEN.TABLE:]))

            return ColumnNumber(
                TableNumber(DatasetNumber(dataset, assignment_class=assignment_class), table),
                column, revision=revision)

        elif type_ == cls.TYPE.OTHER1 or type_ == cls.TYPE.CONFIG:
            return GeneralNumber1(on_str_orig[0],
                                  DatasetNumber(dataset, assignment_class=assignment_class),
                                  int(ObjectNumber.base62_decode(on_str[0:cls.DLEN.OTHER1])),
                                  revision=revision)

        elif type_ == cls.TYPE.OTHER2:
            return GeneralNumber2(on_str_orig[0],
                                  DatasetNumber(dataset, assignment_class=assignment_class),
                                  int(ObjectNumber.base62_decode(on_str[0:cls.DLEN.OTHER1])),
                                  int(ObjectNumber.base62_decode(
                                      on_str[cls.DLEN.OTHER1:cls.DLEN.OTHER1 + cls.DLEN.OTHER2])),
                                  revision=revision)

        else:
            raise NotObjectNumberError('Unknown type character: ' + type_ + ' in ' + str(on_str_orig))

    except Base62DecodeError as e:
        raise NotObjectNumberError('Unknown character: ' + str(e))
def base62_encode(cls, num):
    """Encode a number in Base X.

    `num`: The number to encode
    `alphabet`: The alphabet to use for encoding

    Stolen from: http://stackoverflow.com/a/1119769/1144479
    """
    alphabet = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

    if num == 0:
        return alphabet[0]

    arr = []
    base = len(alphabet)
    while num:
        rem = num % base
        num = num // base
        arr.append(alphabet[rem])
    arr.reverse()

    return ''.join(arr)
def base62_decode(cls, string):
    """Decode a Base X encoded string into the number.

    Arguments:
    - `string`: The encoded string
    - `alphabet`: The alphabet to use for encoding

    Stolen from: http://stackoverflow.com/a/1119769/1144479
    """
    alphabet = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

    base = len(alphabet)
    strlen = len(string)
    num = 0

    idx = 0
    for char in string:
        power = (strlen - (idx + 1))
        try:
            num += alphabet.index(char) * (base ** power)
        except ValueError:
            raise Base62DecodeError("Failed to decode char: '{}'".format(char))
        idx += 1

    return num
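A standalone round-trip sketch of the same base62 scheme, kept outside the class so it runs on its own; the helper names are illustrative, not the library's API.

# Encode/decode round trip over the alphabet used above.
ALPHABET = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

def b62_encode(num):
    if num == 0:
        return ALPHABET[0]
    out = []
    while num:
        num, rem = divmod(num, len(ALPHABET))
        out.append(ALPHABET[rem])
    return ''.join(reversed(out))

def b62_decode(s):
    num = 0
    for char in s:
        num = num * len(ALPHABET) + ALPHABET.index(char)
    return num

assert b62_decode(b62_encode(123456789)) == 123456789
print(b62_encode(123456789))  # '8m0Kx'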
def increment(cls, v):
    """Increment the version number of an object number or object number string."""
    if not isinstance(v, ObjectNumber):
        v = ObjectNumber.parse(v)

    return v.rev(v.revision + 1)
def rev(self, i):
    """Return a clone with a different revision."""
    on = copy(self)
    on.revision = i
    return on
def from_hex(cls, h, space, assignment_class='self'):
    """Produce a TopNumber, with a length to match the given assignment class,
    based on an input hex string.

    This can be used to create TopNumbers from a hash of a string.
    """
    from math import log

    # Use the ln(N)/ln(base) trick to find the right number of hex digits to use
    hex_digits = int(
        round(log(62 ** TopNumber.DLEN.DATASET_CLASSES[assignment_class]) / log(16), 0))

    i = int(h[:hex_digits], 16)

    return TopNumber(space, i, assignment_class=assignment_class)
def from_string(cls, s, space):
    """Produce a TopNumber by hashing a string."""
    import hashlib

    hs = hashlib.sha1(s).hexdigest()

    return cls.from_hex(hs, space)
def classify(cls, o):
    """Break an Identity name into parts, or describe the type of other forms.

    Break a name or object number into parts and classify them. Returns a named
    tuple that indicates which parts of input string are name components, object
    number and version number. Does not completely parse the name components.

    Also can handle Name, Identity and ObjectNumbers

    :param o: Input object to split
    """
    # from collections import namedtuple

    s = str(o)

    if o is None:
        raise ValueError("Input cannot be None")

    class IdentityParts(object):
        on = None
        name = None
        isa = None
        vname = None
        sname = None
        name_parts = None
        version = None
        cache_key = None

    # namedtuple('IdentityParts', ['isa', 'name', 'name_parts', 'on', 'version', 'vspec'])

    ip = IdentityParts()

    if isinstance(o, (DatasetNumber, PartitionNumber)):
        ip.on = o
        ip.name = None
        ip.isa = type(ip.on)
        ip.name_parts = None

    elif isinstance(o, Name):
        ip.on = None
        ip.isa = type(o)
        ip.name = str(o)
        ip.name_parts = ip.name.split(Name.NAME_PART_SEP)

    elif '/' in s:
        # A cache key
        ip.cache_key = s.strip()
        ip.isa = str

    elif cls.OBJECT_NUMBER_SEP in s:
        # Must be a fqname
        ip.name, on_s = s.strip().split(cls.OBJECT_NUMBER_SEP)
        ip.on = ObjectNumber.parse(on_s)
        ip.name_parts = ip.name.split(Name.NAME_PART_SEP)
        ip.isa = type(ip.on)

    elif Name.NAME_PART_SEP in s:
        # Must be an sname or vname
        ip.name = s
        ip.on = None
        ip.name_parts = ip.name.split(Name.NAME_PART_SEP)
        ip.isa = Name

    else:
        # Probably an Object Number in string form
        ip.name = None
        ip.name_parts = None
        ip.on = ObjectNumber.parse(s.strip())
        ip.isa = type(ip.on)

    if ip.name_parts:
        last = ip.name_parts[-1]

        try:
            ip.version = sv.Version(last)
            ip.vname = ip.name
        except ValueError:
            try:
                ip.version = sv.Spec(last)
                ip.vname = None  # Specs aren't vnames you can query
            except ValueError:
                pass

        if ip.version:
            ip.name_parts.pop()

        ip.sname = Name.NAME_PART_SEP.join(ip.name_parts)
    else:
        ip.sname = ip.name

    return ip
def to_meta(self, md5=None, file=None):
    """Return a dictionary of metadata, for use in the Remote api."""
    # from collections import OrderedDict

    if not md5:
        if not file:
            raise ValueError('Must specify either file or md5')

        md5 = md5_for_file(file)
        size = os.stat(file).st_size
    else:
        size = None

    return {
        'id': self.id_,
        'identity': json.dumps(self.dict),
        'name': self.sname,
        'fqname': self.fqname,
        'md5': md5,
        # This causes errors with calculating the AWS signature
        'size': size
    }
def names_dict(self):
    """A dictionary with only the generated names, name, vname and fqname."""
    INCLUDE_KEYS = ['name', 'vname', 'vid']

    d = {k: v for k, v in iteritems(self.dict) if k in INCLUDE_KEYS}
    d['fqname'] = self.fqname

    return d
def ident_dict(self):
    """A dictionary with only the items required to specify the identity,
    excluding the generated names, name, vname and fqname."""
    SKIP_KEYS = ['name', 'vname', 'fqname', 'vid', 'cache_key']

    return {k: v for k, v in iteritems(self.dict) if k not in SKIP_KEYS}
def as_partition(self, partition=0, **kwargs):
    """Return a new PartitionIdentity based on this Identity.

    :param partition: Integer partition number for PartitionObjectNumber
    :param kwargs:
    """
    assert isinstance(self._name, Name), "Wrong type: {}".format(type(self._name))
    assert isinstance(self._on, DatasetNumber), "Wrong type: {}".format(type(self._on))

    name = self._name.as_partition(**kwargs)
    on = self._on.as_partition(partition)

    return PartitionIdentity(name, on)
def add_partition(self, p):
    """Add a partition identity as a child of a dataset identity."""
    if not self.partitions:
        self.partitions = {}

    self.partitions[p.vid] = p
def add_file(self, f):
    """Add a file record as a child of a dataset identity."""
    if not self.files:
        self.files = set()

    self.files.add(f)

    self.locations.set(f.type_)
def partition(self):
    """Convenience function for accessing the first partition in the partitions
    list, when there is only one."""
    if not self.partitions:
        return None

    if len(self.partitions) > 1:
        raise ValueError("Can't use this method when there is more than one partition")

    return list(self.partitions.values())[0]
def rev(self, rev):
    """Return a new identity with the given revision."""
    d = self.dict
    d['revision'] = rev

    return self.from_dict(d)
def _info(self):
    """Returns an OrderedDict of information, for human display."""
    d = OrderedDict()

    d['vid'] = self.vid
    d['sname'] = self.sname
    d['vname'] = self.vname

    return d
def from_dict(cls, d):
    """Like Identity.from_dict, but will cast the class type based on the format;
    i.e. if the format is hdf, return an HdfPartitionIdentity.

    :param d:
    :return:
    """
    name = PartitionIdentity._name_class(**d)

    if 'id' in d and 'revision' in d:
        # The vid should be constructed from the id and the revision
        on = (ObjectNumber.parse(d['id']).rev(d['revision']))
    elif 'vid' in d:
        on = ObjectNumber.parse(d['vid'])
    else:
        raise ValueError("Must have id and revision, or vid")

    try:
        return PartitionIdentity(name, on)
    except TypeError as e:
        raise TypeError(
            "Failed to make identity from \n{}\n: {}".format(d, e.message))
def as_dataset(self):
    """Convert this identity to the identity of the corresponding dataset."""
    on = self.on.dataset
    on.revision = self.on.revision

    name = Name(**self.name.dict)

    return Identity(name, on)
def sleep(self):
    """Wait for the sleep time of the last response, to avoid being rate limited."""
    if self.next_time and time.time() < self.next_time:
        time.sleep(self.next_time - time.time())
def root_sync(args, l, config):
    """Sync with the remote. For more options, use library sync."""
    from requests.exceptions import ConnectionError

    all_remote_names = [r.short_name for r in l.remotes]

    if args.all:
        remotes = all_remote_names
    else:
        remotes = args.refs

    prt("Sync with {} remotes or bundles ".format(len(remotes)))

    if not remotes:
        return

    for ref in remotes:
        l.commit()
        try:
            if ref in all_remote_names:
                # It's a remote name
                l.sync_remote(l.remote(ref))
            else:
                # It's a bundle reference
                l.checkin_remote_bundle(ref)
        except NotFoundError as e:
            warn(e)
            continue
        except ConnectionError as e:
            warn(e)
            continue
def _CaptureException(f, *args, **kwargs):
    """Decorator implementation for capturing exceptions."""
    from ambry.dbexceptions import LoggedException

    b = args[0]  # The 'self' argument

    try:
        return f(*args, **kwargs)
    except Exception as e:
        raise  # NOTE: this bare re-raise short-circuits the capture logic below

        try:
            b.set_error_state()
            b.commit()
        except Exception as e2:
            b.log('Failed to set bundle error state: {}'.format(e))
            raise e

        if b.capture_exceptions:
            b.logged_exception(e)
            raise LoggedException(e, b)
        else:
            b.exception(e)
            raise
def set_file_system(self, source_url=False, build_url=False):
    """Set the source file filesystem and/or build file system"""
    assert isinstance(source_url, string_types) or source_url is None or source_url is False
    assert isinstance(build_url, string_types) or build_url is False

    if source_url:
        self._source_url = source_url
        self.dataset.config.library.source.url = self._source_url
        self._source_fs = None
    elif source_url is None:
        self._source_url = None
        self.dataset.config.library.source.url = self._source_url
        self._source_fs = None

    if build_url:
        self._build_url = build_url
        self.dataset.config.library.build.url = self._build_url
        self._build_fs = None

    self.dataset.commit()
def clear_file_systems(self):
    """Remove references to build and source file systems, reverting to the defaults"""
    self._source_url = None
    self.dataset.config.library.source.url = None
    self._source_fs = None

    self._build_url = None
    self.dataset.config.library.build.url = None
    self._build_fs = None

    self.dataset.commit()
def cast_to_subclass(self):
    """Load the bundle file from the database to get the derived bundle class,
    then return a new bundle built on that class.

    :return:
    """
    self.import_lib()
    self.load_requirements()

    try:
        self.commit()  # To ensure the rollback() doesn't clear out anything important
        bsf = self.build_source_files.file(File.BSFILE.BUILD)
    except Exception as e:
        self.log('Error trying to create a bundle source file ... {} '.format(e))
        raise
        self.rollback()
        return self

    try:
        clz = bsf.import_bundle()
    except Exception as e:
        raise BundleError('Failed to load bundle code file, skipping : {}'.format(e))

    b = clz(self._dataset, self._library, self._source_url, self._build_url)
    b.limited_run = self.limited_run
    b.capture_exceptions = self.capture_exceptions
    b.multi = self.multi

    return b
def load_requirements(self):
    """If there are python library requirements set, append the python dir to the path."""
    for module_name, pip_name in iteritems(self.metadata.requirements):
        extant = self.dataset.config.requirements[module_name].url
        force = (extant and extant != pip_name)

        self._library.install_packages(module_name, pip_name, force=force)

        self.dataset.config.requirements[module_name].url = pip_name

    python_dir = self._library.filesystem.python()
    sys.path.append(python_dir)
def dep(self, source_name):
    """Return a bundle dependency from the sources list.

    :param source_name: Source name. The URL field must be a bundle or partition reference.
    :return:
    """
    from ambry.orm.exc import NotFoundError
    from ambry.dbexceptions import ConfigurationError

    source = self.source(source_name)

    ref = source.url

    if not ref:
        raise ValueError("Got an empty ref for source '{}' ".format(source.name))

    try:
        try:
            p = self.library.partition(ref)
        except NotFoundError:
            self.warn("Partition reference {} not found, try to download it".format(ref))
            remote, vname = self.library.find_remote_bundle(ref, try_harder=True)

            if remote:
                self.warn("Installing {} from {}".format(remote, vname))
                self.library.checkin_remote_bundle(vname, remote)
                p = self.library.partition(ref)
            else:
                raise

        if not p.is_local:
            with self.progress.start('test', 0, message='localizing') as ps:
                p.localize(ps)

        return p

    except NotFoundError:
        return self.library.bundle(ref)
def documentation(self):
    """Return the documentation, from the documentation.md file, with template substitutions"""
    # Return the documentation as a scalar term, which has .text() and .html methods to do
    # metadata substitution using Jinja

    s = ''

    rc = self.build_source_files.documentation.record_content

    if rc:
        s += rc

    for k, v in self.metadata.documentation.items():
        if v:
            s += '\n### {}\n{}'.format(k.title(), v)

    return self.metadata.scalar_term(s)
def progress(self):
    """Return a cached ProcessLogger to record build progress."""
    if not self._progress:
        # If we won't be building, only use one connection
        new_connection = False if self._library.read_only else True

        self._progress = ProcessLogger(self.dataset, self.logger, new_connection=new_connection)

    return self._progress