def _make_query_from_terms(self, terms, limit=None):
    """ Creates a query for dataset from decomposed search terms.

    Args:
        terms (dict or unicode or string):

    Returns:
        tuple of (TextClause, dict): First element is FTS query, second
            is parameters of the query. Element of the execution of the query is
            pair: (vid, score).
    """

    expanded_terms = self._expand_terms(terms)

    if expanded_terms['doc']:
        # create query with real score.
        query_parts = ["SELECT vid, ts_rank_cd(setweight(doc,'C'), to_tsquery(:doc)) as score"]

        if expanded_terms['doc'] and expanded_terms['keywords']:
            query_parts = ["SELECT vid, ts_rank_cd(setweight(doc,'C'), to_tsquery(:doc)) "
                           " + ts_rank_cd(setweight(to_tsvector(coalesce(keywords::text,'')),'B'), to_tsquery(:keywords))"
                           ' as score']
    else:
        # create query with score = 1 because query will not touch doc field.
        query_parts = ['SELECT vid, 1 as score']

    query_parts.append('FROM dataset_index')
    query_params = {}

    where_counter = 0

    if expanded_terms['doc']:
        where_counter += 1
        query_parts.append('WHERE doc @@ to_tsquery(:doc)')
        query_params['doc'] = self.backend._and_join(expanded_terms['doc'])

    if expanded_terms['keywords']:
        query_params['keywords'] = self.backend._and_join(expanded_terms['keywords'])
        kw_q = "to_tsvector(coalesce(keywords::text,'')) @@ to_tsquery(:keywords)"
        query_parts.append(("AND " if where_counter else "WHERE ") + kw_q)

    query_parts.append('ORDER BY score DESC')

    if limit:
        query_parts.append('LIMIT :limit')
        query_params['limit'] = limit

    query_parts.append(';')

    deb_msg = 'Dataset terms conversion: `{}` terms converted to `{}` with `{}` params query.'\
        .format(terms, query_parts, query_params)
    logger.debug(deb_msg)

    q = text('\n'.join(query_parts)), query_params
    logger.debug('Dataset search query: {}'.format(q))
    return q
def _delete(self, vid=None): """ Deletes given dataset from index. Args: vid (str): dataset vid. """ assert vid is not None query = text(""" DELETE FROM dataset_index WHERE vid = :vid; """) self.execute(query, vid=vid)
def _make_query_from_terms(self, terms, limit=None):
    """ Creates a query for partition from decomposed search terms.

    Args:
        terms (dict or unicode or string):

    Returns:
        tuple of (TextClause, dict): First element is FTS query, second
            is parameters of the query. Element of the execution of the query is
            tuple of three elements: (vid, dataset_vid, score).
    """

    expanded_terms = self._expand_terms(terms)

    terms_used = 0

    if expanded_terms['doc']:
        # create query with real score.
        query_parts = ["SELECT vid, dataset_vid, ts_rank_cd(setweight(doc,'C'), to_tsquery(:doc)) as score"]

        if expanded_terms['doc'] and expanded_terms['keywords']:
            query_parts = ["SELECT vid, dataset_vid, ts_rank_cd(setweight(doc,'C'), to_tsquery(:doc)) "
                           " + ts_rank_cd(setweight(to_tsvector(coalesce(keywords::text,'')),'B'), to_tsquery(:keywords))"
                           ' as score']
    else:
        # create query with score = 1 because query will not touch doc field.
        query_parts = ['SELECT vid, dataset_vid, 1 as score']

    query_parts.append('FROM partition_index')
    query_params = {}

    where_count = 0

    if expanded_terms['doc']:
        query_parts.append('WHERE doc @@ to_tsquery(:doc)')
        query_params['doc'] = self.backend._and_join(expanded_terms['doc'])
        where_count += 1
        terms_used += 1

    if expanded_terms['keywords']:
        query_params['keywords'] = self.backend._and_join(expanded_terms['keywords'])
        kw_q = "to_tsvector(coalesce(keywords::text,'')) @@ to_tsquery(:keywords)"
        query_parts.append(("AND " if where_count else "WHERE ") + kw_q)
        where_count += 1
        terms_used += 1

    if expanded_terms['from']:
        query_parts.append(("AND " if where_count else "WHERE ") + ' from_year >= :from_year')
        query_params['from_year'] = expanded_terms['from']
        where_count += 1
        terms_used += 1

    if expanded_terms['to']:
        query_parts.append(("AND " if where_count else "WHERE ") + ' to_year <= :to_year')
        query_params['to_year'] = expanded_terms['to']
        where_count += 1
        terms_used += 1

    query_parts.append('ORDER BY score DESC')

    if limit:
        query_parts.append('LIMIT :limit')
        query_params['limit'] = limit

    if not terms_used:
        logger.debug('No terms used; not creating query')
        return None, None

    query_parts.append(';')

    deb_msg = 'Dataset terms conversion: `{}` terms converted to `{}` with `{}` params query.'\
        .format(terms, query_parts, query_params)
    logger.debug(deb_msg)

    return text('\n'.join(query_parts)), query_params
def search(self, search_phrase, limit=None): """ Finds partitions by search phrase. Args: search_phrase (str or unicode): limit (int, optional): how many results to generate. None means without limit. Generates: PartitionSearchResult instances. """ query, query_params = self._make_query_from_terms(search_phrase, limit=limit) self._parsed_query = (str(query), query_params) if query is not None: self.backend.library.database.set_connection_search_path() results = self.execute(query, **query_params) for result in results: vid, dataset_vid, score = result yield PartitionSearchResult( vid=vid, dataset_vid=dataset_vid, score=score)
def _as_document(self, partition):
    """ Converts a partition to a document for the FTS index.

    Args:
        partition (orm.Partition): partition to convert.

    Returns:
        dict whose structure matches BasePartitionIndex._schema.
    """
    doc = super(self.__class__, self)._as_document(partition)

    # pass time_coverage to the _index_document.
    doc['time_coverage'] = partition.time_coverage

    return doc
def _index_document(self, document, force=False):
    """ Adds partition document to the index. """

    time_coverage = document.pop('time_coverage', [])

    from_year = None
    to_year = None

    if time_coverage:
        from_year = int(time_coverage[0]) if time_coverage and time_coverage[0] else None
        to_year = int(time_coverage[-1]) if time_coverage and time_coverage[-1] else None

    query = text("""
        INSERT INTO partition_index(vid, dataset_vid, title, keywords, doc, from_year, to_year)
        VALUES(:vid, :dataset_vid, :title, string_to_array(:keywords, ' '),
               to_tsvector('english', :doc), :from_year, :to_year);
    """)

    self.execute(query, from_year=from_year, to_year=to_year, **document)
def is_indexed(self, partition): """ Returns True if partition is already indexed. Otherwise returns False. """ query = text(""" SELECT vid FROM partition_index WHERE vid = :vid; """) result = self.execute(query, vid=partition.vid) return bool(result.fetchall())
def all(self): """ Returns list with vids of all indexed partitions. """ partitions = [] query = text(""" SELECT dataset_vid, vid FROM partition_index;""") for result in self.execute(query): dataset_vid, vid = result partitions.append(PartitionSearchResult(dataset_vid=dataset_vid, vid=vid, score=1)) return partitions
def search(self, search_phrase, limit=None): """ Finds identifiers by search phrase. Args: search_phrase (str or unicode): limit (int, optional): how many results to return. None means without limit. Returns: list of IdentifierSearchResult instances. """ query_parts = [ 'SELECT identifier, type, name, similarity(name, :word) AS sml', 'FROM identifier_index', 'WHERE name % :word', 'ORDER BY sml DESC, name'] query_params = { 'word': search_phrase} if limit: query_parts.append('LIMIT :limit') query_params['limit'] = limit query_parts.append(';') query = text('\n'.join(query_parts)) self.backend.library.database.set_connection_search_path() results = self.execute(query, **query_params).fetchall() for result in results: vid, type, name, score = result yield IdentifierSearchResult( score=score, vid=vid, type=type, name=name)
def _index_document(self, identifier, force=False): """ Adds identifier document to the index. """ query = text(""" INSERT INTO identifier_index(identifier, type, name) VALUES(:identifier, :type, :name); """) self.execute(query, **identifier)
def _delete(self, identifier=None): """ Deletes given identifier from index. Args: identifier (str): identifier of the document to delete. """ query = text(""" DELETE FROM identifier_index WHERE identifier = :identifier; """) self.execute(query, identifier=identifier)
def is_indexed(self, identifier): """ Returns True if identifier is already indexed. Otherwise returns False. """ query = text(""" SELECT identifier FROM identifier_index WHERE identifier = :identifier; """) result = self.execute(query, identifier=identifier['identifier']) return bool(result.fetchall())
def all(self): """ Returns list with all indexed identifiers. """ identifiers = [] query = text(""" SELECT identifier, type, name FROM identifier_index;""") for result in self.execute(query): vid, type_, name = result res = IdentifierSearchResult( score=1, vid=vid, type=type_, name=name) identifiers.append(res) return identifiers
def pare(text, size, etc='...'): '''Pare text to have maximum size and add etc to the end if it's changed''' size = int(size) text = text.strip() if len(text)>size: # strip the last word or not to_be_stripped = not whitespace_re.findall(text[size-1:size+2]) text = text[:size] if to_be_stripped: half = size//2 last = None for mo in whitespace_re.finditer(text[half:]): last = mo if last is not None: text = text[:half+last.start()+1] return text.rstrip() + etc else: return text
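A minimal usage sketch for pare, assuming whitespace_re is a module-level compiled whitespace pattern (the pattern itself is not shown in the snippet above):

import re

whitespace_re = re.compile(r'\s+')  # assumption: the module-level pattern pare() relies on

# Truncation happens at a word boundary when possible, and the suffix is appended.
print(pare('hello world foo', 11))   # -> 'hello world...'
print(pare('short', 11))             # -> 'short' (unchanged, no suffix added)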
def get_environment(id=None, name=None): """ Get a specific Environment by name or ID """ data = get_environment_raw(id, name) if data: return utils.format_json(data)
def list_environments_raw(page_size=200, page_index=0, sort="", q=""): """ List all Environments """ response = utils.checked_api_call(pnc_api.environments, 'get_all', page_size=page_size, page_index=page_index, sort=sort, q=q) if response: return response.content
def dimensions(self): """Iterate over the dimension columns, regardless of parent/child status """ from ambry.valuetype.core import ROLE for c in self.columns: if c.role == ROLE.DIMENSION: yield c
def primary_dimensions(self): """Iterate over the primary dimension columns, columns which do not have a parent """ from ambry.valuetype.core import ROLE for c in self.columns: if not c.parent and c.role == ROLE.DIMENSION: yield c
def primary_measures(self): """Iterate over the primary columns, columns which do not have a parent Also sets the property partition_stats to the stats collection for the partition and column. """ from ambry.valuetype.core import ROLE for c in self.columns: if not c.parent and c.role == ROLE.MEASURE: yield c
def add_column(self, name, update_existing=False, **kwargs):
    """ Add a column to the table, or update an existing one.

    :param name: Name of the new or existing column.
    :param update_existing: If True, alter existing column values. Defaults to False
    :param kwargs: Other arguments for the Column() constructor
    :return: a Column object
    """
    from ..identity import ColumnNumber

    try:
        c = self.column(name)
        extant = True

        if not update_existing:
            return c

    except NotFoundError:

        sequence_id = len(self.columns) + 1

        assert sequence_id

        c = Column(t_vid=self.vid,
                   sequence_id=sequence_id,
                   vid=str(ColumnNumber(ObjectNumber.parse(self.vid), sequence_id)),
                   name=name,
                   datatype='str')
        extant = False

    # Update possibly existing data
    c.data = dict((list(c.data.items()) if c.data else []) + list(kwargs.get('data', {}).items()))

    for key, value in list(kwargs.items()):
        if key[0] != '_' and key not in ['t_vid', 'name', 'sequence_id', 'data']:

            # Don't update the type if the user has specified a custom type
            if key == 'datatype' and not c.type_is_builtin():
                continue

            # Don't change a datatype if the value is set and the new value is unknown
            if key == 'datatype' and value == 'unknown' and c.datatype:
                continue

            # Don't overwrite an existing description with an empty value
            if key == 'description' and not value:
                continue

            try:
                setattr(c, key, value)
            except AttributeError:
                raise AttributeError("Column record has no attribute {}".format(key))

            if key == 'is_primary_key' and isinstance(value, str) and len(value) == 0:
                value = False
                setattr(c, key, value)

    # If the id column has a description and the table does not, add it to
    # the table.
    if c.name == 'id' and c.is_primary_key and not self.description:
        self.description = c.description

    if not extant:
        self.columns.append(c)

    return c
def is_empty(self): """Return True if the table has no columns or the only column is the id""" if len(self.columns) == 0: return True if len(self.columns) == 1 and self.columns[0].name == 'id': return True return False
def update_from_stats(self, stats): """Update columns based on partition statistics""" sd = dict(stats) for c in self.columns: if c not in sd: continue stat = sd[c] if stat.size and stat.size > c.size: c.size = stat.size c.lom = stat.lom
def update_id(self, sequence_id=None, force=True):
    """Alter the sequence id, and all of the names and ids derived from it. This
    often needs to be done after an IntegrityError in a multiprocessing run"""
    from ..identity import ObjectNumber

    if sequence_id:
        self.sequence_id = sequence_id

    assert self.d_vid

    if self.id is None or force:
        dataset_id = ObjectNumber.parse(self.d_vid).rev(None)
        self.d_id = str(dataset_id)
        self.id = str(TableNumber(dataset_id, self.sequence_id))

    if self.vid is None or force:
        dataset_vid = ObjectNumber.parse(self.d_vid)
        self.vid = str(TableNumber(dataset_vid, self.sequence_id))
def transforms(self):
    """Return an array of arrays of column transforms.

    The return value is a list of lists, with each list being a segment of column
    transformations, and each segment having one entry per column.
    """
    tr = []

    for c in self.columns:
        tr.append(c.expanded_transform)

    return six.moves.zip_longest(*tr)
def before_insert(mapper, conn, target):
    """event.listen method for Sqlalchemy to set the sequence_id for this object and
    create an ObjectNumber value for the id"""

    if target.sequence_id is None:
        from ambry.orm.exc import DatabaseError
        raise DatabaseError('Must have sequence id before insertion')

    Table.before_update(mapper, conn, target)
def before_update(mapper, conn, target): """Set the Table ID based on the dataset number and the sequence number for the table.""" target.name = Table.mangle_name(target.name) if isinstance(target, Column): raise TypeError('Got a column instead of a table') target.update_id(target.sequence_id, False)
def iter_paragraph(filename, filetype):
    """
    A helper function to iterate through the different types of Wikipedia data inputs.

    :param filename: Path to the input file or directory.
    :param filetype: One of 'jsonzip', 'jsondir' or 'wikidump'.
    :return: A generator yielding a paragraph of text for each iteration.
    """
    assert filetype in ['jsonzip', 'jsondir', 'wikidump']

    # Iterating through paragraphs from the Annotated Wikipedia zipfile.
    if filetype == 'jsonzip':
        with ZipFile(filename, 'r') as zip_in:
            # Iterate through the individual files.
            for infile in zip_in.namelist():
                if infile.endswith('/'):  # Skip the directories.
                    continue
                print(infile, end='\n', file=sys.stderr)  # Logging progress.
                with zip_in.open(infile) as f_in:
                    for line in io.TextIOWrapper(f_in, 'utf8'):
                        # Each line is a separate json.
                        data = json.loads(line)
                        # The useful text is under the 'text' key.
                        yield data['text'].strip()

    # Iterating through paragraphs from the Annotated Wikipedia directory.
    elif filetype == 'jsondir':
        for root, dirs, files in os.walk(filename):
            for wiki_file in files:
                infile = os.path.join(root, wiki_file)
                print(infile, end='\n', file=sys.stderr)  # Logging progress.
                with io.open(infile, 'r', encoding='utf8') as f_in:
                    for line in f_in:
                        # Each line is a separate json.
                        data = json.loads(line)
                        # The useful text is under the 'text' key.
                        yield data['text'].strip()

    # Iterating through paragraphs from the Wikipedia dump.
    elif filetype == 'wikidump':
        # Simply iterate through every line in the dump
        # and treat each line as a paragraph.
        with io.open(filename, 'r', encoding='utf8') as f_in:
            for line_count, paragraph in enumerate(f_in):
                if line_count % 100000 == 0:  # Log progress every 100,000 lines.
                    _msg = 'Processing line {}\n'.format(line_count)
                    print(_msg, file=sys.stderr)
                if paragraph:
                    yield paragraph
def deduplicate(s, ch): """ From http://stackoverflow.com/q/42216559/610569 s = 'this is an irritating string with random spacing .' deduplicate(s) 'this is an irritating string with random spacing .' """ return ch.join([substring for substring in s.strip().split(ch) if substring])
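A short usage sketch for deduplicate; runs of the separator ch are collapsed to a single occurrence:

print(deduplicate('this  is   an    example', ' '))  # -> 'this is an example'
print(deduplicate('a..b...c', '.'))                  # -> 'a.b.c'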
def remove_text_inside_brackets(s, brackets="()[]"): """ From http://stackoverflow.com/a/14603508/610569 """ count = [0] * (len(brackets) // 2) # count open/close brackets saved_chars = [] for character in s: for i, b in enumerate(brackets): if character == b: # found bracket kind, is_close = divmod(i, 2) count[kind] += (-1)**is_close # `+1`: open, `-1`: close if count[kind] < 0: # unbalanced bracket count[kind] = 0 break else: # character is not a bracket if not any(count): # outside brackets saved_chars.append(character) return ''.join(saved_chars)
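A short usage sketch; text between matching round or square brackets is dropped, while the surrounding text (including the spaces that flanked the brackets) is kept:

s = 'take one tablet (with food) twice [or three times] daily'
print(remove_text_inside_brackets(s))
# -> 'take one tablet  twice  daily'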
def color_print(s, color=None, highlight=None, end='\n', file=sys.stdout, **kwargs): """ From http://stackoverflow.com/a/287944/610569 See also https://gist.github.com/Sheljohn/68ca3be74139f66dbc6127784f638920 """ if color in palette and color != 'default': s = palette[color] + s # Highlight / Background color. if highlight and highlight in highlighter: s = highlighter[highlight] + s # Custom string format. for name, value in kwargs.items(): if name in formatter and value == True: s = formatter[name] + s print(s + palette['default'], end=end, file=file)
def wait_for_tasks(self, raise_if_error=True):
    """
    Wait for the running tasks launched from the sessions.

    Note that it also waits for tasks that are started from other tasks'
    callbacks, like on_finished.

    :param raise_if_error: if True, raise all possible encountered errors
        using :class:`TaskErrors`. Else the errors are returned as a list.
    """
    errors = []
    tasks_seen = TaskCache()
    while True:
        for session in self.values():
            errs = session.wait_for_tasks(raise_if_error=False)
            errors.extend(errs)
        # look for tasks created after the wait (in callbacks of
        # tasks from different sessions)
        tasks = []
        for session in self.values():
            tasks.extend(session.tasks())
        # if none, then just break - else loop to wait for them
        if not any(t for t in tasks if t not in tasks_seen):
            break
    if raise_if_error and errors:
        raise TaskErrors(errors)
    return errors
def error(self): """ Return an instance of Exception if any, else None. Actually check for a :class:`TimeoutError` or a :class:`ExitCodeError`. """ if self.__timed_out: return TimeoutError(self.session, self, "timeout") if self.__exit_code is not None and \ self.__expected_exit_code is not None and \ self.__exit_code != self.__expected_exit_code: return ExitCodeError(self.session, self, 'bad exit code: Got %s' % self.__exit_code)
def create_new(self, **kwargs): """ Creates a new License This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.create_new(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param LicenseRest body: :return: LicenseSingleton If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.create_new_with_http_info(**kwargs) else: (data) = self.create_new_with_http_info(**kwargs) return data
def delete(self, id, **kwargs): """ Deletes an existing License This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: License id (required) :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.delete_with_http_info(id, **kwargs) else: (data) = self.delete_with_http_info(id, **kwargs) return data
def get_all(self, **kwargs): """ Gets all Licenses This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_all(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int page_index: Page Index :param int page_size: Pagination size :param str sort: Sorting RSQL :param str q: RSQL Query :return: LicensePage If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_all_with_http_info(**kwargs) else: (data) = self.get_all_with_http_info(**kwargs) return data
def get_specific(self, id, **kwargs): """ Get specific License This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_specific(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: License id (required) :return: LicenseSingleton If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_specific_with_http_info(id, **kwargs) else: (data) = self.get_specific_with_http_info(id, **kwargs) return data
def update(self, id, **kwargs): """ Updates an existing License This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.update(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: License id (required) :param LicenseRest body: :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.update_with_http_info(id, **kwargs) else: (data) = self.update_with_http_info(id, **kwargs) return data
def incver(self): """Increment all of the version numbers""" d = {} for p in self.__mapper__.attrs: if p.key in ['vid','vname','fqname', 'version', 'cache_key']: continue if p.key == 'revision': d[p.key] = self.revision + 1 else: d[p.key] = getattr(self, p.key) n = Dataset(**d) return n
def next_sequence_id(self, table_class, force_query=False):
    """
    Return the next sequence id for an object, identified by the vid of the parent object,
    and the database prefix for the child object.

    On the first call, will load the max sequence number from the database, but subsequent
    calls will run in process, so this isn't suitable for multi-process operation -- all of
    the tables in a dataset should be created by one process.

    The child table must have a sequence_id value.
    """
    from . import next_sequence_id
    from sqlalchemy.orm import object_session

    # NOTE: This next_sequence_id uses a different algorithm than dataset.next_sequence_id
    # FIXME replace this one with dataset.next_sequence_id

    return next_sequence_id(object_session(self), self._sequence_ids, self.vid, table_class,
                            force_query=force_query)
def new_unique_object(self, table_class, sequence_id=None, force_query=False, **kwargs):
    """Use next_sequence_id to create a new child of the dataset, with a unique id"""
    from sqlalchemy.exc import IntegrityError
    from sqlalchemy.orm.exc import FlushError

    # If a sequence ID was specified, the caller is certain
    # that there is no potential for conflicts,
    # so there is no need to commit here.
    if not sequence_id:
        commit = True
        sequence_id = self.next_sequence_id(table_class, force_query=force_query)
    else:
        commit = False

    o = table_class(
        d_vid=self.vid,
        **kwargs
    )

    o.update_id(sequence_id)

    if commit is False:
        return o

    self.commit()

    if self._database.driver == 'sqlite':
        # The Sqlite database can't have concurrency, so there's no problem.
        self.session.add(o)
        self.commit()
        return o

    else:
        # Postgres. Concurrency is a bitch.

        table_name = table_class.__tablename__
        child_sequence_id = table_class.sequence_id.property.columns[0].name

        try:
            self.session.add(o)
            self.commit()
            return o
        except (IntegrityError, FlushError) as e:
            self.rollback()
            self.session.merge(self)
            print('Failed')
            return None

    return

    # NOTE: the bare return above makes the retry loop below unreachable as written.
    # This is horrible, but it's the only thing that has worked for both
    # Sqlite and Postgres in both single processes and multiprocesses.
    d_vid = self.vid

    while True:
        try:
            self.session.add(o)
            self.commit()
            return o
        except (IntegrityError, FlushError) as e:
            self.rollback()
            self.session.expunge_all()
            ds = self._database.dataset(d_vid)
            sequence_id = ds.next_sequence_id(table_class, force_query=True)
            o.update_id(sequence_id)

        except Exception as e:
            print('Completely failed to get a new {} sequence_id; {}'.format(table_class, e))
            self.rollback()

            import traceback
            # This bit is helpful in a multiprocessing run.
            tb = traceback.format_exc()
            print(tb)
            raise
def new_table(self, name, add_id=True, **kwargs):
    '''Add a table to the schema, or update it if it already exists.

    If updating, will only update data.
    '''
    from . import Table
    from .exc import NotFoundError

    try:
        table = self.table(name)
        extant = True
    except NotFoundError:
        extant = False

        if 'sequence_id' not in kwargs:
            kwargs['sequence_id'] = self._database.next_sequence_id(Dataset, self.vid, Table)

        table = Table(name=name, d_vid=self.vid, **kwargs)

        table.update_id()

    # Update possibly extant data
    table.data = dict(
        (list(table.data.items()) if table.data else []) + list(kwargs.get('data', {}).items()))

    for key, value in list(kwargs.items()):
        if not key:
            continue
        if key[0] != '_' and key not in ['vid', 'id', 'id_', 'd_id', 'name', 'sequence_id',
                                         'table', 'column', 'data']:
            setattr(table, key, value)

    if add_id:
        table.add_id_column()

    if not extant:
        self.tables.append(table)

    return table
def new_partition(self, table, **kwargs): """ Creates new partition and returns it. Args: table (orm.Table): Returns: orm.Partition """ from . import Partition # Create the basic partition record, with a sequence ID. if isinstance(table, string_types): table = self.table(table) if 'sequence_id' in kwargs: sequence_id = kwargs['sequence_id'] del kwargs['sequence_id'] else: sequence_id = self._database.next_sequence_id(Dataset, self.vid, Partition) p = Partition( t_vid=table.vid, table_name=table.name, sequence_id=sequence_id, dataset=self, d_vid=self.vid, **kwargs ) p.update_id() return p
def partition(self, ref=None, **kwargs): """ Returns partition by ref. """ from .exc import NotFoundError from six import text_type if ref: for p in self.partitions: # This is slow for large datasets, like Census years. if (text_type(ref) == text_type(p.name) or text_type(ref) == text_type(p.id) or text_type(ref) == text_type(p.vid)): return p raise NotFoundError("Failed to find partition for ref '{}' in dataset '{}'".format(ref, self.name)) elif kwargs: from ..identity import PartitionNameQuery pnq = PartitionNameQuery(**kwargs) return self._find_orm
def bsfile(self, path): """Return a Build Source file ref, creating a new one if the one requested does not exist""" from sqlalchemy.orm.exc import NoResultFound from ambry.orm.exc import NotFoundError try: f = object_session(self)\ .query(File)\ .filter(File.d_vid == self.vid)\ .filter(File.major_type == File.MAJOR_TYPE.BUILDSOURCE)\ .filter(File.path == path)\ .one() return f except NoResultFound: raise NotFoundError("Failed to find file for path '{}' ".format(path))
def row(self, fields): """Return a row for fields, for CSV files, pretty printing, etc, give a set of fields to return""" d = self.dict row = [None] * len(fields) for i, f in enumerate(fields): if f in d: row[i] = d[f] return row
def metadata(self):
    """Access process configuration values as attributes. """
    from ambry.metadata.schema import Top  # cross-module import

    top = Top()
    top.build_from_db(self.dataset)

    return top
def rows(self): """Return configuration in a form that can be used to reconstitute a Metadata object. Returns all of the rows for a dataset. This is distinct from get_config_value, which returns the value for the library. """ from ambry.orm import Config as SAConfig from sqlalchemy import or_ rows = [] configs = self.dataset.session\ .query(SAConfig)\ .filter(or_(SAConfig.group == 'config', SAConfig.group == 'process'), SAConfig.d_vid == self.dataset.vid)\ .all() for r in configs: parts = r.key.split('.', 3) if r.group == 'process': parts = ['process'] + parts cr = ((parts[0] if len(parts) > 0 else None, parts[1] if len(parts) > 1 else None, parts[2] if len(parts) > 2 else None ), r.value) rows.append(cr) return rows
def _set_value(instance_to_path_map, path_to_instance_map, prop_tree, config_instance):
    """ Finds appropriate term in the prop_tree and sets its value from config_instance.

    Args:
        instance_to_path_map (dict): maps Config instance to its path in the property tree.
        path_to_instance_map (dict): maps path to Config instance (AKA cache of the configs).
        prop_tree (PropertyDictTree): property tree to populate.
        config_instance (Config):
    """
    path = instance_to_path_map[config_instance]

    # find group
    group = prop_tree

    for elem in path[:-1]:
        group = getattr(group, elem)

    assert group._key == config_instance.parent.key
    setattr(group, config_instance.key, config_instance.value)

    #
    # bind config to the term
    #
    # FIXME: Make all the terms store the config instance the same way.

    term = getattr(group, config_instance.key)

    try:
        if hasattr(term, '_term'):
            # ScalarTermS and ScalarTermU case
            term._term._config = config_instance
            return
    except KeyError:
        # python3 case. TODO: Find the way to make it simple.
        pass

    try:
        if hasattr(term, '_config'):
            term._config = config_instance
            return
    except KeyError:
        # python3 case. TODO: Find the way to make it simple.
        pass
    else:
        pass
def get_or_create(session, model, **kwargs): """ Get or create sqlalchemy instance. Args: session (Sqlalchemy session): model (sqlalchemy model): kwargs (dict): kwargs to lookup or create instance. Returns: Tuple: first element is found or created instance, second is boolean - True if instance created, False if instance found. """ instance = session.query(model).filter_by(**kwargs).first() if instance: return instance, False else: instance = model(**kwargs) if 'dataset' in kwargs: instance.update_sequence_id(session, kwargs['dataset']) session.add(instance) session.commit() return instance, True
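A hypothetical usage sketch for get_or_create; `session` and the `Tag` model are assumptions standing in for any configured SQLAlchemy session and mapped class (the `dataset` branch only applies to models created with a dataset kwarg):

# Assumed to exist: a configured SQLAlchemy `session` and a mapped `Tag` model.
tag, created = get_or_create(session, Tag, name='census')
if created:
    print('inserted a new Tag row')
else:
    print('reused existing Tag row', tag)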
def _get_config_instance(group_or_term, session, **kwargs): """ Finds appropriate config instance and returns it. Args: group_or_term (Group or Term): session (Sqlalchemy session): kwargs (dict): kwargs to pass to get_or_create. Returns: tuple of (Config, bool): """ path = group_or_term._get_path() cached = group_or_term._top._cached_configs.get(path) if cached: config = cached created = False else: # does not exist or not yet cached config, created = get_or_create(session, Config, **kwargs) return config, created
def register_members(self): """Collect the names of the class member and convert them to object members. Unlike Terms, the Group class members are converted into object members, so the configuration data """ self._members = { name: attr for name, attr in iteritems(type(self).__dict__) if isinstance(attr, Group)} for name, m in iteritems(self._members): m.init_descriptor(name, self)
def add_error(self, group, term, sub_term, value): """For records that are not defined as terms, either add it to the errors list.""" self._errors[(group, term, sub_term)] = value
def _jinja_sub(self, st):
    """Create a Jinja template engine, then perform substitutions on a string"""

    if isinstance(st, string_types):
        from jinja2 import Template

        try:
            for i in range(5):  # Only do 5 recursive substitutions.
                st = Template(st).render(**(self._top.dict))
                if '{{' not in st:
                    break
            return st
        except Exception as e:
            return st
            # raise ValueError(
            #     "Failed to render jinja template for metadata value '{}': {}".format(st, e))

    return st
def scalar_term(self, st): """Return a _ScalarTermS or _ScalarTermU from a string, to perform text and HTML substitutions""" if isinstance(st, binary_type): return _ScalarTermS(st, self._jinja_sub) elif isinstance(st, text_type): return _ScalarTermU(st, self._jinja_sub) elif st is None: return _ScalarTermU(u(''), self._jinja_sub) else: return st
def update_config(self): """ Updates or creates config of that group. Requires tree bound to db. """ dataset = self._top._config.dataset session = object_session(self._top._config) logger.debug( 'Updating group config. dataset: {}, type: {}, key: {}'.format(dataset.vid, self._top._type, self._key)) self._config, created = _get_config_instance( self, session, parent_id=self._parent._config.id, d_vid=dataset.vid, group=self._key, key=self._key, type=self._top._type, dataset = dataset) if created: self._top._cached_configs[self._get_path()] = self._config self._top._add_valid(self._config) if created: logger.debug( 'New group config created and linked. config: {}'.format(self._config)) else: logger.debug( 'Existing group config linked. config: {}'.format(self._config))
def get_group_instance(self, parent): """Create an instance object""" o = copy.copy(self) o.init_instance(parent) return o
def update_config(self, key, value): """ Creates or updates db config of the VarDictGroup. Requires bound to db tree. """ dataset = self._top._config.dataset session = object_session(self._top._config) logger.debug( 'Updating VarDictGroup config. dataset: {}, type: {}, key: {}, value: {}'.format( dataset, self._top._type, key, value)) if not self._parent._config: self._parent.update_config() # create or update group config self._config, created = get_or_create( session, Config, d_vid=dataset.vid, type=self._top._type, parent=self._parent._config, group=self._key, key=self._key,dataset=dataset) self._top._add_valid(self._config) # create or update value config config, created = get_or_create( session, Config, parent=self._config, d_vid=dataset.vid, type=self._top._type, key=key,dataset=dataset) if config.value != value: # sync db value with term value. config.value = value session.merge(config) session.commit() logger.debug( 'Config bound to the VarDictGroup key updated. config: {}'.format(config)) self._top._add_valid(config)
def update_config(self): """ Creates or updates db config of the term. Requires bound to db tree. """ dataset = self._top._config.dataset session = object_session(self._top._config) #logger.debug('Updating term config. dataset: {}, type: {}, key: {}, value: {}'.format( # dataset, self._top._type, self._key, self.get())) if not self._parent._config: self._parent.update_config() self._config, created = _get_config_instance( self, session, parent=self._parent._config, d_vid=dataset.vid, type=self._top._type, key=self._key, dataset=dataset) if created: self._top._cached_configs[self._get_path()] = self._config # We update ScalarTerm and ListTerm values only. Composite terms (DictTerm for example) # should not contain value. if isinstance(self, (ScalarTerm, ListTerm)): if self._config.value != self.get(): self._config.value = self.get() session.merge(self._config) session.commit() self._top._add_valid(self._config)
def text(self): """Interpret the scalar as Markdown, strip the HTML and return text""" s = MLStripper() s.feed(self.html) return s.get_data()
def start(self, *args, **kwargs): """ Start to read the stream(s). """ queue = Queue() stdout_reader, stderr_reader = \ self._create_readers(queue, *args, **kwargs) self.thread = threading.Thread(target=self._read, args=(stdout_reader, stderr_reader, queue)) self.thread.daemon = True self.thread.start()
def quoteattrs(data): '''Takes dict of attributes and returns their HTML representation''' items = [] for key, value in data.items(): items.append('{}={}'.format(key, quoteattr(value))) return ' '.join(items)
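A short usage sketch, assuming the quoteattr used here is xml.sax.saxutils.quoteattr:

# With quoteattr from xml.sax.saxutils (an assumption about this module's import):
print(quoteattrs({'class': 'btn primary', 'title': 'Say "hi"'}))
# -> class="btn primary" title='Say "hi"'  (order follows dict iteration order)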
def quote_js(text): '''Quotes text to be used as JavaScript string in HTML templates. The result doesn't contain surrounding quotes.''' if isinstance(text, six.binary_type): text = text.decode('utf-8') # for Jinja2 Markup text = text.replace('\\', '\\\\'); text = text.replace('\n', '\\n'); text = text.replace('\r', ''); for char in '\'"<>&': text = text.replace(char, '\\x{:02x}'.format(ord(char))) return text
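A short usage sketch; quotes, angle brackets and ampersands are escaped to \xNN sequences so the result is safe inside single- or double-quoted JS strings:

print(quote_js('He said "hi" & left\n'))
# -> He said \x22hi\x22 \x26 left\n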
def create_ramp_plan(err, ramp):
    """
    Formulate and execute on a plan to slowly add heat or cooling to the system

    `err` initial error (PV - SP)
    `ramp` the size of the ramp

    A ramp plan might yield MVs in this order at every timestep:

        [5, 0, 4, 0, 3, 0, 2, 0, 1]

    where err == 5 + 4 + 3 + 2 + 1
    """
    if ramp == 1:  # basecase
        yield int(err)
        while True:
            yield 0

    # np.arange(n).sum() == err  --> solve for n
    # err = (n - 1) * (n // 2) == .5 * n**2 - .5 * n
    # 0 = n**2 - n  --> solve for n
    n = np.abs(np.roots([.5, -.5, 0]).max())
    niter = int(ramp // (2 * n))  # 2 means add all MV in first half of ramp
    MV = n
    log.info('Initializing a ramp plan', extra=dict(
        ramp_size=ramp, err=err, niter=niter))

    for x in range(int(n)):
        budget = MV
        for x in range(niter):
            budget -= MV // niter
            yield int(np.sign(err) * (MV // niter))
        yield int(budget * np.sign(err))
        MV -= 1
    while True:
        yield 0
def evaluate_stop_condition(errdata, stop_condition): """ Call the user-defined function: stop_condition(errdata) If the function returns -1, do nothing. Otherwise, sys.exit. """ if stop_condition: return_code = stop_condition(list(errdata)) if return_code != -1: log.info( 'Stop condition triggered! Relay is terminating.', extra=dict(return_code=return_code)) sys.exit(return_code)
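A hypothetical sketch of the stop-condition contract: the callable receives the error history and returns -1 to keep running, or an exit code to terminate the process; `stop_when_settled` below is an illustrative assumption, not part of the library:

# Hypothetical stop condition: exit with code 0 once the recent errors are all zero.
def stop_when_settled(errdata):
    return 0 if errdata and all(e == 0 for e in errdata) else -1

evaluate_stop_condition([3, 1, 0], stop_when_settled)  # returns None, keeps running
evaluate_stop_condition([0, 0, 0], stop_when_settled)  # logs and calls sys.exit(0)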
def check(codeString, filename):
    """
    Check the Python source given by C{codeString} for flakes.

    @param codeString: The Python source to check.
    @type codeString: C{str}

    @param filename: The name of the file the source came from, used to report
        errors.
    @type filename: C{str}

    @return: The number of warnings emitted.
    @rtype: C{int}
    """
    # First, compile into an AST and handle syntax errors.
    try:
        tree = compile(codeString, filename, "exec", ast.PyCF_ONLY_AST)
    except SyntaxError, value:
        msg = value.args[0]

        (lineno, offset, text) = value.lineno, value.offset, value.text

        # If there's an encoding problem with the file, the text is None.
        if text is None:
            # Avoid using msg, since for the only known case, it contains a
            # bogus message that claims the encoding the file declared was
            # unknown.
            sys.stderr.write("%s: problem decoding source\n" % (filename, ))
        else:
            line = text.splitlines()[-1]

            if offset is not None:
                offset = offset - (len(text) - len(line))

            sys.stderr.write('%s:%d: %s' % (filename, lineno, msg))
            sys.stderr.write(line + '\n')

            if offset is not None:
                sys.stderr.write(" " * offset + "^\n")

        return 1
    else:
        # Okay, it's syntactically valid. Now check it.
        w = checker.Checker(tree, filename)

        lines = codeString.split('\n')
        messages = [message for message in w.messages
                    if lines[message.lineno - 1].find('pyflakes:ignore') < 0]
        messages.sort(lambda a, b: cmp(a.lineno, b.lineno))

        false_positives = 0
        for warning in messages:
            if not (re.match('.*__init__.py', str(warning))
                    and isinstance(warning, (UnusedImport, ImportStarUsed))):
                print(warning)
            else:
                false_positives += 1

        return len(messages) - false_positives
def checkPath(filename): """ Check the given path, printing out any warnings detected. @return: the number of warnings printed """ try: return check(file(filename, 'U').read() + '\n', filename) except IOError, msg: sys.stderr.write("%s: %s\n" % (filename, msg.args[1])) return 1
def clean_value(self): ''' Current field's converted value from form's python_data. ''' # XXX cached_property is used only for set initial state # this property should be set every time field data # has been changed, for instance, in accept method python_data = self.parent.python_data if self.name in python_data: return python_data[self.name] return self.get_initial()
def accept(self): '''Extracts raw value from form's raw data and passes it to converter''' value = self.raw_value if not self._check_value_type(value): # XXX should this be silent or TypeError? value = [] if self.multiple else self._null_value self.clean_value = self.conv.accept(value) return {self.name: self.clean_value}
def python_data(self): '''Representation of aggregate value as dictionary.''' try: value = self.clean_value except LookupError: # XXX is this necessary? value = self.get_initial() return self.from_python(value)
def accept(self): ''' Accepts all children fields, collects resulting values into dict and passes that dict to converter. Returns result of converter as separate value in parent `python_data` ''' result = dict(self.python_data) for field in self.fields: if field.writable: result.update(field.accept()) else: # readonly field field.set_raw_value(self.form.raw_data, field.from_python(result[field.name])) self.clean_value = self.conv.accept(result) return {self.name: self.clean_value}
def accept(self): ''' Acts as `Field.accepts` but returns result of every child field as value in parent `python_data`. ''' result = FieldSet.accept(self) self.clean_value = result[self.name] return self.clean_value
def handle(conn, addr, gateway, *args, **kwargs): """ NOTE: use tcp instead of udp because some operations need ack """ conn.sendall(b'OK pubsub 1.0\n') while True: try: s = conn.recv(1024).decode('utf-8').strip() if not s: conn.close() break except ConnectionResetError: logger.debug('Client close the connection.') break parts = s.split(' ') if len(parts) != 2: conn.send(b"Invalid command\n") continue cmd, topic = parts if cmd.lower() != 'sub': conn.send(bytes("Unknown command '{}'\n".format(cmd.lower()), 'utf-8')) continue if topic not in gateway.topics: conn.send(bytes("Unknown topic '{}'\n".format(topic), 'utf-8')) continue conn.sendall(bytes('ACK {} {}\n'.format(cmd, topic), 'utf-8')) subscriber = Subscriber(addr, conn) gateway.link(topic, subscriber) break
def find_files(dir_path, extension="*"): """ From https://stackoverflow.com/a/2186565/610569 """ if sys.version_info.major == 3 and sys.version_info.minor >= 5: pattern = '/'.join([dir_path, '**', extension]) for filename in glob.iglob(pattern, recursive=True): yield filename else: for root, dirnames, filenames in os.walk(dir_path): for filename in fnmatch.filter(filenames, extension): yield os.path.join(root, filename)
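A short usage sketch; the directory path is just an illustrative example:

# Recursively list all JSON files under an example directory.
for path in find_files('data/wiki_json', extension='*.json'):
    print(path)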
def getTicker(pair, connection=None, info=None): """Retrieve the ticker for the given pair. Returns a Ticker instance.""" if info is not None: info.validate_pair(pair) if connection is None: connection = common.BTCEConnection() response = connection.makeJSONRequest("/api/3/ticker/%s" % pair) if type(response) is not dict: raise TypeError("The response is a %r, not a dict." % type(response)) elif u'error' in response: print("There is a error \"%s\" while obtaining ticker %s" % (response['error'], pair)) ticker = None else: ticker = Ticker(**response[pair]) return ticker
def getDepth(pair, connection=None, info=None): """Retrieve the depth for the given pair. Returns a tuple (asks, bids); each of these is a list of (price, volume) tuples.""" if info is not None: info.validate_pair(pair) if connection is None: connection = common.BTCEConnection() response = connection.makeJSONRequest("/api/3/depth/%s" % pair) if type(response) is not dict: raise TypeError("The response is not a dict.") depth = response.get(pair) if type(depth) is not dict: raise TypeError("The pair depth is not a dict.") asks = depth.get(u'asks') if type(asks) is not list: raise TypeError("The response does not contain an asks list.") bids = depth.get(u'bids') if type(bids) is not list: raise TypeError("The response does not contain a bids list.") return asks, bids
def getTradeHistory(pair, connection=None, info=None, count=None): """Retrieve the trade history for the given pair. Returns a list of Trade instances. If count is not None, it should be an integer, and specifies the number of items from the trade history that will be processed and returned.""" if info is not None: info.validate_pair(pair) if connection is None: connection = common.BTCEConnection() response = connection.makeJSONRequest("/api/3/trades/%s" % pair) if type(response) is not dict: raise TypeError("The response is not a dict.") history = response.get(pair) if type(history) is not list: raise TypeError("The response is a %r, not a list." % type(history)) result = [] # Limit the number of items returned if requested. if count is not None: history = history[:count] for h in history: h["pair"] = pair t = Trade(**h) result.append(t) return result
def default(self): """Return default contents""" import ambry.bundle.default_files as df import os path = os.path.join(os.path.dirname(df.__file__), self.file_name) if six.PY2: with open(path, 'rb') as f: return f.read() else: # py3 with open(path, 'rt', encoding='utf-8') as f: return f.read()
def remove(self): """ Removes file from filesystem. """ from fs.errors import ResourceNotFoundError try: self._fs.remove(self.file_name) except ResourceNotFoundError: pass
def sync_dir(self): """ Report on which direction a synchronization should be done. :return: """ # NOTE: These are ordered so the FILE_TO_RECORD has preference over RECORD_TO_FILE # if there is a conflict. if self.exists() and bool(self.size()) and not self.record.size: # The fs exists, but the record is empty return self.SYNC_DIR.FILE_TO_RECORD if (self.fs_modtime or 0) > (self.record.modified or 0) and self.record.source_hash != self.fs_hash: # Filesystem is newer return self.SYNC_DIR.FILE_TO_RECORD if self.record.size and not self.exists(): # Record exists, but not the FS return self.SYNC_DIR.RECORD_TO_FILE if (self.record.modified or 0) > (self.fs_modtime or 0): # Record is newer return self.SYNC_DIR.RECORD_TO_FILE return None
def sync(self, force=None): """Synchronize between the file in the file system and the field record""" try: if force: sd = force else: sd = self.sync_dir() if sd == self.SYNC_DIR.FILE_TO_RECORD: if force and not self.exists(): return None self.fs_to_record() elif sd == self.SYNC_DIR.RECORD_TO_FILE: self.record_to_fs() else: return None self._dataset.config.sync[self.file_const][sd] = time.time() return sd except Exception as e: self._bundle.rollback() self._bundle.error("Failed to sync '{}': {}".format(self.file_const, e)) raise
def fh_to_record(self, f):
    """Load a file in the filesystem into the file record"""
    import unicodecsv as csv

    fn_path = self.file_name

    fr = self.record
    fr.path = fn_path

    rows = []

    # NOTE. There were two cases here, for PY2 and PY3. Py two had
    # encoding='utf-8' in the reader. I've combined them b/c that's the default for
    # unicode csv, so it shouldn't be necessary.

    # Should probably be something like this:
    # if sys.version_info[0] >= 3:  # Python 3
    #     import csv
    #     f = open(self._fstor.syspath, 'rtU', encoding=encoding)
    #     reader = csv.reader(f)
    # else:  # Python 2
    #     import unicodecsv as csv
    #     f = open(self._fstor.syspath, 'rbU')
    #     reader = csv.reader(f, encoding=encoding)

    for row in csv.reader(f):
        row = [e if e.strip() != '' else None for e in row]
        if any(bool(e) for e in row):
            rows.append(row)

    try:
        fr.update_contents(msgpack.packb(rows), 'application/msgpack')
    except AssertionError:
        raise

    fr.source_hash = self.fs_hash
    fr.synced_fs = self.fs_modtime
    fr.modified = self.fs_modtime
def record_to_fs(self): """Create a filesystem file from a File""" fr = self.record fn_path = self.file_name if fr.contents: if six.PY2: with self._fs.open(fn_path, 'wb') as f: self.record_to_fh(f) else: # py3 with self._fs.open(fn_path, 'w', newline='') as f: self.record_to_fh(f)
def record_to_fh(self, f): """Write the record, in filesystem format, to a file handle or file object""" fr = self.record if fr.contents: yaml.safe_dump(fr.unpacked_contents, f, default_flow_style=False, encoding='utf-8') fr.source_hash = self.fs_hash fr.modified = self.fs_modtime
def fh_to_record(self, f): """Load a file in the filesystem into the file record""" fn_path = self.file_name fr = self.record fr.path = fn_path fr.update_contents(f.read(), 'text/plain') fr.source_hash = self.fs_hash fr.synced_fs = self.fs_modtime fr.modified = self.fs_modtime
def record_to_objects(self): """Create config records to match the file metadata""" from ..util import AttrDict fr = self.record contents = fr.unpacked_contents if not contents: return ad = AttrDict(contents) # Get time that filessystem was synchronized to the File record. # Maybe use this to avoid overwriting configs that changed by bundle program. # fs_sync_time = self._dataset.config.sync[self.file_const][self.file_to_record] self._dataset.config.metadata.set(ad) self._dataset._database.commit() return ad
def objects_to_record(self): """Write from object metadata to the record. Note that we don't write everything""" o = self.get_object() o.about = self._bundle.metadata.about o.identity = self._dataset.identity.ident_dict o.names = self._dataset.identity.names_dict o.contacts = self._bundle.metadata.contacts self.set_object(o)
def update_identity(self): """Update the identity and names to match the dataset id and version""" fr = self.record d = fr.unpacked_contents d['identity'] = self._dataset.identity.ident_dict d['names'] = self._dataset.identity.names_dict fr.update_contents(msgpack.packb(d), 'application/msgpack')
def get_object(self): """Return contents in object form, an AttrDict""" from ..util import AttrDict c = self.record.unpacked_contents if not c: c = yaml.safe_load(self.default) return AttrDict(c)
def default(self): """Return default contents""" import json import ambry.bundle.default_files as df import os path = os.path.join(os.path.dirname(df.__file__), 'notebook.ipynb') if six.PY2: with open(path, 'rb') as f: content_str = f.read() else: # py3 with open(path, 'rt', encoding='utf-8') as f: content_str = f.read() c = json.loads(content_str) context = { 'title': self._bundle.metadata.about.title, 'summary': self._bundle.metadata.about.title, 'bundle_vname': self._bundle.identity.vname } for cell in c['cells']: for i in range(len(cell['source'])): cell['source'][i] = cell['source'][i].format(**context) c['metadata']['ambry'] = { 'identity': self._bundle.identity.dict } return json.dumps(c, indent=4)