Dataset columns: sentence1 (string, lengths 52 to 3.87M), sentence2 (string, lengths 1 to 47.2k), label (string class, 1 value: entailment).
def detect_column_renamings(self, table_differences):
    """
    Try to find columns that only changed their names.

    :type table_differences: TableDiff
    """
    rename_candidates = {}

    for added_column_name, added_column in table_differences.added_columns.items():
        for removed_column in table_differences.removed_columns.values():
            if len(self.diff_column(added_column, removed_column)) == 0:
                if added_column.get_name() not in rename_candidates:
                    rename_candidates[added_column.get_name()] = []

                # Append the candidate; the original assignment replaced the
                # list outright, which made the len() == 1 check below useless.
                rename_candidates[added_column.get_name()].append(
                    (removed_column, added_column, added_column_name)
                )

    for candidate_columns in rename_candidates.values():
        if len(candidate_columns) == 1:
            removed_column, added_column, _ = candidate_columns[0]
            removed_column_name = removed_column.get_name().lower()
            added_column_name = added_column.get_name().lower()

            if removed_column_name not in table_differences.renamed_columns:
                table_differences.renamed_columns[removed_column_name] = added_column

                del table_differences.added_columns[added_column_name]
                del table_differences.removed_columns[removed_column_name]
Try to find columns that only changed their names. :type table_differences: TableDiff
entailment
def diff_column(self, column1, column2):
    """
    Returns the difference between column1 and column2

    :type column1: eloquent.dbal.column.Column
    :type column2: eloquent.dbal.column.Column

    :rtype: list
    """
    properties1 = column1.to_dict()
    properties2 = column2.to_dict()

    changed_properties = []

    for prop in ['type', 'notnull', 'unsigned', 'autoincrement']:
        if properties1[prop] != properties2[prop]:
            changed_properties.append(prop)

    if properties1['default'] != properties2['default']\
            or (properties1['default'] is None and properties2['default'] is not None)\
            or (properties2['default'] is None and properties1['default'] is not None):
        changed_properties.append('default')

    if properties1['type'] == 'string' and properties1['type'] != 'guid'\
            or properties1['type'] in ['binary', 'blob']:
        length1 = properties1['length'] or 255
        length2 = properties2['length'] or 255

        if length1 != length2:
            changed_properties.append('length')

        if properties1['fixed'] != properties2['fixed']:
            changed_properties.append('fixed')
    elif properties1['type'] in ['decimal', 'float', 'double precision']:
        precision1 = properties1['precision'] or 10
        precision2 = properties2['precision'] or 10

        if precision1 != precision2:
            changed_properties.append('precision')

        if properties1['scale'] != properties2['scale']:
            changed_properties.append('scale')

    return list(set(changed_properties))
Returns the difference between column1 and column2 :type column1: eloquent.dbal.column.Column :type column2: eloquent.dbal.column.Column :rtype: list
entailment
def execute(self, i, o):
    """
    Executes the command.

    :type i: cleo.inputs.input.Input
    :type o: cleo.outputs.output.Output
    """
    config = self._get_config(i)

    self._resolver = DatabaseManager(config)
Executes the command. :type i: cleo.inputs.input.Input :type o: cleo.outputs.output.Output
entailment
def call(self, name, options=None, o=None):
    """
    Call another command.

    :param name: The command name
    :type name: str

    :param options: The options
    :type options: list or None

    :param o: The output
    :type o: cleo.outputs.output.Output
    """
    if options is None:
        options = []

    command = self.get_application().find(name)

    options = [('command', command.get_name())] + options

    return command.run(ListInput(options), o)
Call another command. :param name: The command name :type name: str :param options: The options :type options: list or None :param o: The output :type o: cleo.outputs.output.Output
entailment
def _get_config(self, i):
    """
    Get the config.

    :type i: cleo.inputs.input.Input

    :rtype: dict
    """
    variables = {}
    if not i.get_option('config'):
        raise Exception('The --config|-c option is missing.')

    with open(i.get_option('config')) as fh:
        exec(fh.read(), {}, variables)

    return variables['DATABASES']
Get the config. :type i: cleo.inputs.input.Input :rtype: dict
entailment
def associate(self, model):
    """
    Associate the model instance to the given parent.

    :type model: eloquent.Model

    :rtype: eloquent.Model
    """
    self._parent.set_attribute(self._foreign_key, model.get_key())
    self._parent.set_attribute(self._morph_type, model.get_morph_class())

    return self._parent.set_relation(self._relation, model)
Associate the model instance to the given parent. :type model: eloquent.Model :rtype: eloquent.Model
entailment
def _create_model_by_type(self, type):
    """
    Create a new model instance by type.

    :rtype: Model
    """
    klass = None
    for cls in eloquent.orm.model.Model.__subclasses__():
        morph_class = cls.__morph_class__ or cls.__name__
        if morph_class == type:
            klass = cls
            break

    return klass()
Create a new model instance by type. :rtype: Model
entailment
def get_column_listing(self, table):
    """
    Get the column listing for a given table.

    :param table: The table
    :type table: str

    :rtype: list
    """
    sql = self._grammar.compile_column_exists()
    database = self._connection.get_database_name()
    table = self._connection.get_table_prefix() + table

    results = self._connection.select(sql, [database, table])

    return self._connection.get_post_processor().process_column_listing(results)
Get the column listing for a given table. :param table: The table :type table: str :rtype: list
entailment
def _populate_stub(self, name, stub, table):
    """
    Populate the placeholders in the migration stub.

    :param name: The name of the migration
    :type name: str

    :param stub: The stub
    :type stub: str

    :param table: The table name
    :type table: str

    :rtype: str
    """
    stub = stub.replace('DummyClass', self._get_class_name(name))

    if table is not None:
        stub = stub.replace('dummy_table', table)

    return stub
Populate the placeholders in the migration stub. :param name: The name of the migration :type name: str :param stub: The stub :type stub: str :param table: The table name :type table: str :rtype: str
entailment
def _set_keys_for_save_query(self, query):
    """
    Set the keys for a save update query.

    :param query: A Builder instance
    :type query: eloquent.orm.Builder

    :return: The Builder instance
    :rtype: eloquent.orm.Builder
    """
    query.where(self._morph_type, self._morph_class)

    return super(MorphPivot, self)._set_keys_for_save_query(query)
Set the keys for a save update query. :param query: A Builder instance :type query: eloquent.orm.Builder :return: The Builder instance :rtype: eloquent.orm.Builder
entailment
def delete(self):
    """
    Delete the pivot model record from the database.

    :rtype: int
    """
    query = self._get_delete_query()
    query.where(self._morph_type, self._morph_class)

    return query.delete()
Delete the pivot model record from the database. :rtype: int
entailment
def get_relation_count_query(self, query, parent):
    """
    Add the constraints for a relationship count query.

    :type query: Builder
    :type parent: Builder

    :rtype: Builder
    """
    query = super(MorphOneOrMany, self).get_relation_count_query(query, parent)

    return query.where(self._morph_type, self._morph_class)
Add the constraints for a relationship count query. :type query: Builder :type parent: Builder :rtype: Builder
entailment
def add_eager_constraints(self, models):
    """
    Set the constraints for an eager load of the relation.

    :type models: list
    """
    super(MorphOneOrMany, self).add_eager_constraints(models)

    self._query.where(self._morph_type, self._morph_class)
Set the constraints for an eager load of the relation. :type models: list
entailment
def save(self, model):
    """
    Attach a model instance to the parent model.

    :param model: The model instance to attach
    :type model: Model

    :rtype: Model
    """
    model.set_attribute(self.get_plain_morph_type(), self._morph_class)

    return super(MorphOneOrMany, self).save(model)
Attach a model instance to the parent model. :param model: The model instance to attach :type model: Model :rtype: Model
entailment
def find_or_new(self, id, columns=None):
    """
    Find a model by its primary key or return a new instance of the related model.

    :param id: The primary key
    :type id: mixed

    :param columns: The columns to retrieve
    :type columns: list

    :rtype: Collection or Model
    """
    if columns is None:
        columns = ['*']

    instance = self.find(id, columns)

    if instance is None:
        instance = self._related.new_instance()

        self._set_foreign_attributes_for_create(instance)

    return instance
Find a model by its primary key or return a new instance of the related model. :param id: The primary key :type id: mixed :param columns: The columns to retrieve :type columns: list :rtype: Collection or Model
entailment
def _set_foreign_attributes_for_create(self, model):
    """
    Set the foreign ID and type for creating a related model.
    """
    model.set_attribute(self.get_plain_foreign_key(), self.get_parent_key())
    model.set_attribute(self.get_plain_morph_type(), self._morph_class)
Set the foreign ID and type for creating a related model.
entailment
def _parse_connection_name(self, name):
    """
    Parse the connection into a tuple of the name and read / write type

    :param name: The name of the connection
    :type name: str

    :return: A tuple of the name and read / write type
    :rtype: tuple
    """
    if name is None:
        name = self.get_default_connection()

    if name.endswith(('::read', '::write')):
        return name.split('::', 1)

    return name, None
Parse the connection into a tuple of the name and read / write type :param name: The name of the connection :type name: str :return: A tuple of the name and read / write type :rtype: tuple
entailment
def purge(self, name=None):
    """
    Disconnect from the given database and remove from local cache

    :param name: The name of the connection
    :type name: str

    :rtype: None
    """
    self.disconnect(name)

    if name in self._connections:
        del self._connections[name]
Disconnect from the given database and remove from local cache :param name: The name of the connection :type name: str :rtype: None
entailment
def no_constraints(cls, callback):
    """
    Runs a callback with constraints disabled on the relation.
    """
    cls._constraints = False

    results = callback()

    cls._constraints = True

    return results
Runs a callback with constraints disabled on the relation.
entailment
def get_keys(self, models, key=None):
    """
    Get all the primary keys for an array of models.

    :type models: list
    :type key: str

    :rtype: list
    """
    return list(set(map(
        lambda value: value.get_attribute(key) if key else value.get_key(),
        models
    )))
Get all the primary keys for an array of models. :type models: list :type key: str :rtype: list
entailment
def add_constraints(self):
    """
    Set the base constraints on the relation query.

    :rtype: None
    """
    parent_table = self._parent.get_table()

    self._set_join()

    if self._constraints:
        self._query.where('%s.%s' % (parent_table, self._first_key),
                          '=', self._far_parent.get_key())
Set the base constraints on the relation query. :rtype: None
entailment
def get_relation_count_query(self, query, parent):
    """
    Add the constraints for a relationship count query.

    :type query: Builder
    :type parent: Builder

    :rtype: Builder
    """
    parent_table = self._parent.get_table()

    self._set_join(query)

    query.select(QueryExpression('COUNT(*)'))

    key = self.wrap('%s.%s' % (parent_table, self._first_key))

    return query.where(self.get_has_compare_key(), '=', QueryExpression(key))
Add the constraints for a relationship count query. :type query: Builder :type parent: Builder :rtype: Builder
entailment
def _set_join(self, query=None):
    """
    Set the join clause for the query.
    """
    if not query:
        query = self._query

    foreign_key = '%s.%s' % (self._related.get_table(), self._second_key)

    query.join(self._parent.get_table(),
               self.get_qualified_parent_key_name(),
               '=', foreign_key)
Set the join clause for the query.
entailment
def plot_best_worst_fits(assignments_df, data, modality_col='Modality',
                         score='$\log_2 K$'):
    """Violinplots of the highest and lowest scoring of each modality"""
    ncols = 2
    nrows = len(assignments_df.groupby(modality_col).groups.keys())

    fig, axes = plt.subplots(nrows=nrows, ncols=ncols,
                             figsize=(nrows * 4, ncols * 6))
    axes_iter = axes.flat

    fits = 'Highest', 'Lowest'

    for modality, df in assignments_df.groupby(modality_col):
        df = df.sort_values(score)
        color = MODALITY_TO_COLOR[modality]

        for fit in fits:
            if fit == 'Highest':
                ids = df['Feature ID'][-10:]
            else:
                ids = df['Feature ID'][:10]
            fit_psi = data[ids]
            tidy_fit_psi = fit_psi.stack().reset_index()
            tidy_fit_psi = tidy_fit_psi.rename(
                columns={'level_0': 'Sample ID', 'level_1': 'Feature ID',
                         0: '$\Psi$'})
            if tidy_fit_psi.empty:
                continue
            ax = six.next(axes_iter)
            violinplot(x='Feature ID', y='$\Psi$', data=tidy_fit_psi,
                       color=color, ax=ax)
            ax.set(title='{} {} {}'.format(fit, score, modality), xticks=[])
    sns.despine()
    fig.tight_layout()
Violinplots of the highest and lowest scoring of each modality
entailment
def violinplot(x=None, y=None, data=None, bw=0.2, scale='width',
               inner=None, ax=None, **kwargs):
    """Wrapper around Seaborn's Violinplot specifically for [0, 1] ranged data

    What's different:

    - bw = 0.2: Sets bandwidth to be small and the same between datasets
    - scale = 'width': Sets the width of all violinplots to be the same
    - inner = None: Don't plot a boxplot or points inside the violinplot
    """
    if ax is None:
        ax = plt.gca()

    sns.violinplot(x, y, data=data, bw=bw, scale=scale, inner=inner,
                   ax=ax, **kwargs)
    ax.set(ylim=(0, 1), yticks=(0, 0.5, 1))
    return ax
Wrapper around Seaborn's Violinplot specifically for [0, 1] ranged data What's different: - bw = 0.2: Sets bandwidth to be small and the same between datasets - scale = 'width': Sets the width of all violinplots to be the same - inner = None: Don't plot a boxplot or points inside the violinplot
entailment
def bar(self, counts, phenotype_to_color=None, ax=None, percentages=True):
    """Draw barplots grouped by modality of modality percentage per group

    Parameters
    ----------

    Returns
    -------

    Raises
    ------
    """
    if percentages:
        counts = 100 * (counts.T / counts.T.sum()).T

    # with sns.set(style='whitegrid'):
    if ax is None:
        ax = plt.gca()

    full_width = 0.8
    width = full_width / counts.shape[0]
    for i, (group, series) in enumerate(counts.iterrows()):
        left = np.arange(len(self.modality_order)) + i * width
        height = [series[i] if i in series else 0
                  for i in self.modality_order]
        color = phenotype_to_color[group]
        ax.bar(left, height, width=width, color=color, label=group,
               linewidth=.5, edgecolor='k')
    ylabel = 'Percentage of events' if percentages else 'Number of events'
    ax.set_ylabel(ylabel)
    ax.set_xticks(np.arange(len(self.modality_order)) + full_width / 2)
    ax.set_xticklabels(self.modality_order)
    ax.set_xlabel('Splicing modality')
    ax.set_xlim(0, len(self.modality_order))
    ax.legend(loc='best')
    ax.grid(axis='y', linestyle='-', linewidth=0.5)
    sns.despine()
Draw barplots grouped by modality of modality percentage per group Parameters ---------- Returns ------- Raises ------
entailment
def event_estimation(self, event, logliks, logsumexps, renamed=''):
    """Show the values underlying bayesian modality estimations of an event

    Parameters
    ----------

    Returns
    -------

    Raises
    ------
    """
    plotter = _ModelLoglikPlotter()
    plotter.plot(event, logliks, logsumexps, self.modality_to_color,
                 renamed=renamed)
    return plotter
Show the values underlying bayesian modality estimations of an event Parameters ---------- Returns ------- Raises ------
entailment
def predict(self, fitted):
    """Assign the most likely modality given the fitted data

    Parameters
    ----------
    fitted : pandas.DataFrame or pandas.Series
        Either a (n_modalities, features) DataFrame or (n_modalities,)
        Series, either of which will return the best modality for each
        feature.
    """
    if fitted.shape[0] != len(self.modalities):
        raise ValueError("This data doesn't look like it had the distance "
                         "between it and the five modalities calculated")
    return fitted.idxmin()
Assign the most likely modality given the fitted data Parameters ---------- fitted : pandas.DataFrame or pandas.Series Either a (n_modalities, features) DataFrame or (n_modalities,) Series, either of which will return the best modality for each feature.
entailment
def logliks(self, x):
    """Calculate log-likelihood of a feature x for each model

    Converts all values that are exactly 1 or exactly 0 to 0.999 and 0.001
    because they are out of range of the beta distribution.

    Parameters
    ----------
    x : numpy.array-like
        A single vector to estimate the log-likelihood of the models on

    Returns
    -------
    logliks : numpy.array
        Log-likelihood of these data in each member of the model's family
    """
    x = x.copy()

    # Replace exactly 0 and exactly 1 values with a very small number
    # (machine epsilon, the smallest number that this computer is capable
    # of storing) because 0 and 1 are not in the Beta distribution.
    x[x == 0] = VERY_SMALL_NUMBER
    x[x == 1] = 1 - VERY_SMALL_NUMBER

    return np.array([np.log(prob) + rv.logpdf(x[np.isfinite(x)]).sum()
                     for prob, rv in zip(self.prob_parameters, self.rvs)])
Calculate log-likelihood of a feature x for each model Converts all values that are exactly 1 or exactly 0 to 0.999 and 0.001 because they are out of range of the beta distribution. Parameters ---------- x : numpy.array-like A single vector to estimate the log-likelihood of the models on Returns ------- logliks : numpy.array Log-likelihood of these data in each member of the model's family
entailment
def nice_number_string(number, decimal_places=2):
    """Convert floats to either integers or a nice looking fraction"""
    if number == np.round(number):
        return str(int(number))
    elif number < 1 and number > 0:
        inverse = 1 / number
        if int(inverse) == np.round(inverse):
            return r'\frac{{1}}{{{}}}'.format(int(inverse))
    else:
        template = '{{:.{0}}}'.format(decimal_places)
        return template.format(number)
Convert floats to either integers or a nice looking fraction
entailment
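Two illustrative calls, traced by hand from the branches above (a minimal sketch, assuming numpy is imported as np and the function is in scope):

print(nice_number_string(2.0))   # '2'            -- integral floats collapse to int strings
print(nice_number_string(0.25))  # '\frac{1}{4}'  -- reciprocals of integers become LaTeX fractions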
def violinplot(self, n=1000, **kwargs):
    """Plot violins of each distribution in the model family

    Parameters
    ----------
    n : int
        Number of random variables to generate
    kwargs : dict or keywords
        Any keyword arguments to seaborn.violinplot

    Returns
    -------
    ax : matplotlib.Axes object
        Axes object with violins plotted
    """
    kwargs.setdefault('palette', 'Purples')

    dfs = []

    for rv in self.rvs:
        psi = rv.rvs(n)
        df = pd.Series(psi, name=self.ylabel).to_frame()
        alpha, beta = rv.args
        alpha = self.nice_number_string(alpha, decimal_places=2)
        beta = self.nice_number_string(beta, decimal_places=2)

        df['parameters'] = '$\\alpha = {0}$\n$\\beta = {1}$'.format(
            alpha, beta)
        dfs.append(df)
    data = pd.concat(dfs)

    if 'ax' not in kwargs:
        fig, ax = plt.subplots(figsize=(len(self.alphas) * 0.625, 4))
    else:
        ax = kwargs.pop('ax')
    ax = violinplot(x='parameters', y=self.ylabel, data=data, ax=ax,
                    **kwargs)
    sns.despine(ax=ax)
    return ax
Plot violins of each distribution in the model family Parameters ---------- n : int Number of random variables to generate kwargs : dict or keywords Any keyword arguments to seaborn.violinplot Returns ------- ax : matplotlib.Axes object Axes object with violins plotted
entailment
def _single_feature_logliks_one_step(self, feature, models):
    """Get log-likelihood of models at each parameterization for given data

    Parameters
    ----------
    feature : pandas.Series
        Percent-based values of a single feature. May contain NAs, but
        only non-NA values are used.

    Returns
    -------
    logliks : pandas.DataFrame
    """
    x_non_na = feature[~feature.isnull()]
    if x_non_na.empty:
        return pd.DataFrame()
    else:
        dfs = []
        for name, model in models.items():
            df = model.single_feature_logliks(feature)
            df['Modality'] = name
            dfs.append(df)
        return pd.concat(dfs, ignore_index=True)
Get log-likelihood of models at each parameterization for given data Parameters ---------- feature : pandas.Series Percent-based values of a single feature. May contain NAs, but only non-NA values are used. Returns ------- logliks : pandas.DataFrame
entailment
def fit(self, data):
    """Get the modality assignments of each splicing event in the data

    Parameters
    ----------
    data : pandas.DataFrame
        A (n_samples, n_events) dataframe of splicing events' PSI scores.
        Must be psi scores which range from 0 to 1

    Returns
    -------
    log2_bayes_factors : pandas.DataFrame
        A (n_modalities, n_events) dataframe of the estimated log2
        bayes factor for each splicing event, for each modality

    Raises
    ------
    AssertionError
        If any value in ``data`` does not fall only between 0 and 1.
    """
    self.assert_less_than_or_equal_1(data.values.flat)
    self.assert_non_negative(data.values.flat)

    if isinstance(data, pd.DataFrame):
        log2_bayes_factors = data.apply(self.single_feature_fit)
    elif isinstance(data, pd.Series):
        log2_bayes_factors = self.single_feature_fit(data)
    log2_bayes_factors.name = self.score_name
    return log2_bayes_factors
Get the modality assignments of each splicing event in the data Parameters ---------- data : pandas.DataFrame A (n_samples, n_events) dataframe of splicing events' PSI scores. Must be psi scores which range from 0 to 1 Returns ------- log2_bayes_factors : pandas.DataFrame A (n_modalities, n_events) dataframe of the estimated log2 bayes factor for each splicing event, for each modality Raises ------ AssertionError If any value in ``data`` does not fall only between 0 and 1.
entailment
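A hedged end-to-end sketch of the fit/predict workflow on random PSI data; the ModalityEstimator constructor name is hypothetical, standing in for whichever class owns these methods:

import numpy as np
import pandas as pd

# (n_samples, n_events) matrix of PSI scores in [0, 1]
psi = pd.DataFrame(np.random.uniform(0, 1, size=(20, 3)),
                   columns=['event1', 'event2', 'event3'])

estimator = ModalityEstimator()          # hypothetical class name
log2_bf = estimator.fit(psi)             # (n_modalities, n_events) bayes factors
modalities = estimator.predict(log2_bf)  # most likely modality per event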
def predict(self, log2_bayes_factors, reset_index=False):
    """Guess the most likely modality for each event

    For each event that has at least one non-NA value, if no modalities
    have logsumexp'd logliks greater than the log Bayes factor threshold,
    then they are assigned the 'multimodal' modality, because we cannot
    reject the null hypothesis that these did not come from the uniform
    distribution.

    Parameters
    ----------
    log2_bayes_factors : pandas.DataFrame
        A (4, n_events) dataframe with bayes factors for the Psi~1, Psi~0,
        bimodal, and middle modalities. If an event has no bayes factors
        for any of those modalities, it is ignored
    reset_index : bool
        If True, remove the first level of the index from the dataframe.
        Useful if you are using this function to apply to a grouped
        dataframe where the first level is something other than the
        modality, e.g. the celltype

    Returns
    -------
    modalities : pandas.Series
        A (n_events,) series with the most likely modality for each event
    """
    if reset_index:
        x = log2_bayes_factors.reset_index(level=0, drop=True)
    else:
        x = log2_bayes_factors
    if isinstance(x, pd.DataFrame):
        not_na = (x.notnull() > 0).any()
        not_na_columns = not_na[not_na].index
        x.ix[NULL_MODEL, not_na_columns] = self.logbf_thresh
    elif isinstance(x, pd.Series):
        x[NULL_MODEL] = self.logbf_thresh
    return x.idxmax()
Guess the most likely modality for each event For each event that has at least one non-NA value, if no modalities have logsumexp'd logliks greater than the log Bayes factor threshold, then they are assigned the 'multimodal' modality, because we cannot reject the null hypothesis that these did not come from the uniform distribution. Parameters ---------- log2_bayes_factors : pandas.DataFrame A (4, n_events) dataframe with bayes factors for the Psi~1, Psi~0, bimodal, and middle modalities. If an event has no bayes factors for any of those modalities, it is ignored reset_index : bool If True, remove the first level of the index from the dataframe. Useful if you are using this function to apply to a grouped dataframe where the first level is something other than the modality, e.g. the celltype Returns ------- modalities : pandas.Series A (n_events,) series with the most likely modality for each event
entailment
def single_feature_logliks(self, feature):
    """Calculate log-likelihoods of each modality's parameterization

    Used for plotting the estimates of a single feature

    Parameters
    ----------
    feature : pandas.Series
        A single feature's values. All values must range from 0 to 1.

    Returns
    -------
    logliks : pandas.DataFrame
        The log-likelihood the data, for each model, for each
        parameterization

    Raises
    ------
    AssertionError
        If any value in ``x`` does not fall only between 0 and 1.
    """
    self.assert_less_than_or_equal_1(feature.values)
    self.assert_non_negative(feature.values)

    logliks = self._single_feature_logliks_one_step(
        feature, self.one_param_models)

    logsumexps = self.logliks_to_logsumexp(logliks)

    # If none of the one-parameter models passed, try the two-param models
    if (logsumexps <= self.logbf_thresh).all():
        logliks_two_params = self._single_feature_logliks_one_step(
            feature, self.two_param_models)
        logliks = pd.concat([logliks, logliks_two_params])

    return logliks
Calculate log-likelihoods of each modality's parameterization Used for plotting the estimates of a single feature Parameters ---------- feature : pandas.Series A single feature's values. All values must range from 0 to 1. Returns ------- logliks : pandas.DataFrame The log-likelihood the data, for each model, for each parameterization Raises ------ AssertionError If any value in ``x`` does not fall only between 0 and 1.
entailment
def single_feature_fit(self, feature):
    """Get the log2 bayes factor of the fit for each modality"""
    if np.isfinite(feature).sum() == 0:
        series = pd.Series(index=MODALITY_ORDER)
    else:
        logbf_one_param = pd.Series(
            {k: v.logsumexp_logliks(feature)
             for k, v in self.one_param_models.items()})

        # Check if none of the previous features fit
        if (logbf_one_param <= self.logbf_thresh).all():
            logbf_two_param = pd.Series(
                {k: v.logsumexp_logliks(feature)
                 for k, v in self.two_param_models.items()})
            series = pd.concat([logbf_one_param, logbf_two_param])
            series[NULL_MODEL] = self.logbf_thresh
        else:
            series = logbf_one_param
    series.index.name = 'Modality'
    series.name = self.score_name
    return series
Get the log2 bayes factor of the fit for each modality
entailment
def violinplot(self, n=1000, figsize=None, **kwargs):
    r"""Visualize all modality family members with parameters

    Use violinplots to visualize distributions of modality family members

    Parameters
    ----------
    n : int
        Number of random variables to generate
    kwargs : dict or keywords
        Any keyword arguments to seaborn.violinplot

    Returns
    -------
    fig : matplotlib.Figure object
        Figure object with violins plotted
    """
    if figsize is None:
        nrows = len(self.models)
        width = max(len(m.rvs) for name, m in self.models.items()) * 0.625
        height = nrows * 2.5
        figsize = width, height
    fig, axes = plt.subplots(nrows=nrows, figsize=figsize)

    for ax, model_name in zip(axes, MODALITY_ORDER):
        try:
            model = self.models[model_name]
            cmap = MODALITY_TO_CMAP[model_name]
            palette = cmap(np.linspace(0, 1, len(model.rvs)))
            model.violinplot(n=n, ax=ax, palette=palette, **kwargs)
            ax.set(title=model_name, xlabel='')
        except KeyError:
            continue
    fig.tight_layout()
r"""Visualize all modality family members with parameters Use violinplots to visualize distributions of modality family members Parameters ---------- n : int Number of random variables to generate kwargs : dict or keywords Any keyword arguments to seaborn.violinplot Returns ------- fig : matplotlib.Figure object Figure object with violins plotted
entailment
def bin_range_strings(bins, fmt=':g'):
    """Given a list of bins, make a list of strings of those bin ranges

    Parameters
    ----------
    bins : list_like
        List of anything, usually values of bin edges

    Returns
    -------
    bin_ranges : list
        List of bin ranges

    >>> bin_range_strings((0, 0.5, 1))
    ['0-0.5', '0.5-1']
    """
    return [('{' + fmt + '}-{' + fmt + '}').format(i, j)
            for i, j in zip(bins, bins[1:])]
Given a list of bins, make a list of strings of those bin ranges Parameters ---------- bins : list_like List of anything, usually values of bin edges Returns ------- bin_ranges : list List of bin ranges >>> bin_range_strings((0, 0.5, 1)) ['0-0.5', '0.5-1']
entailment
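The fmt parameter controls how the bin edges are rendered; for example, a fixed-point format pads the edges to two decimals:

bin_range_strings((0, 0.5, 1), fmt=':.2f')
# ['0.00-0.50', '0.50-1.00']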
def binify(data, bins):
    """Makes a histogram of each column the provided binsize

    Parameters
    ----------
    data : pandas.DataFrame
        A samples x features dataframe. Each feature (column) will be
        binned into the provided bins
    bins : iterable
        Bins you would like to use for this data. Must include the final
        bin value, e.g. (0, 0.5, 1) for the two bins (0, 0.5) and (0.5, 1).
        nbins = len(bins) - 1

    Returns
    -------
    binned : pandas.DataFrame
        An nbins x features DataFrame of each column binned across rows
    """
    if bins is None:
        raise ValueError('Must specify "bins"')
    if isinstance(data, pd.DataFrame):
        binned = data.apply(lambda x: pd.Series(np.histogram(
            x, bins=bins, range=(0, 1))[0]))
    elif isinstance(data, pd.Series):
        binned = pd.Series(np.histogram(data, bins=bins, range=(0, 1))[0])
    else:
        raise ValueError('`data` must be either a 1d vector or 2d matrix')
    binned.index = bin_range_strings(bins)

    # Normalize so each column sums to 1
    binned = binned / binned.sum().astype(float)
    return binned
Makes a histogram of each column the provided binsize Parameters ---------- data : pandas.DataFrame A samples x features dataframe. Each feature (column) will be binned into the provided bins bins : iterable Bins you would like to use for this data. Must include the final bin value, e.g. (0, 0.5, 1) for the two bins (0, 0.5) and (0.5, 1). nbins = len(bins) - 1 Returns ------- binned : pandas.DataFrame An nbins x features DataFrame of each column binned across rows
entailment
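A minimal usage sketch, assuming pandas/numpy are imported and binify and bin_range_strings are in scope (values traced by hand):

import pandas as pd

# Two features with values in [0, 1], e.g. PSI scores
data = pd.DataFrame({'event1': [0.1, 0.2, 0.9],
                     'event2': [0.5, 0.6, 0.7]})
binned = binify(data, bins=(0, 0.5, 1))
print(binned)
#          event1    event2
# 0-0.5  0.666667  0.000000
# 0.5-1  0.333333  1.000000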
def kld(p, q):
    """Kullback-Leibler divergence of two probability distributions pandas
    dataframes, p and q

    Parameters
    ----------
    p : pandas.DataFrame
        An nbins x features DataFrame, or (nbins,) Series
    q : pandas.DataFrame
        An nbins x features DataFrame, or (nbins,) Series

    Returns
    -------
    kld : pandas.Series
        Kullback-Leibler divergence of the common columns between the
        dataframe. E.g. between 1st column in p and 1st column in q, and
        2nd column in p and 2nd column in q.

    Raises
    ------
    ValueError
        If the data provided is not a probability distribution, i.e. it has
        negative values or its columns do not sum to 1, raise ValueError

    Notes
    -----
    The input to this function must be probability distributions, not raw
    values. Otherwise, the output makes no sense.
    """
    try:
        _check_prob_dist(p)
        _check_prob_dist(q)
    except ValueError:
        return np.nan
    # If one of them is zero, then the other should be considered to be 0.
    # In this problem formulation, log0 = 0
    p = p.replace(0, np.nan)
    q = q.replace(0, np.nan)

    return (np.log2(p / q) * p).sum(axis=0)
Kullback-Leibler divergence of two probability distributions pandas dataframes, p and q Parameters ---------- p : pandas.DataFrame An nbins x features DataFrame, or (nbins,) Series q : pandas.DataFrame An nbins x features DataFrame, or (nbins,) Series Returns ------- kld : pandas.Series Kullback-Leibler divergence of the common columns between the dataframe. E.g. between 1st column in p and 1st column in q, and 2nd column in p and 2nd column in q. Raises ------ ValueError If the data provided is not a probability distribution, i.e. it has negative values or its columns do not sum to 1, raise ValueError Notes ----- The input to this function must be probability distributions, not raw values. Otherwise, the output makes no sense.
entailment
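A small worked example, assuming pandas is in scope; each column is a valid probability distribution over two bins:

import pandas as pd

p = pd.Series([0.5, 0.5]).to_frame('feature1')
q = pd.Series([0.9, 0.1]).to_frame('feature1')
kld(p, q)
# feature1    0.736966
# = 0.5*log2(0.5/0.9) + 0.5*log2(0.5/0.1)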
def jsd(p, q):
    """Finds the per-column JSD between dataframes p and q

    Jensen-Shannon divergence of two probability distributions pandas
    dataframes, p and q. These distributions are usually created by
    running binify() on the dataframe.

    Parameters
    ----------
    p : pandas.DataFrame
        An nbins x features DataFrame.
    q : pandas.DataFrame
        An nbins x features DataFrame.

    Returns
    -------
    jsd : pandas.Series
        Jensen-Shannon divergence of each column with the same names between
        p and q

    Raises
    ------
    ValueError
        If the data provided is not a probability distribution, i.e. it has
        negative values or its columns do not sum to 1, raise ValueError
    """
    try:
        _check_prob_dist(p)
        _check_prob_dist(q)
    except ValueError:
        return np.nan
    weight = 0.5
    m = weight * (p + q)

    result = weight * kld(p, m) + (1 - weight) * kld(q, m)
    return result
Finds the per-column JSD between dataframes p and q Jensen-Shannon divergence of two probability distributions pandas dataframes, p and q. These distributions are usually created by running binify() on the dataframe. Parameters ---------- p : pandas.DataFrame An nbins x features DataFrame. q : pandas.DataFrame An nbins x features DataFrame. Returns ------- jsd : pandas.Series Jensen-Shannon divergence of each column with the same names between p and q Raises ------ ValueError If the data provided is not a probability distribution, i.e. it has negative values or its columns do not sum to 1, raise ValueError
entailment
def entropy(binned, base=2):
    """Find the entropy of each column of a dataframe

    Parameters
    ----------
    binned : pandas.DataFrame
        A nbins x features DataFrame of probability distributions, where
        each column sums to 1
    base : numeric
        The log-base of the entropy. Default is 2, so the resulting
        entropy is in bits.

    Returns
    -------
    entropy : pandas.Series
        Entropy values for each column of the dataframe.

    Raises
    ------
    ValueError
        If the data provided is not a probability distribution, i.e. it has
        negative values or its columns do not sum to 1, raise ValueError
    """
    try:
        _check_prob_dist(binned)
    except ValueError:
        # The original bare ``np.nan`` here was a no-op; return it, matching
        # the behavior of kld() and jsd() above.
        return np.nan
    return -((np.log(binned) / np.log(base)) * binned).sum(axis=0)
Find the entropy of each column of a dataframe Parameters ---------- binned : pandas.DataFrame A nbins x features DataFrame of probability distributions, where each column sums to 1 base : numeric The log-base of the entropy. Default is 2, so the resulting entropy is in bits. Returns ------- entropy : pandas.Series Entropy values for each column of the dataframe. Raises ------ ValueError If the data provided is not a probability distribution, i.e. it has negative values or its columns do not sum to 1, raise ValueError
entailment
def binify_and_jsd(df1, df2, bins, pair=None):
    """Binify and calculate jensen-shannon divergence between two dataframes

    Parameters
    ----------
    df1, df2 : pandas.DataFrames
        Dataframes to calculate JSD between columns of. Must have
        overlapping column names
    bins : array-like
        Bins to use for transforming df{1,2} into probability distributions
    pair : str, optional
        Name of the pair to save as the name of the series

    Returns
    -------
    divergence : pandas.Series
        The Jensen-Shannon divergence between columns of df1, df2
    """
    binned1 = binify(df1, bins=bins).dropna(how='all', axis=1)
    binned2 = binify(df2, bins=bins).dropna(how='all', axis=1)

    binned1, binned2 = binned1.align(binned2, axis=1, join='inner')

    series = np.sqrt(jsd(binned1, binned2))
    series.name = pair
    return series
Binify and calculate jensen-shannon divergence between two dataframes Parameters ---------- df1, df2 : pandas.DataFrames Dataframes to calculate JSD between columns of. Must have overlapping column names bins : array-like Bins to use for transforming df{1,2} into probability distributions pair : str, optional Name of the pair to save as the name of the series Returns ------- divergence : pandas.Series The Jensen-Shannon divergence between columns of df1, df2
entailment
def cross_phenotype_jsd(data, groupby, bins, n_iter=100):
    """Jensen-Shannon divergence of features across phenotypes

    Parameters
    ----------
    data : pandas.DataFrame
        A (n_samples, n_features) Dataframe
    groupby : mappable
        A samples to phenotypes mapping
    bins : array-like
        Bins to use for transforming the data into probability distributions
    n_iter : int
        Number of bootstrap resampling iterations to perform for the
        within-group comparisons

    Returns
    -------
    jsd_df : pandas.DataFrame
        A (n_features, n_phenotypes^2) dataframe of the JSD between each
        feature between and within phenotypes
    """
    grouped = data.groupby(groupby)
    jsds = []

    seen = set([])

    for phenotype1, df1 in grouped:
        for phenotype2, df2 in grouped:
            pair = tuple(sorted([phenotype1, phenotype2]))
            if pair in seen:
                continue
            seen.add(pair)

            if phenotype1 == phenotype2:
                seriess = []
                bs = cross_validation.Bootstrap(df1.shape[0], n_iter=n_iter,
                                                train_size=0.5)
                for i, (ind1, ind2) in enumerate(bs):
                    df1_subset = df1.iloc[ind1, :]
                    df2_subset = df2.iloc[ind2, :]
                    # Call matches binify_and_jsd(df1, df2, bins, pair=None);
                    # the original passed None where ``bins`` belongs.
                    seriess.append(
                        binify_and_jsd(df1_subset, df2_subset, bins))
                series = pd.concat(seriess, axis=1, names=None).mean(axis=1)
                series.name = pair
                jsds.append(series)
            else:
                series = binify_and_jsd(df1, df2, bins, pair)
                jsds.append(series)
    return pd.concat(jsds, axis=1)
Jensen-Shannon divergence of features across phenotypes Parameters ---------- data : pandas.DataFrame A (n_samples, n_features) Dataframe groupby : mappable A samples to phenotypes mapping bins : array-like Bins to use for transforming the data into probability distributions n_iter : int Number of bootstrap resampling iterations to perform for the within-group comparisons Returns ------- jsd_df : pandas.DataFrame A (n_features, n_phenotypes^2) dataframe of the JSD between each feature between and within phenotypes
entailment
def jsd_df_to_2d(jsd_df):
    """Transform a tall JSD dataframe to a square matrix of mean JSDs

    Parameters
    ----------
    jsd_df : pandas.DataFrame
        A (n_features, n_phenotypes^2) dataframe of the JSD between each
        feature between and within phenotypes

    Returns
    -------
    jsd_2d : pandas.DataFrame
        A (n_phenotypes, n_phenotypes) symmetric dataframe of the mean JSD
        between and within phenotypes
    """
    jsd_2d = jsd_df.mean().reset_index()
    jsd_2d = jsd_2d.rename(
        columns={'level_0': 'phenotype1', 'level_1': 'phenotype2', 0: 'jsd'})
    jsd_2d = jsd_2d.pivot(index='phenotype1', columns='phenotype2',
                          values='jsd')
    return jsd_2d + np.tril(jsd_2d.T, -1)
Transform a tall JSD dataframe to a square matrix of mean JSDs Parameters ---------- jsd_df : pandas.DataFrame A (n_features, n_phenotypes^2) dataframe of the JSD between each feature between and within phenotypes Returns ------- jsd_2d : pandas.DataFrame A (n_phenotypes, n_phenotypes) symmetric dataframe of the mean JSD between and within phenotypes
entailment
def run(self, callback=None, limit=0):
    """
    Start pcap's loop over the interface, calling the given callback
    for each packet

    :param callback: a function receiving
        (win_pcap, param, header, pkt_data) for each packet intercepted
    :param limit: how many packets to capture
        (A value of -1 or 0 is equivalent to infinity)
    """
    if self._handle is None:
        raise self.DeviceIsNotOpen()

    # Set new callback
    self._callback = callback

    # Run loop with callback wrapper
    wtypes.pcap_loop(self._handle, limit, self._callback_wrapper, None)
Start pcap's loop over the interface, calling the given callback for each packet :param callback: a function receiving (win_pcap, param, header, pkt_data) for each packet intercepted :param limit: how many packets to capture (A value of -1 or 0 is equivalent to infinity)
entailment
def send(self, packet_buffer):
    """
    send a buffer as a packet to the network interface

    :param packet_buffer: buffer to send (length shouldn't exceed MAX_INT)
    """
    if self._handle is None:
        raise self.DeviceIsNotOpen()

    buffer_length = len(packet_buffer)
    buf_send = ctypes.cast(
        ctypes.create_string_buffer(packet_buffer, buffer_length),
        ctypes.POINTER(ctypes.c_ubyte))
    wtypes.pcap_sendpacket(self._handle, buf_send, buffer_length)
send a buffer as a packet to the network interface :param packet_buffer: buffer to send (length shouldn't exceed MAX_INT)
entailment
def capture_on(pattern, callback):
    """
    :param pattern: a wildcard pattern to match the description of a
        network interface to capture packets on
    :param callback: a function to call with each intercepted packet
    """
    device_name, desc = WinPcapDevices.get_matching_device(pattern)
    if device_name is not None:
        with WinPcap(device_name) as capture:
            capture.run(callback=callback)
:param pattern: a wildcard pattern to match the description of a network interface to capture packets on :param callback: a function to call with each intercepted packet
entailment
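A brief usage sketch; the interface pattern is hypothetical, and the callback signature follows the run() docstring above. Note this loops until the capture is interrupted:

def packet_callback(win_pcap, param, header, pkt_data):
    # header and pkt_data come straight from pcap's loop
    print('packet of %d bytes' % len(pkt_data))

# Match any interface whose description contains "Ethernet"
capture_on('*Ethernet*', packet_callback)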
def capture_on_device_name(device_name, callback):
    """
    :param device_name: the name (guid) of a device as provided by
        WinPcapDevices.list_devices()
    :param callback: a function to call with each intercepted packet
    """
    with WinPcap(device_name) as capture:
        capture.run(callback=callback)
:param device_name: the name (guid) of a device as provided by WinPcapDevices.list_devices() :param callback: a function to call with each intercepted packet
entailment
def send_packet(self, pattern, packet_buffer, callback=None, limit=10):
    """
    Send a buffer as a packet to a network interface and optionally
    capture a response

    :param pattern: a wildcard pattern to match the description of a
        network interface to capture packets on
    :param packet_buffer: a buffer to send (length shouldn't exceed MAX_INT)
    :param callback: If not None, a function to call with each
        intercepted packet
    :param limit: how many packets to capture (A value of -1 or 0 is
        equivalent to infinity)
    """
    device_name, desc = WinPcapDevices.get_matching_device(pattern)
    if device_name is not None:
        with WinPcap(device_name) as capture:
            capture.send(packet_buffer)
            if callback is not None:
                capture.run(callback=callback, limit=limit)
Send a buffer as a packet to a network interface and optionally capture a response :param pattern: a wildcard pattern to match the description of a network interface to capture packets on :param packet_buffer: a buffer to send (length shouldn't exceed MAX_INT) :param callback: If not None, a function to call with each intercepted packet :param limit: how many packets to capture (A value of -1 or 0 is equivalent to infinity)
entailment
def get_next_value(
        sequence_name='default', initial_value=1, reset_value=None,
        *, nowait=False, using=None):
    """
    Return the next value for a given sequence.

    """
    # Inner import because models cannot be imported before their application.
    from .models import Sequence

    if reset_value is not None:
        assert initial_value < reset_value

    if using is None:
        using = router.db_for_write(Sequence)

    connection = connections[using]

    if (getattr(connection, 'pg_version', 0) >= 90500
            and reset_value is None and not nowait):

        # PostgreSQL ≥ 9.5 supports "upsert".
        with connection.cursor() as cursor:
            cursor.execute(UPSERT_QUERY, [sequence_name, initial_value])
            last, = cursor.fetchone()
            return last

    else:

        # Other databases require making more database queries.
        with transaction.atomic(using=using, savepoint=False):
            sequence, created = (
                Sequence.objects
                        .select_for_update(nowait=nowait)
                        .get_or_create(name=sequence_name,
                                       defaults={'last': initial_value})
            )

            if not created:
                sequence.last += 1
                if reset_value is not None and sequence.last >= reset_value:
                    sequence.last = initial_value
                sequence.save()

            return sequence.last
Return the next value for a given sequence.
entailment
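A brief usage sketch (the sequence name is hypothetical); since the row lock is held until commit, this is normally called inside the transaction that consumes the value:

from django.db import transaction

with transaction.atomic():
    # Default sequence, starting at 1
    reference = get_next_value()

    # Named sequence that wraps back to 1 after reaching 999
    invoice_number = get_next_value('invoices', reset_value=1000)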
def check(self, final_line_count):
    """Check the status of all provided data and update the suite."""
    if self._lines_seen["version"]:
        self._process_version_lines()
    self._process_plan_lines(final_line_count)
Check the status of all provided data and update the suite.
entailment
def _process_version_lines(self):
    """Process version line rules."""
    if len(self._lines_seen["version"]) > 1:
        self._add_error(_("Multiple version lines appeared."))
    elif self._lines_seen["version"][0] != 1:
        self._add_error(_("The version must be on the first line."))
Process version line rules.
entailment
def _process_plan_lines(self, final_line_count):
    """Process plan line rules."""
    if not self._lines_seen["plan"]:
        self._add_error(_("Missing a plan."))
        return

    if len(self._lines_seen["plan"]) > 1:
        self._add_error(_("Only one plan line is permitted per file."))
        return

    plan, at_line = self._lines_seen["plan"][0]
    if not self._plan_on_valid_line(at_line, final_line_count):
        self._add_error(
            _("A plan must appear at the beginning or end of the file.")
        )
        return

    if plan.expected_tests != self._lines_seen["test"]:
        self._add_error(
            _("Expected {expected_count} tests but only {seen_count} ran.").format(
                expected_count=plan.expected_tests,
                seen_count=self._lines_seen["test"],
            )
        )
Process plan line rules.
entailment
def _plan_on_valid_line(self, at_line, final_line_count):
    """Check if a plan is on a valid line."""
    # Put the common cases first.
    if at_line == 1 or at_line == final_line_count:
        return True

    # The plan may only appear on line 2 if the version is at line 1.
    after_version = (
        self._lines_seen["version"]
        and self._lines_seen["version"][0] == 1
        and at_line == 2
    )
    if after_version:
        return True

    return False
Check if a plan is on a valid line.
entailment
def handle_bail(self, bail):
    """Handle a bail line."""
    self._add_error(_("Bailed: {reason}").format(reason=bail.reason))
Handle a bail line.
entailment
def handle_skipping_plan(self, skip_plan):
    """Handle a plan that contains a SKIP directive."""
    skip_line = Result(True, None, skip_plan.directive.text, Directive("SKIP"))
    self._suite.addTest(Adapter(self._filename, skip_line))
Handle a plan that contains a SKIP directive.
entailment
def _add_error(self, message):
    """Add an error test to the suite."""
    error_line = Result(False, None, message, Directive(""))
    self._suite.addTest(Adapter(self._filename, error_line))
Add an error test to the suite.
entailment
def format_exception(exception):
    """Format an exception as diagnostics output.

    exception is the tuple as expected from sys.exc_info.
    """
    exception_lines = traceback.format_exception(*exception)

    # The lines returned from format_exception do not strictly contain
    # one line per element in the list (i.e. some elements have new
    # line characters in the middle). Normalize that oddity.
    lines = "".join(exception_lines).splitlines(True)

    return format_as_diagnostics(lines)
Format an exception as diagnostics output. exception is the tuple as expected from sys.exc_info.
entailment
def parse(self, fh):
    """Generate tap.line.Line objects, given a file-like object `fh`.

    `fh` may be any object that implements both the iterator and
    context management protocol (i.e. it can be used in both a
    "with" statement and a "for...in" statement.)

    Trailing whitespace and newline characters will be automatically
    stripped from the input lines.
    """
    with fh:
        try:
            first_line = next(fh)
        except StopIteration:
            return
        first_parsed = self.parse_line(first_line.rstrip())
        fh_new = itertools.chain([first_line], fh)
        if first_parsed.category == "version" and first_parsed.version >= 13:
            if ENABLE_VERSION_13:
                fh_new = peekable(itertools.chain([first_line], fh))
                self._try_peeking = True
            else:  # pragma: no cover
                print(
                    """
WARNING: Optional imports not found, TAP 13 output will be ignored.
To parse yaml, see requirements in docs:
https://tappy.readthedocs.io/en/latest/consumers.html#tap-version-13"""
                )
        for line in fh_new:
            yield self.parse_line(line.rstrip(), fh_new)
Generate tap.line.Line objects, given a file-like object `fh`. `fh` may be any object that implements both the iterator and context management protocol (i.e. it can be used in both a "with" statement and a "for...in" statement.) Trailing whitespace and newline characters will be automatically stripped from the input lines.
entailment
def parse_line(self, text, fh=None):
    """Parse a line into whatever TAP category it belongs."""
    match = self.ok.match(text)
    if match:
        return self._parse_result(True, match, fh)

    match = self.not_ok.match(text)
    if match:
        return self._parse_result(False, match, fh)

    if self.diagnostic.match(text):
        return Diagnostic(text)

    match = self.plan.match(text)
    if match:
        return self._parse_plan(match)

    match = self.bail.match(text)
    if match:
        return Bail(match.group("reason"))

    match = self.version.match(text)
    if match:
        return self._parse_version(match)

    return Unknown()
Parse a line into whatever TAP category it belongs.
entailment
def _parse_plan(self, match):
    """Parse a matching plan line."""
    expected_tests = int(match.group("expected"))
    directive = Directive(match.group("directive"))

    # Only SKIP directives are allowed in the plan.
    if directive.text and not directive.skip:
        return Unknown()

    return Plan(expected_tests, directive)
Parse a matching plan line.
entailment
def _parse_result(self, ok, match, fh=None):
    """Parse a matching result line into a result instance."""
    peek_match = None
    try:
        if fh is not None and self._try_peeking:
            peek_match = self.yaml_block_start.match(fh.peek())
    except StopIteration:
        pass
    if peek_match is None:
        return Result(
            ok,
            number=match.group("number"),
            description=match.group("description").strip(),
            directive=Directive(match.group("directive")),
        )
    indent = peek_match.group("indent")
    concat_yaml = self._extract_yaml_block(indent, fh)
    return Result(
        ok,
        number=match.group("number"),
        description=match.group("description").strip(),
        directive=Directive(match.group("directive")),
        raw_yaml_block=concat_yaml,
    )
Parse a matching result line into a result instance.
entailment
def _extract_yaml_block(self, indent, fh):
    """Extract a raw yaml block from a file handler"""
    raw_yaml = []
    indent_match = re.compile(r"^{}".format(indent))
    try:
        fh.next()
        while indent_match.match(fh.peek()):
            raw_yaml.append(fh.next().replace(indent, "", 1))
            # check for the end and stop adding yaml if encountered
            if self.yaml_block_end.match(fh.peek()):
                fh.next()
                break
    except StopIteration:
        pass
    return "\n".join(raw_yaml)
Extract a raw yaml block from a file handler
entailment
def yaml_block(self):
    """Lazy load a yaml_block.

    If yaml support is not available,
    there is an error in parsing the yaml block,
    or no yaml is associated with this result,
    ``None`` will be returned.

    :rtype: dict
    """
    if LOAD_YAML and self._yaml_block is not None:
        try:
            yaml_dict = yaml.load(self._yaml_block)
            return yaml_dict
        except yaml.error.YAMLError:
            print("Error parsing yaml block. Check formatting.")
    return None
Lazy load a yaml_block. If yaml support is not available, there is an error in parsing the yaml block, or no yaml is associated with this result, ``None`` will be returned. :rtype: dict
entailment
def load(self, files):
    """Load any files found into a suite.

    Any directories are walked and their files are added as TAP files.

    :returns: A ``unittest.TestSuite`` instance
    """
    suite = unittest.TestSuite()
    for filepath in files:
        if os.path.isdir(filepath):
            self._find_tests_in_directory(filepath, suite)
        else:
            suite.addTest(self.load_suite_from_file(filepath))
    return suite
Load any files found into a suite. Any directories are walked and their files are added as TAP files. :returns: A ``unittest.TestSuite`` instance
entailment
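A minimal usage sketch for the loader; the TAP file path is hypothetical:

import unittest

loader = Loader()
suite = loader.load(['results.tap'])  # hypothetical TAP file path
unittest.TextTestRunner().run(suite)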
def load_suite_from_file(self, filename):
    """Load a test suite with test lines from the provided TAP file.

    :returns: A ``unittest.TestSuite`` instance
    """
    suite = unittest.TestSuite()
    rules = Rules(filename, suite)

    if not os.path.exists(filename):
        rules.handle_file_does_not_exist()
        return suite

    line_generator = self._parser.parse_file(filename)
    return self._load_lines(filename, line_generator, suite, rules)
Load a test suite with test lines from the provided TAP file. :returns: A ``unittest.TestSuite`` instance
entailment
def load_suite_from_stdin(self):
    """Load a test suite with test lines from the TAP stream on STDIN.

    :returns: A ``unittest.TestSuite`` instance
    """
    suite = unittest.TestSuite()
    rules = Rules("stream", suite)
    line_generator = self._parser.parse_stdin()
    return self._load_lines("stream", line_generator, suite, rules)
Load a test suite with test lines from the TAP stream on STDIN. :returns: A ``unittest.TestSuite`` instance
entailment
def _load_lines(self, filename, line_generator, suite, rules):
    """Load a suite with lines produced by the line generator."""
    line_counter = 0
    for line in line_generator:
        line_counter += 1

        if line.category in self.ignored_lines:
            continue

        if line.category == "test":
            suite.addTest(Adapter(filename, line))
            rules.saw_test()
        elif line.category == "plan":
            if line.skip:
                rules.handle_skipping_plan(line)
                return suite
            rules.saw_plan(line, line_counter)
        elif line.category == "bail":
            rules.handle_bail(line)
            return suite
        elif line.category == "version":
            rules.saw_version_at(line_counter)

    rules.check(line_counter)
    return suite
Load a suite with lines produced by the line generator.
entailment
def _track(self, class_name):
    """Keep track of which test cases have executed."""
    if self._test_cases.get(class_name) is None:
        if self.streaming and self.header:
            self._write_test_case_header(class_name, self.stream)

        self._test_cases[class_name] = []
        if self.combined:
            self.combined_test_cases_seen.append(class_name)
Keep track of which test cases have executed.
entailment
def set_plan(self, total):
    """Notify the tracker how many total tests there will be."""
    self.plan = total
    if self.streaming:
        # This will only write the plan if we haven't written it
        # already but we want to check if we already wrote a
        # test out (in which case we can't just write the plan out
        # right here).
        if not self.combined_test_cases_seen:
            self._write_plan(self.stream)
    elif not self.combined:
        raise ValueError(
            "set_plan can only be used with combined or streaming output"
        )
Notify the tracker how many total tests there will be.
entailment
def generate_tap_reports(self):
    """Generate TAP reports.

    The results are either combined into a single output file or
    the output file name is generated from the test case.
    """
    # We're streaming but set_plan wasn't called, so we can only
    # know the plan now (at the end).
    if self.streaming and not self._plan_written:
        print("1..{0}".format(self.combined_line_number), file=self.stream)
        self._plan_written = True
        return

    if self.combined:
        combined_file = "testresults.tap"
        if self.outdir:
            combined_file = os.path.join(self.outdir, combined_file)
        with open(combined_file, "w") as out_file:
            self._write_tap_version(out_file)
            if self.plan is not None:
                print("1..{0}".format(self.plan), file=out_file)
            for test_case in self.combined_test_cases_seen:
                self.generate_tap_report(
                    test_case, self._test_cases[test_case], out_file
                )
            if self.plan is None:
                print("1..{0}".format(self.combined_line_number),
                      file=out_file)
    else:
        for test_case, tap_lines in self._test_cases.items():
            with open(self._get_tap_file_path(test_case), "w") as out_file:
                self._write_tap_version(out_file)
                self.generate_tap_report(test_case, tap_lines, out_file)
Generate TAP reports. The results are either combined into a single output file or the output file name is generated from the test case.
entailment
def _write_plan(self, stream):
    """Write the plan line to the stream.

    If we have a plan and have not yet written it out, write it to
    the given stream.
    """
    if self.plan is not None:
        if not self._plan_written:
            print("1..{0}".format(self.plan), file=stream)
            self._plan_written = True
Write the plan line to the stream. If we have a plan and have not yet written it out, write it to the given stream.
entailment
def _get_tap_file_path(self, test_case):
    """Get the TAP output file path for the test case."""
    sanitized_test_case = test_case.translate(self._sanitized_table)
    tap_file = sanitized_test_case + ".tap"
    if self.outdir:
        return os.path.join(self.outdir, tap_file)
    return tap_file
Get the TAP output file path for the test case.
entailment
def main(argv=sys.argv, stream=sys.stderr):
    """Entry point for ``tappy`` command."""
    args = parse_args(argv)
    suite = build_suite(args)
    runner = unittest.TextTestRunner(verbosity=args.verbose, stream=stream)
    result = runner.run(suite)

    return get_status(result)
Entry point for ``tappy`` command.
entailment
def build_suite(args):
    """Build a test suite by loading TAP files or a TAP stream."""
    loader = Loader()
    if len(args.files) == 0 or args.files[0] == "-":
        suite = loader.load_suite_from_stdin()
    else:
        suite = loader.load(args.files)
    return suite
Build a test suite by loading TAP files or a TAP stream.
entailment
def addFailure(self, result):
    """Add a failure to the result."""
    result.addFailure(self, (Exception, Exception(), None))
    # Since TAP will not provide assertion data, clean up the assertion
    # section so it is not so spaced out.
    test, err = result.failures[-1]
    result.failures[-1] = (test, "")
Add a failure to the result.
entailment
def mptt_before_insert(mapper, connection, instance):
    """ Based on example
    https://bitbucket.org/zzzeek/sqlalchemy/src/73095b353124/examples/nested_sets/nested_sets.py?at=master
    """
    table = _get_tree_table(mapper)
    db_pk = instance.get_pk_column()
    table_pk = getattr(table.c, db_pk.name)

    if instance.parent_id is None:
        instance.left = 1
        instance.right = 2
        instance.level = instance.get_default_level()
        tree_id = connection.scalar(
            select([func.max(table.c.tree_id) + 1])) or 1
        instance.tree_id = tree_id
    else:
        (parent_pos_left,
         parent_pos_right,
         parent_tree_id,
         parent_level) = connection.execute(
            select([table.c.lft, table.c.rgt, table.c.tree_id, table.c.level])
            .where(table_pk == instance.parent_id)
        ).fetchone()

        # Update key of right side
        connection.execute(
            table.update(
                and_(table.c.rgt >= parent_pos_right,
                     table.c.tree_id == parent_tree_id)
            ).values(
                lft=case(
                    [(table.c.lft > parent_pos_right, table.c.lft + 2)],
                    else_=table.c.lft
                ),
                rgt=case(
                    [(table.c.rgt >= parent_pos_right, table.c.rgt + 2)],
                    else_=table.c.rgt
                )
            )
        )

        instance.level = parent_level + 1
        instance.tree_id = parent_tree_id
        instance.left = parent_pos_right
        instance.right = parent_pos_right + 1
Based on example https://bitbucket.org/zzzeek/sqlalchemy/src/73095b353124/examples/nested_sets/nested_sets.py?at=master
entailment
def mptt_before_update(mapper, connection, instance): """ Based on this example: http://stackoverflow.com/questions/889527/move-node-in-nested-set """ node_id = getattr(instance, instance.get_pk_name()) table = _get_tree_table(mapper) db_pk = instance.get_pk_column() default_level = instance.get_default_level() table_pk = getattr(table.c, db_pk.name) mptt_move_inside = None left_sibling = None left_sibling_tree_id = None if hasattr(instance, 'mptt_move_inside'): mptt_move_inside = instance.mptt_move_inside if hasattr(instance, 'mptt_move_before'): ( right_sibling_left, right_sibling_right, right_sibling_parent, right_sibling_level, right_sibling_tree_id ) = connection.execute( select( [ table.c.lft, table.c.rgt, table.c.parent_id, table.c.level, table.c.tree_id ] ).where( table_pk == instance.mptt_move_before ) ).fetchone() current_lvl_nodes = connection.execute( select( [ table.c.lft, table.c.rgt, table.c.parent_id, table.c.tree_id ] ).where( and_( table.c.level == right_sibling_level, table.c.tree_id == right_sibling_tree_id, table.c.lft < right_sibling_left ) ) ).fetchall() if current_lvl_nodes: ( left_sibling_left, left_sibling_right, left_sibling_parent, left_sibling_tree_id ) = current_lvl_nodes[-1] instance.parent_id = left_sibling_parent left_sibling = { 'lft': left_sibling_left, 'rgt': left_sibling_right, 'is_parent': False } # if move_before to top level elif not right_sibling_parent: left_sibling_tree_id = right_sibling_tree_id - 1 # if placed after a particular node if hasattr(instance, 'mptt_move_after'): ( left_sibling_left, left_sibling_right, left_sibling_parent, left_sibling_tree_id ) = connection.execute( select( [ table.c.lft, table.c.rgt, table.c.parent_id, table.c.tree_id ] ).where( table_pk == instance.mptt_move_after ) ).fetchone() instance.parent_id = left_sibling_parent left_sibling = { 'lft': left_sibling_left, 'rgt': left_sibling_right, 'is_parent': False } """ Get subtree from node SELECT id, name, level FROM my_tree WHERE left_key >= $left_key AND right_key <= $right_key ORDER BY left_key """ subtree = connection.execute( select([table_pk]) .where( and_( table.c.lft >= instance.left, table.c.rgt <= instance.right, table.c.tree_id == instance.tree_id ) ).order_by( table.c.lft ) ).fetchall() subtree = [x[0] for x in subtree] """ step 0: Initialize parameters. Put there left and right position of moving node """ ( node_pos_left, node_pos_right, node_tree_id, node_parent_id, node_level ) = connection.execute( select( [ table.c.lft, table.c.rgt, table.c.tree_id, table.c.parent_id, table.c.level ] ).where( table_pk == node_id ) ).fetchone() # if instance just update w/o move # XXX why this str() around parent_id comparison? 
    if not left_sibling \
            and str(node_parent_id) == str(instance.parent_id) \
            and not mptt_move_inside:
        if left_sibling_tree_id is None:
            return

    # fix tree sorting
    if instance.parent_id is not None:
        (
            parent_id,
            parent_pos_right,
            parent_pos_left,
            parent_tree_id,
            parent_level
        ) = connection.execute(
            select(
                [
                    table_pk,
                    table.c.rgt,
                    table.c.lft,
                    table.c.tree_id,
                    table.c.level
                ]
            ).where(
                table_pk == instance.parent_id
            )
        ).fetchone()
        if node_parent_id is None and node_tree_id == parent_tree_id:
            instance.parent_id = None
            return

    # delete from the old tree
    mptt_before_delete(mapper, connection, instance, False)

    if instance.parent_id is not None:
        """ Store the right position of the new parent node (where the
            moving node should be placed)
        """
        (
            parent_id,
            parent_pos_right,
            parent_pos_left,
            parent_tree_id,
            parent_level
        ) = connection.execute(
            select(
                [
                    table_pk,
                    table.c.rgt,
                    table.c.lft,
                    table.c.tree_id,
                    table.c.level
                ]
            ).where(
                table_pk == instance.parent_id
            )
        ).fetchone()
        # 'size' of the moving node (including all of its subnodes)
        node_size = node_pos_right - node_pos_left + 1

        # left sibling node
        if not left_sibling:
            left_sibling = {
                'lft': parent_pos_left,
                'rgt': parent_pos_right,
                'is_parent': True
            }

        # insert the subtree into the existing tree
        instance.tree_id = parent_tree_id
        _insert_subtree(
            table,
            connection,
            node_size,
            node_pos_left,
            node_pos_right,
            parent_pos_left,
            parent_pos_right,
            subtree,
            parent_tree_id,
            parent_level,
            node_level,
            left_sibling,
            table_pk
        )
    else:
        # if inserted after a left sibling tree
        if left_sibling_tree_id or left_sibling_tree_id == 0:
            tree_id = left_sibling_tree_id + 1
            connection.execute(
                table.update(
                    table.c.tree_id > left_sibling_tree_id
                ).values(
                    tree_id=table.c.tree_id + 1
                )
            )
        # otherwise inserted as the last tree
        else:
            tree_id = connection.scalar(
                select(
                    [
                        func.max(table.c.tree_id) + 1
                    ]
                )
            )

        connection.execute(
            table.update(
                table_pk.in_(
                    subtree
                )
            ).values(
                lft=table.c.lft - node_pos_left + 1,
                rgt=table.c.rgt - node_pos_left + 1,
                level=table.c.level - node_level + default_level,
                tree_id=tree_id
            )
        )
Based on this example: http://stackoverflow.com/questions/889527/move-node-in-nested-set
entailment
def after_flush_postexec(self, session, context):
    """ Event listener which recursively expires the `left` and `right`
    attributes of the parents of all modified instances that are part of
    this flush.
    """
    instances = self.instances[session]
    while instances:
        instance = instances.pop()
        if instance not in session:
            continue
        parent = self.get_parent_value(instance)

        # Walk up the ancestor chain, expiring each parent; the `else`
        # branch of the while loop then expires the instance itself once
        # the chain is exhausted.
        while parent != NO_VALUE and parent is not None:
            instances.discard(parent)
            session.expire(parent, ['left', 'right', 'tree_id', 'level'])
            parent = self.get_parent_value(parent)
        else:
            session.expire(instance, ['left', 'right', 'tree_id', 'level'])
            self.expire_session_for_children(session, instance)
Event listener which recursively expires the `left` and `right` attributes of the parents of all modified instances that are part of this flush.
entailment
def is_ancestor_of(self, other, inclusive=False):
    """ Class- or instance-level method which returns True if self is an
    ancestor (closer to the root) of other, else False. The optional flag
    `inclusive` controls whether or not self is treated as an ancestor of
    itself.

    For example see:

    * :mod:`sqlalchemy_mptt.tests.cases.integrity.test_hierarchy_structure`
    """
    if inclusive:
        return (self.tree_id == other.tree_id) \
            & (self.left <= other.left) \
            & (other.right <= self.right)
    return (self.tree_id == other.tree_id) \
        & (self.left < other.left) \
        & (other.right < self.right)
Class- or instance-level method which returns True if self is an ancestor (closer to the root) of other, else False. The optional flag `inclusive` controls whether or not self is treated as an ancestor of itself.

For example see:

* :mod:`sqlalchemy_mptt.tests.cases.integrity.test_hierarchy_structure`
entailment
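Because the comparisons above are combined with `&`, `is_ancestor_of` yields a composable SQL expression at the class level and a plain boolean on loaded instances; a sketch using the hypothetical `Tree` model from the earlier example (node ids are likewise hypothetical):

    node = session.query(Tree).get(9)
    ancestors = session.query(Tree).filter(Tree.is_ancestor_of(node)).all()

    root = session.query(Tree).get(1)
    assert root.is_ancestor_of(node)
    assert node.is_ancestor_of(node, inclusive=True)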
def move_inside(self, parent_id):
    """ Move one node of the tree inside another.

    For example see:

    * :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_inside_function`
    * :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_inside_to_the_same_parent_function`
    """  # noqa
    session = Session.object_session(self)
    self.parent_id = parent_id
    # this flag is consumed by mptt_before_update on the next flush
    self.mptt_move_inside = parent_id
    session.add(self)
Move one node of the tree inside another.

For example see:

* :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_inside_function`
* :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_inside_to_the_same_parent_function`
entailment
def move_after(self, node_id):
    """ Move one node of the tree after another.

    For example see
    :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_after_function`
    """  # noqa
    session = Session.object_session(self)
    # reassigning parent_id to itself marks the attribute as dirty, so the
    # before_update listener fires even though the value is unchanged
    self.parent_id = self.parent_id
    self.mptt_move_after = node_id
    session.add(self)
Move one node of the tree after another.

For example see
:mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_after_function`
entailment
def move_before(self, node_id):
    """ Move one node of the tree before another.

    For example see:

    * :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_before_function`
    * :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_before_to_other_tree`
    * :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_before_to_top_level`
    """  # noqa
    session = Session.object_session(self)
    table = _get_tree_table(self.__mapper__)
    pk = getattr(table.c, self.get_pk_column().name)
    node = session.query(table).filter(pk == node_id).one()
    self.parent_id = node.parent_id
    self.mptt_move_before = node_id
    session.add(self)
Move one node of the tree before another.

For example see:

* :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_before_function`
* :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_before_to_other_tree`
* :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_before_to_top_level`
entailment
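The three movers above only set a flag (`mptt_move_inside`, `mptt_move_after` or `mptt_move_before`) that the `mptt_before_update` listener consumes on the next flush; a sketch with hypothetical node ids:

    node = session.query(Tree).get(11)
    node.move_inside(6)    # node 11 becomes a child of node 6
    session.commit()

    node.move_after(5)     # node 11 becomes the right sibling of node 5
    session.commit()

    node.move_before(4)    # node 11 is placed immediately before node 4
    session.commit()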
def leftsibling_in_level(self): """ Node to the left of the current node at the same level For example see :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_leftsibling_in_level` """ # noqa table = _get_tree_table(self.__mapper__) session = Session.object_session(self) current_lvl_nodes = session.query(table) \ .filter_by(level=self.level).filter_by(tree_id=self.tree_id) \ .filter(table.c.lft < self.left).order_by(table.c.lft).all() if current_lvl_nodes: return current_lvl_nodes[-1] return None
Node to the left of the current node at the same level For example see :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_leftsibling_in_level`
entailment
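A short sketch; note that the method queries the underlying table, so the result is a raw result row rather than a mapped instance (node ids follow the nested sets diagrams used throughout):

    node10 = session.query(Tree).get(10)
    sibling = node10.leftsibling_in_level()  # the row for node 8, or None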
def _node_to_dict(cls, node, json, json_fields): """ Helper method for ``get_tree``. """ if json: pk_name = node.get_pk_name() # jqTree or jsTree format result = {'id': getattr(node, pk_name), 'label': node.__repr__()} if json_fields: result.update(json_fields(node)) else: result = {'node': node} return result
Helper method for ``get_tree``.
entailment
def get_tree(cls, session=None, json=False, json_fields=None, query=None):
    """ This method generates a tree of the current node's table in dict
    or JSON format. You can make a custom query with the attribute
    ``query``. By default it returns all nodes in the table.

    Args:
        session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session

    Kwargs:
        json (bool): if True, return the JSON jqTree format
        json_fields (function): appends custom fields to the JSON
        query (function): takes a :class:`sqlalchemy.orm.query.Query`
        object as an argument and returns it in a modified form

        ::

            def query(nodes):
                return nodes.filter(node.__class__.tree_id.is_(node.tree_id))

            node.get_tree(session=DBSession, json=True, query=query)

    Example:

    * :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_get_tree`
    * :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_get_json_tree`
    * :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_get_json_tree_with_custom_field`
    """  # noqa
    tree = []
    nodes_of_level = {}

    # handle custom query
    nodes = cls._base_query(session)
    if query:
        nodes = query(nodes)
    nodes = cls._base_order(nodes).all()

    # find the minimal level among the nodes
    min_level = min([node.level for node in nodes] or [None])

    def get_node_id(node):
        return getattr(node, node.get_pk_name())

    for node in nodes:
        result = cls._node_to_dict(node, json, json_fields)
        parent_id = node.parent_id
        if node.level != min_level:
            # for children
            # Find the parent in the tree
            if parent_id not in nodes_of_level.keys():
                continue
            if 'children' not in nodes_of_level[parent_id]:
                nodes_of_level[parent_id]['children'] = []
            # Append node to parent
            nl = nodes_of_level[parent_id]['children']
            nl.append(result)
            nodes_of_level[get_node_id(node)] = nl[-1]
        else:
            # for top level nodes
            tree.append(result)
            nodes_of_level[get_node_id(node)] = tree[-1]
    return tree
This method generates a tree of the current node's table in dict or JSON format. You can make a custom query with the attribute ``query``. By default it returns all nodes in the table.

Args:
    session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session

Kwargs:
    json (bool): if True, return the JSON jqTree format
    json_fields (function): appends custom fields to the JSON
    query (function): takes a :class:`sqlalchemy.orm.query.Query` object as an argument and returns it in a modified form

    ::

        def query(nodes):
            return nodes.filter(node.__class__.tree_id.is_(node.tree_id))

        node.get_tree(session=DBSession, json=True, query=query)

Example:

* :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_get_tree`
* :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_get_json_tree`
* :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_get_json_tree_with_custom_field`
entailment
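A usage sketch; the `name` attribute read by `custom_fields` is an assumption, not something the mixin provides:

    tree = Tree.get_tree(session)
    # e.g. [{'node': <Tree 1>, 'children': [{'node': <Tree 2>}, ...]}, ...]

    def custom_fields(node):
        return {'title': getattr(node, 'name', None)}  # 'name' is assumed

    json_tree = Tree.get_tree(session, json=True, json_fields=custom_fields)
    # e.g. [{'id': 1, 'label': '<Tree 1>', 'title': None, 'children': [...]}]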
def drilldown_tree(self, session=None, json=False, json_fields=None):
    """ This method generates a branch of a tree, beginning with the
    current node.

    For example:

        node7.drilldown_tree()

    .. code::

        level           Nested sets example

                                    ---------------------
        1            1(1)22         |                   |
               ________|____________|____               |
              |        |            |    |              |
        2   2(2)5    6(4)11         | 12(7)21           |
              |        ^            |    ^              |
        3   3(3)4  7(5)8  9(6)10    | 13(8)16  17(10)20 |
                                    |    |        |     |
        4                           | 14(9)15  18(11)19 |
                                    |                   |
                                    ---------------------

    Example in tests:

    * :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_drilldown_tree`
    """
    if not session:
        session = object_session(self)
    return self.get_tree(
        session,
        json=json,
        json_fields=json_fields,
        query=self._drilldown_query
    )
This method generates a branch of a tree, beginning with the current node.

For example:

    node7.drilldown_tree()

.. code::

    level           Nested sets example

                                ---------------------
    1            1(1)22         |                   |
           ________|____________|____               |
          |        |            |    |              |
    2   2(2)5    6(4)11         | 12(7)21           |
          |        ^            |    ^              |
    3   3(3)4  7(5)8  9(6)10    | 13(8)16  17(10)20 |
                                |    |        |     |
    4                           | 14(9)15  18(11)19 |
                                |                   |
                                ---------------------

Example in tests:

* :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_drilldown_tree`
entailment
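A sketch against the diagram above (node ids per the nested sets example):

    node7 = session.query(Tree).get(7)
    branch = node7.drilldown_tree(session)
    # [{'node': <Tree 7>, 'children': [{'node': <Tree 8>, ...},
    #                                  {'node': <Tree 10>, ...}]}]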
def path_to_root(self, session=None, order=desc): """Generate path from a leaf or intermediate node to the root. For example: node11.path_to_root() .. code:: level Nested sets example ----------------------------------------- 1 | 1(1)22 | ________|______|_____________________ | | | | | | | ------+--------- | | 2 2(2)5 6(4)11 | -- 12(7)21 | | ^ | / \ | 3 3(3)4 7(5)8 9(6)10 ---/---- \ | 13(8)16 | 17(10)20 | | | | | 4 14(9)15 | 18(11)19 | | | ------------- """ table = self.__class__ query = self._base_query_obj(session=session) query = query.filter(table.is_ancestor_of(self, inclusive=True)) return self._base_order(query, order=order)
Generate path from a leaf or intermediate node to the root. For example: node11.path_to_root() .. code:: level Nested sets example ----------------------------------------- 1 | 1(1)22 | ________|______|_____________________ | | | | | | | ------+--------- | | 2 2(2)5 6(4)11 | -- 12(7)21 | | ^ | / \ | 3 3(3)4 7(5)8 9(6)10 ---/---- \ | 13(8)16 | 17(10)20 | | | | | 4 14(9)15 | 18(11)19 | | | -------------
entailment
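A sketch against the diagram above; `path_to_root` returns a query, so it still needs to be executed:

    node11 = session.query(Tree).get(11)
    path = node11.path_to_root().all()
    # deepest node first with the default desc order:
    # [<Tree 11>, <Tree 10>, <Tree 7>, <Tree 1>]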
def rebuild_tree(cls, session, tree_id):
    """ This method rebuilds the tree.

    Args:
        session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session
        tree_id (int or str): id of tree

    Example:

    * :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_rebuild`
    """
    session.query(cls).filter_by(tree_id=tree_id)\
        .update({cls.left: 0, cls.right: 0, cls.level: 0})
    top = session.query(cls).filter_by(parent_id=None)\
        .filter_by(tree_id=tree_id).one()
    top.left = left = 1
    top.right = right = 2
    top.level = level = cls.get_default_level()

    def recursive(children, left, right, level):
        level = level + 1
        for i, node in enumerate(children):
            same_level_right = children[i - 1].right
            left = left + 1

            if i > 0:
                left = left + 1
            if same_level_right:
                left = same_level_right + 1

            right = left + 1
            node.left = left
            node.right = right
            parent = node.parent

            j = 0
            while parent:
                parent.right = right + 1 + j
                parent = parent.parent
                j += 1

            node.level = level
            recursive(node.children, left, right, level)

    recursive(top.children, left, right, level)
This method rebuilds the tree.

Args:
    session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session
    tree_id (int or str): id of tree

Example:

* :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_rebuild`
entailment
def rebuild(cls, session, tree_id=None):
    """ This function rebuilds trees.

    Args:
        session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session

    Kwargs:
        tree_id (int or str): id of tree, default None

    Example:

    * :mod:`sqlalchemy_mptt.tests.TestTree.test_rebuild`
    """
    trees = session.query(cls).filter_by(parent_id=None)
    if tree_id:
        trees = trees.filter_by(tree_id=tree_id)
    for tree in trees:
        cls.rebuild_tree(session, tree.tree_id)
This function rebuilds trees.

Args:
    session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session

Kwargs:
    tree_id (int or str): id of tree, default None

Example:

* :mod:`sqlalchemy_mptt.tests.TestTree.test_rebuild`
entailment
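A typical use is repairing lft/rgt/level after rows were bulk-loaded with only parent_id populated; a sketch:

    Tree.rebuild(session)              # rebuild every tree
    Tree.rebuild(session, tree_id=1)   # or a single tree
    session.commit()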
def qx(mt, x):
    """ qx: Returns the probability that a life aged x dies before 1 year.
    With the convention: the true probability is qx/1000.

    Args:
        mt: the mortality table
        x: the age as an integer number
    """
    if x < len(mt.qx):
        return mt.qx[x]
    else:
        return 0
qx: Returns the probability that a life aged x dies before 1 year. With the convention: the true probability is qx/1000.

Args:
    mt: the mortality table
    x: the age as an integer number
entailment
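A worked sketch with a toy stand-in for the mortality table; only the `qx` attribute these helpers read is provided, following the per-mille convention above:

    class ToyTable(object):
        qx = [1.2, 1.4, 2.0]  # deaths per 1000 at ages 0, 1 and 2

    mt = ToyTable()
    print(qx(mt, 1))    # 1.4, i.e. a true one-year death probability of 0.0014
    print(qx(mt, 99))   # 0, the age lies beyond the table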
def lx(mt, x):
    """ lx : Returns the number of survivors at the beginning of age x """
    if x < len(mt.lx):
        return mt.lx[x]
    else:
        return 0
lx : Returns the number of survivors at the beginning of age x
entailment
def dx(mt, x):
    """ Returns the number dying at the beginning of age x,
    i.e. the number of deaths between exact ages x and x + 1.
    """
    end_x_val = mt.lx.index(0)
    if x < end_x_val:
        return mt.lx[x] - mt.lx[x + 1]
    else:
        return 0.0
Returns the number dying at the beginning of age x, i.e. the number of deaths between exact ages x and x + 1.
entailment
def tpx(mt, x, t):
    """ tpx : Returns the probability that x will survive within t years.
    Also written npx: the n-year survival probability at age x.
    """
    return mt.lx[x + t] / mt.lx[x]
tpx : Returns the probability that x will survive within t years. Also written npx: the n-year survival probability at age x.
entailment
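A worked sketch with a toy survivor column (tqx is defined just below):

    class ToyTable(object):
        lx = [1000.0, 990.0, 975.0, 950.0, 0.0]  # survivors at each age

    mt = ToyTable()
    print(tpx(mt, 0, 2))   # 975 / 1000 = 0.975
    print(tqx(mt, 0, 2))   # (1000 - 975) / 1000 = 0.025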
def tqx(mt, x, t):
    """ tqx : Returns the probability that a life aged x dies within t years
    """
    return (mt.lx[x] - mt.lx[x + t]) / mt.lx[x]
tqx : Returns the probability that a life aged x dies within t years
entailment
def tqxn(mt, x, n, t):
    """ n/tqx : Probability that a life aged x survives n years, and then
    dies within the subsequent t years.
    """
    # deferred probability: survive n years, then die within the next t years
    return tpx(mt, x, n) * tqx(mt, x + n, t)
n/tqx : Probability that a life aged x survives n years, and then dies within the subsequent t years.
entailment
def ex(mt, x):
    """ ex : Returns the expectation of life at age x: the curtate
    expectation plus 0.5. Life expectancy.
    """
    # sum the survivor column beyond age x (the final, zero entry is skipped)
    sum1 = sum(mt.lx[x + 1:-1])
    try:
        return sum1 / mt.lx[x] + 0.5
    except (ZeroDivisionError, IndexError):
        return 0
ex : Returns the expectation of life at age x: the curtate expectation plus 0.5. Life expectancy.
entailment
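Continuing the toy survivor column from the tpx sketch:

    mt = ToyTable()
    print(ex(mt, 0))   # (990 + 975 + 950) / 1000 + 0.5 = 3.415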
def Sx(mt, x):
    """ Sx : Returns the commutation function Sx, the sum of Nx from
    age x to the end of the table.
    """
    return sum(mt.Nx[x:])
Sx : Returns the commutation function Sx, the sum of Nx from age x to the end of the table.
entailment
def Cx(mt, x):
    """ Cx : Returns the commutation function Cx, the discounted number
    of deaths at age x.
    """
    # v ** (x + 1) * dx, with a half-year adjustment that assumes deaths
    # occur on average in the middle of the year
    return ((1 / (1 + mt.i)) ** (x + 1)) * mt.dx[x] * ((1 + mt.i) ** 0.5)
Cx : Returns the commutation function Cx, the discounted number of deaths at age x.
entailment
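A worked sketch with toy values for the interest rate and the deaths column; the final (1 + i) ** 0.5 factor in Cx shifts the discounting to mid-year, when deaths are assumed to occur on average:

    class ToyCommutation(object):
        i = 0.02                 # technical interest rate
        dx = [10.0, 12.0, 15.0]  # deaths column

    mt = ToyCommutation()
    print(Cx(mt, 0))   # (1 / 1.02) ** 1 * 10.0 * 1.02 ** 0.5 ≈ 9.9015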