repository | repo_id | target_module_path | prompt | relavent_test_path | full_function | function_name | context-complexity
---|---|---|---|---|---|---|---|
seaborn | 6 | seaborn/_base.py | def iter_data(
self, grouping_vars=None, *,
reverse=False, from_comp_data=False,
by_facet=True, allow_empty=False, dropna=True,
):
"""Generator for getting subsets of data defined by semantic variables.
Also injects "col" and "row" into grouping semantics.
Parameters
----------
grouping_vars : string or list of strings
Semantic variables that define the subsets of data.
reverse : bool
If True, reverse the order of iteration.
from_comp_data : bool
If True, use self.comp_data rather than self.plot_data
by_facet : bool
If True, add faceting variables to the set of grouping variables.
allow_empty : bool
If True, yield an empty dataframe when no observations exist for
combinations of grouping variables.
dropna : bool
If True, remove rows with missing data.
Yields
------
sub_vars : dict
Keys are semantic names, values are the level of that semantic.
sub_data : :class:`pandas.DataFrame`
Subset of ``plot_data`` for this combination of semantic values.
"""
| /usr/src/app/target_test_cases/failed_tests_VectorPlotter.iter_data.txt | def iter_data(
self, grouping_vars=None, *,
reverse=False, from_comp_data=False,
by_facet=True, allow_empty=False, dropna=True,
):
"""Generator for getting subsets of data defined by semantic variables.
Also injects "col" and "row" into grouping semantics.
Parameters
----------
grouping_vars : string or list of strings
Semantic variables that define the subsets of data.
reverse : bool
If True, reverse the order of iteration.
from_comp_data : bool
If True, use self.comp_data rather than self.plot_data
by_facet : bool
If True, add faceting variables to the set of grouping variables.
allow_empty : bool
If True, yield an empty dataframe when no observations exist for
combinations of grouping variables.
dropna : bool
If True, remove rows with missing data.
Yields
------
sub_vars : dict
Keys are semantic names, values are the level of that semantic.
sub_data : :class:`pandas.DataFrame`
Subset of ``plot_data`` for this combination of semantic values.
"""
# TODO should this default to using all (non x/y?) semantics?
# or define grouping vars somewhere?
if grouping_vars is None:
grouping_vars = []
elif isinstance(grouping_vars, str):
grouping_vars = [grouping_vars]
elif isinstance(grouping_vars, tuple):
grouping_vars = list(grouping_vars)
# Always insert faceting variables
if by_facet:
facet_vars = {"col", "row"}
grouping_vars.extend(
facet_vars & set(self.variables) - set(grouping_vars)
)
# Reduce to the semantics used in this plot
grouping_vars = [var for var in grouping_vars if var in self.variables]
if from_comp_data:
data = self.comp_data
else:
data = self.plot_data
if dropna:
data = data.dropna()
levels = self.var_levels.copy()
if from_comp_data:
for axis in {"x", "y"} & set(grouping_vars):
converter = self.converters[axis].iloc[0]
if self.var_types[axis] == "categorical":
if self._var_ordered[axis]:
# If the axis is ordered, then the axes in a possible
# facet grid are by definition "shared", or there is a
# single axis with a unique cat -> idx mapping.
# So we can just take the first converter object.
levels[axis] = converter.convert_units(levels[axis])
else:
# Otherwise, the mappings may not be unique, but we can
# use the unique set of index values in comp_data.
levels[axis] = np.sort(data[axis].unique())
else:
transform = converter.get_transform().transform
levels[axis] = transform(converter.convert_units(levels[axis]))
if grouping_vars:
grouped_data = data.groupby(
grouping_vars, sort=False, as_index=False, observed=False,
)
grouping_keys = []
for var in grouping_vars:
key = levels.get(var)
grouping_keys.append([] if key is None else key)
iter_keys = itertools.product(*grouping_keys)
if reverse:
iter_keys = reversed(list(iter_keys))
for key in iter_keys:
pd_key = (
key[0] if len(key) == 1 and _version_predates(pd, "2.2.0") else key
)
try:
data_subset = grouped_data.get_group(pd_key)
except KeyError:
# XXX we are adding this to allow backwards compatibility
# with the empty artists that old categorical plots would
# add (before 0.12), which we may decide to break, in which
# case this option could be removed
data_subset = data.loc[[]]
if data_subset.empty and not allow_empty:
continue
sub_vars = dict(zip(grouping_vars, key))
yield sub_vars, data_subset.copy()
else:
yield {}, data.copy()
| VectorPlotter.iter_data | Self-Contained |
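For orientation, a minimal sketch of driving this generator. `VectorPlotter` lives in the private `seaborn._base` module shown above, so the constructor signature used here is an assumption and may change between releases.

```python
# Sketch only: VectorPlotter is seaborn-internal, not a stable public API.
import pandas as pd
from seaborn._base import VectorPlotter

df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [1, 4, 9, 16], "hue": list("abab")})
p = VectorPlotter(data=df, variables={"x": "x", "y": "y", "hue": "hue"})
for sub_vars, sub_data in p.iter_data("hue"):
    # sub_vars is e.g. {"hue": "a"}; sub_data holds the matching rows
    print(sub_vars, len(sub_data))
```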
seaborn | 21 | seaborn/axisgrid.py | def add_legend(self, legend_data=None, title=None, label_order=None,
adjust_subtitles=False, **kwargs):
"""Draw a legend, maybe placing it outside axes and resizing the figure.
Parameters
----------
legend_data : dict
Dictionary mapping label names (or two-element tuples where the
second element is a label name) to matplotlib artist handles. The
default reads from ``self._legend_data``.
title : string
Title for the legend. The default reads from ``self._hue_var``.
label_order : list of labels
The order that the legend entries should appear in. The default
reads from ``self.hue_names``.
adjust_subtitles : bool
If True, modify entries with invisible artists to left-align
the labels and set the font size to that of a title.
kwargs : key, value pairings
Other keyword arguments are passed to the underlying legend methods
on the Figure or Axes object.
Returns
-------
self : Grid instance
Returns self for easy chaining.
"""
| /usr/src/app/target_test_cases/failed_tests_axisgrid.Grid.add_legend.txt | def add_legend(self, legend_data=None, title=None, label_order=None,
adjust_subtitles=False, **kwargs):
"""Draw a legend, maybe placing it outside axes and resizing the figure.
Parameters
----------
legend_data : dict
Dictionary mapping label names (or two-element tuples where the
second element is a label name) to matplotlib artist handles. The
default reads from ``self._legend_data``.
title : string
Title for the legend. The default reads from ``self._hue_var``.
label_order : list of labels
The order that the legend entries should appear in. The default
reads from ``self.hue_names``.
adjust_subtitles : bool
If True, modify entries with invisible artists to left-align
the labels and set the font size to that of a title.
kwargs : key, value pairings
Other keyword arguments are passed to the underlying legend methods
on the Figure or Axes object.
Returns
-------
self : Grid instance
Returns self for easy chaining.
"""
# Find the data for the legend
if legend_data is None:
legend_data = self._legend_data
if label_order is None:
if self.hue_names is None:
label_order = list(legend_data.keys())
else:
label_order = list(map(utils.to_utf8, self.hue_names))
blank_handle = mpl.patches.Patch(alpha=0, linewidth=0)
handles = [legend_data.get(lab, blank_handle) for lab in label_order]
title = self._hue_var if title is None else title
title_size = mpl.rcParams["legend.title_fontsize"]
# Unpack nested labels from a hierarchical legend
labels = []
for entry in label_order:
if isinstance(entry, tuple):
_, label = entry
else:
label = entry
labels.append(label)
# Set default legend kwargs
kwargs.setdefault("scatterpoints", 1)
if self._legend_out:
kwargs.setdefault("frameon", False)
kwargs.setdefault("loc", "center right")
# Draw a full-figure legend outside the grid
figlegend = self._figure.legend(handles, labels, **kwargs)
self._legend = figlegend
figlegend.set_title(title, prop={"size": title_size})
if adjust_subtitles:
adjust_legend_subtitles(figlegend)
# Draw the plot to set the bounding boxes correctly
_draw_figure(self._figure)
# Calculate and set the new width of the figure so the legend fits
legend_width = figlegend.get_window_extent().width / self._figure.dpi
fig_width, fig_height = self._figure.get_size_inches()
self._figure.set_size_inches(fig_width + legend_width, fig_height)
# Draw the plot again to get the new transformations
_draw_figure(self._figure)
# Now calculate how much space we need on the right side
legend_width = figlegend.get_window_extent().width / self._figure.dpi
space_needed = legend_width / (fig_width + legend_width)
margin = .04 if self._margin_titles else .01
self._space_needed = margin + space_needed
right = 1 - self._space_needed
# Place the subplot axes to give space for the legend
self._figure.subplots_adjust(right=right)
self._tight_layout_rect[2] = right
else:
# Draw a legend in the first axis
ax = self.axes.flat[0]
kwargs.setdefault("loc", "best")
leg = ax.legend(handles, labels, **kwargs)
leg.set_title(title, prop={"size": title_size})
self._legend = leg
if adjust_subtitles:
adjust_legend_subtitles(leg)
return self
| axisgrid.Grid.add_legend | Repo-Level |
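A minimal usage sketch for `add_legend` on a `FacetGrid`; `load_dataset("tips")` is an example dataset that may be fetched over the network.

```python
import matplotlib.pyplot as plt
import seaborn as sns

tips = sns.load_dataset("tips")  # bundled example data; may require network access
g = sns.FacetGrid(tips, col="time", hue="sex")
g.map(plt.scatter, "total_bill", "tip")
# The legend is drawn outside the axes and the figure is widened to fit it
g.add_legend(title="Sex")
```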
seaborn | 28 | seaborn/axisgrid.py | def __init__(
self, data, *, hue=None, vars=None, x_vars=None, y_vars=None,
hue_order=None, palette=None, hue_kws=None, corner=False, diag_sharey=True,
height=2.5, aspect=1, layout_pad=.5, despine=True, dropna=False,
):
"""Initialize the plot figure and PairGrid object.
Parameters
----------
data : DataFrame
Tidy (long-form) dataframe where each column is a variable and
each row is an observation.
hue : string (variable name)
Variable in ``data`` to map plot aspects to different colors. This
variable will be excluded from the default x and y variables.
vars : list of variable names
Variables within ``data`` to use, otherwise use every column with
a numeric datatype.
{x, y}_vars : lists of variable names
Variables within ``data`` to use separately for the rows and
columns of the figure; i.e. to make a non-square plot.
hue_order : list of strings
Order for the levels of the hue variable in the palette
palette : dict or seaborn color palette
Set of colors for mapping the ``hue`` variable. If a dict, keys
should be values in the ``hue`` variable.
hue_kws : dictionary of param -> list of values mapping
Other keyword arguments to insert into the plotting call to let
other plot attributes vary across levels of the hue variable (e.g.
the markers in a scatterplot).
corner : bool
If True, don't add axes to the upper (off-diagonal) triangle of the
grid, making this a "corner" plot.
height : scalar
Height (in inches) of each facet.
aspect : scalar
Aspect * height gives the width (in inches) of each facet.
layout_pad : scalar
Padding between axes; passed to ``fig.tight_layout``.
despine : boolean
Remove the top and right spines from the plots.
dropna : boolean
Drop missing values from the data before plotting.
See Also
--------
pairplot : Easily drawing common uses of :class:`PairGrid`.
FacetGrid : Subplot grid for plotting conditional relationships.
Examples
--------
.. include:: ../docstrings/PairGrid.rst
"""
| /usr/src/app/target_test_cases/failed_tests_axisgrid.PairGrid.__init__.txt | def __init__(
self, data, *, hue=None, vars=None, x_vars=None, y_vars=None,
hue_order=None, palette=None, hue_kws=None, corner=False, diag_sharey=True,
height=2.5, aspect=1, layout_pad=.5, despine=True, dropna=False,
):
"""Initialize the plot figure and PairGrid object.
Parameters
----------
data : DataFrame
Tidy (long-form) dataframe where each column is a variable and
each row is an observation.
hue : string (variable name)
Variable in ``data`` to map plot aspects to different colors. This
variable will be excluded from the default x and y variables.
vars : list of variable names
Variables within ``data`` to use, otherwise use every column with
a numeric datatype.
{x, y}_vars : lists of variable names
Variables within ``data`` to use separately for the rows and
columns of the figure; i.e. to make a non-square plot.
hue_order : list of strings
Order for the levels of the hue variable in the palette
palette : dict or seaborn color palette
Set of colors for mapping the ``hue`` variable. If a dict, keys
should be values in the ``hue`` variable.
hue_kws : dictionary of param -> list of values mapping
Other keyword arguments to insert into the plotting call to let
other plot attributes vary across levels of the hue variable (e.g.
the markers in a scatterplot).
corner : bool
If True, don't add axes to the upper (off-diagonal) triangle of the
grid, making this a "corner" plot.
height : scalar
Height (in inches) of each facet.
aspect : scalar
Aspect * height gives the width (in inches) of each facet.
layout_pad : scalar
Padding between axes; passed to ``fig.tight_layout``.
despine : boolean
Remove the top and right spines from the plots.
dropna : boolean
Drop missing values from the data before plotting.
See Also
--------
pairplot : Easily drawing common uses of :class:`PairGrid`.
FacetGrid : Subplot grid for plotting conditional relationships.
Examples
--------
.. include:: ../docstrings/PairGrid.rst
"""
super().__init__()
data = handle_data_source(data)
# Sort out the variables that define the grid
numeric_cols = self._find_numeric_cols(data)
if hue in numeric_cols:
numeric_cols.remove(hue)
if vars is not None:
x_vars = list(vars)
y_vars = list(vars)
if x_vars is None:
x_vars = numeric_cols
if y_vars is None:
y_vars = numeric_cols
if np.isscalar(x_vars):
x_vars = [x_vars]
if np.isscalar(y_vars):
y_vars = [y_vars]
self.x_vars = x_vars = list(x_vars)
self.y_vars = y_vars = list(y_vars)
self.square_grid = self.x_vars == self.y_vars
if not x_vars:
raise ValueError("No variables found for grid columns.")
if not y_vars:
raise ValueError("No variables found for grid rows.")
# Create the figure and the array of subplots
figsize = len(x_vars) * height * aspect, len(y_vars) * height
with _disable_autolayout():
fig = plt.figure(figsize=figsize)
axes = fig.subplots(len(y_vars), len(x_vars),
sharex="col", sharey="row",
squeeze=False)
# Possibly remove upper axes to make a corner grid
# Note: setting up the axes is usually the most time-intensive part
# of using the PairGrid. We are foregoing the speed improvement that
# we would get by just not setting up the hidden axes so that we can
# avoid implementing fig.subplots ourselves. But worth thinking about.
self._corner = corner
if corner:
hide_indices = np.triu_indices_from(axes, 1)
for i, j in zip(*hide_indices):
axes[i, j].remove()
axes[i, j] = None
self._figure = fig
self.axes = axes
self.data = data
# Save what we are going to do with the diagonal
self.diag_sharey = diag_sharey
self.diag_vars = None
self.diag_axes = None
self._dropna = dropna
# Label the axes
self._add_axis_labels()
# Sort out the hue variable
self._hue_var = hue
if hue is None:
self.hue_names = hue_order = ["_nolegend_"]
self.hue_vals = pd.Series(["_nolegend_"] * len(data),
index=data.index)
else:
# We need hue_order and hue_names because the former is used to control
# the order of drawing and the latter is used to control the order of
# the legend. hue_names can become string-typed while hue_order must
# retain the type of the input data. This is messy but results from
# the fact that PairGrid can implement the hue-mapping logic itself
# (and was originally written exclusively that way) but now can delegate
# to the axes-level functions, while always handling legend creation.
# See GH2307
hue_names = hue_order = categorical_order(data[hue], hue_order)
if dropna:
# Filter NA from the list of unique hue names
hue_names = list(filter(pd.notnull, hue_names))
self.hue_names = hue_names
self.hue_vals = data[hue]
# Additional dict of kwarg -> list of values for mapping the hue var
self.hue_kws = hue_kws if hue_kws is not None else {}
self._orig_palette = palette
self._hue_order = hue_order
self.palette = self._get_palette(data, hue, hue_order, palette)
self._legend_data = {}
# Make the plot look nice
for ax in axes[:-1, :].flat:
if ax is None:
continue
for label in ax.get_xticklabels():
label.set_visible(False)
ax.xaxis.offsetText.set_visible(False)
ax.xaxis.label.set_visible(False)
for ax in axes[:, 1:].flat:
if ax is None:
continue
for label in ax.get_yticklabels():
label.set_visible(False)
ax.yaxis.offsetText.set_visible(False)
ax.yaxis.label.set_visible(False)
self._tight_layout_rect = [.01, .01, .99, .99]
self._tight_layout_pad = layout_pad
self._despine = despine
if despine:
utils.despine(fig=fig)
self.tight_layout(pad=layout_pad)
| axisgrid.PairGrid.__init__ | Repo-Level |
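A minimal sketch of constructing a `PairGrid` directly and mapping plots onto it; the "penguins" dataset is just an example.

```python
import seaborn as sns

penguins = sns.load_dataset("penguins")  # example data; may require network access
g = sns.PairGrid(penguins, hue="species", corner=True)
g.map_lower(sns.scatterplot)  # bivariate plots in the lower triangle
g.map_diag(sns.histplot)      # marginal distributions on the diagonal
g.add_legend()
```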
seaborn | 29 | seaborn/axisgrid.py | def pairplot(
data, *,
hue=None, hue_order=None, palette=None,
vars=None, x_vars=None, y_vars=None,
kind="scatter", diag_kind="auto", markers=None,
height=2.5, aspect=1, corner=False, dropna=False,
plot_kws=None, diag_kws=None, grid_kws=None, size=None,
):
"""Plot pairwise relationships in a dataset.
By default, this function will create a grid of Axes such that each numeric
variable in ``data`` will by shared across the y-axes across a single row and
the x-axes across a single column. The diagonal plots are treated
differently: a univariate distribution plot is drawn to show the marginal
distribution of the data in each column.
It is also possible to show a subset of variables or plot different
variables on the rows and columns.
This is a high-level interface for :class:`PairGrid` that is intended to
make it easy to draw a few common styles. You should use :class:`PairGrid`
directly if you need more flexibility.
Parameters
----------
data : `pandas.DataFrame`
Tidy (long-form) dataframe where each column is a variable and
each row is an observation.
hue : name of variable in ``data``
Variable in ``data`` to map plot aspects to different colors.
hue_order : list of strings
Order for the levels of the hue variable in the palette
palette : dict or seaborn color palette
Set of colors for mapping the ``hue`` variable. If a dict, keys
should be values in the ``hue`` variable.
vars : list of variable names
Variables within ``data`` to use, otherwise use every column with
a numeric datatype.
{x, y}_vars : lists of variable names
Variables within ``data`` to use separately for the rows and
columns of the figure; i.e. to make a non-square plot.
kind : {'scatter', 'kde', 'hist', 'reg'}
Kind of plot to make.
diag_kind : {'auto', 'hist', 'kde', None}
Kind of plot for the diagonal subplots. If 'auto', choose based on
whether or not ``hue`` is used.
markers : single matplotlib marker code or list
Either the marker to use for all scatterplot points or a list of markers
with a length the same as the number of levels in the hue variable so that
differently colored points will also have different scatterplot
markers.
height : scalar
Height (in inches) of each facet.
aspect : scalar
Aspect * height gives the width (in inches) of each facet.
corner : bool
If True, don't add axes to the upper (off-diagonal) triangle of the
grid, making this a "corner" plot.
dropna : boolean
Drop missing values from the data before plotting.
{plot, diag, grid}_kws : dicts
Dictionaries of keyword arguments. ``plot_kws`` are passed to the
bivariate plotting function, ``diag_kws`` are passed to the univariate
plotting function, and ``grid_kws`` are passed to the :class:`PairGrid`
constructor.
Returns
-------
grid : :class:`PairGrid`
Returns the underlying :class:`PairGrid` instance for further tweaking.
See Also
--------
PairGrid : Subplot grid for more flexible plotting of pairwise relationships.
JointGrid : Grid for plotting joint and marginal distributions of two variables.
Examples
--------
.. include:: ../docstrings/pairplot.rst
"""
| /usr/src/app/target_test_cases/failed_tests_axisgrid.pairplot.txt | def pairplot(
data, *,
hue=None, hue_order=None, palette=None,
vars=None, x_vars=None, y_vars=None,
kind="scatter", diag_kind="auto", markers=None,
height=2.5, aspect=1, corner=False, dropna=False,
plot_kws=None, diag_kws=None, grid_kws=None, size=None,
):
"""Plot pairwise relationships in a dataset.
By default, this function will create a grid of Axes such that each numeric
variable in ``data`` will be shared across the y-axes of a single row and
the x-axes of a single column. The diagonal plots are treated
differently: a univariate distribution plot is drawn to show the marginal
distribution of the data in each column.
It is also possible to show a subset of variables or plot different
variables on the rows and columns.
This is a high-level interface for :class:`PairGrid` that is intended to
make it easy to draw a few common styles. You should use :class:`PairGrid`
directly if you need more flexibility.
Parameters
----------
data : `pandas.DataFrame`
Tidy (long-form) dataframe where each column is a variable and
each row is an observation.
hue : name of variable in ``data``
Variable in ``data`` to map plot aspects to different colors.
hue_order : list of strings
Order for the levels of the hue variable in the palette
palette : dict or seaborn color palette
Set of colors for mapping the ``hue`` variable. If a dict, keys
should be values in the ``hue`` variable.
vars : list of variable names
Variables within ``data`` to use, otherwise use every column with
a numeric datatype.
{x, y}_vars : lists of variable names
Variables within ``data`` to use separately for the rows and
columns of the figure; i.e. to make a non-square plot.
kind : {'scatter', 'kde', 'hist', 'reg'}
Kind of plot to make.
diag_kind : {'auto', 'hist', 'kde', None}
Kind of plot for the diagonal subplots. If 'auto', choose based on
whether or not ``hue`` is used.
markers : single matplotlib marker code or list
Either the marker to use for all scatterplot points or a list of markers
with a length the same as the number of levels in the hue variable so that
differently colored points will also have different scatterplot
markers.
height : scalar
Height (in inches) of each facet.
aspect : scalar
Aspect * height gives the width (in inches) of each facet.
corner : bool
If True, don't add axes to the upper (off-diagonal) triangle of the
grid, making this a "corner" plot.
dropna : boolean
Drop missing values from the data before plotting.
{plot, diag, grid}_kws : dicts
Dictionaries of keyword arguments. ``plot_kws`` are passed to the
bivariate plotting function, ``diag_kws`` are passed to the univariate
plotting function, and ``grid_kws`` are passed to the :class:`PairGrid`
constructor.
Returns
-------
grid : :class:`PairGrid`
Returns the underlying :class:`PairGrid` instance for further tweaking.
See Also
--------
PairGrid : Subplot grid for more flexible plotting of pairwise relationships.
JointGrid : Grid for plotting joint and marginal distributions of two variables.
Examples
--------
.. include:: ../docstrings/pairplot.rst
"""
# Avoid circular import
from .distributions import histplot, kdeplot
# Handle deprecations
if size is not None:
height = size
msg = ("The `size` parameter has been renamed to `height`; "
"please update your code.")
warnings.warn(msg, UserWarning)
if not isinstance(data, pd.DataFrame):
raise TypeError(
f"'data' must be pandas DataFrame object, not: {type(data)}")
plot_kws = {} if plot_kws is None else plot_kws.copy()
diag_kws = {} if diag_kws is None else diag_kws.copy()
grid_kws = {} if grid_kws is None else grid_kws.copy()
# Resolve "auto" diag kind
if diag_kind == "auto":
if hue is None:
diag_kind = "kde" if kind == "kde" else "hist"
else:
diag_kind = "hist" if kind == "hist" else "kde"
# Set up the PairGrid
grid_kws.setdefault("diag_sharey", diag_kind == "hist")
grid = PairGrid(data, vars=vars, x_vars=x_vars, y_vars=y_vars, hue=hue,
hue_order=hue_order, palette=palette, corner=corner,
height=height, aspect=aspect, dropna=dropna, **grid_kws)
# Add the markers here as PairGrid has figured out how many levels of the
# hue variable are needed and we don't want to duplicate that process
if markers is not None:
if kind == "reg":
# Needed until regplot supports style
if grid.hue_names is None:
n_markers = 1
else:
n_markers = len(grid.hue_names)
if not isinstance(markers, list):
markers = [markers] * n_markers
if len(markers) != n_markers:
raise ValueError("markers must be a singleton or a list of "
"markers for each level of the hue variable")
grid.hue_kws = {"marker": markers}
elif kind == "scatter":
if isinstance(markers, str):
plot_kws["marker"] = markers
elif hue is not None:
plot_kws["style"] = data[hue]
plot_kws["markers"] = markers
# Draw the marginal plots on the diagonal
diag_kws = diag_kws.copy()
diag_kws.setdefault("legend", False)
if diag_kind == "hist":
grid.map_diag(histplot, **diag_kws)
elif diag_kind == "kde":
diag_kws.setdefault("fill", True)
diag_kws.setdefault("warn_singular", False)
grid.map_diag(kdeplot, **diag_kws)
# Maybe plot on the off-diagonals
if diag_kind is not None:
plotter = grid.map_offdiag
else:
plotter = grid.map
if kind == "scatter":
from .relational import scatterplot # Avoid circular import
plotter(scatterplot, **plot_kws)
elif kind == "reg":
from .regression import regplot # Avoid circular import
plotter(regplot, **plot_kws)
elif kind == "kde":
from .distributions import kdeplot # Avoid circular import
plot_kws.setdefault("warn_singular", False)
plotter(kdeplot, **plot_kws)
elif kind == "hist":
from .distributions import histplot # Avoid circular import
plotter(histplot, **plot_kws)
# Add a legend
if hue is not None:
grid.add_legend()
grid.tight_layout()
return grid
| axisgrid.pairplot | Self-Contained |
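A minimal usage sketch for `pairplot`, exercising the `hue`, `diag_kind`, and `corner` parameters documented above.

```python
import seaborn as sns

penguins = sns.load_dataset("penguins")  # example data; may require network access
sns.pairplot(penguins, hue="species", diag_kind="kde", corner=True)
```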
seaborn | 33 | seaborn/palettes.py | def color_palette(palette=None, n_colors=None, desat=None, as_cmap=False):
"""Return a list of colors or continuous colormap defining a palette.
Possible ``palette`` values include:
- Name of a seaborn palette (deep, muted, bright, pastel, dark, colorblind)
- Name of matplotlib colormap
- 'husl' or 'hls'
- 'ch:<cubehelix arguments>'
- 'light:<color>', 'dark:<color>', 'blend:<color>,<color>',
- A sequence of colors in any format matplotlib accepts
Calling this function with ``palette=None`` will return the current
matplotlib color cycle.
This function can also be used in a ``with`` statement to temporarily
set the color cycle for a plot or set of plots.
See the :ref:`tutorial <palette_tutorial>` for more information.
Parameters
----------
palette : None, string, or sequence, optional
Name of palette or None to return current palette. If a sequence, input
colors are used but possibly cycled and desaturated.
n_colors : int, optional
Number of colors in the palette. If ``None``, the default will depend
on how ``palette`` is specified. Named palettes default to 6 colors,
but grabbing the current palette or passing in a list of colors will
not change the number of colors unless this is specified. Asking for
more colors than exist in the palette will cause it to cycle. Ignored
when ``as_cmap`` is True.
desat : float, optional
Proportion to desaturate each color by.
as_cmap : bool
If True, return a :class:`matplotlib.colors.ListedColormap`.
Returns
-------
list of RGB tuples or :class:`matplotlib.colors.ListedColormap`
See Also
--------
set_palette : Set the default color cycle for all plots.
set_color_codes : Reassign color codes like ``"b"``, ``"g"``, etc. to
colors from one of the seaborn palettes.
Examples
--------
.. include:: ../docstrings/color_palette.rst
"""
| /usr/src/app/target_test_cases/failed_tests_color_palette.txt | def color_palette(palette=None, n_colors=None, desat=None, as_cmap=False):
"""Return a list of colors or continuous colormap defining a palette.
Possible ``palette`` values include:
- Name of a seaborn palette (deep, muted, bright, pastel, dark, colorblind)
- Name of matplotlib colormap
- 'husl' or 'hls'
- 'ch:<cubehelix arguments>'
- 'light:<color>', 'dark:<color>', 'blend:<color>,<color>',
- A sequence of colors in any format matplotlib accepts
Calling this function with ``palette=None`` will return the current
matplotlib color cycle.
This function can also be used in a ``with`` statement to temporarily
set the color cycle for a plot or set of plots.
See the :ref:`tutorial <palette_tutorial>` for more information.
Parameters
----------
palette : None, string, or sequence, optional
Name of palette or None to return current palette. If a sequence, input
colors are used but possibly cycled and desaturated.
n_colors : int, optional
Number of colors in the palette. If ``None``, the default will depend
on how ``palette`` is specified. Named palettes default to 6 colors,
but grabbing the current palette or passing in a list of colors will
not change the number of colors unless this is specified. Asking for
more colors than exist in the palette will cause it to cycle. Ignored
when ``as_cmap`` is True.
desat : float, optional
Proportion to desaturate each color by.
as_cmap : bool
If True, return a :class:`matplotlib.colors.ListedColormap`.
Returns
-------
list of RGB tuples or :class:`matplotlib.colors.ListedColormap`
See Also
--------
set_palette : Set the default color cycle for all plots.
set_color_codes : Reassign color codes like ``"b"``, ``"g"``, etc. to
colors from one of the seaborn palettes.
Examples
--------
.. include:: ../docstrings/color_palette.rst
"""
if palette is None:
palette = get_color_cycle()
if n_colors is None:
n_colors = len(palette)
elif not isinstance(palette, str):
palette = palette
if n_colors is None:
n_colors = len(palette)
else:
if n_colors is None:
# Use all colors in a qualitative palette or 6 of another kind
n_colors = QUAL_PALETTE_SIZES.get(palette, 6)
if palette in SEABORN_PALETTES:
# Named "seaborn variant" of matplotlib default color cycle
palette = SEABORN_PALETTES[palette]
elif palette == "hls":
# Evenly spaced colors in cylindrical RGB space
palette = hls_palette(n_colors, as_cmap=as_cmap)
elif palette == "husl":
# Evenly spaced colors in cylindrical Lab space
palette = husl_palette(n_colors, as_cmap=as_cmap)
elif palette.lower() == "jet":
# Paternalism
raise ValueError("No.")
elif palette.startswith("ch:"):
# Cubehelix palette with params specified in string
args, kwargs = _parse_cubehelix_args(palette)
palette = cubehelix_palette(n_colors, *args, **kwargs, as_cmap=as_cmap)
elif palette.startswith("light:"):
# light palette to color specified in string
_, color = palette.split(":")
reverse = color.endswith("_r")
if reverse:
color = color[:-2]
palette = light_palette(color, n_colors, reverse=reverse, as_cmap=as_cmap)
elif palette.startswith("dark:"):
# dark palette to color specified in string
_, color = palette.split(":")
reverse = color.endswith("_r")
if reverse:
color = color[:-2]
palette = dark_palette(color, n_colors, reverse=reverse, as_cmap=as_cmap)
elif palette.startswith("blend:"):
# blend palette between colors specified in string
_, colors = palette.split(":")
colors = colors.split(",")
palette = blend_palette(colors, n_colors, as_cmap=as_cmap)
else:
try:
# Perhaps a named matplotlib colormap?
palette = mpl_palette(palette, n_colors, as_cmap=as_cmap)
except (ValueError, KeyError): # Error class changed in mpl36
raise ValueError(f"{palette!r} is not a valid palette name")
if desat is not None:
palette = [desaturate(c, desat) for c in palette]
if not as_cmap:
# Always return as many colors as we asked for
pal_cycle = cycle(palette)
palette = [next(pal_cycle) for _ in range(n_colors)]
# Always return in r, g, b tuple format
try:
palette = map(mpl.colors.colorConverter.to_rgb, palette)
palette = _ColorPalette(palette)
except ValueError:
raise ValueError(f"Could not generate a palette for {palette}")
return palette
| color_palette | Repo-Level |
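A few representative palette specifications from the list above, plus the context-manager usage the docstring mentions; the particular color strings are arbitrary examples.

```python
import seaborn as sns

pal = sns.color_palette("deep")                                # named seaborn palette
cmap = sns.color_palette("ch:start=.2,rot=-.3", as_cmap=True)  # cubehelix spec
blend = sns.color_palette("blend:#7AB,#EDA", 6)                # interpolate two colors
with sns.color_palette("husl", 8):
    pass  # plots drawn inside this block use the husl color cycle
```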
seaborn | 71 | seaborn/utils.py | def despine(fig=None, ax=None, top=True, right=True, left=False,
bottom=False, offset=None, trim=False):
"""Remove the top and right spines from plot(s).
Parameters
----------
fig : matplotlib figure, optional
Figure to despine all axes of, defaults to the current figure.
ax : matplotlib axes, optional
Specific axes object to despine. Ignored if fig is provided.
top, right, left, bottom : boolean, optional
If True, remove that spine.
offset : int or dict, optional
Absolute distance, in points, spines should be moved away
from the axes (negative values move spines inward). A single value
applies to all spines; a dict can be used to set offset values per
side.
trim : bool, optional
If True, limit spines to the smallest and largest major tick
on each non-despined axis.
Returns
-------
None
"""
| /usr/src/app/target_test_cases/failed_tests_utils.despine.txt | def despine(fig=None, ax=None, top=True, right=True, left=False,
bottom=False, offset=None, trim=False):
"""Remove the top and right spines from plot(s).
Parameters
----------
fig : matplotlib figure, optional
Figure to despine all axes of, defaults to the current figure.
ax : matplotlib axes, optional
Specific axes object to despine. Ignored if fig is provided.
top, right, left, bottom : boolean, optional
If True, remove that spine.
offset : int or dict, optional
Absolute distance, in points, spines should be moved away
from the axes (negative values move spines inward). A single value
applies to all spines; a dict can be used to set offset values per
side.
trim : bool, optional
If True, limit spines to the smallest and largest major tick
on each non-despined axis.
Returns
-------
None
"""
# Get references to the axes we want
if fig is None and ax is None:
axes = plt.gcf().axes
elif fig is not None:
axes = fig.axes
elif ax is not None:
axes = [ax]
for ax_i in axes:
for side in ["top", "right", "left", "bottom"]:
# Toggle the spine objects
is_visible = not locals()[side]
ax_i.spines[side].set_visible(is_visible)
if offset is not None and is_visible:
try:
val = offset.get(side, 0)
except AttributeError:
val = offset
ax_i.spines[side].set_position(('outward', val))
# Potentially move the ticks
if left and not right:
maj_on = any(
t.tick1line.get_visible()
for t in ax_i.yaxis.majorTicks
)
min_on = any(
t.tick1line.get_visible()
for t in ax_i.yaxis.minorTicks
)
ax_i.yaxis.set_ticks_position("right")
for t in ax_i.yaxis.majorTicks:
t.tick2line.set_visible(maj_on)
for t in ax_i.yaxis.minorTicks:
t.tick2line.set_visible(min_on)
if bottom and not top:
maj_on = any(
t.tick1line.get_visible()
for t in ax_i.xaxis.majorTicks
)
min_on = any(
t.tick1line.get_visible()
for t in ax_i.xaxis.minorTicks
)
ax_i.xaxis.set_ticks_position("top")
for t in ax_i.xaxis.majorTicks:
t.tick2line.set_visible(maj_on)
for t in ax_i.xaxis.minorTicks:
t.tick2line.set_visible(min_on)
if trim:
# clip off the parts of the spines that extend past major ticks
xticks = np.asarray(ax_i.get_xticks())
if xticks.size:
firsttick = np.compress(xticks >= min(ax_i.get_xlim()),
xticks)[0]
lasttick = np.compress(xticks <= max(ax_i.get_xlim()),
xticks)[-1]
ax_i.spines['bottom'].set_bounds(firsttick, lasttick)
ax_i.spines['top'].set_bounds(firsttick, lasttick)
newticks = xticks.compress(xticks <= lasttick)
newticks = newticks.compress(newticks >= firsttick)
ax_i.set_xticks(newticks)
yticks = np.asarray(ax_i.get_yticks())
if yticks.size:
firsttick = np.compress(yticks >= min(ax_i.get_ylim()),
yticks)[0]
lasttick = np.compress(yticks <= max(ax_i.get_ylim()),
yticks)[-1]
ax_i.spines['left'].set_bounds(firsttick, lasttick)
ax_i.spines['right'].set_bounds(firsttick, lasttick)
newticks = yticks.compress(yticks <= lasttick)
newticks = newticks.compress(newticks >= firsttick)
ax_i.set_yticks(newticks)
| utils.despine | Self-Contained |
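A minimal sketch combining the `offset` and `trim` options on a single axes.

```python
import matplotlib.pyplot as plt
import seaborn as sns

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])
# Remove top/right spines, move the remaining ones outward, trim to tick range
sns.despine(ax=ax, offset=10, trim=True)
```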
seaborn | 73 | seaborn/utils.py | def move_legend(obj, loc, **kwargs):
"""
Recreate a plot's legend at a new location.
The name is a slight misnomer. Matplotlib legends do not expose public
control over their position parameters. So this function creates a new legend,
copying over the data from the original object, which is then removed.
Parameters
----------
obj : the object with the plot
This argument can be either a seaborn or matplotlib object:
- :class:`seaborn.FacetGrid` or :class:`seaborn.PairGrid`
- :class:`matplotlib.axes.Axes` or :class:`matplotlib.figure.Figure`
loc : str or int
Location argument, as in :meth:`matplotlib.axes.Axes.legend`.
kwargs
Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.legend`.
Examples
--------
.. include:: ../docstrings/move_legend.rst
"""
| /usr/src/app/target_test_cases/failed_tests_utils.move_legend.txt | def move_legend(obj, loc, **kwargs):
"""
Recreate a plot's legend at a new location.
The name is a slight misnomer. Matplotlib legends do not expose public
control over their position parameters. So this function creates a new legend,
copying over the data from the original object, which is then removed.
Parameters
----------
obj : the object with the plot
This argument can be either a seaborn or matplotlib object:
- :class:`seaborn.FacetGrid` or :class:`seaborn.PairGrid`
- :class:`matplotlib.axes.Axes` or :class:`matplotlib.figure.Figure`
loc : str or int
Location argument, as in :meth:`matplotlib.axes.Axes.legend`.
kwargs
Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.legend`.
Examples
--------
.. include:: ../docstrings/move_legend.rst
"""
# This is a somewhat hackish solution that will hopefully be obviated by
# upstream improvements to matplotlib legends that make them easier to
# modify after creation.
from seaborn.axisgrid import Grid # Avoid circular import
# Locate the legend object and a method to recreate the legend
if isinstance(obj, Grid):
old_legend = obj.legend
legend_func = obj.figure.legend
elif isinstance(obj, mpl.axes.Axes):
old_legend = obj.legend_
legend_func = obj.legend
elif isinstance(obj, mpl.figure.Figure):
if obj.legends:
old_legend = obj.legends[-1]
else:
old_legend = None
legend_func = obj.legend
else:
err = "`obj` must be a seaborn Grid or matplotlib Axes or Figure instance."
raise TypeError(err)
if old_legend is None:
err = f"{obj} has no legend attached."
raise ValueError(err)
# Extract the components of the legend we need to reuse
# Import here to avoid a circular import
from seaborn._compat import get_legend_handles
handles = get_legend_handles(old_legend)
labels = [t.get_text() for t in old_legend.get_texts()]
# Handle the case where the user is trying to override the labels
if (new_labels := kwargs.pop("labels", None)) is not None:
if len(new_labels) != len(labels):
err = "Length of new labels does not match existing legend."
raise ValueError(err)
labels = new_labels
# Extract legend properties that can be passed to the recreation method
# (Vexingly, these don't all round-trip)
legend_kws = inspect.signature(mpl.legend.Legend).parameters
props = {k: v for k, v in old_legend.properties().items() if k in legend_kws}
# Delegate default bbox_to_anchor rules to matplotlib
props.pop("bbox_to_anchor")
# Try to propagate the existing title and font properties; respect new ones too
title = props.pop("title")
if "title" in kwargs:
title.set_text(kwargs.pop("title"))
title_kwargs = {k: v for k, v in kwargs.items() if k.startswith("title_")}
for key, val in title_kwargs.items():
title.set(**{key[6:]: val})
kwargs.pop(key)
# Try to respect the frame visibility
kwargs.setdefault("frameon", old_legend.legendPatch.get_visible())
# Remove the old legend and create the new one
props.update(kwargs)
old_legend.remove()
new_legend = legend_func(handles, labels, loc=loc, **props)
new_legend.set_title(title.get_text(), title.get_fontproperties())
# Let the Grid object continue to track the correct legend object
if isinstance(obj, Grid):
obj._legend = new_legend
| utils.move_legend | Self-Contained |
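A minimal sketch: recreate an axes-level legend outside the plot area, forwarding `bbox_to_anchor` to the underlying matplotlib legend call.

```python
import seaborn as sns

tips = sns.load_dataset("tips")  # example data; may require network access
ax = sns.scatterplot(data=tips, x="total_bill", y="tip", hue="day")
sns.move_legend(ax, "upper left", bbox_to_anchor=(1, 1), title="Day")
```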
scikit-learn | 0 | sklearn/linear_model/_bayes.py | def fit(self, X, y):
"""Fit the model according to the given training data and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values. Will be cast to X's dtype if necessary.
Returns
-------
self : object
Fitted estimator.
"""
| /usr/src/app/target_test_cases/failed_tests_ARDRegression.fit.txt | def fit(self, X, y):
"""Fit the model according to the given training data and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values. Will be cast to X's dtype if necessary.
Returns
-------
self : object
Fitted estimator.
"""
X, y = validate_data(
self,
X,
y,
dtype=[np.float64, np.float32],
force_writeable=True,
y_numeric=True,
ensure_min_samples=2,
)
dtype = X.dtype
n_samples, n_features = X.shape
coef_ = np.zeros(n_features, dtype=dtype)
X, y, X_offset_, y_offset_, X_scale_ = _preprocess_data(
X, y, fit_intercept=self.fit_intercept, copy=self.copy_X
)
self.X_offset_ = X_offset_
self.X_scale_ = X_scale_
# Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
# Initialization of the values of the parameters
eps = np.finfo(np.float64).eps
# Add `eps` in the denominator to avoid division by zero if `np.var(y)`
# is zero.
# Explicitly set dtype to avoid unintended type promotion with numpy 2.
alpha_ = np.asarray(1.0 / (np.var(y) + eps), dtype=dtype)
lambda_ = np.ones(n_features, dtype=dtype)
self.scores_ = list()
coef_old_ = None
def update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_):
coef_[keep_lambda] = alpha_ * np.linalg.multi_dot(
[sigma_, X[:, keep_lambda].T, y]
)
return coef_
update_sigma = (
self._update_sigma
if n_samples >= n_features
else self._update_sigma_woodbury
)
# Iterative procedure of ARDRegression
for iter_ in range(self.max_iter):
sigma_ = update_sigma(X, alpha_, lambda_, keep_lambda)
coef_ = update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_)
# Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1.0 - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = (gamma_ + 2.0 * lambda_1) / (
(coef_[keep_lambda]) ** 2 + 2.0 * lambda_2
)
alpha_ = (n_samples - gamma_.sum() + 2.0 * alpha_1) / (
rmse_ + 2.0 * alpha_2
)
# Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
# Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (
fast_logdet(sigma_)
+ n_samples * log(alpha_)
+ np.sum(np.log(lambda_))
)
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_**2).sum())
self.scores_.append(s)
# Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
if not keep_lambda.any():
break
self.n_iter_ = iter_ + 1
if keep_lambda.any():
# update sigma and mu using updated params from the last iteration
sigma_ = update_sigma(X, alpha_, lambda_, keep_lambda)
coef_ = update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_)
else:
sigma_ = np.array([]).reshape(0, 0)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_offset_, y_offset_, X_scale_)
return self
| ARDRegression.fit | Repo-Level |
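A minimal usage sketch on synthetic data where only a few features carry signal, so the pruning controlled by `threshold_lambda` shows up as zeros in `coef_`.

```python
import numpy as np
from sklearn.linear_model import ARDRegression

rng = np.random.RandomState(0)
X = rng.randn(100, 10)
# Only the first three features are informative; ARD should zero the rest
y = X[:, :3] @ np.array([1.5, -2.0, 3.0]) + 0.1 * rng.randn(100)
reg = ARDRegression(compute_score=True).fit(X, y)
print(reg.coef_.round(2))
```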
scikit-learn | 9 | sklearn/linear_model/_bayes.py | def fit(self, X, y, sample_weight=None):
"""Fit the model.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values. Will be cast to X's dtype if necessary.
sample_weight : ndarray of shape (n_samples,), default=None
Individual weights for each sample.
.. versionadded:: 0.20
parameter *sample_weight* support to BayesianRidge.
Returns
-------
self : object
Returns the instance itself.
"""
| /usr/src/app/target_test_cases/failed_tests_BayesianRidge.fit.txt | def fit(self, X, y, sample_weight=None):
"""Fit the model.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values. Will be cast to X's dtype if necessary.
sample_weight : ndarray of shape (n_samples,), default=None
Individual weights for each sample.
.. versionadded:: 0.20
parameter *sample_weight* support to BayesianRidge.
Returns
-------
self : object
Returns the instance itself.
"""
X, y = validate_data(
self,
X,
y,
dtype=[np.float64, np.float32],
force_writeable=True,
y_numeric=True,
)
dtype = X.dtype
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=dtype)
X, y, X_offset_, y_offset_, X_scale_ = _preprocess_data(
X,
y,
fit_intercept=self.fit_intercept,
copy=self.copy_X,
sample_weight=sample_weight,
)
if sample_weight is not None:
# Sample weight can be implemented via a simple rescaling.
X, y, _ = _rescale_data(X, y, sample_weight)
self.X_offset_ = X_offset_
self.X_scale_ = X_scale_
n_samples, n_features = X.shape
# Initialization of the values of the parameters
eps = np.finfo(np.float64).eps
# Add `eps` in the denominator to avoid division by zero if `np.var(y)`
# is zero
alpha_ = self.alpha_init
lambda_ = self.lambda_init
if alpha_ is None:
alpha_ = 1.0 / (np.var(y) + eps)
if lambda_ is None:
lambda_ = 1.0
# Avoid unintended type promotion to float64 with numpy 2
alpha_ = np.asarray(alpha_, dtype=dtype)
lambda_ = np.asarray(lambda_, dtype=dtype)
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S**2
# Convergence loop of the bayesian ridge regression
for iter_ in range(self.max_iter):
# update posterior mean coef_ based on alpha_ and lambda_ and
# compute corresponding rmse
coef_, rmse_ = self._update_coef_(
X, y, n_samples, n_features, XT_y, U, Vh, eigen_vals_, alpha_, lambda_
)
if self.compute_score:
# compute the log marginal likelihood
s = self._log_marginal_likelihood(
n_samples, n_features, eigen_vals_, alpha_, lambda_, coef_, rmse_
)
self.scores_.append(s)
# Update alpha and lambda according to (MacKay, 1992)
gamma_ = np.sum((alpha_ * eigen_vals_) / (lambda_ + alpha_ * eigen_vals_))
lambda_ = (gamma_ + 2 * lambda_1) / (np.sum(coef_**2) + 2 * lambda_2)
alpha_ = (n_samples - gamma_ + 2 * alpha_1) / (rmse_ + 2 * alpha_2)
# Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.n_iter_ = iter_ + 1
# return regularization parameters and corresponding posterior mean,
# log marginal likelihood and posterior covariance
self.alpha_ = alpha_
self.lambda_ = lambda_
self.coef_, rmse_ = self._update_coef_(
X, y, n_samples, n_features, XT_y, U, Vh, eigen_vals_, alpha_, lambda_
)
if self.compute_score:
# compute the log marginal likelihood
s = self._log_marginal_likelihood(
n_samples, n_features, eigen_vals_, alpha_, lambda_, coef_, rmse_
)
self.scores_.append(s)
self.scores_ = np.array(self.scores_)
# posterior covariance is given by 1/alpha_ * scaled_sigma_
scaled_sigma_ = np.dot(
Vh.T, Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis]
)
self.sigma_ = (1.0 / alpha_) * scaled_sigma_
self._set_intercept(X_offset_, y_offset_, X_scale_)
return self
| BayesianRidge.fit | Repo-Level |
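A minimal usage sketch, including the `sample_weight` path documented above.

```python
import numpy as np
from sklearn.linear_model import BayesianRidge

rng = np.random.RandomState(0)
X = rng.randn(50, 4)
y = X @ np.array([1.0, 0.5, -1.0, 2.0]) + 0.1 * rng.randn(50)
w = rng.uniform(0.5, 1.5, size=50)
reg = BayesianRidge(compute_score=True).fit(X, y, sample_weight=w)
print(reg.alpha_, reg.lambda_)  # estimated noise and weight precisions
```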
scikit-learn | 14 | sklearn/cluster/_bisect_k_means.py | def fit(self, X, y=None, sample_weight=None):
"""Compute bisecting k-means clustering.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training instances to cluster.
.. note:: The data will be converted to C ordering,
which will cause a memory copy
if the given data is not C-contiguous.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight. `sample_weight` is not used during
initialization if `init` is a callable.
Returns
-------
self
Fitted estimator.
"""
| /usr/src/app/target_test_cases/failed_tests_BisectingKMeans.fit.txt | def fit(self, X, y=None, sample_weight=None):
"""Compute bisecting k-means clustering.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training instances to cluster.
.. note:: The data will be converted to C ordering,
which will cause a memory copy
if the given data is not C-contiguous.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight. `sample_weight` is not used during
initialization if `init` is a callable.
Returns
-------
self
Fitted estimator.
"""
X = validate_data(
self,
X,
accept_sparse="csr",
dtype=[np.float64, np.float32],
order="C",
copy=self.copy_x,
accept_large_sparse=False,
)
self._check_params_vs_input(X)
self._random_state = check_random_state(self.random_state)
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
self._n_threads = _openmp_effective_n_threads()
if self.algorithm == "lloyd" or self.n_clusters == 1:
self._kmeans_single = _kmeans_single_lloyd
self._check_mkl_vcomp(X, X.shape[0])
else:
self._kmeans_single = _kmeans_single_elkan
# Subtract the mean of X for more accurate distance computations
if not sp.issparse(X):
self._X_mean = X.mean(axis=0)
X -= self._X_mean
# Initialize the hierarchical clusters tree
self._bisecting_tree = _BisectingTree(
indices=np.arange(X.shape[0]),
center=X.mean(axis=0),
score=0,
)
x_squared_norms = row_norms(X, squared=True)
for _ in range(self.n_clusters - 1):
# Choose the cluster to bisect
cluster_to_bisect = self._bisecting_tree.get_cluster_to_bisect()
# Split this cluster into 2 subclusters
self._bisect(X, x_squared_norms, sample_weight, cluster_to_bisect)
# Aggregate final labels and centers from the bisecting tree
self.labels_ = np.full(X.shape[0], -1, dtype=np.int32)
self.cluster_centers_ = np.empty((self.n_clusters, X.shape[1]), dtype=X.dtype)
for i, cluster_node in enumerate(self._bisecting_tree.iter_leaves()):
self.labels_[cluster_node.indices] = i
self.cluster_centers_[i] = cluster_node.center
cluster_node.label = i # label final clusters for future prediction
cluster_node.indices = None # release memory
# Restore original data
if not sp.issparse(X):
X += self._X_mean
self.cluster_centers_ += self._X_mean
_inertia = _inertia_sparse if sp.issparse(X) else _inertia_dense
self.inertia_ = _inertia(
X, sample_weight, self.cluster_centers_, self.labels_, self._n_threads
)
self._n_features_out = self.cluster_centers_.shape[0]
return self
| BisectingKMeans.fit | Repo-Level |
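A minimal usage sketch on a toy array; three well-separated pairs of points should come out as the three leaves of the bisecting tree.

```python
import numpy as np
from sklearn.cluster import BisectingKMeans

X = np.array([[1, 1], [1, 2], [10, 10], [10, 11], [20, 20], [20, 21]], dtype=float)
bkm = BisectingKMeans(n_clusters=3, random_state=0).fit(X)
print(bkm.labels_)
print(bkm.inertia_)
```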
scikit-learn | 15 | sklearn/calibration.py | def fit(self, X, y, sample_weight=None, **fit_params):
"""Fit the calibrated model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
**fit_params : dict
Parameters to pass to the `fit` method of the underlying
classifier.
Returns
-------
self : object
Returns an instance of self.
"""
| /usr/src/app/target_test_cases/failed_tests_CalibratedClassifierCV.fit.txt | def fit(self, X, y, sample_weight=None, **fit_params):
"""Fit the calibrated model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
**fit_params : dict
Parameters to pass to the `fit` method of the underlying
classifier.
Returns
-------
self : object
Returns an instance of self.
"""
check_classification_targets(y)
X, y = indexable(X, y)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
estimator = self._get_estimator()
self.calibrated_classifiers_ = []
if self.cv == "prefit":
# `classes_` should be consistent with that of estimator
check_is_fitted(self.estimator, attributes=["classes_"])
self.classes_ = self.estimator.classes_
predictions, _ = _get_response_values(
estimator,
X,
response_method=["decision_function", "predict_proba"],
)
if predictions.ndim == 1:
# Reshape binary output from `(n_samples,)` to `(n_samples, 1)`
predictions = predictions.reshape(-1, 1)
calibrated_classifier = _fit_calibrator(
estimator,
predictions,
y,
self.classes_,
self.method,
sample_weight,
)
self.calibrated_classifiers_.append(calibrated_classifier)
else:
# Set `classes_` using all `y`
label_encoder_ = LabelEncoder().fit(y)
self.classes_ = label_encoder_.classes_
if _routing_enabled():
routed_params = process_routing(
self,
"fit",
sample_weight=sample_weight,
**fit_params,
)
else:
# sample_weight checks
fit_parameters = signature(estimator.fit).parameters
supports_sw = "sample_weight" in fit_parameters
if sample_weight is not None and not supports_sw:
estimator_name = type(estimator).__name__
warnings.warn(
f"Since {estimator_name} does not appear to accept"
" sample_weight, sample weights will only be used for the"
" calibration itself. This can be caused by a limitation of"
" the current scikit-learn API. See the following issue for"
" more details:"
" https://github.com/scikit-learn/scikit-learn/issues/21134."
" Be warned that the result of the calibration is likely to be"
" incorrect."
)
routed_params = Bunch()
routed_params.splitter = Bunch(split={}) # no routing for splitter
routed_params.estimator = Bunch(fit=fit_params)
if sample_weight is not None and supports_sw:
routed_params.estimator.fit["sample_weight"] = sample_weight
# Check that each cross-validation fold can have at least one
# example per class
if isinstance(self.cv, int):
n_folds = self.cv
elif hasattr(self.cv, "n_splits"):
n_folds = self.cv.n_splits
else:
n_folds = None
if n_folds and np.any(np.unique(y, return_counts=True)[1] < n_folds):
raise ValueError(
f"Requesting {n_folds}-fold "
"cross-validation but provided less than "
f"{n_folds} examples for at least one class."
)
if isinstance(self.cv, LeaveOneOut):
raise ValueError(
"LeaveOneOut cross-validation does not allow"
"all classes to be present in test splits. "
"Please use a cross-validation generator that allows "
"all classes to appear in every test and train split."
)
cv = check_cv(self.cv, y, classifier=True)
if self.ensemble:
parallel = Parallel(n_jobs=self.n_jobs)
self.calibrated_classifiers_ = parallel(
delayed(_fit_classifier_calibrator_pair)(
clone(estimator),
X,
y,
train=train,
test=test,
method=self.method,
classes=self.classes_,
sample_weight=sample_weight,
fit_params=routed_params.estimator.fit,
)
for train, test in cv.split(X, y, **routed_params.splitter.split)
)
else:
this_estimator = clone(estimator)
method_name = _check_response_method(
this_estimator,
["decision_function", "predict_proba"],
).__name__
predictions = cross_val_predict(
estimator=this_estimator,
X=X,
y=y,
cv=cv,
method=method_name,
n_jobs=self.n_jobs,
params=routed_params.estimator.fit,
)
if len(self.classes_) == 2:
# Ensure shape (n_samples, 1) in the binary case
if method_name == "predict_proba":
# Select the probability column of the positive class
predictions = _process_predict_proba(
y_pred=predictions,
target_type="binary",
classes=self.classes_,
pos_label=self.classes_[1],
)
predictions = predictions.reshape(-1, 1)
this_estimator.fit(X, y, **routed_params.estimator.fit)
# Note: Here we don't pass on fit_params because the supported
# calibrators don't support fit_params anyway
calibrated_classifier = _fit_calibrator(
this_estimator,
predictions,
y,
self.classes_,
self.method,
sample_weight,
)
self.calibrated_classifiers_.append(calibrated_classifier)
first_clf = self.calibrated_classifiers_[0].estimator
if hasattr(first_clf, "n_features_in_"):
self.n_features_in_ = first_clf.n_features_in_
if hasattr(first_clf, "feature_names_in_"):
self.feature_names_in_ = first_clf.feature_names_in_
return self
| CalibratedClassifierCV.fit | Repo-Level |
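A minimal usage sketch of the cross-validated branch above, wrapping a classifier that only exposes `decision_function`.

```python
from sklearn.calibration import CalibratedClassifierCV
from sklearn.datasets import make_classification
from sklearn.svm import LinearSVC

X, y = make_classification(n_samples=300, random_state=0)
clf = CalibratedClassifierCV(estimator=LinearSVC(random_state=0),
                             method="sigmoid", cv=3)
clf.fit(X, y)
print(clf.predict_proba(X[:3]))  # calibrated class probabilities
```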
scikit-learn | 38 | sklearn/linear_model/_coordinate_descent.py | def fit(self, X, y, sample_weight=None, check_input=True):
"""Fit model with coordinate descent.
Parameters
----------
X : {ndarray, sparse matrix, sparse array} of shape (n_samples, n_features)
Data.
Note that large sparse matrices and arrays requiring `int64`
indices are not accepted.
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
Target. Will be cast to X's dtype if necessary.
sample_weight : float or array-like of shape (n_samples,), default=None
Sample weights. Internally, the `sample_weight` vector will be
rescaled to sum to `n_samples`.
.. versionadded:: 0.23
check_input : bool, default=True
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
self : object
Fitted estimator.
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
| /usr/src/app/target_test_cases/failed_tests_ElasticNet.fit.txt | def fit(self, X, y, sample_weight=None, check_input=True):
"""Fit model with coordinate descent.
Parameters
----------
X : {ndarray, sparse matrix, sparse array} of shape (n_samples, n_features)
Data.
Note that large sparse matrices and arrays requiring `int64`
indices are not accepted.
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
Target. Will be cast to X's dtype if necessary.
sample_weight : float or array-like of shape (n_samples,), default=None
Sample weights. Internally, the `sample_weight` vector will be
rescaled to sum to `n_samples`.
.. versionadded:: 0.23
check_input : bool, default=True
Allows several input checks to be bypassed.
Don't use this parameter unless you know what you are doing.
Returns
-------
self : object
Fitted estimator.
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time, hence it will automatically convert the X input
to a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn(
(
"With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator"
),
stacklevel=2,
)
# Remember if X is copied
X_copied = False
# We expect X and y to be float64 or float32 Fortran ordered arrays
# when bypassing checks
if check_input:
X_copied = self.copy_X and self.fit_intercept
X, y = validate_data(
self,
X,
y,
accept_sparse="csc",
order="F",
dtype=[np.float64, np.float32],
force_writeable=True,
accept_large_sparse=False,
copy=X_copied,
multi_output=True,
y_numeric=True,
)
y = check_array(
y, order="F", copy=False, dtype=X.dtype.type, ensure_2d=False
)
n_samples, n_features = X.shape
alpha = self.alpha
if isinstance(sample_weight, numbers.Number):
sample_weight = None
if sample_weight is not None:
if check_input:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
# TLDR: Rescale sw to sum up to n_samples.
# Long: The objective function of Enet
#
# 1/2 * np.average(squared error, weights=sw)
# + alpha * penalty (1)
#
# is invariant under rescaling of sw.
# But enet_path coordinate descent minimizes
#
# 1/2 * sum(squared error) + alpha' * penalty (2)
#
# and therefore sets
#
# alpha' = n_samples * alpha (3)
#
# inside its function body, which results in objective (2) being
# equivalent to (1) in case of no sw.
# With sw, however, enet_path should set
#
# alpha' = sum(sw) * alpha (4)
#
# Therefore, we use the freedom of Eq. (1) to rescale sw before
# calling enet_path, i.e.
#
# sw *= n_samples / sum(sw)
#
# such that sum(sw) = n_samples. This way, (3) and (4) are the same.
sample_weight = sample_weight * (n_samples / np.sum(sample_weight))
# Note: Alternatively, we could also have rescaled alpha instead
# of sample_weight:
#
# alpha *= np.sum(sample_weight) / n_samples
# Ensure copying happens only once, don't do it again if done above.
# X and y will be rescaled if sample_weight is not None, order='F'
# ensures that the returned X and y are still F-contiguous.
should_copy = self.copy_X and not X_copied
X, y, X_offset, y_offset, X_scale, precompute, Xy = _pre_fit(
X,
y,
None,
self.precompute,
fit_intercept=self.fit_intercept,
copy=should_copy,
check_input=check_input,
sample_weight=sample_weight,
)
# coordinate descent needs F-ordered arrays and _pre_fit might have
# called _rescale_data
if check_input or sample_weight is not None:
X, y = _set_order(X, y, order="F")
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_targets = y.shape[1]
if not self.warm_start or not hasattr(self, "coef_"):
coef_ = np.zeros((n_targets, n_features), dtype=X.dtype, order="F")
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=X.dtype)
self.n_iter_ = []
for k in range(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap, this_iter = self.path(
X,
y[:, k],
l1_ratio=self.l1_ratio,
eps=None,
n_alphas=None,
alphas=[alpha],
precompute=precompute,
Xy=this_Xy,
copy_X=True,
coef_init=coef_[k],
verbose=False,
return_n_iter=True,
positive=self.positive,
check_input=False,
# from here on **params
tol=self.tol,
X_offset=X_offset,
X_scale=X_scale,
max_iter=self.max_iter,
random_state=self.random_state,
selection=self.selection,
sample_weight=sample_weight,
)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.n_iter_.append(this_iter[0])
if n_targets == 1:
self.n_iter_ = self.n_iter_[0]
self.coef_ = coef_[0]
self.dual_gap_ = dual_gaps_[0]
else:
self.coef_ = coef_
self.dual_gap_ = dual_gaps_
self._set_intercept(X_offset, y_offset, X_scale)
# check for finiteness of coefficients
if not all(np.isfinite(w).all() for w in [self.coef_, self.intercept_]):
raise ValueError(
"Coordinate descent iterations resulted in non-finite parameter"
" values. The input data may contain large values and need to"
" be preprocessed."
)
# return self for chaining fit and predict calls
return self
| ElasticNet.fit | Repo-Level |
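A short sketch of calling the coordinate-descent fit above; the synthetic data and the alpha/l1_ratio values are illustrative assumptions.

import numpy as np
from sklearn.linear_model import ElasticNet

rng = np.random.RandomState(0)
# F-ordered float64 input avoids the internal copy mentioned in the Notes
X = np.asfortranarray(rng.randn(100, 5))
y = X @ np.array([1.0, 0.0, -2.0, 0.0, 0.5]) + 0.01 * rng.randn(100)
model = ElasticNet(alpha=0.1, l1_ratio=0.5).fit(X, y)
print(model.coef_, model.intercept_, model.dual_gap_)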
scikit-learn | 55 | sklearn/gaussian_process/_gpc.py | def log_marginal_likelihood(
self, theta=None, eval_gradient=False, clone_kernel=True
):
"""Return log-marginal likelihood of theta for training data.
In the case of multi-class classification, the mean log-marginal
likelihood of the one-versus-rest classifiers is returned.
Parameters
----------
theta : array-like of shape (n_kernel_params,), default=None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. In the case of multi-class classification, theta may
be the hyperparameters of the compound kernel or of an individual
kernel. In the latter case, all individual kernels get assigned the
same theta values. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default=False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. Note that gradient computation is not supported
for non-binary classification. If True, theta must not be None.
clone_kernel : bool, default=True
If True, the kernel attribute is copied. If False, the kernel
attribute is modified, but may result in a performance improvement.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when `eval_gradient` is True.
"""
| /usr/src/app/target_test_cases/failed_tests_GaussianProcessClassifier.log_marginal_likelihood.txt | def log_marginal_likelihood(
self, theta=None, eval_gradient=False, clone_kernel=True
):
"""Return log-marginal likelihood of theta for training data.
In the case of multi-class classification, the mean log-marginal
likelihood of the one-versus-rest classifiers is returned.
Parameters
----------
theta : array-like of shape (n_kernel_params,), default=None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. In the case of multi-class classification, theta may
be the hyperparameters of the compound kernel or of an individual
kernel. In the latter case, all individual kernels get assigned the
same theta values. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default=False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. Note that gradient computation is not supported
for non-binary classification. If True, theta must not be None.
clone_kernel : bool, default=True
If True, the kernel attribute is copied. If False, the kernel
attribute is modified, but may result in a performance improvement.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when `eval_gradient` is True.
"""
check_is_fitted(self)
if theta is None:
if eval_gradient:
raise ValueError("Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
theta = np.asarray(theta)
if self.n_classes_ == 2:
return self.base_estimator_.log_marginal_likelihood(
theta, eval_gradient, clone_kernel=clone_kernel
)
else:
if eval_gradient:
raise NotImplementedError(
"Gradient of log-marginal-likelihood not implemented for "
"multi-class GPC."
)
estimators = self.base_estimator_.estimators_
n_dims = estimators[0].kernel_.n_dims
if theta.shape[0] == n_dims: # use same theta for all sub-kernels
return np.mean(
[
estimator.log_marginal_likelihood(
theta, clone_kernel=clone_kernel
)
for i, estimator in enumerate(estimators)
]
)
elif theta.shape[0] == n_dims * self.classes_.shape[0]:
# theta for compound kernel
return np.mean(
[
estimator.log_marginal_likelihood(
theta[n_dims * i : n_dims * (i + 1)],
clone_kernel=clone_kernel,
)
for i, estimator in enumerate(estimators)
]
)
else:
raise ValueError(
"Shape of theta must be either %d or %d. "
"Obtained theta with shape %d."
% (n_dims, n_dims * self.classes_.shape[0], theta.shape[0])
)
| GaussianProcessClassifier.log_marginal_likelihood | Repo-Level |
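An illustrative call pattern for the binary branch above; the toy data and the RBF kernel are assumptions.

import numpy as np
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF

X = np.array([[0.0], [1.0], [2.0], [3.0]])
y = np.array([0, 0, 1, 1])
gpc = GaussianProcessClassifier(kernel=1.0 * RBF(1.0)).fit(X, y)
# theta=None returns the precomputed value at self.kernel_.theta
print(gpc.log_marginal_likelihood())
# gradients are only available in the binary case, as the code above enforces
lml, grad = gpc.log_marginal_likelihood(gpc.kernel_.theta, eval_gradient=True)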
scikit-learn | 57 | sklearn/gaussian_process/_gpr.py | def fit(self, X, y):
"""Fit Gaussian process regression model.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Feature vectors or other representations of training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
self : object
GaussianProcessRegressor class instance.
"""
| /usr/src/app/target_test_cases/failed_tests_GaussianProcessRegressor.fit.txt | def fit(self, X, y):
"""Fit Gaussian process regression model.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Feature vectors or other representations of training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
self : object
GaussianProcessRegressor class instance.
"""
if self.kernel is None: # Use an RBF kernel as default
self.kernel_ = C(1.0, constant_value_bounds="fixed") * RBF(
1.0, length_scale_bounds="fixed"
)
else:
self.kernel_ = clone(self.kernel)
self._rng = check_random_state(self.random_state)
if self.kernel_.requires_vector_input:
dtype, ensure_2d = "numeric", True
else:
dtype, ensure_2d = None, False
X, y = validate_data(
self,
X,
y,
multi_output=True,
y_numeric=True,
ensure_2d=ensure_2d,
dtype=dtype,
)
n_targets_seen = y.shape[1] if y.ndim > 1 else 1
if self.n_targets is not None and n_targets_seen != self.n_targets:
raise ValueError(
"The number of targets seen in `y` is different from the parameter "
f"`n_targets`. Got {n_targets_seen} != {self.n_targets}."
)
# Normalize target value
if self.normalize_y:
self._y_train_mean = np.mean(y, axis=0)
self._y_train_std = _handle_zeros_in_scale(np.std(y, axis=0), copy=False)
# Remove mean and make unit variance
y = (y - self._y_train_mean) / self._y_train_std
else:
shape_y_stats = (y.shape[1],) if y.ndim == 2 else 1
self._y_train_mean = np.zeros(shape=shape_y_stats)
self._y_train_std = np.ones(shape=shape_y_stats)
if np.iterable(self.alpha) and self.alpha.shape[0] != y.shape[0]:
if self.alpha.shape[0] == 1:
self.alpha = self.alpha[0]
else:
raise ValueError(
"alpha must be a scalar or an array with same number of "
f"entries as y. ({self.alpha.shape[0]} != {y.shape[0]})"
)
self.X_train_ = np.copy(X) if self.copy_X_train else X
self.y_train_ = np.copy(y) if self.copy_X_train else y
if self.optimizer is not None and self.kernel_.n_dims > 0:
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True, clone_kernel=False
)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta, clone_kernel=False)
# First optimize starting from theta specified in kernel
optima = [
(
self._constrained_optimization(
obj_func, self.kernel_.theta, self.kernel_.bounds
)
)
]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite."
)
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = self._rng.uniform(bounds[:, 0], bounds[:, 1])
optima.append(
self._constrained_optimization(obj_func, theta_initial, bounds)
)
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.kernel_._check_bounds_params()
self.log_marginal_likelihood_value_ = -np.min(lml_values)
else:
self.log_marginal_likelihood_value_ = self.log_marginal_likelihood(
self.kernel_.theta, clone_kernel=False
)
# Precompute quantities required for predictions which are independent
# of actual query points
# Alg. 2.1, page 19, line 2 -> L = cholesky(K + sigma^2 I)
K = self.kernel_(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
try:
self.L_ = cholesky(K, lower=GPR_CHOLESKY_LOWER, check_finite=False)
except np.linalg.LinAlgError as exc:
exc.args = (
(
f"The kernel, {self.kernel_}, is not returning a positive "
"definite matrix. Try gradually increasing the 'alpha' "
"parameter of your GaussianProcessRegressor estimator."
),
) + exc.args
raise
# Alg 2.1, page 19, line 3 -> alpha = L^T \ (L \ y)
self.alpha_ = cho_solve(
(self.L_, GPR_CHOLESKY_LOWER),
self.y_train_,
check_finite=False,
)
return self
| GaussianProcessRegressor.fit | Repo-Level |
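A minimal sketch of the fit above; the kernel choice, noise level, and restart count are assumptions.

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF

rng = np.random.RandomState(0)
X = rng.uniform(0, 5, size=(20, 1))
y = np.sin(X).ravel() + 0.1 * rng.randn(20)
# n_restarts_optimizer > 0 triggers the log-uniform restart loop above and
# requires finite kernel bounds (RBF's defaults are finite)
gpr = GaussianProcessRegressor(kernel=RBF(1.0), alpha=0.01,
                               n_restarts_optimizer=2, normalize_y=True)
gpr.fit(X, y)
print(gpr.kernel_, gpr.log_marginal_likelihood_value_)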
scikit-learn | 58 | sklearn/gaussian_process/_gpr.py | def log_marginal_likelihood(
self, theta=None, eval_gradient=False, clone_kernel=True
):
"""Return log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like of shape (n_kernel_params,) default=None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default=False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
clone_kernel : bool, default=True
If True, the kernel attribute is copied. If False, the kernel
attribute is modified, but may result in a performance improvement.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
| /usr/src/app/target_test_cases/failed_tests_GaussianProcessRegressor.log_marginal_likelihood.txt | def log_marginal_likelihood(
self, theta=None, eval_gradient=False, clone_kernel=True
):
"""Return log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like of shape (n_kernel_params,) default=None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default=False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
clone_kernel : bool, default=True
If True, the kernel attribute is copied. If False, the kernel
attribute is modified, but may result in a performance improvement.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
if theta is None:
if eval_gradient:
raise ValueError("Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
if clone_kernel:
kernel = self.kernel_.clone_with_theta(theta)
else:
kernel = self.kernel_
kernel.theta = theta
if eval_gradient:
K, K_gradient = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
# Alg. 2.1, page 19, line 2 -> L = cholesky(K + sigma^2 I)
K[np.diag_indices_from(K)] += self.alpha
try:
L = cholesky(K, lower=GPR_CHOLESKY_LOWER, check_finite=False)
except np.linalg.LinAlgError:
return (-np.inf, np.zeros_like(theta)) if eval_gradient else -np.inf
# Support multi-dimensional output of self.y_train_
y_train = self.y_train_
if y_train.ndim == 1:
y_train = y_train[:, np.newaxis]
# Alg 2.1, page 19, line 3 -> alpha = L^T \ (L \ y)
alpha = cho_solve((L, GPR_CHOLESKY_LOWER), y_train, check_finite=False)
# Alg 2.1, page 19, line 7
# -0.5 . y^T . alpha - sum(log(diag(L))) - n_samples / 2 log(2*pi)
# y is originally thought to be a (1, n_samples) row vector. However,
# in multi-output settings, y is of shape (n_samples, n_outputs) and we need to compute
# y^T . alpha for each output, independently using einsum. Thus, it
# is equivalent to:
# for output_idx in range(n_outputs):
# log_likelihood_dims[output_idx] = (
# y_train[:, [output_idx]] @ alpha[:, [output_idx]]
# )
log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, alpha)
log_likelihood_dims -= np.log(np.diag(L)).sum()
log_likelihood_dims -= K.shape[0] / 2 * np.log(2 * np.pi)
# the log likelihood is summed up across the outputs
log_likelihood = log_likelihood_dims.sum(axis=-1)
if eval_gradient:
# Eq. 5.9, p. 114, and footnote 5 in p. 114
# 0.5 * trace((alpha . alpha^T - K^-1) . K_gradient)
# alpha is supposed to be a vector of (n_samples,) elements. With
# multiple outputs, alpha is a matrix of size (n_samples, n_outputs).
# Therefore, we want to construct a matrix of
# (n_samples, n_samples, n_outputs) equivalent to
# for output_idx in range(n_outputs):
# output_alpha = alpha[:, [output_idx]]
# inner_term[..., output_idx] = output_alpha @ output_alpha.T
inner_term = np.einsum("ik,jk->ijk", alpha, alpha)
# compute K^-1 of shape (n_samples, n_samples)
K_inv = cho_solve(
(L, GPR_CHOLESKY_LOWER), np.eye(K.shape[0]), check_finite=False
)
# create a new axis to use broadcasting between inner_term and
# K_inv
inner_term -= K_inv[..., np.newaxis]
# Since we are interested about the trace of
# inner_term @ K_gradient, we don't explicitly compute the
# matrix-by-matrix operation and instead use an einsum. Therefore
# it is equivalent to:
# for param_idx in range(n_kernel_params):
# for output_idx in range(n_output):
# log_likelihood_gradient_dims[param_idx, output_idx] = (
# inner_term[..., output_idx] @
# K_gradient[..., param_idx]
# )
log_likelihood_gradient_dims = 0.5 * np.einsum(
"ijl,jik->kl", inner_term, K_gradient
)
# the log likelihood gradient is summed up across the outputs
log_likelihood_gradient = log_likelihood_gradient_dims.sum(axis=-1)
if eval_gradient:
return log_likelihood, log_likelihood_gradient
else:
return log_likelihood
| GaussianProcessRegressor.log_marginal_likelihood | Self-Contained |
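A sketch probing the log-marginal-likelihood surface at a custom theta; the toy fit is an assumption. Note that theta lives in log space (here, the log of the RBF length scale).

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF

X = np.linspace(0, 5, 20).reshape(-1, 1)
y = np.sin(X).ravel()
gpr = GaussianProcessRegressor(kernel=RBF(1.0)).fit(X, y)
theta = np.log([0.5])  # one entry per kernel hyperparameter
lml, grad = gpr.log_marginal_likelihood(theta, eval_gradient=True)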
scikit-learn | 59 | sklearn/gaussian_process/_gpr.py | def predict(self, X, return_std=False, return_cov=False):
"""Predict using the Gaussian process regression model.
We can also predict based on an unfitted model by using the GP prior.
In addition to the mean of the predictive distribution, optionally also
returns its standard deviation (`return_std=True`) or covariance
(`return_cov=True`). Note that at most one of the two can be requested.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Query points where the GP is evaluated.
return_std : bool, default=False
If True, the standard-deviation of the predictive distribution at
the query points is returned along with the mean.
return_cov : bool, default=False
If True, the covariance of the joint predictive distribution at
the query points is returned along with the mean.
Returns
-------
y_mean : ndarray of shape (n_samples,) or (n_samples, n_targets)
Mean of predictive distribution at query points.
y_std : ndarray of shape (n_samples,) or (n_samples, n_targets), optional
Standard deviation of predictive distribution at query points.
Only returned when `return_std` is True.
y_cov : ndarray of shape (n_samples, n_samples) or \
(n_samples, n_samples, n_targets), optional
Covariance of joint predictive distribution at query points.
Only returned when `return_cov` is True.
"""
| /usr/src/app/target_test_cases/failed_tests_GaussianProcessRegressor.predict.txt | def predict(self, X, return_std=False, return_cov=False):
"""Predict using the Gaussian process regression model.
We can also predict based on an unfitted model by using the GP prior.
In addition to the mean of the predictive distribution, optionally also
returns its standard deviation (`return_std=True`) or covariance
(`return_cov=True`). Note that at most one of the two can be requested.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Query points where the GP is evaluated.
return_std : bool, default=False
If True, the standard-deviation of the predictive distribution at
the query points is returned along with the mean.
return_cov : bool, default=False
If True, the covariance of the joint predictive distribution at
the query points is returned along with the mean.
Returns
-------
y_mean : ndarray of shape (n_samples,) or (n_samples, n_targets)
Mean of predictive distribution at query points.
y_std : ndarray of shape (n_samples,) or (n_samples, n_targets), optional
Standard deviation of predictive distribution at query points.
Only returned when `return_std` is True.
y_cov : ndarray of shape (n_samples, n_samples) or \
(n_samples, n_samples, n_targets), optional
Covariance of joint predictive distribution at query points.
Only returned when `return_cov` is True.
"""
if return_std and return_cov:
raise RuntimeError(
"At most one of return_std or return_cov can be requested."
)
if self.kernel is None or self.kernel.requires_vector_input:
dtype, ensure_2d = "numeric", True
else:
dtype, ensure_2d = None, False
X = validate_data(self, X, ensure_2d=ensure_2d, dtype=dtype, reset=False)
if not hasattr(self, "X_train_"): # Unfitted;predict based on GP prior
if self.kernel is None:
kernel = C(1.0, constant_value_bounds="fixed") * RBF(
1.0, length_scale_bounds="fixed"
)
else:
kernel = self.kernel
n_targets = self.n_targets if self.n_targets is not None else 1
y_mean = np.zeros(shape=(X.shape[0], n_targets)).squeeze()
if return_cov:
y_cov = kernel(X)
if n_targets > 1:
y_cov = np.repeat(
np.expand_dims(y_cov, -1), repeats=n_targets, axis=-1
)
return y_mean, y_cov
elif return_std:
y_var = kernel.diag(X)
if n_targets > 1:
y_var = np.repeat(
np.expand_dims(y_var, -1), repeats=n_targets, axis=-1
)
return y_mean, np.sqrt(y_var)
else:
return y_mean
else: # Predict based on GP posterior
# Alg 2.1, page 19, line 4 -> f*_bar = K(X_test, X_train) . alpha
K_trans = self.kernel_(X, self.X_train_)
y_mean = K_trans @ self.alpha_
# undo normalisation
y_mean = self._y_train_std * y_mean + self._y_train_mean
# if y_mean has shape (n_samples, 1), reshape to (n_samples,)
if y_mean.ndim > 1 and y_mean.shape[1] == 1:
y_mean = np.squeeze(y_mean, axis=1)
# Alg 2.1, page 19, line 5 -> v = L \ K(X_test, X_train)^T
V = solve_triangular(
self.L_, K_trans.T, lower=GPR_CHOLESKY_LOWER, check_finite=False
)
if return_cov:
# Alg 2.1, page 19, line 6 -> K(X_test, X_test) - v^T. v
y_cov = self.kernel_(X) - V.T @ V
# undo normalisation
y_cov = np.outer(y_cov, self._y_train_std**2).reshape(*y_cov.shape, -1)
# if y_cov has shape (n_samples, n_samples, 1), reshape to
# (n_samples, n_samples)
if y_cov.shape[2] == 1:
y_cov = np.squeeze(y_cov, axis=2)
return y_mean, y_cov
elif return_std:
# Compute variance of predictive distribution
# Use einsum to avoid explicitly forming the large matrix
# V^T @ V just to extract its diagonal afterward.
y_var = self.kernel_.diag(X).copy()
y_var -= np.einsum("ij,ji->i", V.T, V)
# Check if any of the variances is negative because of
# numerical issues. If yes: set the variance to 0.
y_var_negative = y_var < 0
if np.any(y_var_negative):
warnings.warn(
"Predicted variances smaller than 0. "
"Setting those variances to 0."
)
y_var[y_var_negative] = 0.0
# undo normalisation
y_var = np.outer(y_var, self._y_train_std**2).reshape(*y_var.shape, -1)
# if y_var has shape (n_samples, 1), reshape to (n_samples,)
if y_var.shape[1] == 1:
y_var = np.squeeze(y_var, axis=1)
return y_mean, np.sqrt(y_var)
else:
return y_mean
| GaussianProcessRegressor.predict | Repo-Level |
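A sketch of the three return modes of the predict above; the training data are assumptions. Requesting return_std and return_cov together raises RuntimeError, as the first check enforces.

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor

X = np.linspace(0, 5, 20).reshape(-1, 1)
y = np.sin(X).ravel()
gpr = GaussianProcessRegressor().fit(X, y)
X_new = np.linspace(0, 5, 5).reshape(-1, 1)
y_mean = gpr.predict(X_new)
y_mean, y_std = gpr.predict(X_new, return_std=True)
y_mean, y_cov = gpr.predict(X_new, return_cov=True)
# calling predict on an unfitted instance instead draws from the GP prior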
scikit-learn | 66 | sklearn/decomposition/_incremental_pca.py | def partial_fit(self, X, y=None, check_input=True):
"""Incremental fit with X. All of X is processed as a single batch.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
check_input : bool, default=True
Run check_array on X.
Returns
-------
self : object
Returns the instance itself.
"""
| /usr/src/app/target_test_cases/failed_tests_IncrementalPCA.partial_fit.txt | def partial_fit(self, X, y=None, check_input=True):
"""Incremental fit with X. All of X is processed as a single batch.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
check_input : bool, default=True
Run check_array on X.
Returns
-------
self : object
Returns the instance itself.
"""
first_pass = not hasattr(self, "components_")
if check_input:
if sparse.issparse(X):
raise TypeError(
"IncrementalPCA.partial_fit does not support "
"sparse input. Either convert data to dense "
"or use IncrementalPCA.fit to do so in batches."
)
X = validate_data(
self,
X,
copy=self.copy,
dtype=[np.float64, np.float32],
force_writeable=True,
reset=first_pass,
)
n_samples, n_features = X.shape
if first_pass:
self.components_ = None
if self.n_components is None:
if self.components_ is None:
self.n_components_ = min(n_samples, n_features)
else:
self.n_components_ = self.components_.shape[0]
elif not self.n_components <= n_features:
raise ValueError(
"n_components=%r invalid for n_features=%d, need "
"more rows than columns for IncrementalPCA "
"processing" % (self.n_components, n_features)
)
elif not self.n_components <= n_samples:
raise ValueError(
"n_components=%r must be less or equal to "
"the batch number of samples "
"%d." % (self.n_components, n_samples)
)
else:
self.n_components_ = self.n_components
if (self.components_ is not None) and (
self.components_.shape[0] != self.n_components_
):
raise ValueError(
"Number of input features has changed from %i "
"to %i between calls to partial_fit! Try "
"setting n_components to a fixed value."
% (self.components_.shape[0], self.n_components_)
)
# This is the first partial_fit
if not hasattr(self, "n_samples_seen_"):
self.n_samples_seen_ = 0
self.mean_ = 0.0
self.var_ = 0.0
# Update stats - they are 0 if this is the first step
col_mean, col_var, n_total_samples = _incremental_mean_and_var(
X,
last_mean=self.mean_,
last_variance=self.var_,
last_sample_count=np.repeat(self.n_samples_seen_, X.shape[1]),
)
n_total_samples = n_total_samples[0]
# Whitening
if self.n_samples_seen_ == 0:
# If it is the first step, simply whiten X
X -= col_mean
else:
col_batch_mean = np.mean(X, axis=0)
X -= col_batch_mean
# Build matrix of combined previous basis and new data
mean_correction = np.sqrt(
(self.n_samples_seen_ / n_total_samples) * n_samples
) * (self.mean_ - col_batch_mean)
X = np.vstack(
(
self.singular_values_.reshape((-1, 1)) * self.components_,
X,
mean_correction,
)
)
U, S, Vt = linalg.svd(X, full_matrices=False, check_finite=False)
U, Vt = svd_flip(U, Vt, u_based_decision=False)
explained_variance = S**2 / (n_total_samples - 1)
explained_variance_ratio = S**2 / np.sum(col_var * n_total_samples)
self.n_samples_seen_ = n_total_samples
self.components_ = Vt[: self.n_components_]
self.singular_values_ = S[: self.n_components_]
self.mean_ = col_mean
self.var_ = col_var
self.explained_variance_ = explained_variance[: self.n_components_]
self.explained_variance_ratio_ = explained_variance_ratio[: self.n_components_]
# we already checked `self.n_components <= n_samples` above
if self.n_components_ not in (n_samples, n_features):
self.noise_variance_ = explained_variance[self.n_components_ :].mean()
else:
self.noise_variance_ = 0.0
return self
| IncrementalPCA.partial_fit | Repo-Level |
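A sketch feeding batches through partial_fit; the batch split is an assumption. Each batch must contain at least n_components samples, as the shape checks above enforce.

import numpy as np
from sklearn.decomposition import IncrementalPCA

rng = np.random.RandomState(0)
X = rng.randn(300, 10)
ipca = IncrementalPCA(n_components=4)
for batch in np.array_split(X, 3):  # three batches of 100 samples each
    ipca.partial_fit(batch)
print(ipca.explained_variance_ratio_, ipca.n_samples_seen_)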
scikit-learn | 70 | sklearn/impute/_iterative.py | def fit_transform(self, X, y=None, **params):
"""Fit the imputer on `X` and return the transformed `X`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
**params : dict
Parameters routed to the `fit` method of the sub-estimator via the
metadata routing API.
.. versionadded:: 1.5
Only available if
`sklearn.set_config(enable_metadata_routing=True)` is set. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
Xt : array-like, shape (n_samples, n_features)
The imputed input data.
"""
| /usr/src/app/target_test_cases/failed_tests_IterativeImputer.fit_transform.txt | def fit_transform(self, X, y=None, **params):
"""Fit the imputer on `X` and return the transformed `X`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
**params : dict
Parameters routed to the `fit` method of the sub-estimator via the
metadata routing API.
.. versionadded:: 1.5
Only available if
`sklearn.set_config(enable_metadata_routing=True)` is set. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
Xt : array-like, shape (n_samples, n_features)
The imputed input data.
"""
_raise_for_params(params, self, "fit")
routed_params = process_routing(
self,
"fit",
**params,
)
self.random_state_ = getattr(
self, "random_state_", check_random_state(self.random_state)
)
if self.estimator is None:
from ..linear_model import BayesianRidge
self._estimator = BayesianRidge()
else:
self._estimator = clone(self.estimator)
self.imputation_sequence_ = []
self.initial_imputer_ = None
X, Xt, mask_missing_values, complete_mask = self._initial_imputation(
X, in_fit=True
)
super()._fit_indicator(complete_mask)
X_indicator = super()._transform_indicator(complete_mask)
if self.max_iter == 0 or np.all(mask_missing_values):
self.n_iter_ = 0
return super()._concatenate_indicator(Xt, X_indicator)
# Edge case: a single feature, we return the initial imputation.
if Xt.shape[1] == 1:
self.n_iter_ = 0
return super()._concatenate_indicator(Xt, X_indicator)
self._min_value = self._validate_limit(self.min_value, "min", X.shape[1])
self._max_value = self._validate_limit(self.max_value, "max", X.shape[1])
if not np.all(np.greater(self._max_value, self._min_value)):
raise ValueError("One (or more) features have min_value >= max_value.")
# order in which to impute
# note this is probably too slow for large feature data (d > 100000)
# and a better way would be good.
# see: https://goo.gl/KyCNwj and subsequent comments
ordered_idx = self._get_ordered_idx(mask_missing_values)
self.n_features_with_missing_ = len(ordered_idx)
abs_corr_mat = self._get_abs_corr_mat(Xt)
n_samples, n_features = Xt.shape
if self.verbose > 0:
print("[IterativeImputer] Completing matrix with shape %s" % (X.shape,))
start_t = time()
if not self.sample_posterior:
Xt_previous = Xt.copy()
normalized_tol = self.tol * np.max(np.abs(X[~mask_missing_values]))
for self.n_iter_ in range(1, self.max_iter + 1):
if self.imputation_order == "random":
ordered_idx = self._get_ordered_idx(mask_missing_values)
for feat_idx in ordered_idx:
neighbor_feat_idx = self._get_neighbor_feat_idx(
n_features, feat_idx, abs_corr_mat
)
Xt, estimator = self._impute_one_feature(
Xt,
mask_missing_values,
feat_idx,
neighbor_feat_idx,
estimator=None,
fit_mode=True,
params=routed_params.estimator.fit,
)
estimator_triplet = _ImputerTriplet(
feat_idx, neighbor_feat_idx, estimator
)
self.imputation_sequence_.append(estimator_triplet)
if self.verbose > 1:
print(
"[IterativeImputer] Ending imputation round "
"%d/%d, elapsed time %0.2f"
% (self.n_iter_, self.max_iter, time() - start_t)
)
if not self.sample_posterior:
inf_norm = np.linalg.norm(Xt - Xt_previous, ord=np.inf, axis=None)
if self.verbose > 0:
print(
"[IterativeImputer] Change: {}, scaled tolerance: {} ".format(
inf_norm, normalized_tol
)
)
if inf_norm < normalized_tol:
if self.verbose > 0:
print("[IterativeImputer] Early stopping criterion reached.")
break
Xt_previous = Xt.copy()
else:
if not self.sample_posterior:
warnings.warn(
"[IterativeImputer] Early stopping criterion not reached.",
ConvergenceWarning,
)
_assign_where(Xt, X, cond=~mask_missing_values)
return super()._concatenate_indicator(Xt, X_indicator)
| IterativeImputer.fit_transform | Repo-Level |
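A sketch with NaNs injected into toy data; all values are assumptions. Note the estimator is still experimental and must be enabled explicitly before import.

import numpy as np
from sklearn.experimental import enable_iterative_imputer  # noqa: F401
from sklearn.impute import IterativeImputer

X = np.array([[1.0, 2.0], [3.0, 6.0], [4.0, 8.0],
              [np.nan, 3.0], [7.0, np.nan]])
imputer = IterativeImputer(max_iter=10, random_state=0)
X_filled = imputer.fit_transform(X)
print(X_filled)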
scikit-learn | 72 | sklearn/preprocessing/_discretization.py | def fit(self, X, y=None, sample_weight=None):
"""
Fit the estimator.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data to be discretized.
y : None
Ignored. This parameter exists only for compatibility with
:class:`~sklearn.pipeline.Pipeline`.
sample_weight : ndarray of shape (n_samples,)
Contains weight values to be associated with each sample.
Cannot be used when `strategy` is set to `"uniform"`.
.. versionadded:: 1.3
Returns
-------
self : object
Returns the instance itself.
"""
| /usr/src/app/target_test_cases/failed_tests_KBinsDiscretizer.fit.txt | def fit(self, X, y=None, sample_weight=None):
"""
Fit the estimator.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data to be discretized.
y : None
Ignored. This parameter exists only for compatibility with
:class:`~sklearn.pipeline.Pipeline`.
sample_weight : ndarray of shape (n_samples,)
Contains weight values to be associated with each sample.
Cannot be used when `strategy` is set to `"uniform"`.
.. versionadded:: 1.3
Returns
-------
self : object
Returns the instance itself.
"""
X = validate_data(self, X, dtype="numeric")
if self.dtype in (np.float64, np.float32):
output_dtype = self.dtype
else: # self.dtype is None
output_dtype = X.dtype
n_samples, n_features = X.shape
if sample_weight is not None and self.strategy == "uniform":
raise ValueError(
"`sample_weight` was provided but it cannot be "
"used with strategy='uniform'. Got strategy="
f"{self.strategy!r} instead."
)
if self.subsample is not None and n_samples > self.subsample:
# Take a subsample of `X`
X = resample(
X,
replace=False,
n_samples=self.subsample,
random_state=self.random_state,
)
n_features = X.shape[1]
n_bins = self._validate_n_bins(n_features)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
bin_edges = np.zeros(n_features, dtype=object)
for jj in range(n_features):
column = X[:, jj]
col_min, col_max = column.min(), column.max()
if col_min == col_max:
warnings.warn(
"Feature %d is constant and will be replaced with 0." % jj
)
n_bins[jj] = 1
bin_edges[jj] = np.array([-np.inf, np.inf])
continue
if self.strategy == "uniform":
bin_edges[jj] = np.linspace(col_min, col_max, n_bins[jj] + 1)
elif self.strategy == "quantile":
quantiles = np.linspace(0, 100, n_bins[jj] + 1)
if sample_weight is None:
bin_edges[jj] = np.asarray(np.percentile(column, quantiles))
else:
bin_edges[jj] = np.asarray(
[
_weighted_percentile(column, sample_weight, q)
for q in quantiles
],
dtype=np.float64,
)
elif self.strategy == "kmeans":
from ..cluster import KMeans # fixes import loops
# Deterministic initialization with uniform spacing
uniform_edges = np.linspace(col_min, col_max, n_bins[jj] + 1)
init = (uniform_edges[1:] + uniform_edges[:-1])[:, None] * 0.5
# 1D k-means procedure
km = KMeans(n_clusters=n_bins[jj], init=init, n_init=1)
centers = km.fit(
column[:, None], sample_weight=sample_weight
).cluster_centers_[:, 0]
# Must sort, centers may be unsorted even with sorted init
centers.sort()
bin_edges[jj] = (centers[1:] + centers[:-1]) * 0.5
bin_edges[jj] = np.r_[col_min, bin_edges[jj], col_max]
# Remove bins whose widths are too small (i.e., <= 1e-8)
if self.strategy in ("quantile", "kmeans"):
mask = np.ediff1d(bin_edges[jj], to_begin=np.inf) > 1e-8
bin_edges[jj] = bin_edges[jj][mask]
if len(bin_edges[jj]) - 1 != n_bins[jj]:
warnings.warn(
"Bins whose width are too small (i.e., <= "
"1e-8) in feature %d are removed. Consider "
"decreasing the number of bins." % jj
)
n_bins[jj] = len(bin_edges[jj]) - 1
self.bin_edges_ = bin_edges
self.n_bins_ = n_bins
if "onehot" in self.encode:
self._encoder = OneHotEncoder(
categories=[np.arange(i) for i in self.n_bins_],
sparse_output=self.encode == "onehot",
dtype=output_dtype,
)
# Fit the OneHotEncoder with toy datasets
# so that it's ready for use after the KBinsDiscretizer is fitted
self._encoder.fit(np.zeros((1, len(self.n_bins_))))
return self
| KBinsDiscretizer.fit | Repo-Level |
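A sketch of fitting bin edges; the strategy and bin count are illustrative. With strategy="quantile" or "kmeans", a sample_weight array could also be passed, whereas "uniform" rejects it, as the check above shows.

import numpy as np
from sklearn.preprocessing import KBinsDiscretizer

X = np.array([[-2.0, 1.0], [-1.0, 2.0], [0.0, 3.0], [1.0, 4.0]])
est = KBinsDiscretizer(n_bins=3, encode="ordinal", strategy="quantile")
est.fit(X)
print(est.bin_edges_)  # one array of edges per feature
print(est.transform(X))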
scikit-learn | 75 | sklearn/cluster/_kmeans.py | def fit(self, X, y=None, sample_weight=None):
"""Compute k-means clustering.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training instances to cluster. It must be noted that the data
will be converted to C ordering, which will cause a memory
copy if the given data is not C-contiguous.
If a sparse matrix is passed, a copy will be made if it's not in
CSR format.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight. `sample_weight` is not used during
initialization if `init` is a callable or a user provided array.
.. versionadded:: 0.20
Returns
-------
self : object
Fitted estimator.
"""
| /usr/src/app/target_test_cases/failed_tests_KMeans.fit.txt | def fit(self, X, y=None, sample_weight=None):
"""Compute k-means clustering.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training instances to cluster. It must be noted that the data
will be converted to C ordering, which will cause a memory
copy if the given data is not C-contiguous.
If a sparse matrix is passed, a copy will be made if it's not in
CSR format.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight. `sample_weight` is not used during
initialization if `init` is a callable or a user provided array.
.. versionadded:: 0.20
Returns
-------
self : object
Fitted estimator.
"""
X = validate_data(
self,
X,
accept_sparse="csr",
dtype=[np.float64, np.float32],
order="C",
copy=self.copy_x,
accept_large_sparse=False,
)
self._check_params_vs_input(X)
random_state = check_random_state(self.random_state)
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
self._n_threads = _openmp_effective_n_threads()
# Validate init array
init = self.init
init_is_array_like = _is_arraylike_not_scalar(init)
if init_is_array_like:
init = check_array(init, dtype=X.dtype, copy=True, order="C")
self._validate_center_shape(X, init)
# subtract of mean of x for more accurate distance computations
if not sp.issparse(X):
X_mean = X.mean(axis=0)
# The copy was already done above
X -= X_mean
if init_is_array_like:
init -= X_mean
# precompute squared norms of data points
x_squared_norms = row_norms(X, squared=True)
if self._algorithm == "elkan":
kmeans_single = _kmeans_single_elkan
else:
kmeans_single = _kmeans_single_lloyd
self._check_mkl_vcomp(X, X.shape[0])
best_inertia, best_labels = None, None
for i in range(self._n_init):
# Initialize centers
centers_init = self._init_centroids(
X,
x_squared_norms=x_squared_norms,
init=init,
random_state=random_state,
sample_weight=sample_weight,
)
if self.verbose:
print("Initialization complete")
# run a k-means once
labels, inertia, centers, n_iter_ = kmeans_single(
X,
sample_weight,
centers_init,
max_iter=self.max_iter,
verbose=self.verbose,
tol=self._tol,
n_threads=self._n_threads,
)
# determine if these results are the best so far
# we choose a new run if it has a better inertia and the clustering is
# different from the best so far (it's possible that the inertia is
# slightly better even if the clustering is the same with potentially
# permuted labels, due to rounding errors)
if best_inertia is None or (
inertia < best_inertia
and not _is_same_clustering(labels, best_labels, self.n_clusters)
):
best_labels = labels
best_centers = centers
best_inertia = inertia
best_n_iter = n_iter_
if not sp.issparse(X):
if not self.copy_x:
X += X_mean
best_centers += X_mean
distinct_clusters = len(set(best_labels))
if distinct_clusters < self.n_clusters:
warnings.warn(
"Number of distinct clusters ({}) found smaller than "
"n_clusters ({}). Possibly due to duplicate points "
"in X.".format(distinct_clusters, self.n_clusters),
ConvergenceWarning,
stacklevel=2,
)
self.cluster_centers_ = best_centers
self._n_features_out = self.cluster_centers_.shape[0]
self.labels_ = best_labels
self.inertia_ = best_inertia
self.n_iter_ = best_n_iter
return self
| KMeans.fit | Repo-Level |
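A sketch of the fit above; the cluster count and data are assumptions.

import numpy as np
from sklearn.cluster import KMeans

X = np.array([[1, 2], [1, 4], [1, 0],
              [10, 2], [10, 4], [10, 0]], dtype=float)
# n_init controls how many times the init/single-run loop above is repeated;
# the run with the lowest inertia wins
km = KMeans(n_clusters=2, n_init=10, random_state=0).fit(X)
print(km.cluster_centers_, km.labels_, km.inertia_)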
scikit-learn | 77 | sklearn/impute/_knn.py | def transform(self, X):
"""Impute all missing values in X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input data to complete.
Returns
-------
X : array-like of shape (n_samples, n_output_features)
The imputed dataset. `n_output_features` is the number of features
that is not always missing during `fit`.
"""
| /usr/src/app/target_test_cases/failed_tests_KNNImputer.transform.txt | def transform(self, X):
"""Impute all missing values in X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input data to complete.
Returns
-------
X : array-like of shape (n_samples, n_output_features)
The imputed dataset. `n_output_features` is the number of features
that is not always missing during `fit`.
"""
check_is_fitted(self)
if not is_scalar_nan(self.missing_values):
ensure_all_finite = True
else:
ensure_all_finite = "allow-nan"
X = validate_data(
self,
X,
accept_sparse=False,
dtype=FLOAT_DTYPES,
force_writeable=True,
ensure_all_finite=ensure_all_finite,
copy=self.copy,
reset=False,
)
mask = _get_mask(X, self.missing_values)
mask_fit_X = self._mask_fit_X
valid_mask = self._valid_mask
X_indicator = super()._transform_indicator(mask)
# Removes columns where the training data is all nan
if not np.any(mask[:, valid_mask]):
# No missing values in X
if self.keep_empty_features:
Xc = X
Xc[:, ~valid_mask] = 0
else:
Xc = X[:, valid_mask]
# Even if there are no missing values in X, we still concatenate Xc
# with the missing value indicator matrix, X_indicator.
# This is to ensure that the output maintains consistency in terms
# of columns, regardless of whether missing values exist in X or not.
return super()._concatenate_indicator(Xc, X_indicator)
row_missing_idx = np.flatnonzero(mask[:, valid_mask].any(axis=1))
non_missing_fix_X = np.logical_not(mask_fit_X)
# Maps from indices from X to indices in dist matrix
dist_idx_map = np.zeros(X.shape[0], dtype=int)
dist_idx_map[row_missing_idx] = np.arange(row_missing_idx.shape[0])
def process_chunk(dist_chunk, start):
row_missing_chunk = row_missing_idx[start : start + len(dist_chunk)]
# Find and impute missing by column
for col in range(X.shape[1]):
if not valid_mask[col]:
# column was all missing during training
continue
col_mask = mask[row_missing_chunk, col]
if not np.any(col_mask):
# column has no missing values
continue
(potential_donors_idx,) = np.nonzero(non_missing_fix_X[:, col])
# receivers_idx are indices in X
receivers_idx = row_missing_chunk[np.flatnonzero(col_mask)]
# distances for samples that needed imputation for column
dist_subset = dist_chunk[dist_idx_map[receivers_idx] - start][
:, potential_donors_idx
]
# receivers with all nan distances impute with mean
all_nan_dist_mask = np.isnan(dist_subset).all(axis=1)
all_nan_receivers_idx = receivers_idx[all_nan_dist_mask]
if all_nan_receivers_idx.size:
col_mean = np.ma.array(
self._fit_X[:, col], mask=mask_fit_X[:, col]
).mean()
X[all_nan_receivers_idx, col] = col_mean
if len(all_nan_receivers_idx) == len(receivers_idx):
# all receivers imputed with mean
continue
# receivers with at least one defined distance
receivers_idx = receivers_idx[~all_nan_dist_mask]
dist_subset = dist_chunk[dist_idx_map[receivers_idx] - start][
:, potential_donors_idx
]
n_neighbors = min(self.n_neighbors, len(potential_donors_idx))
value = self._calc_impute(
dist_subset,
n_neighbors,
self._fit_X[potential_donors_idx, col],
mask_fit_X[potential_donors_idx, col],
)
X[receivers_idx, col] = value
# process in fixed-memory chunks
gen = pairwise_distances_chunked(
X[row_missing_idx, :],
self._fit_X,
metric=self.metric,
missing_values=self.missing_values,
ensure_all_finite=ensure_all_finite,
reduce_func=process_chunk,
)
for chunk in gen:
# process_chunk modifies X in place. No return value.
pass
if self.keep_empty_features:
Xc = X
Xc[:, ~valid_mask] = 0
else:
Xc = X[:, valid_mask]
return super()._concatenate_indicator(Xc, X_indicator)
| KNNImputer.transform | Repo-Level |
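A sketch of fit followed by transform; n_neighbors and the data are assumptions. Donors are drawn from the fit data, so transform can impute new samples too.

import numpy as np
from sklearn.impute import KNNImputer

X = np.array([[1.0, 2.0, np.nan], [3.0, 4.0, 3.0],
              [np.nan, 6.0, 5.0], [8.0, 8.0, 7.0]])
imputer = KNNImputer(n_neighbors=2)
X_filled = imputer.fit(X).transform(X)
print(X_filled)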
scikit-learn | 99 | sklearn/linear_model/_linear_loss.py | def gradient_hessian(
self,
coef,
X,
y,
sample_weight=None,
l2_reg_strength=0.0,
n_threads=1,
gradient_out=None,
hessian_out=None,
raw_prediction=None,
):
"""Computes gradient and hessian w.r.t. coef.
Parameters
----------
coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
Coefficients of a linear model.
If shape (n_classes * n_dof,), the classes of one feature are contiguous,
i.e. one reconstructs the 2d-array via
coef.reshape((n_classes, -1), order="F").
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : contiguous array of shape (n_samples,)
Observed, true target values.
sample_weight : None or contiguous array of shape (n_samples,), default=None
Sample weights.
l2_reg_strength : float, default=0.0
L2 regularization strength
n_threads : int, default=1
Number of OpenMP threads to use.
gradient_out : None or ndarray of shape coef.shape
A location into which the gradient is stored. If None, a new array
might be created.
hessian_out : None or ndarray
A location into which the hessian is stored. If None, a new array
might be created.
raw_prediction : C-contiguous array of shape (n_samples,) or array of \
shape (n_samples, n_classes)
Raw prediction values (in link space). If provided, these are used. If
None, then raw_prediction = X @ coef + intercept is calculated.
Returns
-------
gradient : ndarray of shape coef.shape
The gradient of the loss.
hessian : ndarray
Hessian matrix.
hessian_warning : bool
True if more than 25% of the pointwise hessian elements are non-positive.
"""
| /usr/src/app/target_test_cases/failed_tests_LinearModelLoss.gradient_hessian.txt | def gradient_hessian(
self,
coef,
X,
y,
sample_weight=None,
l2_reg_strength=0.0,
n_threads=1,
gradient_out=None,
hessian_out=None,
raw_prediction=None,
):
"""Computes gradient and hessian w.r.t. coef.
Parameters
----------
coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
Coefficients of a linear model.
If shape (n_classes * n_dof,), the classes of one feature are contiguous,
i.e. one reconstructs the 2d-array via
coef.reshape((n_classes, -1), order="F").
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : contiguous array of shape (n_samples,)
Observed, true target values.
sample_weight : None or contiguous array of shape (n_samples,), default=None
Sample weights.
l2_reg_strength : float, default=0.0
L2 regularization strength
n_threads : int, default=1
Number of OpenMP threads to use.
gradient_out : None or ndarray of shape coef.shape
A location into which the gradient is stored. If None, a new array
might be created.
hessian_out : None or ndarray
A location into which the hessian is stored. If None, a new array
might be created.
raw_prediction : C-contiguous array of shape (n_samples,) or array of \
shape (n_samples, n_classes)
Raw prediction values (in link space). If provided, these are used. If
None, then raw_prediction = X @ coef + intercept is calculated.
Returns
-------
gradient : ndarray of shape coef.shape
The gradient of the loss.
hessian : ndarray
Hessian matrix.
hessian_warning : bool
True if more than 25% of the pointwise hessian elements are non-positive.
"""
n_samples, n_features = X.shape
n_dof = n_features + int(self.fit_intercept)
if raw_prediction is None:
weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X)
else:
weights, intercept = self.weight_intercept(coef)
grad_pointwise, hess_pointwise = self.base_loss.gradient_hessian(
y_true=y,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
n_threads=n_threads,
)
sw_sum = n_samples if sample_weight is None else np.sum(sample_weight)
grad_pointwise /= sw_sum
hess_pointwise /= sw_sum
# For non-canonical link functions and far away from the optimum, the pointwise
# hessian can be negative. We flag a warning if more than 25% of the
# entries are non-positive, and clip to absolute values below.
hessian_warning = np.mean(hess_pointwise <= 0) > 0.25
hess_pointwise = np.abs(hess_pointwise)
if not self.base_loss.is_multiclass:
# gradient
if gradient_out is None:
grad = np.empty_like(coef, dtype=weights.dtype)
else:
grad = gradient_out
grad[:n_features] = X.T @ grad_pointwise + l2_reg_strength * weights
if self.fit_intercept:
grad[-1] = grad_pointwise.sum()
# hessian
if hessian_out is None:
hess = np.empty(shape=(n_dof, n_dof), dtype=weights.dtype)
else:
hess = hessian_out
if hessian_warning:
# Exit early without computing the hessian.
return grad, hess, hessian_warning
# TODO: This "sandwich product", X' diag(W) X, is the main computational
# bottleneck for solvers. A dedicated Cython routine might improve it
# exploiting the symmetry (as opposed to, e.g., BLAS gemm).
if sparse.issparse(X):
hess[:n_features, :n_features] = (
X.T
@ sparse.dia_matrix(
(hess_pointwise, 0), shape=(n_samples, n_samples)
)
@ X
).toarray()
else:
# np.einsum may use less memory but the following, using BLAS matrix
# multiplication (gemm), is by far faster.
WX = hess_pointwise[:, None] * X
hess[:n_features, :n_features] = np.dot(X.T, WX)
if l2_reg_strength > 0:
# The L2 penalty enters the Hessian on the diagonal only. To add those
# terms, we use a flattened view on the array.
hess.reshape(-1)[
: (n_features * n_dof) : (n_dof + 1)
] += l2_reg_strength
if self.fit_intercept:
# With intercept included as added column to X, the hessian becomes
# hess = (X, 1)' @ diag(h) @ (X, 1)
# = (X' @ diag(h) @ X, X' @ h)
# ( h @ X, sum(h))
# The left upper part has already been filled, it remains to compute
# the last row and the last column.
Xh = X.T @ hess_pointwise
hess[:-1, -1] = Xh
hess[-1, :-1] = Xh
hess[-1, -1] = hess_pointwise.sum()
else:
# Here we may safely assume HalfMultinomialLoss aka categorical
# cross-entropy.
raise NotImplementedError
return grad, hess, hessian_warning
| LinearModelLoss.gradient_hessian | File-Level |
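A hedged sketch of calling the method above for binary logistic loss. This is a private API (sklearn.linear_model._linear_loss), so the import paths are assumptions that may shift between versions.

import numpy as np
from sklearn._loss.loss import HalfBinomialLoss
from sklearn.linear_model._linear_loss import LinearModelLoss

rng = np.random.RandomState(0)
X = rng.randn(50, 3)
y = (rng.rand(50) > 0.5).astype(np.float64)  # labels must be float in {0, 1}
loss = LinearModelLoss(base_loss=HalfBinomialLoss(), fit_intercept=True)
coef = np.zeros(X.shape[1] + 1)  # n_features coefficients + intercept
grad, hess, warning = loss.gradient_hessian(coef, X, y, l2_reg_strength=1.0)
print(grad.shape, hess.shape, warning)  # (4,), (4, 4), False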
scikit-learn | 100 | sklearn/linear_model/_linear_loss.py | def gradient_hessian_product(
self, coef, X, y, sample_weight=None, l2_reg_strength=0.0, n_threads=1
):
"""Computes gradient and hessp (hessian product function) w.r.t. coef.
Parameters
----------
coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
Coefficients of a linear model.
If shape (n_classes * n_dof,), the classes of one feature are contiguous,
i.e. one reconstructs the 2d-array via
coef.reshape((n_classes, -1), order="F").
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : contiguous array of shape (n_samples,)
Observed, true target values.
sample_weight : None or contiguous array of shape (n_samples,), default=None
Sample weights.
l2_reg_strength : float, default=0.0
L2 regularization strength
n_threads : int, default=1
Number of OpenMP threads to use.
Returns
-------
gradient : ndarray of shape coef.shape
The gradient of the loss.
hessp : callable
Function that takes a vector input of the same shape as the gradient
and returns the matrix-vector product with the hessian.
"""
| /usr/src/app/target_test_cases/failed_tests_LinearModelLoss.gradient_hessian_product.txt | def gradient_hessian_product(
self, coef, X, y, sample_weight=None, l2_reg_strength=0.0, n_threads=1
):
"""Computes gradient and hessp (hessian product function) w.r.t. coef.
Parameters
----------
coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
Coefficients of a linear model.
If shape (n_classes * n_dof,), the classes of one feature are contiguous,
i.e. one reconstructs the 2d-array via
coef.reshape((n_classes, -1), order="F").
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : contiguous array of shape (n_samples,)
Observed, true target values.
sample_weight : None or contiguous array of shape (n_samples,), default=None
Sample weights.
l2_reg_strength : float, default=0.0
L2 regularization strength
n_threads : int, default=1
Number of OpenMP threads to use.
Returns
-------
gradient : ndarray of shape coef.shape
The gradient of the loss.
hessp : callable
Function that takes a vector input of the same shape as the gradient
and returns the matrix-vector product with the hessian.
"""
(n_samples, n_features), n_classes = X.shape, self.base_loss.n_classes
n_dof = n_features + int(self.fit_intercept)
weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X)
sw_sum = n_samples if sample_weight is None else np.sum(sample_weight)
if not self.base_loss.is_multiclass:
grad_pointwise, hess_pointwise = self.base_loss.gradient_hessian(
y_true=y,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
n_threads=n_threads,
)
grad_pointwise /= sw_sum
hess_pointwise /= sw_sum
grad = np.empty_like(coef, dtype=weights.dtype)
grad[:n_features] = X.T @ grad_pointwise + l2_reg_strength * weights
if self.fit_intercept:
grad[-1] = grad_pointwise.sum()
# Precompute as much as possible: hX, hX_sum and hessian_sum
hessian_sum = hess_pointwise.sum()
if sparse.issparse(X):
hX = (
sparse.dia_matrix((hess_pointwise, 0), shape=(n_samples, n_samples))
@ X
)
else:
hX = hess_pointwise[:, np.newaxis] * X
if self.fit_intercept:
# Calculate the double derivative with respect to intercept.
# Note: In case hX is sparse, hX.sum is a matrix object.
hX_sum = np.squeeze(np.asarray(hX.sum(axis=0)))
# prevent squeezing to zero-dim array if n_features == 1
hX_sum = np.atleast_1d(hX_sum)
# With intercept included and l2_reg_strength = 0, hessp returns
# res = (X, 1)' @ diag(h) @ (X, 1) @ s
# = (X, 1)' @ (hX @ s[:n_features], sum(h) * s[-1])
# res[:n_features] = X' @ hX @ s[:n_features] + sum(h) * s[-1]
# res[-1] = 1' @ hX @ s[:n_features] + sum(h) * s[-1]
def hessp(s):
ret = np.empty_like(s)
if sparse.issparse(X):
ret[:n_features] = X.T @ (hX @ s[:n_features])
else:
ret[:n_features] = np.linalg.multi_dot([X.T, hX, s[:n_features]])
ret[:n_features] += l2_reg_strength * s[:n_features]
if self.fit_intercept:
ret[:n_features] += s[-1] * hX_sum
ret[-1] = hX_sum @ s[:n_features] + hessian_sum * s[-1]
return ret
else:
# Here we may safely assume HalfMultinomialLoss aka categorical
# cross-entropy.
# HalfMultinomialLoss computes only the diagonal part of the hessian, i.e.
# diagonal in the classes. Here, we want the matrix-vector product of the
# full hessian. Therefore, we call gradient_proba.
grad_pointwise, proba = self.base_loss.gradient_proba(
y_true=y,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
n_threads=n_threads,
)
grad_pointwise /= sw_sum
grad = np.empty((n_classes, n_dof), dtype=weights.dtype, order="F")
grad[:, :n_features] = grad_pointwise.T @ X + l2_reg_strength * weights
if self.fit_intercept:
grad[:, -1] = grad_pointwise.sum(axis=0)
# Full hessian-vector product, i.e. not only the diagonal part of the
# hessian. Derivation with some index battle for input vector s:
# - sample index i
# - feature indices j, m
# - class indices k, l
# - 1_{k=l} is one if k=l else 0
# - p_i_k is the (predicted) probability that sample i belongs to class k
# for all i: sum_k p_i_k = 1
# - s_l_m is input vector for class l and feature m
# - X' = X transposed
#
# Note: Hessian with dropping most indices is just:
# X' @ p_k (1(k=l) - p_l) @ X
#
# result_{k j} = sum_{i, l, m} Hessian_{i, k j, m l} * s_l_m
# = sum_{i, l, m} (X')_{ji} * p_i_k * (1_{k=l} - p_i_l)
# * X_{im} s_l_m
# = sum_{i, m} (X')_{ji} * p_i_k
# * (X_{im} * s_k_m - sum_l p_i_l * X_{im} * s_l_m)
#
# See also https://github.com/scikit-learn/scikit-learn/pull/3646#discussion_r17461411 # noqa
def hessp(s):
s = s.reshape((n_classes, -1), order="F") # shape = (n_classes, n_dof)
if self.fit_intercept:
s_intercept = s[:, -1]
s = s[:, :-1] # shape = (n_classes, n_features)
else:
s_intercept = 0
tmp = X @ s.T + s_intercept # X_{im} * s_k_m
tmp += (-proba * tmp).sum(axis=1)[:, np.newaxis] # - sum_l ..
tmp *= proba # * p_i_k
if sample_weight is not None:
tmp *= sample_weight[:, np.newaxis]
# hess_prod = empty_like(grad), but we ravel grad below and this
# function is run after that.
hess_prod = np.empty((n_classes, n_dof), dtype=weights.dtype, order="F")
hess_prod[:, :n_features] = (tmp.T @ X) / sw_sum + l2_reg_strength * s
if self.fit_intercept:
hess_prod[:, -1] = tmp.sum(axis=0) / sw_sum
if coef.ndim == 1:
return hess_prod.ravel(order="F")
else:
return hess_prod
if coef.ndim == 1:
return grad.ravel(order="F"), hessp
return grad, hessp
| LinearModelLoss.gradient_hessian_product | File-Level |
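The returned hessp closure is what scipy-style Newton/CG solvers consume in place of a dense Hessian. As a minimal numpy sketch of the same factorization — assuming binary logistic loss, no intercept and no penalty, and deliberately not using scikit-learn's private LinearModelLoss API — the product X' @ diag(h) @ X @ s can be built from the pointwise hessians h_i = p_i * (1 - p_i) and checked against the explicit Hessian:

import numpy as np

rng = np.random.default_rng(0)
n, p = 50, 4
X = rng.normal(size=(n, p))
y = rng.integers(0, 2, size=n).astype(float)
w = rng.normal(size=p)

proba = 1.0 / (1.0 + np.exp(-(X @ w)))  # predicted probabilities
grad = X.T @ (proba - y) / n            # gradient of the mean log loss
h = proba * (1.0 - proba) / n           # pointwise hessians
hX = h[:, np.newaxis] * X               # precompute diag(h) @ X, as in the dense branch above

def hessp(s):
    # hessian-vector product X' @ (hX @ s), never forming the dense Hessian
    return X.T @ (hX @ s)

H = X.T @ hX                            # dense Hessian, for verification only
s = rng.normal(size=p)
assert np.allclose(hessp(s), H @ s)
print(grad[:2], hessp(s)[:2])

Each hessp call costs O(n * p) rather than the O(n * p^2) needed to materialize H, which is why the method returns a closure instead of a matrix.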
scikit-learn | 103 | sklearn/linear_model/_base.py | def fit(self, X, y, sample_weight=None):
"""
Fit linear model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values. Will be cast to X's dtype if necessary.
sample_weight : array-like of shape (n_samples,), default=None
Individual weights for each sample.
.. versionadded:: 0.17
parameter *sample_weight* support to LinearRegression.
Returns
-------
self : object
Fitted Estimator.
"""
| /usr/src/app/target_test_cases/failed_tests_LinearRegression.fit.txt | def fit(self, X, y, sample_weight=None):
"""
Fit linear model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values. Will be cast to X's dtype if necessary.
sample_weight : array-like of shape (n_samples,), default=None
Individual weights for each sample.
.. versionadded:: 0.17
parameter *sample_weight* support to LinearRegression.
Returns
-------
self : object
Fitted Estimator.
"""
n_jobs_ = self.n_jobs
accept_sparse = False if self.positive else ["csr", "csc", "coo"]
X, y = validate_data(
self,
X,
y,
accept_sparse=accept_sparse,
y_numeric=True,
multi_output=True,
force_writeable=True,
)
has_sw = sample_weight is not None
if has_sw:
sample_weight = _check_sample_weight(
sample_weight, X, dtype=X.dtype, ensure_non_negative=True
)
# Note that neither _rescale_data nor the rest of the fit method of
# LinearRegression can benefit from in-place operations when X is a
# sparse matrix. Therefore, let's not copy X when it is sparse.
copy_X_in_preprocess_data = self.copy_X and not sp.issparse(X)
X, y, X_offset, y_offset, X_scale = _preprocess_data(
X,
y,
fit_intercept=self.fit_intercept,
copy=copy_X_in_preprocess_data,
sample_weight=sample_weight,
)
if has_sw:
# Sample weight can be implemented via a simple rescaling. Note
# that we safely do inplace rescaling when _preprocess_data has
# already made a copy if requested.
X, y, sample_weight_sqrt = _rescale_data(
X, y, sample_weight, inplace=copy_X_in_preprocess_data
)
if self.positive:
if y.ndim < 2:
self.coef_ = optimize.nnls(X, y)[0]
else:
# scipy.optimize.nnls cannot handle y with shape (M, K)
outs = Parallel(n_jobs=n_jobs_)(
delayed(optimize.nnls)(X, y[:, j]) for j in range(y.shape[1])
)
self.coef_ = np.vstack([out[0] for out in outs])
elif sp.issparse(X):
X_offset_scale = X_offset / X_scale
if has_sw:
def matvec(b):
return X.dot(b) - sample_weight_sqrt * b.dot(X_offset_scale)
def rmatvec(b):
return X.T.dot(b) - X_offset_scale * b.dot(sample_weight_sqrt)
else:
def matvec(b):
return X.dot(b) - b.dot(X_offset_scale)
def rmatvec(b):
return X.T.dot(b) - X_offset_scale * b.sum()
X_centered = sparse.linalg.LinearOperator(
shape=X.shape, matvec=matvec, rmatvec=rmatvec
)
if y.ndim < 2:
self.coef_ = lsqr(X_centered, y)[0]
else:
# sparse_lstsq cannot handle y with shape (M, K)
outs = Parallel(n_jobs=n_jobs_)(
delayed(lsqr)(X_centered, y[:, j].ravel())
for j in range(y.shape[1])
)
self.coef_ = np.vstack([out[0] for out in outs])
else:
self.coef_, _, self.rank_, self.singular_ = linalg.lstsq(X, y)
self.coef_ = self.coef_.T
if y.ndim == 1:
self.coef_ = np.ravel(self.coef_)
self._set_intercept(X_offset, y_offset, X_scale)
return self
| LinearRegression.fit | Repo-Level |
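A minimal usage sketch of the solver paths above; the data and weights are illustrative. Weighted fits pass through _rescale_data before the least-squares solve, while positive=True routes each target through scipy.optimize.nnls:

import numpy as np
from sklearn.linear_model import LinearRegression

X = np.array([[1.0], [2.0], [3.0], [4.0]])
y = np.array([2.0, 4.1, 5.9, 8.0])
w = np.array([1.0, 1.0, 2.0, 2.0])  # upweight the last two samples

reg = LinearRegression().fit(X, y, sample_weight=w)
print(reg.coef_, reg.intercept_)

# positive=True forbids sparse input and constrains all coefficients >= 0
reg_pos = LinearRegression(positive=True).fit(X, y)
print(reg_pos.coef_)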
scikit-learn | 114 | sklearn/cluster/_kmeans.py | def fit(self, X, y=None, sample_weight=None):
"""Compute the centroids on X by chunking it into mini-batches.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training instances to cluster. It must be noted that the data
will be converted to C ordering, which will cause a memory copy
if the given data is not C-contiguous.
If a sparse matrix is passed, a copy will be made if it's not in
CSR format.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight. `sample_weight` is not used during
initialization if `init` is a callable or a user provided array.
.. versionadded:: 0.20
Returns
-------
self : object
Fitted estimator.
"""
| /usr/src/app/target_test_cases/failed_tests_MiniBatchKMeans.fit.txt | def fit(self, X, y=None, sample_weight=None):
"""Compute the centroids on X by chunking it into mini-batches.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training instances to cluster. It must be noted that the data
will be converted to C ordering, which will cause a memory copy
if the given data is not C-contiguous.
If a sparse matrix is passed, a copy will be made if it's not in
CSR format.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight. `sample_weight` is not used during
initialization if `init` is a callable or a user provided array.
.. versionadded:: 0.20
Returns
-------
self : object
Fitted estimator.
"""
X = validate_data(
self,
X,
accept_sparse="csr",
dtype=[np.float64, np.float32],
order="C",
accept_large_sparse=False,
)
self._check_params_vs_input(X)
random_state = check_random_state(self.random_state)
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
self._n_threads = _openmp_effective_n_threads()
n_samples, n_features = X.shape
# Validate init array
init = self.init
if _is_arraylike_not_scalar(init):
init = check_array(init, dtype=X.dtype, copy=True, order="C")
self._validate_center_shape(X, init)
self._check_mkl_vcomp(X, self._batch_size)
# precompute squared norms of data points
x_squared_norms = row_norms(X, squared=True)
# Validation set for the init
validation_indices = random_state.randint(0, n_samples, self._init_size)
X_valid = X[validation_indices]
sample_weight_valid = sample_weight[validation_indices]
# perform several inits with random subsets
best_inertia = None
for init_idx in range(self._n_init):
if self.verbose:
print(f"Init {init_idx + 1}/{self._n_init} with method {init}")
# Initialize the centers using only a fraction of the data as we
# expect n_samples to be very large when using MiniBatchKMeans.
cluster_centers = self._init_centroids(
X,
x_squared_norms=x_squared_norms,
init=init,
random_state=random_state,
init_size=self._init_size,
sample_weight=sample_weight,
)
# Compute inertia on a validation set.
_, inertia = _labels_inertia_threadpool_limit(
X_valid,
sample_weight_valid,
cluster_centers,
n_threads=self._n_threads,
)
if self.verbose:
print(f"Inertia for init {init_idx + 1}/{self._n_init}: {inertia}")
if best_inertia is None or inertia < best_inertia:
init_centers = cluster_centers
best_inertia = inertia
centers = init_centers
centers_new = np.empty_like(centers)
# Initialize counts
self._counts = np.zeros(self.n_clusters, dtype=X.dtype)
# Attributes to monitor the convergence
self._ewa_inertia = None
self._ewa_inertia_min = None
self._no_improvement = 0
# Initialize number of samples seen since last reassignment
self._n_since_last_reassign = 0
n_steps = (self.max_iter * n_samples) // self._batch_size
with _get_threadpool_controller().limit(limits=1, user_api="blas"):
# Perform the iterative optimization until convergence
for i in range(n_steps):
# Sample a minibatch from the full dataset
minibatch_indices = random_state.randint(0, n_samples, self._batch_size)
# Perform the actual update step on the minibatch data
batch_inertia = _mini_batch_step(
X=X[minibatch_indices],
sample_weight=sample_weight[minibatch_indices],
centers=centers,
centers_new=centers_new,
weight_sums=self._counts,
random_state=random_state,
random_reassign=self._random_reassign(),
reassignment_ratio=self.reassignment_ratio,
verbose=self.verbose,
n_threads=self._n_threads,
)
if self._tol > 0.0:
centers_squared_diff = np.sum((centers_new - centers) ** 2)
else:
centers_squared_diff = 0
centers, centers_new = centers_new, centers
# Monitor convergence and do early stopping if necessary
if self._mini_batch_convergence(
i, n_steps, n_samples, centers_squared_diff, batch_inertia
):
break
self.cluster_centers_ = centers
self._n_features_out = self.cluster_centers_.shape[0]
self.n_steps_ = i + 1
self.n_iter_ = int(np.ceil(((i + 1) * self._batch_size) / n_samples))
if self.compute_labels:
self.labels_, self.inertia_ = _labels_inertia_threadpool_limit(
X,
sample_weight,
self.cluster_centers_,
n_threads=self._n_threads,
)
else:
self.inertia_ = self._ewa_inertia * n_samples
return self
| MiniBatchKMeans.fit | Repo-Level |
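A minimal usage sketch on two illustrative Gaussian blobs. n_steps_ counts the mini-batch updates actually performed (the convergence check may cut the loop short), while n_iter_ reports the equivalent number of full passes, matching the bookkeeping at the end of fit:

import numpy as np
from sklearn.cluster import MiniBatchKMeans

rng = np.random.default_rng(0)
X = np.vstack([rng.normal(0.0, 0.5, (200, 2)), rng.normal(5.0, 0.5, (200, 2))])

km = MiniBatchKMeans(n_clusters=2, batch_size=64, n_init=3, random_state=0)
km.fit(X)
print(km.cluster_centers_)
print(km.n_steps_, km.n_iter_)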
scikit-learn | 115 | sklearn/cluster/_kmeans.py | def partial_fit(self, X, y=None, sample_weight=None):
"""Update k means estimate on a single mini-batch X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training instances to cluster. It must be noted that the data
will be converted to C ordering, which will cause a memory copy
if the given data is not C-contiguous.
If a sparse matrix is passed, a copy will be made if it's not in
CSR format.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight. `sample_weight` is not used during
initialization if `init` is a callable or a user provided array.
Returns
-------
self : object
Return updated estimator.
"""
| /usr/src/app/target_test_cases/failed_tests_MiniBatchKMeans.partial_fit.txt | def partial_fit(self, X, y=None, sample_weight=None):
"""Update k means estimate on a single mini-batch X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training instances to cluster. It must be noted that the data
will be converted to C ordering, which will cause a memory copy
if the given data is not C-contiguous.
If a sparse matrix is passed, a copy will be made if it's not in
CSR format.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight. `sample_weight` is not used during
initialization if `init` is a callable or a user provided array.
Returns
-------
self : object
Return updated estimator.
"""
has_centers = hasattr(self, "cluster_centers_")
X = validate_data(
self,
X,
accept_sparse="csr",
dtype=[np.float64, np.float32],
order="C",
accept_large_sparse=False,
reset=not has_centers,
)
self._random_state = getattr(
self, "_random_state", check_random_state(self.random_state)
)
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
self.n_steps_ = getattr(self, "n_steps_", 0)
# precompute squared norms of data points
x_squared_norms = row_norms(X, squared=True)
if not has_centers:
# this instance has not been fitted yet (fit or partial_fit)
self._check_params_vs_input(X)
self._n_threads = _openmp_effective_n_threads()
# Validate init array
init = self.init
if _is_arraylike_not_scalar(init):
init = check_array(init, dtype=X.dtype, copy=True, order="C")
self._validate_center_shape(X, init)
self._check_mkl_vcomp(X, X.shape[0])
# initialize the cluster centers
self.cluster_centers_ = self._init_centroids(
X,
x_squared_norms=x_squared_norms,
init=init,
random_state=self._random_state,
init_size=self._init_size,
sample_weight=sample_weight,
)
# Initialize counts
self._counts = np.zeros(self.n_clusters, dtype=X.dtype)
# Initialize number of samples seen since last reassignment
self._n_since_last_reassign = 0
with _get_threadpool_controller().limit(limits=1, user_api="blas"):
_mini_batch_step(
X,
sample_weight=sample_weight,
centers=self.cluster_centers_,
centers_new=self.cluster_centers_,
weight_sums=self._counts,
random_state=self._random_state,
random_reassign=self._random_reassign(),
reassignment_ratio=self.reassignment_ratio,
verbose=self.verbose,
n_threads=self._n_threads,
)
if self.compute_labels:
self.labels_, self.inertia_ = _labels_inertia_threadpool_limit(
X,
sample_weight,
self.cluster_centers_,
n_threads=self._n_threads,
)
self.n_steps_ += 1
self._n_features_out = self.cluster_centers_.shape[0]
return self
| MiniBatchKMeans.partial_fit | Repo-Level |
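Unlike fit, partial_fit performs exactly one mini-batch update per call, with the first call doing the center initialization; a sketch over an illustrative stream of chunks:

import numpy as np
from sklearn.cluster import MiniBatchKMeans

rng = np.random.default_rng(0)
km = MiniBatchKMeans(n_clusters=3, random_state=0)

for _ in range(20):
    # each chunk is an illustrative sample around one of three centers
    chunk = rng.normal(size=(100, 2)) + 5.0 * rng.integers(0, 3)
    km.partial_fit(chunk)

print(km.n_steps_)           # one step per partial_fit call: 20
print(km.cluster_centers_)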
scikit-learn | 120 | sklearn/linear_model/_coordinate_descent.py | def fit(self, X, y):
"""Fit MultiTaskElasticNet model with coordinate descent.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Data.
y : ndarray of shape (n_samples, n_targets)
Target. Will be cast to X's dtype if necessary.
Returns
-------
self : object
Fitted estimator.
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time, hence it will automatically convert the X input
to a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
| /usr/src/app/target_test_cases/failed_tests_MultiTaskElasticNet.fit.txt | def fit(self, X, y):
"""Fit MultiTaskElasticNet model with coordinate descent.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Data.
y : ndarray of shape (n_samples, n_targets)
Target. Will be cast to X's dtype if necessary.
Returns
-------
self : object
Fitted estimator.
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time, hence it will automatically convert the X input
to a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
# Need to validate separately here.
# We can't pass multi_output=True because that would allow y to be csr.
check_X_params = dict(
dtype=[np.float64, np.float32],
order="F",
force_writeable=True,
copy=self.copy_X and self.fit_intercept,
)
check_y_params = dict(ensure_2d=False, order="F")
X, y = validate_data(
self, X, y, validate_separately=(check_X_params, check_y_params)
)
check_consistent_length(X, y)
y = y.astype(X.dtype)
if hasattr(self, "l1_ratio"):
model_str = "ElasticNet"
else:
model_str = "Lasso"
if y.ndim == 1:
raise ValueError("For mono-task outputs, use %s" % model_str)
n_samples, n_features = X.shape
n_targets = y.shape[1]
X, y, X_offset, y_offset, X_scale = _preprocess_data(
X, y, fit_intercept=self.fit_intercept, copy=False
)
if not self.warm_start or not hasattr(self, "coef_"):
self.coef_ = np.zeros(
(n_targets, n_features), dtype=X.dtype.type, order="F"
)
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
random = self.selection == "random"
(
self.coef_,
self.dual_gap_,
self.eps_,
self.n_iter_,
) = cd_fast.enet_coordinate_descent_multi_task(
self.coef_,
l1_reg,
l2_reg,
X,
y,
self.max_iter,
self.tol,
check_random_state(self.random_state),
random,
)
# account for different objective scaling here and in cd_fast
self.dual_gap_ /= n_samples
self._set_intercept(X_offset, y_offset, X_scale)
# return self for chaining fit and predict calls
return self
| MultiTaskElasticNet.fit | Repo-Level |
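A minimal usage sketch with illustrative values; pre-allocating X in Fortran order, as the Notes advise, lets the validation step skip the memory copy:

import numpy as np
from sklearn.linear_model import MultiTaskElasticNet

X = np.asfortranarray([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
Y = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])  # one column per task

clf = MultiTaskElasticNet(alpha=0.1).fit(X, Y)
print(clf.coef_)       # shape (n_targets, n_features), joint sparsity across tasks
print(clf.intercept_)
print(clf.dual_gap_)   # rescaled by n_samples, as done at the end of fit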
scikit-learn | 123 | sklearn/neighbors/_nca.py | def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The training samples.
y : array-like of shape (n_samples,)
The corresponding training labels.
Returns
-------
self : object
Fitted estimator.
"""
| /usr/src/app/target_test_cases/failed_tests_NeighborhoodComponentsAnalysis.fit.txt | def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The training samples.
y : array-like of shape (n_samples,)
The corresponding training labels.
Returns
-------
self : object
Fitted estimator.
"""
# Validate the inputs X and y, and converts y to numerical classes.
X, y = validate_data(self, X, y, ensure_min_samples=2)
check_classification_targets(y)
y = LabelEncoder().fit_transform(y)
# Check the preferred dimensionality of the projected space
if self.n_components is not None and self.n_components > X.shape[1]:
raise ValueError(
"The preferred dimensionality of the "
f"projected space `n_components` ({self.n_components}) cannot "
"be greater than the given data "
f"dimensionality ({X.shape[1]})!"
)
# If warm_start is enabled, check that the inputs are consistent
if (
self.warm_start
and hasattr(self, "components_")
and self.components_.shape[1] != X.shape[1]
):
raise ValueError(
f"The new inputs dimensionality ({X.shape[1]}) does not "
"match the input dimensionality of the "
f"previously learned transformation ({self.components_.shape[1]})."
)
# Check how the linear transformation should be initialized
init = self.init
if isinstance(init, np.ndarray):
init = check_array(init)
# Assert that init.shape[1] = X.shape[1]
if init.shape[1] != X.shape[1]:
raise ValueError(
f"The input dimensionality ({init.shape[1]}) of the given "
"linear transformation `init` must match the "
f"dimensionality of the given inputs `X` ({X.shape[1]})."
)
# Assert that init.shape[0] <= init.shape[1]
if init.shape[0] > init.shape[1]:
raise ValueError(
f"The output dimensionality ({init.shape[0]}) of the given "
"linear transformation `init` cannot be "
f"greater than its input dimensionality ({init.shape[1]})."
)
# Assert that self.n_components = init.shape[0]
if self.n_components is not None and self.n_components != init.shape[0]:
raise ValueError(
"The preferred dimensionality of the "
f"projected space `n_components` ({self.n_components}) does"
" not match the output dimensionality of "
"the given linear transformation "
f"`init` ({init.shape[0]})!"
)
# Initialize the random generator
self.random_state_ = check_random_state(self.random_state)
# Measure the total training time
t_train = time.time()
# Compute a mask that stays fixed during optimization:
same_class_mask = y[:, np.newaxis] == y[np.newaxis, :]
# (n_samples, n_samples)
# Initialize the transformation
transformation = np.ravel(self._initialize(X, y, init))
# Create a dictionary of parameters to be passed to the optimizer
disp = self.verbose - 2 if self.verbose > 1 else -1
optimizer_params = {
"method": "L-BFGS-B",
"fun": self._loss_grad_lbfgs,
"args": (X, same_class_mask, -1.0),
"jac": True,
"x0": transformation,
"tol": self.tol,
"options": dict(maxiter=self.max_iter, disp=disp),
"callback": self._callback,
}
# Call the optimizer
self.n_iter_ = 0
opt_result = minimize(**optimizer_params)
# Reshape the solution found by the optimizer
self.components_ = opt_result.x.reshape(-1, X.shape[1])
# Stop timer
t_train = time.time() - t_train
if self.verbose:
cls_name = self.__class__.__name__
# Warn the user if the algorithm did not converge
if not opt_result.success:
warn(
"[{}] NCA did not converge: {}".format(
cls_name, opt_result.message
),
ConvergenceWarning,
)
print("[{}] Training took {:8.2f}s.".format(cls_name, t_train))
return self
| NeighborhoodComponentsAnalysis.fit | Repo-Level |
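A usage sketch on the iris dataset (an illustrative choice): NCA is typically paired with a nearest-neighbors classifier, since the learned transformation optimizes a stochastic nearest-neighbors objective:

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier, NeighborhoodComponentsAnalysis
from sklearn.pipeline import make_pipeline

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)

nca = NeighborhoodComponentsAnalysis(n_components=2, random_state=42)
pipe = make_pipeline(nca, KNeighborsClassifier(n_neighbors=3))
pipe.fit(X_train, y_train)

print(pipe.score(X_test, y_test))
print(nca.components_.shape)  # (n_components, n_features) = (2, 4)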
scikit-learn | 131 | sklearn/multiclass.py | def partial_fit(self, X, y, classes=None, **partial_fit_params):
"""Partially fit underlying estimators.
Should be used when there is not enough memory to train on all the
data at once. Chunks of data can be passed over several iterations,
where the first call must be given the array of all target classes.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Data.
y : array-like of shape (n_samples,)
Multi-class targets.
classes : array, shape (n_classes, )
Classes across all calls to partial_fit.
Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is only required in the first call of partial_fit
and can be omitted in the subsequent calls.
**partial_fit_params : dict
Parameters passed to the ``estimator.partial_fit`` method of each
sub-estimator.
.. versionadded:: 1.4
Only available if `enable_metadata_routing=True`. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
self : object
The partially fitted underlying estimator.
"""
| /usr/src/app/target_test_cases/failed_tests_OneVsOneClassifier.partial_fit.txt | def partial_fit(self, X, y, classes=None, **partial_fit_params):
"""Partially fit underlying estimators.
Should be used when there is not enough memory to train on all the
data at once. Chunks of data can be passed over several iterations,
where the first call must be given the array of all target classes.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Data.
y : array-like of shape (n_samples,)
Multi-class targets.
classes : array, shape (n_classes, )
Classes across all calls to partial_fit.
Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is only required in the first call of partial_fit
and can be omitted in the subsequent calls.
**partial_fit_params : dict
Parameters passed to the ``estimator.partial_fit`` method of each
sub-estimator.
.. versionadded:: 1.4
Only available if `enable_metadata_routing=True`. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
self : object
The partially fitted underlying estimator.
"""
_raise_for_params(partial_fit_params, self, "partial_fit")
routed_params = process_routing(
self,
"partial_fit",
**partial_fit_params,
)
first_call = _check_partial_fit_first_call(self, classes)
if first_call:
self.estimators_ = [
clone(self.estimator)
for _ in range(self.n_classes_ * (self.n_classes_ - 1) // 2)
]
if len(np.setdiff1d(y, self.classes_)):
raise ValueError(
"Mini-batch contains {0} while it must be subset of {1}".format(
np.unique(y), self.classes_
)
)
X, y = validate_data(
self,
X,
y,
accept_sparse=["csr", "csc"],
ensure_all_finite=False,
reset=first_call,
)
check_classification_targets(y)
combinations = itertools.combinations(range(self.n_classes_), 2)
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_partial_fit_ovo_binary)(
estimator,
X,
y,
self.classes_[i],
self.classes_[j],
partial_fit_params=routed_params.estimator.partial_fit,
)
for estimator, (i, j) in zip(self.estimators_, (combinations))
)
self.pairwise_indices_ = None
if hasattr(self.estimators_[0], "n_features_in_"):
self.n_features_in_ = self.estimators_[0].n_features_in_
return self
| OneVsOneClassifier.partial_fit | Repo-Level |
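A sketch with an assumed SGDClassifier base estimator (any estimator exposing partial_fit works): the first call must announce all classes, which _check_partial_fit_first_call enforces above:

import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import SGDClassifier
from sklearn.multiclass import OneVsOneClassifier

X, y = load_iris(return_X_y=True)
rng = np.random.default_rng(0)
perm = rng.permutation(len(y))       # shuffle so each chunk mixes classes
X, y = X[perm], y[perm]

ovo = OneVsOneClassifier(SGDClassifier(random_state=0))
ovo.partial_fit(X[:100], y[:100], classes=np.unique(y))  # first call: classes required
ovo.partial_fit(X[100:], y[100:])                        # later calls: omitted
print(len(ovo.estimators_))  # n_classes * (n_classes - 1) / 2 = 3 pairwise estimators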
scikit-learn | 133 | sklearn/multiclass.py | def partial_fit(self, X, y, classes=None, **partial_fit_params):
"""Partially fit underlying estimators.
Should be used when there is not enough memory to train on all the data at once.
Chunks of data can be passed in several iterations.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Data.
y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_classes)
Multi-class targets. An indicator matrix turns on multilabel
classification.
classes : array, shape (n_classes, )
Classes across all calls to partial_fit.
Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is only required in the first call of partial_fit
and can be omitted in the subsequent calls.
**partial_fit_params : dict
Parameters passed to the ``estimator.partial_fit`` method of each
sub-estimator.
.. versionadded:: 1.4
Only available if `enable_metadata_routing=True`. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
self : object
Instance of partially fitted estimator.
"""
| /usr/src/app/target_test_cases/failed_tests_OneVsRestClassifier.partial_fit.txt | def partial_fit(self, X, y, classes=None, **partial_fit_params):
"""Partially fit underlying estimators.
Should be used when there is not enough memory to train on all the data at once.
Chunks of data can be passed in several iterations.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Data.
y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_classes)
Multi-class targets. An indicator matrix turns on multilabel
classification.
classes : array, shape (n_classes, )
Classes across all calls to partial_fit.
Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is only required in the first call of partial_fit
and can be omitted in the subsequent calls.
**partial_fit_params : dict
Parameters passed to the ``estimator.partial_fit`` method of each
sub-estimator.
.. versionadded:: 1.4
Only available if `enable_metadata_routing=True`. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
self : object
Instance of partially fitted estimator.
"""
_raise_for_params(partial_fit_params, self, "partial_fit")
routed_params = process_routing(
self,
"partial_fit",
**partial_fit_params,
)
if _check_partial_fit_first_call(self, classes):
self.estimators_ = [clone(self.estimator) for _ in range(self.n_classes_)]
# A sparse LabelBinarizer, with sparse_output=True, has been
# shown to outperform or match a dense label binarizer in all
# cases and has also resulted in less or equal memory consumption
# in the fit_ovr function overall.
self.label_binarizer_ = LabelBinarizer(sparse_output=True)
self.label_binarizer_.fit(self.classes_)
if len(np.setdiff1d(y, self.classes_)):
raise ValueError(
(
"Mini-batch contains {0} while classes " + "must be subset of {1}"
).format(np.unique(y), self.classes_)
)
Y = self.label_binarizer_.transform(y)
Y = Y.tocsc()
columns = (col.toarray().ravel() for col in Y.T)
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_partial_fit_binary)(
estimator,
X,
column,
partial_fit_params=routed_params.estimator.partial_fit,
)
for estimator, column in zip(self.estimators_, columns)
)
if hasattr(self.estimators_[0], "n_features_in_"):
self.n_features_in_ = self.estimators_[0].n_features_in_
return self
| OneVsRestClassifier.partial_fit | Repo-Level |
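The same pattern for one-vs-rest, sketched with an assumed MultinomialNB base estimator (chosen because it supports partial_fit; the data is illustrative and kept non-negative for its sake):

import numpy as np
from sklearn.multiclass import OneVsRestClassifier
from sklearn.naive_bayes import MultinomialNB

rng = np.random.default_rng(0)
X = np.abs(rng.normal(size=(300, 10)))   # MultinomialNB needs non-negative features
y = rng.integers(0, 3, size=300)

ovr = OneVsRestClassifier(MultinomialNB())
ovr.partial_fit(X[:150], y[:150], classes=np.arange(3))  # first call announces classes
ovr.partial_fit(X[150:], y[150:])
print(len(ovr.estimators_))  # one binary estimator per class
print(ovr.predict(X[:5]))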
scikit-learn | 158 | sklearn/linear_model/_quantile.py | def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
self : object
Returns self.
"""
| /usr/src/app/target_test_cases/failed_tests_QuantileRegressor.fit.txt | def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
self : object
Returns self.
"""
X, y = validate_data(
self,
X,
y,
accept_sparse=["csc", "csr", "coo"],
y_numeric=True,
multi_output=False,
)
sample_weight = _check_sample_weight(sample_weight, X)
n_features = X.shape[1]
n_params = n_features
if self.fit_intercept:
n_params += 1
# Note that centering y and X with _preprocess_data does not work
# for quantile regression.
# The objective is defined as 1/n * sum(pinball loss) + alpha * L1.
# So we rescale the penalty term, which is equivalent.
alpha = np.sum(sample_weight) * self.alpha
if self.solver == "interior-point" and sp_version >= parse_version("1.11.0"):
raise ValueError(
f"Solver {self.solver} is not anymore available in SciPy >= 1.11.0."
)
if sparse.issparse(X) and self.solver not in ["highs", "highs-ds", "highs-ipm"]:
raise ValueError(
f"Solver {self.solver} does not support sparse X. "
"Use solver 'highs' for example."
)
# make default solver more stable
if self.solver_options is None and self.solver == "interior-point":
solver_options = {"lstsq": True}
else:
solver_options = self.solver_options
# After rescaling alpha, the minimization problem is
# min sum(pinball loss) + alpha * L1
# Use linear programming formulation of quantile regression
# min_x c x
# A_eq x = b_eq
# 0 <= x
# x = (s0, s, t0, t, u, v) = slack variables >= 0
# intercept = s0 - t0
# coef = s - t
# c = (0, alpha * 1_p, 0, alpha * 1_p, quantile * 1_n, (1-quantile) * 1_n)
# residual = y - X@coef - intercept = u - v
# A_eq = (1_n, X, -1_n, -X, diag(1_n), -diag(1_n))
# b_eq = y
# p = n_features
# n = n_samples
# 1_n = vector of length n with entries equal one
# see https://stats.stackexchange.com/questions/384909/
#
# Filtering out zero sample weights from the beginning makes life
# easier for the linprog solver.
indices = np.nonzero(sample_weight)[0]
n_indices = len(indices)  # use n_indices instead of n_samples below
if n_indices < len(sample_weight):
sample_weight = sample_weight[indices]
X = _safe_indexing(X, indices)
y = _safe_indexing(y, indices)
c = np.concatenate(
[
np.full(2 * n_params, fill_value=alpha),
sample_weight * self.quantile,
sample_weight * (1 - self.quantile),
]
)
if self.fit_intercept:
# do not penalize the intercept
c[0] = 0
c[n_params] = 0
if self.solver in ["highs", "highs-ds", "highs-ipm"]:
# Note that highs methods always use a sparse CSC memory layout internally,
# even for optimization problems parametrized using dense numpy arrays.
# Therefore, we work with CSC matrices as early as possible to limit
# unnecessary repeated memory copies.
eye = sparse.eye(n_indices, dtype=X.dtype, format="csc")
if self.fit_intercept:
ones = sparse.csc_matrix(np.ones(shape=(n_indices, 1), dtype=X.dtype))
A_eq = sparse.hstack([ones, X, -ones, -X, eye, -eye], format="csc")
else:
A_eq = sparse.hstack([X, -X, eye, -eye], format="csc")
else:
eye = np.eye(n_indices)
if self.fit_intercept:
ones = np.ones((n_indices, 1))
A_eq = np.concatenate([ones, X, -ones, -X, eye, -eye], axis=1)
else:
A_eq = np.concatenate([X, -X, eye, -eye], axis=1)
b_eq = y
result = linprog(
c=c,
A_eq=A_eq,
b_eq=b_eq,
method=self.solver,
options=solver_options,
)
solution = result.x
if not result.success:
failure = {
1: "Iteration limit reached.",
2: "Problem appears to be infeasible.",
3: "Problem appears to be unbounded.",
4: "Numerical difficulties encountered.",
}
warnings.warn(
"Linear programming for QuantileRegressor did not succeed.\n"
f"Status is {result.status}: "
+ failure.setdefault(result.status, "unknown reason")
+ "\n"
+ "Result message of linprog:\n"
+ result.message,
ConvergenceWarning,
)
# positive slack - negative slack
# solution is an array with (params_pos, params_neg, u, v)
params = solution[:n_params] - solution[n_params : 2 * n_params]
self.n_iter_ = result.nit
if self.fit_intercept:
self.coef_ = params[1:]
self.intercept_ = params[0]
else:
self.coef_ = params
self.intercept_ = 0.0
return self
| QuantileRegressor.fit | Repo-Level |
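One linear program is solved per requested quantile, so estimating several conditional quantiles means fitting several models; a sketch on illustrative heteroscedastic data using the "highs" solver, the one the code above recommends for sparse input:

import numpy as np
from sklearn.linear_model import QuantileRegressor

rng = np.random.default_rng(0)
X = rng.uniform(0.0, 10.0, size=(500, 1))
# noise grows with x, so the fitted quantile lines should fan out
y = 2.0 * X.ravel() + rng.normal(scale=1.0 + X.ravel())

for q in (0.1, 0.5, 0.9):
    m = QuantileRegressor(quantile=q, alpha=0.0, solver="highs").fit(X, y)
    print(q, m.coef_, m.intercept_, m.n_iter_)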
scikit-learn | 159 | sklearn/linear_model/_ransac.py | def fit(self, X, y, *, sample_weight=None, **fit_params):
"""Fit estimator using RANSAC algorithm.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Individual weights for each sample. An error is raised if
sample_weight is passed and the estimator's fit method does
not support it.
.. versionadded:: 0.18
**fit_params : dict
Parameters routed to the `fit` method of the sub-estimator via the
metadata routing API.
.. versionadded:: 1.5
Only available if
`sklearn.set_config(enable_metadata_routing=True)` is set. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
self : object
Fitted `RANSACRegressor` estimator.
Raises
------
ValueError
If no valid consensus set could be found. This occurs if
`is_data_valid` and `is_model_valid` return False for all
`max_trials` randomly chosen sub-samples.
"""
| /usr/src/app/target_test_cases/failed_tests_RANSACRegressor.fit.txt | def fit(self, X, y, *, sample_weight=None, **fit_params):
"""Fit estimator using RANSAC algorithm.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Individual weights for each sample
raises error if sample_weight is passed and estimator
fit method does not support it.
.. versionadded:: 0.18
**fit_params : dict
Parameters routed to the `fit` method of the sub-estimator via the
metadata routing API.
.. versionadded:: 1.5
Only available if
`sklearn.set_config(enable_metadata_routing=True)` is set. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
self : object
Fitted `RANSACRegressor` estimator.
Raises
------
ValueError
If no valid consensus set could be found. This occurs if
`is_data_valid` and `is_model_valid` return False for all
`max_trials` randomly chosen sub-samples.
"""
# Need to validate separately here. We can't pass multi_output=True
# because that would allow y to be csr. Delay expensive finiteness
# check to the estimator's own input validation.
_raise_for_params(fit_params, self, "fit")
check_X_params = dict(accept_sparse="csr", ensure_all_finite=False)
check_y_params = dict(ensure_2d=False)
X, y = validate_data(
self, X, y, validate_separately=(check_X_params, check_y_params)
)
check_consistent_length(X, y)
if self.estimator is not None:
estimator = clone(self.estimator)
else:
estimator = LinearRegression()
if self.min_samples is None:
if not isinstance(estimator, LinearRegression):
raise ValueError(
"`min_samples` needs to be explicitly set when estimator "
"is not a LinearRegression."
)
min_samples = X.shape[1] + 1
elif 0 < self.min_samples < 1:
min_samples = np.ceil(self.min_samples * X.shape[0])
elif self.min_samples >= 1:
min_samples = self.min_samples
if min_samples > X.shape[0]:
raise ValueError(
"`min_samples` may not be larger than number "
"of samples: n_samples = %d." % (X.shape[0])
)
if self.residual_threshold is None:
# MAD (median absolute deviation)
residual_threshold = np.median(np.abs(y - np.median(y)))
else:
residual_threshold = self.residual_threshold
if self.loss == "absolute_error":
if y.ndim == 1:
loss_function = lambda y_true, y_pred: np.abs(y_true - y_pred)
else:
loss_function = lambda y_true, y_pred: np.sum(
np.abs(y_true - y_pred), axis=1
)
elif self.loss == "squared_error":
if y.ndim == 1:
loss_function = lambda y_true, y_pred: (y_true - y_pred) ** 2
else:
loss_function = lambda y_true, y_pred: np.sum(
(y_true - y_pred) ** 2, axis=1
)
elif callable(self.loss):
loss_function = self.loss
random_state = check_random_state(self.random_state)
try: # Not all estimator accept a random_state
estimator.set_params(random_state=random_state)
except ValueError:
pass
estimator_fit_has_sample_weight = has_fit_parameter(estimator, "sample_weight")
estimator_name = type(estimator).__name__
if sample_weight is not None and not estimator_fit_has_sample_weight:
raise ValueError(
"%s does not support sample_weight. Sample"
" weights are only used for the calibration"
" itself." % estimator_name
)
if sample_weight is not None:
fit_params["sample_weight"] = sample_weight
if _routing_enabled():
routed_params = process_routing(self, "fit", **fit_params)
else:
routed_params = Bunch()
routed_params.estimator = Bunch(fit={}, predict={}, score={})
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
routed_params.estimator.fit = {"sample_weight": sample_weight}
n_inliers_best = 1
score_best = -np.inf
inlier_mask_best = None
X_inlier_best = None
y_inlier_best = None
inlier_best_idxs_subset = None
self.n_skips_no_inliers_ = 0
self.n_skips_invalid_data_ = 0
self.n_skips_invalid_model_ = 0
# number of data samples
n_samples = X.shape[0]
sample_idxs = np.arange(n_samples)
self.n_trials_ = 0
max_trials = self.max_trials
while self.n_trials_ < max_trials:
self.n_trials_ += 1
if (
self.n_skips_no_inliers_
+ self.n_skips_invalid_data_
+ self.n_skips_invalid_model_
) > self.max_skips:
break
# choose random sample set
subset_idxs = sample_without_replacement(
n_samples, min_samples, random_state=random_state
)
X_subset = X[subset_idxs]
y_subset = y[subset_idxs]
# check if random sample set is valid
if self.is_data_valid is not None and not self.is_data_valid(
X_subset, y_subset
):
self.n_skips_invalid_data_ += 1
continue
# cut `fit_params` down to `subset_idxs`
fit_params_subset = _check_method_params(
X, params=routed_params.estimator.fit, indices=subset_idxs
)
# fit model for current random sample set
estimator.fit(X_subset, y_subset, **fit_params_subset)
# check if estimated model is valid
if self.is_model_valid is not None and not self.is_model_valid(
estimator, X_subset, y_subset
):
self.n_skips_invalid_model_ += 1
continue
# residuals of all data for current random sample model
y_pred = estimator.predict(X)
residuals_subset = loss_function(y, y_pred)
# classify data into inliers and outliers
inlier_mask_subset = residuals_subset <= residual_threshold
n_inliers_subset = np.sum(inlier_mask_subset)
# fewer inliers -> skip current random sample
if n_inliers_subset < n_inliers_best:
self.n_skips_no_inliers_ += 1
continue
# extract inlier data set
inlier_idxs_subset = sample_idxs[inlier_mask_subset]
X_inlier_subset = X[inlier_idxs_subset]
y_inlier_subset = y[inlier_idxs_subset]
# cut `fit_params` down to `inlier_idxs_subset`
score_params_inlier_subset = _check_method_params(
X, params=routed_params.estimator.score, indices=inlier_idxs_subset
)
# score of inlier data set
score_subset = estimator.score(
X_inlier_subset,
y_inlier_subset,
**score_params_inlier_subset,
)
# same number of inliers but worse score -> skip current random
# sample
if n_inliers_subset == n_inliers_best and score_subset < score_best:
continue
# save current random sample as best sample
n_inliers_best = n_inliers_subset
score_best = score_subset
inlier_mask_best = inlier_mask_subset
X_inlier_best = X_inlier_subset
y_inlier_best = y_inlier_subset
inlier_best_idxs_subset = inlier_idxs_subset
max_trials = min(
max_trials,
_dynamic_max_trials(
n_inliers_best, n_samples, min_samples, self.stop_probability
),
)
# break if sufficient number of inliers or score is reached
if n_inliers_best >= self.stop_n_inliers or score_best >= self.stop_score:
break
# if none of the iterations met the required criteria
if inlier_mask_best is None:
if (
self.n_skips_no_inliers_
+ self.n_skips_invalid_data_
+ self.n_skips_invalid_model_
) > self.max_skips:
raise ValueError(
"RANSAC skipped more iterations than `max_skips` without"
" finding a valid consensus set. Iterations were skipped"
" because each randomly chosen sub-sample failed the"
" passing criteria. See estimator attributes for"
" diagnostics (n_skips*)."
)
else:
raise ValueError(
"RANSAC could not find a valid consensus set. All"
" `max_trials` iterations were skipped because each"
" randomly chosen sub-sample failed the passing criteria."
" See estimator attributes for diagnostics (n_skips*)."
)
else:
if (
self.n_skips_no_inliers_
+ self.n_skips_invalid_data_
+ self.n_skips_invalid_model_
) > self.max_skips:
warnings.warn(
(
"RANSAC found a valid consensus set but exited"
" early due to skipping more iterations than"
" `max_skips`. See estimator attributes for"
" diagnostics (n_skips*)."
),
ConvergenceWarning,
)
# estimate final model using all inliers
fit_params_best_idxs_subset = _check_method_params(
X, params=routed_params.estimator.fit, indices=inlier_best_idxs_subset
)
estimator.fit(X_inlier_best, y_inlier_best, **fit_params_best_idxs_subset)
self.estimator_ = estimator
self.inlier_mask_ = inlier_mask_best
return self
| RANSACRegressor.fit | Repo-Level |
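A sketch on an illustrative line with planted outliers; with the defaults, the base estimator is a LinearRegression, min_samples becomes n_features + 1, and residual_threshold falls back to the MAD of y as computed above:

import numpy as np
from sklearn.linear_model import RANSACRegressor

rng = np.random.default_rng(0)
X = rng.uniform(-3.0, 3.0, size=(200, 1))
y = 3.0 * X.ravel() + 1.0 + rng.normal(scale=0.1, size=200)
y[:20] += 30.0 * rng.uniform(size=20)    # plant 10% gross outliers

ransac = RANSACRegressor(random_state=0).fit(X, y)
print(ransac.estimator_.coef_, ransac.estimator_.intercept_)  # close to 3 and 1
print(int(ransac.inlier_mask_.sum()), "inliers of", len(y))
print(ransac.n_trials_)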
scikit-learn | 166 | sklearn/feature_selection/_rfe.py | def fit(self, X, y, *, groups=None, **params):
"""Fit the RFE model and automatically tune the number of selected features.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like of shape (n_samples,)
Target values (integers for classification, real numbers for
regression).
groups : array-like of shape (n_samples,) or None, default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`~sklearn.model_selection.GroupKFold`).
.. versionadded:: 0.20
**params : dict of str -> object
Parameters passed to the ``fit`` method of the estimator,
the scorer, and the CV splitter.
.. versionadded:: 1.6
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>`
for more details.
Returns
-------
self : object
Fitted estimator.
"""
| /usr/src/app/target_test_cases/failed_tests_RFECV.fit.txt | def fit(self, X, y, *, groups=None, **params):
"""Fit the RFE model and automatically tune the number of selected features.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like of shape (n_samples,)
Target values (integers for classification, real numbers for
regression).
groups : array-like of shape (n_samples,) or None, default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`~sklearn.model_selection.GroupKFold`).
.. versionadded:: 0.20
**params : dict of str -> object
Parameters passed to the ``fit`` method of the estimator,
the scorer, and the CV splitter.
.. versionadded:: 1.6
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>`
for more details.
Returns
-------
self : object
Fitted estimator.
"""
_raise_for_params(params, self, "fit")
X, y = validate_data(
self,
X,
y,
accept_sparse="csr",
ensure_min_features=2,
ensure_all_finite=False,
multi_output=True,
)
if _routing_enabled():
if groups is not None:
params.update({"groups": groups})
routed_params = process_routing(self, "fit", **params)
else:
routed_params = Bunch(
estimator=Bunch(fit={}),
splitter=Bunch(split={"groups": groups}),
scorer=Bunch(score={}),
)
# Initialization
cv = check_cv(self.cv, y, classifier=is_classifier(self.estimator))
scorer = self._get_scorer()
# Build an RFE object, which will evaluate and score each possible
# feature count, down to self.min_features_to_select
n_features = X.shape[1]
if self.min_features_to_select > n_features:
warnings.warn(
(
f"Found min_features_to_select={self.min_features_to_select} > "
f"{n_features=}. There will be no feature selection and all "
"features will be kept."
),
UserWarning,
)
rfe = RFE(
estimator=self.estimator,
n_features_to_select=min(self.min_features_to_select, n_features),
importance_getter=self.importance_getter,
step=self.step,
verbose=self.verbose,
)
# Determine the number of subsets of features by fitting across
# the train folds and choosing the "features_to_select" parameter
# that gives the least averaged error across all folds.
# Note that joblib raises a non-picklable error for bound methods
# even if n_jobs is set to 1 with the default multiprocessing
# backend.
# This branching is done to make sure that user code that sets
# n_jobs to 1 and provides bound methods as scorers is not broken
# with the addition of the n_jobs parameter in version 0.18.
if effective_n_jobs(self.n_jobs) == 1:
parallel, func = list, _rfe_single_fit
else:
parallel = Parallel(n_jobs=self.n_jobs)
func = delayed(_rfe_single_fit)
scores_features = parallel(
func(rfe, self.estimator, X, y, train, test, scorer, routed_params)
for train, test in cv.split(X, y, **routed_params.splitter.split)
)
scores, step_n_features = zip(*scores_features)
step_n_features_rev = np.array(step_n_features[0])[::-1]
scores = np.array(scores)
# Reverse order such that lowest number of features is selected in case of tie.
scores_sum_rev = np.sum(scores, axis=0)[::-1]
n_features_to_select = step_n_features_rev[np.argmax(scores_sum_rev)]
# Re-execute an elimination with best_k over the whole set
rfe = RFE(
estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step,
importance_getter=self.importance_getter,
verbose=self.verbose,
)
rfe.fit(X, y, **routed_params.estimator.fit)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
self.estimator_.fit(self._transform(X), y, **routed_params.estimator.fit)
# reverse to stay consistent with before
scores_rev = scores[:, ::-1]
self.cv_results_ = {
"mean_test_score": np.mean(scores_rev, axis=0),
"std_test_score": np.std(scores_rev, axis=0),
**{f"split{i}_test_score": scores_rev[i] for i in range(scores.shape[0])},
"n_features": step_n_features_rev,
}
return self
| RFECV.fit | Repo-Level |
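A usage sketch on illustrative synthetic data; the estimator and scorer choices are assumptions, not requirements. cv_results_ is ordered from min_features_to_select upward, per the reversal at the end of fit:

from sklearn.datasets import make_classification
from sklearn.feature_selection import RFECV
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold

X, y = make_classification(n_samples=300, n_features=20, n_informative=5,
                           random_state=0)

selector = RFECV(
    LogisticRegression(max_iter=1000),
    step=1,
    min_features_to_select=2,
    cv=StratifiedKFold(5),
    scoring="accuracy",
)
selector.fit(X, y)
print(selector.n_features_)                  # tuned number of features
print(selector.cv_results_["n_features"][:3])
print(selector.cv_results_["mean_test_score"][:3])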
scikit-learn | 170 | sklearn/semi_supervised/_self_training.py | def fit(self, X, y, **params):
"""
Fit self-training classifier using `X`, `y` as training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Array representing the data.
y : {array-like, sparse matrix} of shape (n_samples,)
Array representing the labels. Unlabeled samples should have the
label -1.
**params : dict
Parameters to pass to the underlying estimators.
.. versionadded:: 1.6
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Fitted estimator.
"""
| /usr/src/app/target_test_cases/failed_tests_SelfTrainingClassifier.fit.txt | def fit(self, X, y, **params):
"""
Fit self-training classifier using `X`, `y` as training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Array representing the data.
y : {array-like, sparse matrix} of shape (n_samples,)
Array representing the labels. Unlabeled samples should have the
label -1.
**params : dict
Parameters to pass to the underlying estimators.
.. versionadded:: 1.6
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Fitted estimator.
"""
_raise_for_params(params, self, "fit")
self.estimator_ = self._get_estimator()
# we need row slicing support for sparse matrices, but costly finiteness check
# can be delegated to the base estimator.
X, y = validate_data(
self,
X,
y,
accept_sparse=["csr", "csc", "lil", "dok"],
ensure_all_finite=False,
)
if y.dtype.kind in ["U", "S"]:
raise ValueError(
"y has dtype string. If you wish to predict on "
"string targets, use dtype object, and use -1"
" as the label for unlabeled samples."
)
has_label = y != -1
if np.all(has_label):
warnings.warn("y contains no unlabeled samples", UserWarning)
if self.criterion == "k_best" and (
self.k_best > X.shape[0] - np.sum(has_label)
):
warnings.warn(
(
"k_best is larger than the amount of unlabeled "
"samples. All unlabeled samples will be labeled in "
"the first iteration"
),
UserWarning,
)
if _routing_enabled():
routed_params = process_routing(self, "fit", **params)
else:
routed_params = Bunch(estimator=Bunch(fit={}))
self.transduction_ = np.copy(y)
self.labeled_iter_ = np.full_like(y, -1)
self.labeled_iter_[has_label] = 0
self.n_iter_ = 0
while not np.all(has_label) and (
self.max_iter is None or self.n_iter_ < self.max_iter
):
self.n_iter_ += 1
self.estimator_.fit(
X[safe_mask(X, has_label)],
self.transduction_[has_label],
**routed_params.estimator.fit,
)
# Predict on the unlabeled samples
prob = self.estimator_.predict_proba(X[safe_mask(X, ~has_label)])
pred = self.estimator_.classes_[np.argmax(prob, axis=1)]
max_proba = np.max(prob, axis=1)
# Select new labeled samples
if self.criterion == "threshold":
selected = max_proba > self.threshold
else:
n_to_select = min(self.k_best, max_proba.shape[0])
if n_to_select == max_proba.shape[0]:
selected = np.ones_like(max_proba, dtype=bool)
else:
# NB these are indices, not a mask
selected = np.argpartition(-max_proba, n_to_select)[:n_to_select]
# Map selected indices into original array
selected_full = np.nonzero(~has_label)[0][selected]
# Add newly labeled confident predictions to the dataset
self.transduction_[selected_full] = pred[selected]
has_label[selected_full] = True
self.labeled_iter_[selected_full] = self.n_iter_
if selected_full.shape[0] == 0:
# no changed labels
self.termination_condition_ = "no_change"
break
if self.verbose:
print(
f"End of iteration {self.n_iter_},"
f" added {selected_full.shape[0]} new labels."
)
if self.n_iter_ == self.max_iter:
self.termination_condition_ = "max_iter"
if np.all(has_label):
self.termination_condition_ = "all_labeled"
self.estimator_.fit(
X[safe_mask(X, has_label)],
self.transduction_[has_label],
**routed_params.estimator.fit,
)
self.classes_ = self.estimator_.classes_
return self
| SelfTrainingClassifier.fit | Repo-Level |
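A sketch on iris with roughly 70% of the labels hidden as -1 (all choices illustrative); the base estimator must expose predict_proba, hence SVC(probability=True):

import numpy as np
from sklearn.datasets import load_iris
from sklearn.semi_supervised import SelfTrainingClassifier
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)
rng = np.random.default_rng(42)
y_partial = y.copy()
y_partial[rng.random(len(y)) < 0.7] = -1     # mark ~70% of samples unlabeled

st = SelfTrainingClassifier(SVC(probability=True, gamma="auto"),
                            threshold=0.75).fit(X, y_partial)
print(st.termination_condition_)
print(int((st.labeled_iter_ > 0).sum()), "samples labeled during self-training")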
scikit-learn | 173 | sklearn/feature_selection/_sequential.py | def fit(self, X, y=None, **params):
"""Learn the features to select from X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of predictors.
y : array-like of shape (n_samples,), default=None
Target values. This parameter may be ignored for
unsupervised learning.
**params : dict, default=None
Parameters to be passed to the underlying `estimator`, `cv`
and `scorer` objects.
.. versionadded:: 1.6
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Returns the instance itself.
"""
| /usr/src/app/target_test_cases/failed_tests_SequentialFeatureSelector.fit.txt | def fit(self, X, y=None, **params):
"""Learn the features to select from X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of predictors.
y : array-like of shape (n_samples,), default=None
Target values. This parameter may be ignored for
unsupervised learning.
**params : dict, default=None
Parameters to be passed to the underlying `estimator`, `cv`
and `scorer` objects.
.. versionadded:: 1.6
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Returns the instance itself.
"""
_raise_for_params(params, self, "fit")
tags = self.__sklearn_tags__()
X = validate_data(
self,
X,
accept_sparse="csc",
ensure_min_features=2,
ensure_all_finite=not tags.input_tags.allow_nan,
)
n_features = X.shape[1]
if self.n_features_to_select == "auto":
if self.tol is not None:
# With auto feature selection, `n_features_to_select_` will be updated
# to `support_.sum()` after features are selected.
self.n_features_to_select_ = n_features - 1
else:
self.n_features_to_select_ = n_features // 2
elif isinstance(self.n_features_to_select, Integral):
if self.n_features_to_select >= n_features:
raise ValueError("n_features_to_select must be < n_features.")
self.n_features_to_select_ = self.n_features_to_select
elif isinstance(self.n_features_to_select, Real):
self.n_features_to_select_ = int(n_features * self.n_features_to_select)
if self.tol is not None and self.tol < 0 and self.direction == "forward":
raise ValueError(
"tol must be strictly positive when doing forward selection"
)
cv = check_cv(self.cv, y, classifier=is_classifier(self.estimator))
cloned_estimator = clone(self.estimator)
# the current mask corresponds to the set of features:
# - that we have already *selected* if we do forward selection
# - that we have already *excluded* if we do backward selection
current_mask = np.zeros(shape=n_features, dtype=bool)
n_iterations = (
self.n_features_to_select_
if self.n_features_to_select == "auto" or self.direction == "forward"
else n_features - self.n_features_to_select_
)
old_score = -np.inf
is_auto_select = self.tol is not None and self.n_features_to_select == "auto"
# We only need to verify the routing here and not use the routed params
# because internally the actual routing will also take place inside the
# `cross_val_score` function.
if _routing_enabled():
process_routing(self, "fit", **params)
for _ in range(n_iterations):
new_feature_idx, new_score = self._get_best_new_feature_score(
cloned_estimator, X, y, cv, current_mask, **params
)
if is_auto_select and ((new_score - old_score) < self.tol):
break
old_score = new_score
current_mask[new_feature_idx] = True
if self.direction == "backward":
current_mask = ~current_mask
self.support_ = current_mask
self.n_features_to_select_ = self.support_.sum()
return self
| SequentialFeatureSelector.fit | Repo-Level |
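A usage sketch on iris (illustrative); with an integer n_features_to_select the loop above runs a fixed number of iterations, while n_features_to_select="auto" together with tol enables the early-exit branch:

from sklearn.datasets import load_iris
from sklearn.feature_selection import SequentialFeatureSelector
from sklearn.neighbors import KNeighborsClassifier

X, y = load_iris(return_X_y=True)
knn = KNeighborsClassifier(n_neighbors=3)

sfs = SequentialFeatureSelector(knn, n_features_to_select=2, direction="forward")
sfs.fit(X, y)
print(sfs.get_support())          # boolean mask over the four iris features
print(sfs.transform(X).shape)     # (150, 2)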
scikit-learn | 180 | sklearn/cluster/_spectral.py | def fit(self, X, y=None):
"""Perform spectral clustering from features, or affinity matrix.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
(n_samples, n_samples)
Training instances to cluster, similarities / affinities between
instances if ``affinity='precomputed'``, or distances between
instances if ``affinity='precomputed_nearest_neighbors'``. If a
sparse matrix is provided in a format other than ``csr_matrix``,
``csc_matrix``, or ``coo_matrix``, it will be converted into a
sparse ``csr_matrix``.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
A fitted instance of the estimator.
"""
| /usr/src/app/target_test_cases/failed_tests_SpectralClustering.fit.txt | def fit(self, X, y=None):
"""Perform spectral clustering from features, or affinity matrix.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
(n_samples, n_samples)
Training instances to cluster, similarities / affinities between
instances if ``affinity='precomputed'``, or distances between
instances if ``affinity='precomputed_nearest_neighbors'``. If a
sparse matrix is provided in a format other than ``csr_matrix``,
``csc_matrix``, or ``coo_matrix``, it will be converted into a
sparse ``csr_matrix``.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
A fitted instance of the estimator.
"""
X = validate_data(
self,
X,
accept_sparse=["csr", "csc", "coo"],
dtype=np.float64,
ensure_min_samples=2,
)
allow_squared = self.affinity in [
"precomputed",
"precomputed_nearest_neighbors",
]
if X.shape[0] == X.shape[1] and not allow_squared:
warnings.warn(
"The spectral clustering API has changed. ``fit``"
"now constructs an affinity matrix from data. To use"
" a custom affinity matrix, "
"set ``affinity=precomputed``."
)
if self.affinity == "nearest_neighbors":
connectivity = kneighbors_graph(
X, n_neighbors=self.n_neighbors, include_self=True, n_jobs=self.n_jobs
)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == "precomputed_nearest_neighbors":
estimator = NearestNeighbors(
n_neighbors=self.n_neighbors, n_jobs=self.n_jobs, metric="precomputed"
).fit(X)
connectivity = estimator.kneighbors_graph(X=X, mode="connectivity")
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == "precomputed":
self.affinity_matrix_ = X
else:
params = self.kernel_params
if params is None:
params = {}
if not callable(self.affinity):
params["gamma"] = self.gamma
params["degree"] = self.degree
params["coef0"] = self.coef0
self.affinity_matrix_ = pairwise_kernels(
X, metric=self.affinity, filter_params=True, **params
)
random_state = check_random_state(self.random_state)
n_components = (
self.n_clusters if self.n_components is None else self.n_components
)
# We now obtain the real valued solution matrix to the
# relaxed Ncut problem, solving the eigenvalue problem
# L_sym x = lambda x and recovering u = D^-1/2 x.
# The first eigenvector is constant only for fully connected graphs
# and should be kept for spectral clustering (drop_first = False)
# See spectral_embedding documentation.
maps = _spectral_embedding(
self.affinity_matrix_,
n_components=n_components,
eigen_solver=self.eigen_solver,
random_state=random_state,
eigen_tol=self.eigen_tol,
drop_first=False,
)
if self.verbose:
print(f"Computing label assignment using {self.assign_labels}")
if self.assign_labels == "kmeans":
_, self.labels_, _ = k_means(
maps,
self.n_clusters,
random_state=random_state,
n_init=self.n_init,
verbose=self.verbose,
)
elif self.assign_labels == "cluster_qr":
self.labels_ = cluster_qr(maps)
else:
self.labels_ = discretize(maps, random_state=random_state)
return self
| SpectralClustering.fit | Repo-Level |
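Usage sketch (illustrative, not part of the dataset row): clustering two well-separated blobs with the default RBF affinity; `affinity_matrix_` and `labels_` are the attributes set by the fit above.

import numpy as np
from sklearn.cluster import SpectralClustering

X = np.array([[1, 1], [2, 1], [1, 0],
              [4, 7], [3, 5], [3, 6]])
clustering = SpectralClustering(
    n_clusters=2, assign_labels="discretize", random_state=0
).fit(X)
print(clustering.labels_)                 # e.g. [1 1 1 0 0 0]
print(clustering.affinity_matrix_.shape)  # (6, 6), RBF kernel by default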
scikit-learn | 181 | sklearn/preprocessing/_polynomial.py | def fit(self, X, y=None, sample_weight=None):
"""Compute knot positions of splines.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data.
y : None
Ignored.
sample_weight : array-like of shape (n_samples,), default = None
Individual weights for each sample. Used to calculate quantiles if
`knots="quantile"`. For `knots="uniform"`, zero weighted
observations are ignored for finding the min and max of `X`.
Returns
-------
self : object
Fitted transformer.
"""
| /usr/src/app/target_test_cases/failed_tests_SplineTransformer.fit.txt | def fit(self, X, y=None, sample_weight=None):
"""Compute knot positions of splines.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data.
y : None
Ignored.
sample_weight : array-like of shape (n_samples,), default = None
Individual weights for each sample. Used to calculate quantiles if
`knots="quantile"`. For `knots="uniform"`, zero weighted
observations are ignored for finding the min and max of `X`.
Returns
-------
self : object
Fitted transformer.
"""
X = validate_data(
self,
X,
reset=True,
accept_sparse=False,
ensure_min_samples=2,
ensure_2d=True,
)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
_, n_features = X.shape
if isinstance(self.knots, str):
base_knots = self._get_base_knot_positions(
X, n_knots=self.n_knots, knots=self.knots, sample_weight=sample_weight
)
else:
base_knots = check_array(self.knots, dtype=np.float64)
if base_knots.shape[0] < 2:
raise ValueError("Number of knots, knots.shape[0], must be >= 2.")
elif base_knots.shape[1] != n_features:
raise ValueError("knots.shape[1] == n_features is violated.")
elif not np.all(np.diff(base_knots, axis=0) > 0):
raise ValueError("knots must be sorted without duplicates.")
if self.sparse_output and sp_version < parse_version("1.8.0"):
raise ValueError(
"Option sparse_output=True is only available with scipy>=1.8.0, "
f"but here scipy=={sp_version} is used."
)
# number of knots for base interval
n_knots = base_knots.shape[0]
if self.extrapolation == "periodic" and n_knots <= self.degree:
raise ValueError(
"Periodic splines require degree < n_knots. Got n_knots="
f"{n_knots} and degree={self.degree}."
)
# number of spline basis functions
if self.extrapolation != "periodic":
n_splines = n_knots + self.degree - 1
else:
# periodic splines have self.degree less degrees of freedom
n_splines = n_knots - 1
degree = self.degree
n_out = n_features * n_splines
# We have to add degree number of knots below, and degree number of knots
# above the base knots in order to make the spline basis complete.
if self.extrapolation == "periodic":
# For periodic splines the spacing of the first / last degree knots
# needs to be a continuation of the spacing of the last / first
# base knots.
period = base_knots[-1] - base_knots[0]
knots = np.r_[
base_knots[-(degree + 1) : -1] - period,
base_knots,
base_knots[1 : (degree + 1)] + period,
]
else:
# Eilers & Marx in "Flexible smoothing with B-splines and
# penalties" https://doi.org/10.1214/ss/1038425655 advice
# against repeating first and last knot several times, which
# would have inferior behaviour at boundaries if combined with
# a penalty (hence P-Spline). We follow this advice even if our
# splines are unpenalized. Meaning we do not:
# knots = np.r_[
# np.tile(base_knots.min(axis=0), reps=[degree, 1]),
# base_knots,
# np.tile(base_knots.max(axis=0), reps=[degree, 1])
# ]
# Instead, we reuse the distance of the 2 first/last knots.
dist_min = base_knots[1] - base_knots[0]
dist_max = base_knots[-1] - base_knots[-2]
knots = np.r_[
np.linspace(
base_knots[0] - degree * dist_min,
base_knots[0] - dist_min,
num=degree,
),
base_knots,
np.linspace(
base_knots[-1] + dist_max,
base_knots[-1] + degree * dist_max,
num=degree,
),
]
# With a diagonal coefficient matrix, we get back the spline basis
# elements, i.e. the design matrix of the spline.
# Note, BSpline appreciates C-contiguous float64 arrays as c=coef.
coef = np.eye(n_splines, dtype=np.float64)
if self.extrapolation == "periodic":
coef = np.concatenate((coef, coef[:degree, :]))
extrapolate = self.extrapolation in ["periodic", "continue"]
bsplines = [
BSpline.construct_fast(
knots[:, i], coef, self.degree, extrapolate=extrapolate
)
for i in range(n_features)
]
self.bsplines_ = bsplines
self.n_features_out_ = n_out - n_features * (1 - self.include_bias)
return self
| SplineTransformer.fit | Repo-Level |
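Usage sketch (illustrative, not part of the dataset row): with `degree=2` and `n_knots=3`, the fit above builds one `BSpline` per input feature and `n_splines = n_knots + degree - 1 = 4` output features (with the default `include_bias=True`).

import numpy as np
from sklearn.preprocessing import SplineTransformer

X = np.arange(6).reshape(6, 1)
spline = SplineTransformer(degree=2, n_knots=3).fit(X)
print(len(spline.bsplines_))   # 1, one BSpline object per input feature
print(spline.n_features_out_)  # 4 = n_knots + degree - 1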
scikit-learn | 182 | sklearn/preprocessing/_polynomial.py | def transform(self, X):
"""Transform each feature data to B-splines.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data to transform.
Returns
-------
XBS : {ndarray, sparse matrix} of shape (n_samples, n_features * n_splines)
The matrix of features, where n_splines is the number of basis
elements of the B-splines, n_knots + degree - 1.
"""
| /usr/src/app/target_test_cases/failed_tests_SplineTransformer.transform.txt | def transform(self, X):
"""Transform each feature data to B-splines.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data to transform.
Returns
-------
XBS : {ndarray, sparse matrix} of shape (n_samples, n_features * n_splines)
The matrix of features, where n_splines is the number of basis
elements of the B-splines, n_knots + degree - 1.
"""
check_is_fitted(self)
X = validate_data(self, X, reset=False, accept_sparse=False, ensure_2d=True)
n_samples, n_features = X.shape
n_splines = self.bsplines_[0].c.shape[1]
degree = self.degree
# TODO: Remove this condition, once scipy 1.10 is the minimum version.
# Only scipy >= 1.10 supports design_matrix(.., extrapolate=..).
# The default (implicit in scipy < 1.10) is extrapolate=False.
scipy_1_10 = sp_version >= parse_version("1.10.0")
# Note: self.bsplines_[0].extrapolate is True for extrapolation in
# ["periodic", "continue"]
if scipy_1_10:
use_sparse = self.sparse_output
kwargs_extrapolate = {"extrapolate": self.bsplines_[0].extrapolate}
else:
use_sparse = self.sparse_output and not self.bsplines_[0].extrapolate
kwargs_extrapolate = dict()
# Note that scipy BSpline returns float64 arrays and converts input
# x=X[:, i] to c-contiguous float64.
n_out = self.n_features_out_ + n_features * (1 - self.include_bias)
if X.dtype in FLOAT_DTYPES:
dtype = X.dtype
else:
dtype = np.float64
if use_sparse:
output_list = []
else:
XBS = np.zeros((n_samples, n_out), dtype=dtype, order=self.order)
for i in range(n_features):
spl = self.bsplines_[i]
if self.extrapolation in ("continue", "error", "periodic"):
if self.extrapolation == "periodic":
# With periodic extrapolation we map x to the segment
# [spl.t[k], spl.t[n]].
# This is equivalent to BSpline(.., extrapolate="periodic")
# for scipy>=1.0.0.
n = spl.t.size - spl.k - 1
# Assign to new array to avoid inplace operation
x = spl.t[spl.k] + (X[:, i] - spl.t[spl.k]) % (
spl.t[n] - spl.t[spl.k]
)
else:
x = X[:, i]
if use_sparse:
XBS_sparse = BSpline.design_matrix(
x, spl.t, spl.k, **kwargs_extrapolate
)
if self.extrapolation == "periodic":
# See the construction of coef in fit. We need to add the last
# degree spline basis function to the first degree ones and
# then drop the last ones.
# Note: See comment about SparseEfficiencyWarning below.
XBS_sparse = XBS_sparse.tolil()
XBS_sparse[:, :degree] += XBS_sparse[:, -degree:]
XBS_sparse = XBS_sparse[:, :-degree]
else:
XBS[:, (i * n_splines) : ((i + 1) * n_splines)] = spl(x)
else: # extrapolation in ("constant", "linear")
xmin, xmax = spl.t[degree], spl.t[-degree - 1]
# spline values at boundaries
f_min, f_max = spl(xmin), spl(xmax)
mask = (xmin <= X[:, i]) & (X[:, i] <= xmax)
if use_sparse:
mask_inv = ~mask
x = X[:, i].copy()
# Set some arbitrary values outside boundary that will be reassigned
# later.
x[mask_inv] = spl.t[self.degree]
XBS_sparse = BSpline.design_matrix(x, spl.t, spl.k)
# Note: Without converting to lil_matrix we would get:
# scipy.sparse._base.SparseEfficiencyWarning: Changing the sparsity
# structure of a csr_matrix is expensive. lil_matrix is more
# efficient.
if np.any(mask_inv):
XBS_sparse = XBS_sparse.tolil()
XBS_sparse[mask_inv, :] = 0
else:
XBS[mask, (i * n_splines) : ((i + 1) * n_splines)] = spl(X[mask, i])
# Note for extrapolation:
# 'continue' is already returned as is by scipy BSplines
if self.extrapolation == "error":
# BSpline with extrapolate=False does not raise an error, but
# outputs np.nan.
if (use_sparse and np.any(np.isnan(XBS_sparse.data))) or (
not use_sparse
and np.any(
np.isnan(XBS[:, (i * n_splines) : ((i + 1) * n_splines)])
)
):
raise ValueError(
"X contains values beyond the limits of the knots."
)
elif self.extrapolation == "constant":
# Set all values beyond xmin and xmax to the value of the
# spline basis functions at those two positions.
# Only the first degree and last degree number of splines
# have non-zero values at the boundaries.
mask = X[:, i] < xmin
if np.any(mask):
if use_sparse:
# Note: See comment about SparseEfficiencyWarning above.
XBS_sparse = XBS_sparse.tolil()
XBS_sparse[mask, :degree] = f_min[:degree]
else:
XBS[mask, (i * n_splines) : (i * n_splines + degree)] = f_min[
:degree
]
mask = X[:, i] > xmax
if np.any(mask):
if use_sparse:
# Note: See comment about SparseEfficiencyWarning above.
XBS_sparse = XBS_sparse.tolil()
XBS_sparse[mask, -degree:] = f_max[-degree:]
else:
XBS[
mask,
((i + 1) * n_splines - degree) : ((i + 1) * n_splines),
] = f_max[-degree:]
elif self.extrapolation == "linear":
# Continue the degree first and degree last spline bases
# linearly beyond the boundaries, with slope = derivative at
# the boundary.
# Note that all others have derivative = value = 0 at the
# boundaries.
# spline derivatives = slopes at boundaries
fp_min, fp_max = spl(xmin, nu=1), spl(xmax, nu=1)
# Compute the linear continuation.
if degree <= 1:
# For degree=1, the derivative of 2nd spline is not zero at
# boundary. For degree=0 it is the same as 'constant'.
degree += 1
for j in range(degree):
mask = X[:, i] < xmin
if np.any(mask):
linear_extr = f_min[j] + (X[mask, i] - xmin) * fp_min[j]
if use_sparse:
# Note: See comment about SparseEfficiencyWarning above.
XBS_sparse = XBS_sparse.tolil()
XBS_sparse[mask, j] = linear_extr
else:
XBS[mask, i * n_splines + j] = linear_extr
mask = X[:, i] > xmax
if np.any(mask):
k = n_splines - 1 - j
linear_extr = f_max[k] + (X[mask, i] - xmax) * fp_max[k]
if use_sparse:
# Note: See comment about SparseEfficiencyWarning above.
XBS_sparse = XBS_sparse.tolil()
XBS_sparse[mask, k : k + 1] = linear_extr[:, None]
else:
XBS[mask, i * n_splines + k] = linear_extr
if use_sparse:
XBS_sparse = XBS_sparse.tocsr()
output_list.append(XBS_sparse)
if use_sparse:
# TODO: Remove this conditional error when the minimum supported version of
# SciPy is 1.9.2
# `scipy.sparse.hstack` breaks in scipy<1.9.2
# when `n_features_out_ > max_int32`
max_int32 = np.iinfo(np.int32).max
all_int32 = True
for mat in output_list:
all_int32 &= mat.indices.dtype == np.int32
if (
sp_version < parse_version("1.9.2")
and self.n_features_out_ > max_int32
and all_int32
):
raise ValueError(
"In scipy versions `<1.9.2`, the function `scipy.sparse.hstack`"
" produces negative columns when:\n1. The output shape contains"
" `n_cols` too large to be represented by a 32bit signed"
" integer.\n. All sub-matrices to be stacked have indices of"
" dtype `np.int32`.\nTo avoid this error, either use a version"
" of scipy `>=1.9.2` or alter the `SplineTransformer`"
" transformer to produce fewer than 2^31 output features"
)
XBS = sparse.hstack(output_list, format="csr")
elif self.sparse_output:
# TODO: Remove once scipy 1.10 is the minimum version. See comments above.
XBS = sparse.csr_matrix(XBS)
if self.include_bias:
return XBS
else:
# We throw away one spline basis per feature.
# We chose the last one.
indices = [j for j in range(XBS.shape[1]) if (j + 1) % n_splines != 0]
return XBS[:, indices]
| SplineTransformer.transform | Repo-Level |
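Usage sketch (illustrative, not part of the dataset row): the transform evaluates the fitted bases; inside the knot range each row of the design matrix sums to 1 (partition of unity).

import numpy as np
from sklearn.preprocessing import SplineTransformer

X = np.arange(6).reshape(6, 1)
XBS = SplineTransformer(degree=2, n_knots=3).fit_transform(X)
print(XBS.shape)        # (6, 4)
print(XBS.sum(axis=1))  # all ones inside the base interval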
scikit-learn | 204 | sklearn/linear_model/_glm/glm.py | def fit(self, X, y, sample_weight=None):
"""Fit a Generalized Linear Model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
self : object
Fitted model.
"""
| /usr/src/app/target_test_cases/failed_tests__GeneralizedLinearRegressor.fit.txt | def fit(self, X, y, sample_weight=None):
"""Fit a Generalized Linear Model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
self : object
Fitted model.
"""
X, y = validate_data(
self,
X,
y,
accept_sparse=["csc", "csr"],
dtype=[np.float64, np.float32],
y_numeric=True,
multi_output=False,
)
# required by losses
if self.solver == "lbfgs":
# lbfgs will force coef and therefore raw_prediction to be float64. The
# base_loss needs y, X @ coef and sample_weight all of same dtype
# (and contiguous).
loss_dtype = np.float64
else:
loss_dtype = min(max(y.dtype, X.dtype), np.float64)
y = check_array(y, dtype=loss_dtype, order="C", ensure_2d=False)
if sample_weight is not None:
# Note that _check_sample_weight calls check_array(order="C") required by
# losses.
sample_weight = _check_sample_weight(sample_weight, X, dtype=loss_dtype)
n_samples, n_features = X.shape
self._base_loss = self._get_loss()
linear_loss = LinearModelLoss(
base_loss=self._base_loss,
fit_intercept=self.fit_intercept,
)
if not linear_loss.base_loss.in_y_true_range(y):
raise ValueError(
"Some value(s) of y are out of the valid range of the loss"
f" {self._base_loss.__class__.__name__!r}."
)
# TODO: if alpha=0 check that X is not rank deficient
# NOTE: Rescaling of sample_weight:
# We want to minimize
# obj = 1/(2 * sum(sample_weight)) * sum(sample_weight * deviance)
# + 1/2 * alpha * L2,
# with
# deviance = 2 * loss.
# The objective is invariant to multiplying sample_weight by a constant. We
# could choose this constant such that sum(sample_weight) = 1 in order to end
# up with
# obj = sum(sample_weight * loss) + 1/2 * alpha * L2.
# But LinearModelLoss.loss() already computes
# average(loss, weights=sample_weight)
# Thus, without rescaling, we have
# obj = LinearModelLoss.loss(...)
if self.warm_start and hasattr(self, "coef_"):
if self.fit_intercept:
# LinearModelLoss needs intercept at the end of coefficient array.
coef = np.concatenate((self.coef_, np.array([self.intercept_])))
else:
coef = self.coef_
coef = coef.astype(loss_dtype, copy=False)
else:
coef = linear_loss.init_zero_coef(X, dtype=loss_dtype)
if self.fit_intercept:
coef[-1] = linear_loss.base_loss.link.link(
np.average(y, weights=sample_weight)
)
l2_reg_strength = self.alpha
n_threads = _openmp_effective_n_threads()
# Algorithms for optimization:
# Note again that our losses implement 1/2 * deviance.
if self.solver == "lbfgs":
func = linear_loss.loss_gradient
opt_res = scipy.optimize.minimize(
func,
coef,
method="L-BFGS-B",
jac=True,
options={
"maxiter": self.max_iter,
"maxls": 50, # default is 20
"iprint": self.verbose - 1,
"gtol": self.tol,
# The constant 64 was found empirically to pass the test suite.
# The point is that ftol is very small, but a bit larger than
# machine precision for float64, which is the dtype used by lbfgs.
"ftol": 64 * np.finfo(float).eps,
},
args=(X, y, sample_weight, l2_reg_strength, n_threads),
)
self.n_iter_ = _check_optimize_result("lbfgs", opt_res)
coef = opt_res.x
elif self.solver == "newton-cholesky":
sol = NewtonCholeskySolver(
coef=coef,
linear_loss=linear_loss,
l2_reg_strength=l2_reg_strength,
tol=self.tol,
max_iter=self.max_iter,
n_threads=n_threads,
verbose=self.verbose,
)
coef = sol.solve(X, y, sample_weight)
self.n_iter_ = sol.iteration
elif issubclass(self.solver, NewtonSolver):
sol = self.solver(
coef=coef,
linear_loss=linear_loss,
l2_reg_strength=l2_reg_strength,
tol=self.tol,
max_iter=self.max_iter,
n_threads=n_threads,
)
coef = sol.solve(X, y, sample_weight)
self.n_iter_ = sol.iteration
else:
raise ValueError(f"Invalid solver={self.solver}.")
if self.fit_intercept:
self.intercept_ = coef[-1]
self.coef_ = coef[:-1]
else:
# set intercept to zero as the other linear models do
self.intercept_ = 0.0
self.coef_ = coef
return self
| _GeneralizedLinearRegressor.fit | Repo-Level |
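Usage sketch (illustrative, not part of the dataset row): `_GeneralizedLinearRegressor` is private, but public subclasses such as `PoissonRegressor` expose this `fit` directly (lbfgs solver by default).

from sklearn.linear_model import PoissonRegressor

X = [[1, 2], [2, 3], [3, 4], [4, 3]]
y = [12, 17, 22, 21]
reg = PoissonRegressor().fit(X, y)
print(reg.coef_, reg.intercept_)
print(reg.predict([[1, 1], [3, 4]]))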
scikit-learn | 210 | sklearn/metrics/_base.py | def _average_binary_score(binary_metric, y_true, y_score, average, sample_weight=None):
"""Average a binary metric for multilabel classification.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : {None, 'micro', 'macro', 'samples', 'weighted'}, default='macro'
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
Will be ignored when ``y_true`` is binary.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
binary_metric : callable, returns shape [n_classes]
The binary metric function to use.
Returns
-------
score : float or array of shape [n_classes]
If ``average`` is not ``None``, return the averaged score; otherwise
return the score for each class.
"""
| /usr/src/app/target_test_cases/failed_tests__average_binary_score.txt | def _average_binary_score(binary_metric, y_true, y_score, average, sample_weight=None):
"""Average a binary metric for multilabel classification.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : {None, 'micro', 'macro', 'samples', 'weighted'}, default='macro'
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
Will be ignored when ``y_true`` is binary.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
binary_metric : callable, returns shape [n_classes]
The binary metric function to use.
Returns
-------
score : float or array of shape [n_classes]
If ``average`` is not ``None``, return the averaged score; otherwise
return the score for each class.
"""
average_options = (None, "micro", "macro", "weighted", "samples")
if average not in average_options:
raise ValueError("average has to be one of {0}".format(average_options))
y_type = type_of_target(y_true)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if y_type == "binary":
return binary_metric(y_true, y_score, sample_weight=sample_weight)
check_consistent_length(y_true, y_score, sample_weight)
y_true = check_array(y_true)
y_score = check_array(y_score)
not_average_axis = 1
score_weight = sample_weight
average_weight = None
if average == "micro":
if score_weight is not None:
score_weight = np.repeat(score_weight, y_true.shape[1])
y_true = y_true.ravel()
y_score = y_score.ravel()
elif average == "weighted":
if score_weight is not None:
average_weight = np.sum(
np.multiply(y_true, np.reshape(score_weight, (-1, 1))), axis=0
)
else:
average_weight = np.sum(y_true, axis=0)
if np.isclose(average_weight.sum(), 0.0):
return 0
elif average == "samples":
# swap average_weight <-> score_weight
average_weight = score_weight
score_weight = None
not_average_axis = 0
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_score.ndim == 1:
y_score = y_score.reshape((-1, 1))
n_classes = y_score.shape[not_average_axis]
score = np.zeros((n_classes,))
for c in range(n_classes):
y_true_c = y_true.take([c], axis=not_average_axis).ravel()
y_score_c = y_score.take([c], axis=not_average_axis).ravel()
score[c] = binary_metric(y_true_c, y_score_c, sample_weight=score_weight)
# Average the results
if average is not None:
if average_weight is not None:
# Scores with 0 weights are forced to be 0, preventing the average
# score from being affected by 0-weighted NaN elements.
average_weight = np.asarray(average_weight)
score[average_weight == 0] = 0
return np.average(score, weights=average_weight)
else:
return score
| _average_binary_score | Repo-Level |
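Usage sketch (illustrative, not part of the dataset row): the helper reproduces what the public multilabel `roc_auc_score` does internally; the private import path `sklearn.metrics._base` is an assumption of this sketch.

import numpy as np
from sklearn.metrics import roc_auc_score
from sklearn.metrics._base import _average_binary_score  # private path, may move

y_true = np.array([[1, 0], [0, 1], [1, 1]])
y_score = np.array([[0.9, 0.2], [0.1, 0.8], [0.8, 0.7]])
print(_average_binary_score(roc_auc_score, y_true, y_score, average=None))
print(_average_binary_score(roc_auc_score, y_true, y_score, average="macro"))
print(roc_auc_score(y_true, y_score, average="macro"))  # same value, public API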
scikit-learn | 218 | sklearn/utils/validation.py | def _check_psd_eigenvalues(lambdas, enable_warnings=False):
"""Check the eigenvalues of a positive semidefinite (PSD) matrix.
Checks the provided array of PSD matrix eigenvalues for numerical or
conditioning issues and returns a fixed validated version. This method
should typically be used if the PSD matrix is user-provided (e.g. a
Gram matrix) or computed using a user-provided dissimilarity metric
(e.g. kernel function), or if the decomposition process uses approximation
methods (randomized SVD, etc.).
It checks for three things:
- that there are no significant imaginary parts in eigenvalues (more than
1e-5 times the maximum real part). If this check fails, it raises a
``ValueError``. Otherwise all non-significant imaginary parts that may
remain are set to zero. This operation is traced with a
``PositiveSpectrumWarning`` when ``enable_warnings=True``.
- that eigenvalues are not all negative. If this check fails, it raises a
``ValueError``
- that there are no significant negative eigenvalues with absolute value
more than 1e-10 (1e-6) and more than 1e-5 (5e-3) times the largest
positive eigenvalue in double (simple) precision. If this check fails,
it raises a ``ValueError``. Otherwise all negative eigenvalues that may
remain are set to zero. This operation is traced with a
``PositiveSpectrumWarning`` when ``enable_warnings=True``.
Finally, all the positive eigenvalues that are too small (with a value
smaller than the maximum eigenvalue multiplied by 1e-12 (2e-7)) are set to
zero. This operation is traced with a ``PositiveSpectrumWarning`` when
``enable_warnings=True``.
Parameters
----------
lambdas : array-like of shape (n_eigenvalues,)
Array of eigenvalues to check / fix.
enable_warnings : bool, default=False
When this is set to ``True``, a ``PositiveSpectrumWarning`` will be
raised when there are imaginary parts, negative eigenvalues, or
extremely small non-zero eigenvalues. Otherwise no warning will be
raised. In both cases, imaginary parts, negative eigenvalues, and
extremely small non-zero eigenvalues will be set to zero.
Returns
-------
lambdas_fixed : ndarray of shape (n_eigenvalues,)
A fixed validated copy of the array of eigenvalues.
Examples
--------
>>> from sklearn.utils.validation import _check_psd_eigenvalues
>>> _check_psd_eigenvalues([1, 2]) # nominal case
array([1, 2])
>>> _check_psd_eigenvalues([5, 5j]) # significant imag part
Traceback (most recent call last):
...
ValueError: There are significant imaginary parts in eigenvalues (1
of the maximum real part). Either the matrix is not PSD, or there was
an issue while computing the eigendecomposition of the matrix.
>>> _check_psd_eigenvalues([5, 5e-5j]) # insignificant imag part
array([5., 0.])
>>> _check_psd_eigenvalues([-5, -1]) # all negative
Traceback (most recent call last):
...
ValueError: All eigenvalues are negative (maximum is -1). Either the
matrix is not PSD, or there was an issue while computing the
eigendecomposition of the matrix.
>>> _check_psd_eigenvalues([5, -1]) # significant negative
Traceback (most recent call last):
...
ValueError: There are significant negative eigenvalues (0.2 of the
maximum positive). Either the matrix is not PSD, or there was an issue
while computing the eigendecomposition of the matrix.
>>> _check_psd_eigenvalues([5, -5e-5]) # insignificant negative
array([5., 0.])
>>> _check_psd_eigenvalues([5, 4e-12]) # bad conditioning (too small)
array([5., 0.])
"""
| /usr/src/app/target_test_cases/failed_tests__check_psd_eigenvalues.txt | def _check_psd_eigenvalues(lambdas, enable_warnings=False):
"""Check the eigenvalues of a positive semidefinite (PSD) matrix.
Checks the provided array of PSD matrix eigenvalues for numerical or
conditioning issues and returns a fixed validated version. This method
should typically be used if the PSD matrix is user-provided (e.g. a
Gram matrix) or computed using a user-provided dissimilarity metric
(e.g. kernel function), or if the decomposition process uses approximation
methods (randomized SVD, etc.).
It checks for three things:
- that there are no significant imaginary parts in eigenvalues (more than
1e-5 times the maximum real part). If this check fails, it raises a
``ValueError``. Otherwise all non-significant imaginary parts that may
remain are set to zero. This operation is traced with a
``PositiveSpectrumWarning`` when ``enable_warnings=True``.
- that eigenvalues are not all negative. If this check fails, it raises a
``ValueError``
- that there are no significant negative eigenvalues with absolute value
more than 1e-10 (1e-6) and more than 1e-5 (5e-3) times the largest
positive eigenvalue in double (simple) precision. If this check fails,
it raises a ``ValueError``. Otherwise all negative eigenvalues that may
remain are set to zero. This operation is traced with a
``PositiveSpectrumWarning`` when ``enable_warnings=True``.
Finally, all the positive eigenvalues that are too small (with a value
smaller than the maximum eigenvalue multiplied by 1e-12 (2e-7)) are set to
zero. This operation is traced with a ``PositiveSpectrumWarning`` when
``enable_warnings=True``.
Parameters
----------
lambdas : array-like of shape (n_eigenvalues,)
Array of eigenvalues to check / fix.
enable_warnings : bool, default=False
When this is set to ``True``, a ``PositiveSpectrumWarning`` will be
raised when there are imaginary parts, negative eigenvalues, or
extremely small non-zero eigenvalues. Otherwise no warning will be
raised. In both cases, imaginary parts, negative eigenvalues, and
extremely small non-zero eigenvalues will be set to zero.
Returns
-------
lambdas_fixed : ndarray of shape (n_eigenvalues,)
A fixed validated copy of the array of eigenvalues.
Examples
--------
>>> from sklearn.utils.validation import _check_psd_eigenvalues
>>> _check_psd_eigenvalues([1, 2]) # nominal case
array([1, 2])
>>> _check_psd_eigenvalues([5, 5j]) # significant imag part
Traceback (most recent call last):
...
ValueError: There are significant imaginary parts in eigenvalues (1
of the maximum real part). Either the matrix is not PSD, or there was
an issue while computing the eigendecomposition of the matrix.
>>> _check_psd_eigenvalues([5, 5e-5j]) # insignificant imag part
array([5., 0.])
>>> _check_psd_eigenvalues([-5, -1]) # all negative
Traceback (most recent call last):
...
ValueError: All eigenvalues are negative (maximum is -1). Either the
matrix is not PSD, or there was an issue while computing the
eigendecomposition of the matrix.
>>> _check_psd_eigenvalues([5, -1]) # significant negative
Traceback (most recent call last):
...
ValueError: There are significant negative eigenvalues (0.2 of the
maximum positive). Either the matrix is not PSD, or there was an issue
while computing the eigendecomposition of the matrix.
>>> _check_psd_eigenvalues([5, -5e-5]) # insignificant negative
array([5., 0.])
>>> _check_psd_eigenvalues([5, 4e-12]) # bad conditioning (too small)
array([5., 0.])
"""
lambdas = np.array(lambdas)
is_double_precision = lambdas.dtype == np.float64
# note: the minimum value available is
# - single-precision: np.finfo('float32').eps = 1.2e-07
# - double-precision: np.finfo('float64').eps = 2.2e-16
# the various thresholds used for validation
# we may wish to change the value according to precision.
significant_imag_ratio = 1e-5
significant_neg_ratio = 1e-5 if is_double_precision else 5e-3
significant_neg_value = 1e-10 if is_double_precision else 1e-6
small_pos_ratio = 1e-12 if is_double_precision else 2e-7
# Check that there are no significant imaginary parts
if not np.isreal(lambdas).all():
max_imag_abs = np.abs(np.imag(lambdas)).max()
max_real_abs = np.abs(np.real(lambdas)).max()
if max_imag_abs > significant_imag_ratio * max_real_abs:
raise ValueError(
"There are significant imaginary parts in eigenvalues (%g "
"of the maximum real part). Either the matrix is not PSD, or "
"there was an issue while computing the eigendecomposition "
"of the matrix." % (max_imag_abs / max_real_abs)
)
# warn about imaginary parts being removed
if enable_warnings:
warnings.warn(
"There are imaginary parts in eigenvalues (%g "
"of the maximum real part). Either the matrix is not"
" PSD, or there was an issue while computing the "
"eigendecomposition of the matrix. Only the real "
"parts will be kept." % (max_imag_abs / max_real_abs),
PositiveSpectrumWarning,
)
# Remove all imaginary parts (even if zero)
lambdas = np.real(lambdas)
# Check that there are no significant negative eigenvalues
max_eig = lambdas.max()
if max_eig < 0:
raise ValueError(
"All eigenvalues are negative (maximum is %g). "
"Either the matrix is not PSD, or there was an "
"issue while computing the eigendecomposition of "
"the matrix." % max_eig
)
else:
min_eig = lambdas.min()
if (
min_eig < -significant_neg_ratio * max_eig
and min_eig < -significant_neg_value
):
raise ValueError(
"There are significant negative eigenvalues (%g"
" of the maximum positive). Either the matrix is "
"not PSD, or there was an issue while computing "
"the eigendecomposition of the matrix." % (-min_eig / max_eig)
)
elif min_eig < 0:
# Remove all negative values and warn about it
if enable_warnings:
warnings.warn(
"There are negative eigenvalues (%g of the "
"maximum positive). Either the matrix is not "
"PSD, or there was an issue while computing the"
" eigendecomposition of the matrix. Negative "
"eigenvalues will be replaced with 0." % (-min_eig / max_eig),
PositiveSpectrumWarning,
)
lambdas[lambdas < 0] = 0
# Check for conditioning (small positive non-zeros)
too_small_lambdas = (0 < lambdas) & (lambdas < small_pos_ratio * max_eig)
if too_small_lambdas.any():
if enable_warnings:
warnings.warn(
"Badly conditioned PSD matrix spectrum: the largest "
"eigenvalue is more than %g times the smallest. "
"Small eigenvalues will be replaced with 0."
"" % (1 / small_pos_ratio),
PositiveSpectrumWarning,
)
lambdas[too_small_lambdas] = 0
return lambdas
| _check_psd_eigenvalues | Self-Contained |
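Usage sketch (illustrative, not part of the dataset row): with `enable_warnings=True` the silent fixes from the docstring become traceable `PositiveSpectrumWarning`s.

import warnings
from sklearn.utils.validation import _check_psd_eigenvalues

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    fixed = _check_psd_eigenvalues([5.0, -5e-5], enable_warnings=True)
print(fixed)                        # [5. 0.]
print(caught[0].category.__name__)  # PositiveSpectrumWarning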
scikit-learn | 221 | sklearn/metrics/_classification.py | def _check_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task.
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``.
y_true : array or indicator matrix
y_pred : array or indicator matrix
"""
| /usr/src/app/target_test_cases/failed_tests__check_targets.txt | def _check_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task.
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``.
y_true : array or indicator matrix
y_pred : array or indicator matrix
"""
xp, _ = get_namespace(y_true, y_pred)
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true, input_name="y_true")
type_pred = type_of_target(y_pred, input_name="y_pred")
y_type = {type_true, type_pred}
if y_type == {"binary", "multiclass"}:
y_type = {"multiclass"}
if len(y_type) > 1:
raise ValueError(
"Classification metrics can't handle a mix of {0} and {1} targets".format(
type_true, type_pred
)
)
# We can't have more than one value in y_type, so the set is no longer needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if y_type not in ["binary", "multiclass", "multilabel-indicator"]:
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
xp, _ = get_namespace(y_true, y_pred)
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type == "binary":
try:
unique_values = _union1d(y_true, y_pred, xp)
except TypeError as e:
# We expect y_true and y_pred to be of the same data type.
# If `y_true` was provided to the classifier as strings,
# `y_pred` given by the classifier will also be encoded with
# strings. So we raise a meaningful error
raise TypeError(
"Labels in y_true and y_pred should be of the same type. "
f"Got y_true={xp.unique(y_true)} and "
f"y_pred={xp.unique(y_pred)}. Make sure that the "
"predictions provided by the classifier coincides with "
"the true labels."
) from e
if unique_values.shape[0] > 2:
y_type = "multiclass"
if y_type.startswith("multilabel"):
if _is_numpy_namespace(xp):
# XXX: do we really want to sparse-encode multilabel indicators when
# they are passed as a dense arrays? This is not possible for array
# API inputs in general hence we only do it for NumPy inputs. But even
# for NumPy the usefulness is questionable.
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = "multilabel-indicator"
return y_type, y_true, y_pred
| _check_targets | Repo-Level |
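Usage sketch (illustrative; assumes the private path `sklearn.metrics._classification`): compatible target pairs are coerced to a common type, incompatible ones raise.

from sklearn.metrics._classification import _check_targets

y_type, y_true, y_pred = _check_targets([1, 0, 1], [1, 1, 0])
print(y_type)  # 'binary'
y_type, _, _ = _check_targets([1, 0, 2], [1, 1, 0])
print(y_type)  # {'binary', 'multiclass'} merges to 'multiclass'
try:
    _check_targets([1, 0, 1], [[0, 1], [1, 0], [0, 1]])
except ValueError as exc:
    print(exc)  # mix of binary and multilabel-indicator targets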
scikit-learn | 224 | sklearn/utils/_testing.py | def _convert_container(
container,
constructor_name,
columns_name=None,
dtype=None,
minversion=None,
categorical_feature_names=None,
):
"""Convert a given container to a specific array-like with a dtype.
Parameters
----------
container : array-like
The container to convert.
constructor_name : {"list", "tuple", "array", "sparse", "dataframe", \
"series", "index", "slice", "sparse_csr", "sparse_csc", \
"sparse_csr_array", "sparse_csc_array", "pyarrow", "polars", \
"polars_series"}
The type of the returned container.
columns_name : index or array-like, default=None
For pandas container supporting `columns_names`, it will affect
specific names.
dtype : dtype, default=None
Force the dtype of the container. Does not apply to `"slice"`
container.
minversion : str, default=None
Minimum version for package to install.
categorical_feature_names : list of str, default=None
List of column names to cast to categorical dtype.
Returns
-------
converted_container
"""
| /usr/src/app/target_test_cases/failed_tests__convert_container.txt | def _convert_container(
container,
constructor_name,
columns_name=None,
dtype=None,
minversion=None,
categorical_feature_names=None,
):
"""Convert a given container to a specific array-like with a dtype.
Parameters
----------
container : array-like
The container to convert.
constructor_name : {"list", "tuple", "array", "sparse", "dataframe", \
"series", "index", "slice", "sparse_csr", "sparse_csc", \
"sparse_csr_array", "sparse_csc_array", "pyarrow", "polars", \
"polars_series"}
The type of the returned container.
columns_name : index or array-like, default=None
For pandas container supporting `columns_names`, it will affect
specific names.
dtype : dtype, default=None
Force the dtype of the container. Does not apply to `"slice"`
container.
minversion : str, default=None
Minimum version for package to install.
categorical_feature_names : list of str, default=None
List of column names to cast to categorical dtype.
Returns
-------
converted_container
"""
if constructor_name == "list":
if dtype is None:
return list(container)
else:
return np.asarray(container, dtype=dtype).tolist()
elif constructor_name == "tuple":
if dtype is None:
return tuple(container)
else:
return tuple(np.asarray(container, dtype=dtype).tolist())
elif constructor_name == "array":
return np.asarray(container, dtype=dtype)
elif constructor_name in ("pandas", "dataframe"):
pd = pytest.importorskip("pandas", minversion=minversion)
result = pd.DataFrame(container, columns=columns_name, dtype=dtype, copy=False)
if categorical_feature_names is not None:
for col_name in categorical_feature_names:
result[col_name] = result[col_name].astype("category")
return result
elif constructor_name == "pyarrow":
pa = pytest.importorskip("pyarrow", minversion=minversion)
array = np.asarray(container)
if columns_name is None:
columns_name = [f"col{i}" for i in range(array.shape[1])]
data = {name: array[:, i] for i, name in enumerate(columns_name)}
result = pa.Table.from_pydict(data)
if categorical_feature_names is not None:
for col_idx, col_name in enumerate(result.column_names):
if col_name in categorical_feature_names:
result = result.set_column(
col_idx, col_name, result.column(col_name).dictionary_encode()
)
return result
elif constructor_name == "polars":
pl = pytest.importorskip("polars", minversion=minversion)
result = pl.DataFrame(container, schema=columns_name, orient="row")
if categorical_feature_names is not None:
for col_name in categorical_feature_names:
result = result.with_columns(pl.col(col_name).cast(pl.Categorical))
return result
elif constructor_name == "series":
pd = pytest.importorskip("pandas", minversion=minversion)
return pd.Series(container, dtype=dtype)
elif constructor_name == "polars_series":
pl = pytest.importorskip("polars", minversion=minversion)
return pl.Series(values=container)
elif constructor_name == "index":
pd = pytest.importorskip("pandas", minversion=minversion)
return pd.Index(container, dtype=dtype)
elif constructor_name == "slice":
return slice(container[0], container[1])
elif "sparse" in constructor_name:
if not sp.sparse.issparse(container):
# For scipy >= 1.13, sparse array constructed from 1d array may be
# 1d or raise an exception. To avoid this, we make sure that the
# input container is 2d. For more details, see
# https://github.com/scipy/scipy/pull/18530#issuecomment-1878005149
container = np.atleast_2d(container)
if "array" in constructor_name and sp_version < parse_version("1.8"):
raise ValueError(
f"{constructor_name} is only available with scipy>=1.8.0, got "
f"{sp_version}"
)
if constructor_name in ("sparse", "sparse_csr"):
# sparse and sparse_csr are equivalent for legacy reasons
return sp.sparse.csr_matrix(container, dtype=dtype)
elif constructor_name == "sparse_csr_array":
return sp.sparse.csr_array(container, dtype=dtype)
elif constructor_name == "sparse_csc":
return sp.sparse.csc_matrix(container, dtype=dtype)
elif constructor_name == "sparse_csc_array":
return sp.sparse.csc_array(container, dtype=dtype)
| _convert_container | Self-Contained |
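Usage sketch (illustrative; this helper lives in sklearn's test utilities, so a dev environment with pytest importable is assumed): only the pandas/polars/pyarrow branches need `pytest.importorskip`; the plain NumPy/SciPy constructors below work standalone.

from sklearn.utils._testing import _convert_container  # private test helper

data = [[0, 1], [2, 3]]
arr = _convert_container(data, "array", dtype="float64")
csr = _convert_container(data, "sparse_csr")
print(arr.dtype, csr.format)                # float64 csr
print(_convert_container([0, 5], "slice"))  # slice(0, 5, None)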
scikit-learn | 234 | sklearn/model_selection/_validation.py | def _fit_and_score(
estimator,
X,
y,
*,
scorer,
train,
test,
verbose,
parameters,
fit_params,
score_params,
return_train_score=False,
return_parameters=False,
return_n_test_samples=False,
return_times=False,
return_estimator=False,
split_progress=None,
candidate_progress=None,
error_score=np.nan,
):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape (n_samples, n_features)
The data to fit.
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
The target variable to try to predict in the case of
supervised learning.
scorer : A single callable or dict mapping scorer name to the callable
If it is a single callable, the return value for ``train_scores`` and
``test_scores`` is a single float.
For a dict, it should be one mapping the scorer name to the scorer
callable object / function.
The callable object / function should have signature
``scorer(estimator, X, y)``.
train : array-like of shape (n_train_samples,)
Indices of training samples.
test : array-like of shape (n_test_samples,)
Indices of test samples.
verbose : int
The verbosity level.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, FitFailedWarning is raised.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
score_params : dict or None
Parameters that will be passed to the scorer.
return_train_score : bool, default=False
Compute and return score on training set.
return_parameters : bool, default=False
Return parameters that has been used for the estimator.
split_progress : {list, tuple} of int, default=None
A list or tuple of format (<current_split_id>, <total_num_of_splits>).
candidate_progress : {list, tuple} of int, default=None
A list or tuple of format
(<current_candidate_id>, <total_number_of_candidates>).
return_n_test_samples : bool, default=False
Whether to return the ``n_test_samples``.
return_times : bool, default=False
Whether to return the fit/score times.
return_estimator : bool, default=False
Whether to return the fitted estimator.
Returns
-------
result : dict with the following attributes
train_scores : dict of scorer name -> float
Score on training set (for all the scorers),
returned only if `return_train_score` is `True`.
test_scores : dict of scorer name -> float
Score on testing set (for all the scorers).
n_test_samples : int
Number of test samples.
fit_time : float
Time spent for fitting in seconds.
score_time : float
Time spent for scoring in seconds.
parameters : dict or None
The parameters that have been evaluated.
estimator : estimator object
The fitted estimator.
fit_error : str or None
Traceback str if the fit failed, None if the fit succeeded.
"""
| /usr/src/app/target_test_cases/failed_tests__fit_and_score.txt | def _fit_and_score(
estimator,
X,
y,
*,
scorer,
train,
test,
verbose,
parameters,
fit_params,
score_params,
return_train_score=False,
return_parameters=False,
return_n_test_samples=False,
return_times=False,
return_estimator=False,
split_progress=None,
candidate_progress=None,
error_score=np.nan,
):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape (n_samples, n_features)
The data to fit.
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
The target variable to try to predict in the case of
supervised learning.
scorer : A single callable or dict mapping scorer name to the callable
If it is a single callable, the return value for ``train_scores`` and
``test_scores`` is a single float.
For a dict, it should be one mapping the scorer name to the scorer
callable object / function.
The callable object / function should have signature
``scorer(estimator, X, y)``.
train : array-like of shape (n_train_samples,)
Indices of training samples.
test : array-like of shape (n_test_samples,)
Indices of test samples.
verbose : int
The verbosity level.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, FitFailedWarning is raised.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
score_params : dict or None
Parameters that will be passed to the scorer.
return_train_score : bool, default=False
Compute and return score on training set.
return_parameters : bool, default=False
Return parameters that has been used for the estimator.
split_progress : {list, tuple} of int, default=None
A list or tuple of format (<current_split_id>, <total_num_of_splits>).
candidate_progress : {list, tuple} of int, default=None
A list or tuple of format
(<current_candidate_id>, <total_number_of_candidates>).
return_n_test_samples : bool, default=False
Whether to return the ``n_test_samples``.
return_times : bool, default=False
Whether to return the fit/score times.
return_estimator : bool, default=False
Whether to return the fitted estimator.
Returns
-------
result : dict with the following attributes
train_scores : dict of scorer name -> float
Score on training set (for all the scorers),
returned only if `return_train_score` is `True`.
test_scores : dict of scorer name -> float
Score on testing set (for all the scorers).
n_test_samples : int
Number of test samples.
fit_time : float
Time spent for fitting in seconds.
score_time : float
Time spent for scoring in seconds.
parameters : dict or None
The parameters that have been evaluated.
estimator : estimator object
The fitted estimator.
fit_error : str or None
Traceback str if the fit failed, None if the fit succeeded.
"""
xp, _ = get_namespace(X)
X_device = device(X)
# Make sure that we can fancy index X even if train and test are provided
# as NumPy arrays by NumPy-only cross-validation splitters.
train, test = xp.asarray(train, device=X_device), xp.asarray(test, device=X_device)
if not isinstance(error_score, numbers.Number) and error_score != "raise":
raise ValueError(
"error_score must be the string 'raise' or a numeric value. "
"(Hint: if using 'raise', please make sure that it has been "
"spelled correctly.)"
)
progress_msg = ""
if verbose > 2:
if split_progress is not None:
progress_msg = f" {split_progress[0]+1}/{split_progress[1]}"
if candidate_progress and verbose > 9:
progress_msg += f"; {candidate_progress[0]+1}/{candidate_progress[1]}"
if verbose > 1:
if parameters is None:
params_msg = ""
else:
sorted_keys = sorted(parameters)  # Ensure deterministic output
params_msg = ", ".join(f"{k}={parameters[k]}" for k in sorted_keys)
if verbose > 9:
start_msg = f"[CV{progress_msg}] START {params_msg}"
print(f"{start_msg}{(80 - len(start_msg)) * '.'}")
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = _check_method_params(X, params=fit_params, indices=train)
score_params = score_params if score_params is not None else {}
score_params_train = _check_method_params(X, params=score_params, indices=train)
score_params_test = _check_method_params(X, params=score_params, indices=test)
if parameters is not None:
# here we clone the parameters, since sometimes the parameters
# themselves might be estimators, e.g. when we search over different
# estimators in a pipeline.
# ref: https://github.com/scikit-learn/scikit-learn/pull/26786
estimator = estimator.set_params(**clone(parameters, safe=False))
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
result = {}
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception:
# Note fit time as time until error
fit_time = time.time() - start_time
score_time = 0.0
if error_score == "raise":
raise
elif isinstance(error_score, numbers.Number):
if isinstance(scorer, _MultimetricScorer):
test_scores = {name: error_score for name in scorer._scorers}
if return_train_score:
train_scores = test_scores.copy()
else:
test_scores = error_score
if return_train_score:
train_scores = error_score
result["fit_error"] = format_exc()
else:
result["fit_error"] = None
fit_time = time.time() - start_time
test_scores = _score(
estimator, X_test, y_test, scorer, score_params_test, error_score
)
score_time = time.time() - start_time - fit_time
if return_train_score:
train_scores = _score(
estimator, X_train, y_train, scorer, score_params_train, error_score
)
if verbose > 1:
total_time = score_time + fit_time
end_msg = f"[CV{progress_msg}] END "
result_msg = params_msg + (";" if params_msg else "")
if verbose > 2:
if isinstance(test_scores, dict):
for scorer_name in sorted(test_scores):
result_msg += f" {scorer_name}: ("
if return_train_score:
scorer_scores = train_scores[scorer_name]
result_msg += f"train={scorer_scores:.3f}, "
result_msg += f"test={test_scores[scorer_name]:.3f})"
else:
result_msg += ", score="
if return_train_score:
result_msg += f"(train={train_scores:.3f}, test={test_scores:.3f})"
else:
result_msg += f"{test_scores:.3f}"
result_msg += f" total time={logger.short_format_time(total_time)}"
# Right align the result_msg
end_msg += "." * (80 - len(end_msg) - len(result_msg))
end_msg += result_msg
print(end_msg)
result["test_scores"] = test_scores
if return_train_score:
result["train_scores"] = train_scores
if return_n_test_samples:
result["n_test_samples"] = _num_samples(X_test)
if return_times:
result["fit_time"] = fit_time
result["score_time"] = score_time
if return_parameters:
result["parameters"] = parameters
if return_estimator:
result["estimator"] = estimator
return result
| _fit_and_score | Repo-Level |
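Usage sketch (illustrative; assumes the private path `sklearn.model_selection._validation` and a recent sklearn where `score_params` exists): evaluating one manual split, mirroring what `cross_validate` dispatches in parallel.

import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import check_scoring
from sklearn.model_selection._validation import _fit_and_score

X, y = load_iris(return_X_y=True)
idx = np.random.RandomState(0).permutation(len(y))  # shuffled split indices
result = _fit_and_score(
    LogisticRegression(max_iter=200), X, y,
    scorer=check_scoring(LogisticRegression(), scoring="accuracy"),
    train=idx[:100], test=idx[100:], verbose=0,
    parameters=None, fit_params=None, score_params=None,
    return_times=True,
)
print(result["test_scores"], result["fit_time"])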
scikit-learn | 236 | sklearn/utils/graph.py | def _fix_connected_components(
X,
graph,
n_connected_components,
component_labels,
mode="distance",
metric="euclidean",
**kwargs,
):
"""Add connections to sparse graph to connect unconnected components.
For each pair of unconnected components, compute all pairwise distances
from one component to the other, and add a connection on the closest pair
of samples. This is a hacky way to get a graph with a single connected
component, which is necessary for example to compute a shortest path
between all pairs of samples in the graph.
Parameters
----------
X : array of shape (n_samples, n_features) or (n_samples, n_samples)
Features to compute the pairwise distances. If `metric =
"precomputed"`, X is the matrix of pairwise distances.
graph : sparse matrix of shape (n_samples, n_samples)
Graph of connection between samples.
n_connected_components : int
Number of connected components, as computed by
`scipy.sparse.csgraph.connected_components`.
component_labels : array of shape (n_samples)
Labels of connected components, as computed by
`scipy.sparse.csgraph.connected_components`.
mode : {'connectivity', 'distance'}, default='distance'
Type of graph matrix: 'connectivity' corresponds to the connectivity
matrix with ones and zeros, and 'distance' corresponds to the distances
between neighbors according to the given metric.
metric : str
Metric used in `sklearn.metrics.pairwise.pairwise_distances`.
kwargs : kwargs
Keyword arguments passed to
`sklearn.metrics.pairwise.pairwise_distances`.
Returns
-------
graph : sparse matrix of shape (n_samples, n_samples)
Graph of connection between samples, with a single connected component.
"""
| /usr/src/app/target_test_cases/failed_tests__fix_connected_components.txt | def _fix_connected_components(
X,
graph,
n_connected_components,
component_labels,
mode="distance",
metric="euclidean",
**kwargs,
):
"""Add connections to sparse graph to connect unconnected components.
For each pair of unconnected components, compute all pairwise distances
from one component to the other, and add a connection on the closest pair
of samples. This is a hacky way to get a graph with a single connected
component, which is necessary for example to compute a shortest path
between all pairs of samples in the graph.
Parameters
----------
X : array of shape (n_samples, n_features) or (n_samples, n_samples)
Features to compute the pairwise distances. If `metric =
"precomputed"`, X is the matrix of pairwise distances.
graph : sparse matrix of shape (n_samples, n_samples)
Graph of connection between samples.
n_connected_components : int
Number of connected components, as computed by
`scipy.sparse.csgraph.connected_components`.
component_labels : array of shape (n_samples)
Labels of connected components, as computed by
`scipy.sparse.csgraph.connected_components`.
mode : {'connectivity', 'distance'}, default='distance'
Type of graph matrix: 'connectivity' corresponds to the connectivity
matrix with ones and zeros, and 'distance' corresponds to the distances
between neighbors according to the given metric.
metric : str
Metric used in `sklearn.metrics.pairwise.pairwise_distances`.
kwargs : kwargs
Keyword arguments passed to
`sklearn.metrics.pairwise.pairwise_distances`.
Returns
-------
graph : sparse matrix of shape (n_samples, n_samples)
Graph of connection between samples, with a single connected component.
"""
if metric == "precomputed" and sparse.issparse(X):
raise RuntimeError(
"_fix_connected_components with metric='precomputed' requires the "
"full distance matrix in X, and does not work with a sparse "
"neighbors graph."
)
for i in range(n_connected_components):
idx_i = np.flatnonzero(component_labels == i)
Xi = X[idx_i]
for j in range(i):
idx_j = np.flatnonzero(component_labels == j)
Xj = X[idx_j]
if metric == "precomputed":
D = X[np.ix_(idx_i, idx_j)]
else:
D = pairwise_distances(Xi, Xj, metric=metric, **kwargs)
ii, jj = np.unravel_index(D.argmin(axis=None), D.shape)
if mode == "connectivity":
graph[idx_i[ii], idx_j[jj]] = 1
graph[idx_j[jj], idx_i[ii]] = 1
elif mode == "distance":
graph[idx_i[ii], idx_j[jj]] = D[ii, jj]
graph[idx_j[jj], idx_i[ii]] = D[ii, jj]
else:
raise ValueError(
"Unknown mode=%r, should be one of ['connectivity', 'distance']."
% mode
)
return graph
| _fix_connected_components | Repo-Level |
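Usage sketch (illustrative; assumes the private path `sklearn.utils.graph`): a kNN graph over two distant blobs has two components, and the helper stitches them with one shortest cross-link; the graph is converted to LIL first so the edge insertions are cheap.

import numpy as np
from scipy.sparse.csgraph import connected_components
from sklearn.neighbors import kneighbors_graph
from sklearn.utils.graph import _fix_connected_components

rng = np.random.RandomState(0)
X = np.vstack([rng.randn(5, 2), rng.randn(5, 2) + 100])  # two distant blobs
graph = kneighbors_graph(X, n_neighbors=2, mode="distance").tolil()
n_comp, labels = connected_components(graph)
print(n_comp)  # 2: the blobs are disconnected
graph = _fix_connected_components(X, graph, n_comp, labels)
print(connected_components(graph)[0])  # 1 after the fix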
scikit-learn | 240 | sklearn/utils/_response.py | def _get_response_values(
estimator,
X,
response_method,
pos_label=None,
return_response_method_used=False,
):
"""Compute the response values of a classifier, an outlier detector, or a regressor.
The response values are predictions that follow one of these shapes:
- for binary classification, it is a 1d array of shape `(n_samples,)`;
- for multiclass classification, it is a 2d array of shape `(n_samples, n_classes)`;
- for multilabel classification, it is a 2d array of shape `(n_samples, n_outputs)`;
- for outlier detection, it is a 1d array of shape `(n_samples,)`;
- for regression, it is a 1d array of shape `(n_samples,)`.
If `estimator` is a binary classifier, also return the label for the
effective positive class.
This utility is used primarily in the displays and the scikit-learn scorers.
.. versionadded:: 1.3
Parameters
----------
estimator : estimator instance
Fitted classifier, outlier detector, or regressor or a
fitted :class:`~sklearn.pipeline.Pipeline` in which the last estimator is a
classifier, an outlier detector, or a regressor.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input values.
response_method : {"predict_proba", "predict_log_proba", "decision_function", \
"predict"} or list of such str
Specifies the response method to use to get predictions from an estimator
(i.e. :term:`predict_proba`, :term:`predict_log_proba`,
:term:`decision_function` or :term:`predict`). Possible choices are:
- if `str`, it corresponds to the name of the method to return;
- if a list of `str`, it provides the method names in order of
preference. The method returned corresponds to the first method in
the list that is implemented by `estimator`.
pos_label : int, float, bool or str, default=None
The class considered as the positive class when computing
the metrics. If `None` and target is 'binary', `estimator.classes_[1]` is
considered as the positive class.
return_response_method_used : bool, default=False
Whether to return the response method used to compute the response
values.
.. versionadded:: 1.4
Returns
-------
y_pred : ndarray of shape (n_samples,), (n_samples, n_classes) or \
(n_samples, n_outputs)
Target scores calculated from the provided `response_method`
and `pos_label`.
pos_label : int, float, bool, str or None
The class considered as the positive class when computing
the metrics. Returns `None` if `estimator` is a regressor or an outlier
detector.
response_method_used : str
The response method used to compute the response values. Only returned
if `return_response_method_used` is `True`.
.. versionadded:: 1.4
Raises
------
ValueError
If `pos_label` is not a valid label.
If the shape of `y_pred` is not consistent for a binary classifier.
If the response method can be applied to a classifier only and
`estimator` is a regressor.
"""
| /usr/src/app/target_test_cases/failed_tests__get_response_values.txt | def _get_response_values(
estimator,
X,
response_method,
pos_label=None,
return_response_method_used=False,
):
"""Compute the response values of a classifier, an outlier detector, or a regressor.
The response values are predictions that follow these shapes:
- for binary classification, it is a 1d array of shape `(n_samples,)`;
- for multiclass classification, it is a 2d array of shape `(n_samples, n_classes)`;
- for multilabel classification, it is a 2d array of shape `(n_samples, n_outputs)`;
- for outlier detection, it is a 1d array of shape `(n_samples,)`;
- for regression, it is a 1d array of shape `(n_samples,)`.
If `estimator` is a binary classifier, also return the label for the
effective positive class.
This utility is used primarily in the displays and the scikit-learn scorers.
.. versionadded:: 1.3
Parameters
----------
estimator : estimator instance
Fitted classifier, outlier detector, or regressor or a
fitted :class:`~sklearn.pipeline.Pipeline` in which the last estimator is a
classifier, an outlier detector, or a regressor.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input values.
response_method : {"predict_proba", "predict_log_proba", "decision_function", \
"predict"} or list of such str
Specifies the response method to use to get predictions from an estimator
(i.e. :term:`predict_proba`, :term:`predict_log_proba`,
:term:`decision_function` or :term:`predict`). Possible choices are:
- if `str`, it corresponds to the name of the method to return;
- if a list of `str`, it provides the method names in order of
preference. The method returned corresponds to the first method in
the list that is implemented by `estimator`.
pos_label : int, float, bool or str, default=None
The class considered as the positive class when computing
the metrics. If `None` and target is 'binary', `estimator.classes_[1]` is
considered as the positive class.
return_response_method_used : bool, default=False
Whether to return the response method used to compute the response
values.
.. versionadded:: 1.4
Returns
-------
y_pred : ndarray of shape (n_samples,), (n_samples, n_classes) or \
(n_samples, n_outputs)
Target scores calculated from the provided `response_method`
and `pos_label`.
pos_label : int, float, bool, str or None
The class considered as the positive class when computing
the metrics. Returns `None` if `estimator` is a regressor or an outlier
detector.
response_method_used : str
The response method used to compute the response values. Only returned
if `return_response_method_used` is `True`.
.. versionadded:: 1.4
Raises
------
ValueError
If `pos_label` is not a valid label.
If the shape of `y_pred` is not consistent for a binary classifier.
If the response method can be applied to a classifier only and
`estimator` is a regressor.
"""
from sklearn.base import is_classifier, is_outlier_detector # noqa
if is_classifier(estimator):
prediction_method = _check_response_method(estimator, response_method)
classes = estimator.classes_
target_type = type_of_target(classes)
if target_type in ("binary", "multiclass"):
if pos_label is not None and pos_label not in classes.tolist():
raise ValueError(
f"pos_label={pos_label} is not a valid label: It should be "
f"one of {classes}"
)
elif pos_label is None and target_type == "binary":
pos_label = classes[-1]
y_pred = prediction_method(X)
if prediction_method.__name__ in ("predict_proba", "predict_log_proba"):
y_pred = _process_predict_proba(
y_pred=y_pred,
target_type=target_type,
classes=classes,
pos_label=pos_label,
)
elif prediction_method.__name__ == "decision_function":
y_pred = _process_decision_function(
y_pred=y_pred,
target_type=target_type,
classes=classes,
pos_label=pos_label,
)
elif is_outlier_detector(estimator):
prediction_method = _check_response_method(estimator, response_method)
y_pred, pos_label = prediction_method(X), None
else: # estimator is a regressor
if response_method != "predict":
raise ValueError(
f"{estimator.__class__.__name__} should either be a classifier to be "
f"used with response_method={response_method} or the response_method "
"should be 'predict'. Got a regressor with response_method="
f"{response_method} instead."
)
prediction_method = estimator.predict
y_pred, pos_label = prediction_method(X), None
if return_response_method_used:
return y_pred, pos_label, prediction_method.__name__
return y_pred, pos_label
| _get_response_values | Repo-Level |
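To make the documented binary shape concrete, here is a hedged illustration with public estimator methods only; the synthetic dataset is an assumption.
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression

X, y = make_classification(n_samples=100, random_state=0)
clf = LogisticRegression().fit(X, y)

# With response_method="predict_proba" and the default pos_label
# (clf.classes_[1]), the 2d probabilities collapse to the positive column.
y_pred = clf.predict_proba(X)[:, 1]
print(y_pred.shape, clf.classes_[1])  # (100,) 1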
scikit-learn | 242 | sklearn/manifold/_t_sne.py | def _gradient_descent(
objective,
p0,
it,
max_iter,
n_iter_check=1,
n_iter_without_progress=300,
momentum=0.8,
learning_rate=200.0,
min_gain=0.01,
min_grad_norm=1e-7,
verbose=0,
args=None,
kwargs=None,
):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : callable
Should return a tuple of cost and gradient for a given parameter
vector. When expensive to compute, the cost can optionally
be None; it is then only computed every n_iter_check steps,
via the `compute_error` keyword passed to `objective`.
p0 : array-like of shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
max_iter : int
Maximum number of gradient descent iterations.
n_iter_check : int, default=1
Number of iterations before evaluating the global error. If the error
is sufficiently low, we abort the optimization.
n_iter_without_progress : int, default=300
Maximum number of iterations without progress before we abort the
optimization.
momentum : float within (0.0, 1.0), default=0.8
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, default=200.0
The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
the learning rate is too high, the data may look like a 'ball' with any
point approximately equidistant from its nearest neighbours. If the
learning rate is too low, most points may look compressed in a dense
cloud with few outliers.
min_gain : float, default=0.01
Minimum individual gain for each parameter.
min_grad_norm : float, default=1e-7
If the gradient norm is below this threshold, the optimization will
be aborted.
verbose : int, default=0
Verbosity level.
args : sequence, default=None
Arguments to pass to objective function.
kwargs : dict, default=None
Keyword arguments to pass to objective function.
Returns
-------
p : ndarray of shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
| /usr/src/app/target_test_cases/failed_tests__gradient_descent.txt | def _gradient_descent(
objective,
p0,
it,
max_iter,
n_iter_check=1,
n_iter_without_progress=300,
momentum=0.8,
learning_rate=200.0,
min_gain=0.01,
min_grad_norm=1e-7,
verbose=0,
args=None,
kwargs=None,
):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : callable
Should return a tuple of cost and gradient for a given parameter
vector. When expensive to compute, the cost can optionally
be None; it is then only computed every n_iter_check steps,
via the `compute_error` keyword passed to `objective`.
p0 : array-like of shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
max_iter : int
Maximum number of gradient descent iterations.
n_iter_check : int, default=1
Number of iterations before evaluating the global error. If the error
is sufficiently low, we abort the optimization.
n_iter_without_progress : int, default=300
Maximum number of iterations without progress before we abort the
optimization.
momentum : float within (0.0, 1.0), default=0.8
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, default=200.0
The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
the learning rate is too high, the data may look like a 'ball' with any
point approximately equidistant from its nearest neighbours. If the
learning rate is too low, most points may look compressed in a dense
cloud with few outliers.
min_gain : float, default=0.01
Minimum individual gain for each parameter.
min_grad_norm : float, default=1e-7
If the gradient norm is below this threshold, the optimization will
be aborted.
verbose : int, default=0
Verbosity level.
args : sequence, default=None
Arguments to pass to objective function.
kwargs : dict, default=None
Keyword arguments to pass to objective function.
Returns
-------
p : ndarray of shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(float).max
best_error = np.finfo(float).max
best_iter = i = it
tic = time()
for i in range(it, max_iter):
check_convergence = (i + 1) % n_iter_check == 0
# only compute the error when needed
kwargs["compute_error"] = check_convergence or i == max_iter - 1
error, grad = objective(p, *args, **kwargs)
inc = update * grad < 0.0
dec = np.invert(inc)
gains[inc] += 0.2
gains[dec] *= 0.8
np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if check_convergence:
toc = time()
duration = toc - tic
tic = toc
grad_norm = linalg.norm(grad)
if verbose >= 2:
print(
"[t-SNE] Iteration %d: error = %.7f,"
" gradient norm = %.7f"
" (%s iterations in %0.3fs)"
% (i + 1, error, grad_norm, n_iter_check, duration)
)
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print(
"[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress)
)
break
if grad_norm <= min_grad_norm:
if verbose >= 2:
print(
"[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm)
)
break
return p, error, i
| _gradient_descent | Self-Contained |
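As a sanity check on the gain-adaptation rule, the following toy loop applies the same updates to a convex quadratic; the objective and hyperparameters are assumptions chosen only to make the behaviour visible, not values from the source.
import numpy as np

def objective(p):
    # cost = 0.5 * ||p||^2, gradient = p
    return 0.5 * float(p @ p), p.copy()

p = np.array([5.0, -3.0])
update = np.zeros_like(p)
gains = np.ones_like(p)
momentum, learning_rate, min_gain = 0.8, 0.01, 0.01
for _ in range(500):
    _, grad = objective(p)
    inc = update * grad < 0.0   # still moving downhill along this coordinate
    gains[inc] += 0.2           # grow the per-parameter gain
    gains[~inc] *= 0.8          # shrink it after an overshoot
    np.clip(gains, min_gain, np.inf, out=gains)
    update = momentum * update - learning_rate * gains * grad
    p += update
print(np.linalg.norm(p))  # should be far below the starting norm (~5.8)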
scikit-learn | 245 | sklearn/inspection/_partial_dependence.py | def _grid_from_X(X, percentiles, is_categorical, grid_resolution):
"""Generate a grid of points based on the percentiles of X.
The grid is a cartesian product of the columns of ``values``. The
jth column of ``values`` consists of ``grid_resolution`` equally-spaced
points between the percentiles of the jth column of X.
If ``grid_resolution`` is bigger than the number of unique values in the
jth column of X, or if the feature is categorical (according to
`is_categorical`), then those unique values will be used instead.
Parameters
----------
X : array-like of shape (n_samples, n_target_features)
The data.
percentiles : tuple of float
The percentiles which are used to construct the extreme values of
the grid. Must be in [0, 1].
is_categorical : list of bool
For each feature, tells whether it is categorical or not. If a feature
is categorical, then the values used will be the unique ones
(i.e. categories) instead of the percentiles.
grid_resolution : int
The number of equally spaced points to be placed on the grid for each
feature.
Returns
-------
grid : ndarray of shape (n_points, n_target_features)
A value for each feature at each point in the grid. ``n_points`` is
always ``<= grid_resolution ** X.shape[1]``.
values : list of 1d ndarrays
The values with which the grid has been created. The size of each
array ``values[j]`` is either ``grid_resolution``, or the number of
unique values in ``X[:, j]``, whichever is smaller.
"""
| /usr/src/app/target_test_cases/failed_tests__grid_from_X.txt | def _grid_from_X(X, percentiles, is_categorical, grid_resolution):
"""Generate a grid of points based on the percentiles of X.
The grid is a cartesian product of the columns of ``values``. The
jth column of ``values`` consists of ``grid_resolution`` equally-spaced
points between the percentiles of the jth column of X.
If ``grid_resolution`` is bigger than the number of unique values in the
jth column of X, or if the feature is categorical (according to
`is_categorical`), then those unique values will be used instead.
Parameters
----------
X : array-like of shape (n_samples, n_target_features)
The data.
percentiles : tuple of float
The percentiles which are used to construct the extreme values of
the grid. Must be in [0, 1].
is_categorical : list of bool
For each feature, tells whether it is categorical or not. If a feature
is categorical, then the values used will be the unique ones
(i.e. categories) instead of the percentiles.
grid_resolution : int
The number of equally spaced points to be placed on the grid for each
feature.
Returns
-------
grid : ndarray of shape (n_points, n_target_features)
A value for each feature at each point in the grid. ``n_points`` is
always ``<= grid_resolution ** X.shape[1]``.
values : list of 1d ndarrays
The values with which the grid has been created. The size of each
array ``values[j]`` is either ``grid_resolution``, or the number of
unique values in ``X[:, j]``, whichever is smaller.
"""
if not isinstance(percentiles, Iterable) or len(percentiles) != 2:
raise ValueError("'percentiles' must be a sequence of 2 elements.")
if not all(0 <= x <= 1 for x in percentiles):
raise ValueError("'percentiles' values must be in [0, 1].")
if percentiles[0] >= percentiles[1]:
raise ValueError("percentiles[0] must be strictly less than percentiles[1].")
if grid_resolution <= 1:
raise ValueError("'grid_resolution' must be strictly greater than 1.")
values = []
# TODO: we should handle missing values (i.e. `np.nan`) specifically and store them
# in a different Bunch attribute.
for feature, is_cat in enumerate(is_categorical):
try:
uniques = np.unique(_safe_indexing(X, feature, axis=1))
except TypeError as exc:
# `np.unique` will fail in the presence of `np.nan` and `str` categories
# due to sorting. Temporary, we reraise an error explaining the problem.
raise ValueError(
f"The column #{feature} contains mixed data types. Finding unique "
"categories fail due to sorting. It usually means that the column "
"contains `np.nan` values together with `str` categories. Such use "
"case is not yet supported in scikit-learn."
) from exc
if is_cat or uniques.shape[0] < grid_resolution:
# Use the unique values either because:
# - the feature has fewer unique values than grid_resolution
# - the feature is categorical
axis = uniques
else:
# create axis based on percentiles and grid resolution
emp_percentiles = mquantiles(
_safe_indexing(X, feature, axis=1), prob=percentiles, axis=0
)
if np.allclose(emp_percentiles[0], emp_percentiles[1]):
raise ValueError(
"percentiles are too close to each other, "
"unable to build the grid. Please choose percentiles "
"that are further apart."
)
axis = np.linspace(
emp_percentiles[0],
emp_percentiles[1],
num=grid_resolution,
endpoint=True,
)
values.append(axis)
return cartesian(values), values
| _grid_from_X | Repo-Level |
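A short sketch of the axis construction for one numeric and one categorical column may help; the toy matrix, percentiles, and resolution below are assumptions.
import numpy as np
from scipy.stats.mstats import mquantiles

X = np.column_stack([np.linspace(0.0, 10.0, 50), np.repeat([0.0, 1.0], 25)])
is_categorical = [False, True]
grid_resolution, percentiles = 5, (0.05, 0.95)

values = []
for j, is_cat in enumerate(is_categorical):
    uniques = np.unique(X[:, j])
    if is_cat or uniques.shape[0] < grid_resolution:
        values.append(uniques)  # categories or low-cardinality feature
    else:
        lo, hi = mquantiles(X[:, j], prob=percentiles)
        values.append(np.linspace(lo, hi, num=grid_resolution))

# Cartesian product of the per-feature axes, one row per grid point.
grid = np.array(np.meshgrid(*values)).T.reshape(-1, len(values))
print(grid.shape)  # (5 * 2, 2) = (10, 2)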
scikit-learn | 247 | sklearn/linear_model/_huber.py | def _huber_loss_and_gradient(w, X, y, epsilon, alpha, sample_weight=None):
"""Returns the Huber loss and the gradient.
Parameters
----------
w : ndarray, shape (n_features + 1,) or (n_features + 2,)
Feature vector.
w[:n_features] gives the coefficients
w[-1] gives the scale factor and if the intercept is fit w[-2]
gives the intercept factor.
X : ndarray of shape (n_samples, n_features)
Input data.
y : ndarray of shape (n_samples,)
Target vector.
epsilon : float
Robustness of the Huber estimator.
alpha : float
Regularization parameter.
sample_weight : ndarray of shape (n_samples,), default=None
Weight assigned to each sample.
Returns
-------
loss : float
Huber loss.
gradient : ndarray, shape (len(w))
Returns the derivative of the Huber loss with respect to each
coefficient, intercept and the scale as a vector.
"""
| /usr/src/app/target_test_cases/failed_tests__huber_loss_and_gradient.txt | def _huber_loss_and_gradient(w, X, y, epsilon, alpha, sample_weight=None):
"""Returns the Huber loss and the gradient.
Parameters
----------
w : ndarray, shape (n_features + 1,) or (n_features + 2,)
Feature vector.
w[:n_features] gives the coefficients
w[-1] gives the scale factor and if the intercept is fit w[-2]
gives the intercept factor.
X : ndarray of shape (n_samples, n_features)
Input data.
y : ndarray of shape (n_samples,)
Target vector.
epsilon : float
Robustness of the Huber estimator.
alpha : float
Regularization parameter.
sample_weight : ndarray of shape (n_samples,), default=None
Weight assigned to each sample.
Returns
-------
loss : float
Huber loss.
gradient : ndarray, shape (len(w))
Returns the derivative of the Huber loss with respect to each
coefficient, intercept and the scale as a vector.
"""
_, n_features = X.shape
fit_intercept = n_features + 2 == w.shape[0]
if fit_intercept:
intercept = w[-2]
sigma = w[-1]
w = w[:n_features]
n_samples = np.sum(sample_weight)
# Calculate the values where |y - X'w - c| / sigma > epsilon
# The values above this threshold are outliers.
linear_loss = y - safe_sparse_dot(X, w)
if fit_intercept:
linear_loss -= intercept
abs_linear_loss = np.abs(linear_loss)
outliers_mask = abs_linear_loss > epsilon * sigma
# Calculate the linear loss due to the outliers.
# This is equal to (2 * epsilon * |y - X'w - c| / sigma - epsilon**2) * sigma
outliers = abs_linear_loss[outliers_mask]
num_outliers = np.count_nonzero(outliers_mask)
n_non_outliers = X.shape[0] - num_outliers
# n_sw_outliers includes the weight given to the outliers while
# num_outliers is just the number of outliers.
outliers_sw = sample_weight[outliers_mask]
n_sw_outliers = np.sum(outliers_sw)
outlier_loss = (
2.0 * epsilon * np.sum(outliers_sw * outliers)
- sigma * n_sw_outliers * epsilon**2
)
# Calculate the quadratic loss due to the non-outliers.
# This is equal to ((y - X'w - c)**2 / sigma**2) * sigma
non_outliers = linear_loss[~outliers_mask]
weighted_non_outliers = sample_weight[~outliers_mask] * non_outliers
weighted_loss = np.dot(weighted_non_outliers.T, non_outliers)
squared_loss = weighted_loss / sigma
if fit_intercept:
grad = np.zeros(n_features + 2)
else:
grad = np.zeros(n_features + 1)
# Gradient due to the squared loss.
X_non_outliers = -axis0_safe_slice(X, ~outliers_mask, n_non_outliers)
grad[:n_features] = (
2.0 / sigma * safe_sparse_dot(weighted_non_outliers, X_non_outliers)
)
# Gradient due to the linear loss.
signed_outliers = np.ones_like(outliers)
signed_outliers_mask = linear_loss[outliers_mask] < 0
signed_outliers[signed_outliers_mask] = -1.0
X_outliers = axis0_safe_slice(X, outliers_mask, num_outliers)
sw_outliers = sample_weight[outliers_mask] * signed_outliers
grad[:n_features] -= 2.0 * epsilon * (safe_sparse_dot(sw_outliers, X_outliers))
# Gradient due to the penalty.
grad[:n_features] += alpha * 2.0 * w
# Gradient due to sigma.
grad[-1] = n_samples
grad[-1] -= n_sw_outliers * epsilon**2
grad[-1] -= squared_loss / sigma
# Gradient due to the intercept.
if fit_intercept:
grad[-2] = -2.0 * np.sum(weighted_non_outliers) / sigma
grad[-2] -= 2.0 * epsilon * np.sum(sw_outliers)
loss = n_samples * sigma + squared_loss + outlier_loss
loss += alpha * np.dot(w, w)
return loss, grad
| _huber_loss_and_gradient | Repo-Level |
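For reference, the objective these gradient terms correspond to can be written compactly. The following is a hedged reconstruction from the code above, with s_i denoting sample_weight[i] and c the optional intercept, not a formula quoted from the source:
\min_{w,\,c,\,\sigma}\;\sum_{i=1}^{n} s_i\left(\sigma + H_{\epsilon}\!\left(\frac{y_i - X_i w - c}{\sigma}\right)\sigma\right) + \alpha\,\lVert w\rVert_2^2,
\quad\text{where}\quad
H_{\epsilon}(z) = \begin{cases} z^2 & \lvert z\rvert < \epsilon \\ 2\epsilon\lvert z\rvert - \epsilon^2 & \text{otherwise.} \end{cases}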
scikit-learn | 253 | sklearn/linear_model/_least_angle.py | def _lars_path_residues(
X_train,
y_train,
X_test,
y_test,
Gram=None,
copy=True,
method="lar",
verbose=False,
fit_intercept=True,
max_iter=500,
eps=np.finfo(float).eps,
positive=False,
):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array-like of shape (n_samples, n_features)
The data to fit the LARS on
y_train : array-like of shape (n_samples,)
The target variable to fit LARS on
X_test : array-like of shape (n_samples, n_features)
The data to compute the residues on
y_test : array-like of shape (n_samples,)
The target variable to compute the residues on
Gram : None, 'auto' or array-like of shape (n_features, n_features), \
default=None
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features
copy : bool, default=True
Whether X_train, X_test, y_train and y_test should be copied;
if False, they may be overwritten.
method : {'lar' , 'lasso'}, default='lar'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
verbose : bool or int, default=False
Sets the amount of verbosity
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
positive : bool, default=False
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
See reservations for using this option in combination with method
'lasso' for expected small values of alpha in the doc of LassoLarsCV
and LassoLarsIC.
max_iter : int, default=500
Maximum number of iterations to perform.
eps : float, default=np.finfo(float).eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Returns
--------
alphas : array-like of shape (n_alphas,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
is smaller.
active : list
Indices of active variables at the end of the path.
coefs : array-like of shape (n_features, n_alphas)
Coefficients along the path
residues : array-like of shape (n_alphas, n_samples)
Residues of the prediction on the test data
"""
| /usr/src/app/target_test_cases/failed_tests__lars_path_residues.txt | def _lars_path_residues(
X_train,
y_train,
X_test,
y_test,
Gram=None,
copy=True,
method="lar",
verbose=False,
fit_intercept=True,
max_iter=500,
eps=np.finfo(float).eps,
positive=False,
):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array-like of shape (n_samples, n_features)
The data to fit the LARS on
y_train : array-like of shape (n_samples,)
The target variable to fit LARS on
X_test : array-like of shape (n_samples, n_features)
The data to compute the residues on
y_test : array-like of shape (n_samples,)
The target variable to compute the residues on
Gram : None, 'auto' or array-like of shape (n_features, n_features), \
default=None
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features
copy : bool, default=True
Whether X_train, X_test, y_train and y_test should be copied;
if False, they may be overwritten.
method : {'lar' , 'lasso'}, default='lar'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
verbose : bool or int, default=False
Sets the amount of verbosity
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
positive : bool, default=False
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
See reservations for using this option in combination with method
'lasso' for expected small values of alpha in the doc of LassoLarsCV
and LassoLarsIC.
max_iter : int, default=500
Maximum number of iterations to perform.
eps : float, default=np.finfo(float).eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Returns
--------
alphas : array-like of shape (n_alphas,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
is smaller.
active : list
Indices of active variables at the end of the path.
coefs : array-like of shape (n_features, n_alphas)
Coefficients along the path
residues : array-like of shape (n_alphas, n_samples)
Residues of the prediction on the test data
"""
X_train = _check_copy_and_writeable(X_train, copy)
y_train = _check_copy_and_writeable(y_train, copy)
X_test = _check_copy_and_writeable(X_test, copy)
y_test = _check_copy_and_writeable(y_test, copy)
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
alphas, active, coefs = lars_path(
X_train,
y_train,
Gram=Gram,
copy_X=False,
copy_Gram=False,
method=method,
verbose=max(0, verbose - 1),
max_iter=max_iter,
eps=eps,
positive=positive,
)
residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
return alphas, active, coefs, residues.T
| _lars_path_residues | Repo-Level |
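The same residue computation can be sketched with the public `lars_path` function; the random data and the train/test split below are assumptions.
import numpy as np
from sklearn.linear_model import lars_path

rng = np.random.RandomState(0)
X, y = rng.randn(60, 8), rng.randn(60)
X_train, X_test, y_train, y_test = X[:40], X[40:], y[:40], y[40:]

# Center with *training* statistics, as the fit_intercept=True branch does.
X_mean, y_mean = X_train.mean(axis=0), y_train.mean()
alphas, active, coefs = lars_path(X_train - X_mean, y_train - y_mean,
                                  method="lar")

# One residue vector per alpha, evaluated on the held-out fold.
residues = np.dot(X_test - X_mean, coefs) - (y_test - y_mean)[:, np.newaxis]
print(residues.T.shape)  # (n_alphas, n_test_samples)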
scikit-learn | 254 | sklearn/linear_model/_logistic.py | def _log_reg_scoring_path(
X,
y,
train,
test,
*,
pos_class,
Cs,
scoring,
fit_intercept,
max_iter,
tol,
class_weight,
verbose,
solver,
penalty,
dual,
intercept_scaling,
multi_class,
random_state,
max_squared_sum,
sample_weight,
l1_ratio,
score_params,
):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : int or list of floats
Each of the values in Cs describes the inverse of
regularization strength. If Cs is an int, then a grid of Cs
values are chosen in a logarithmic scale between 1e-4 and 1e4.
scoring : str or callable
A string (see :ref:`scoring_parameter`) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``. For a list of scoring functions
that can be used, look at :mod:`sklearn.metrics`.
fit_intercept : bool
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : dict or 'balanced'
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'liblinear', 'newton-cg', 'newton-cholesky', 'sag', 'saga'}
Decides which solver to use.
penalty : {'l1', 'l2', 'elasticnet'}
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
only supported by the 'saga' solver.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : {'auto', 'ovr', 'multinomial'}
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
random_state : int, RandomState instance
Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the
data. See :term:`Glossary <random_state>` for details.
max_squared_sum : float
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like of shape (n_samples,)
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
l1_ratio : float
The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
combination of L1 and L2.
score_params : dict
Parameters to pass to the `score` method of the underlying scorer.
Returns
-------
coefs : ndarray of shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray of shape (n_cs,)
Scores obtained for each Cs.
n_iter : ndarray of shape (n_cs,)
Actual number of iteration for each Cs.
"""
| /usr/src/app/target_test_cases/failed_tests__log_reg_scoring_path.txt | def _log_reg_scoring_path(
X,
y,
train,
test,
*,
pos_class,
Cs,
scoring,
fit_intercept,
max_iter,
tol,
class_weight,
verbose,
solver,
penalty,
dual,
intercept_scaling,
multi_class,
random_state,
max_squared_sum,
sample_weight,
l1_ratio,
score_params,
):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : int or list of floats
Each of the values in Cs describes the inverse of
regularization strength. If Cs is an int, then a grid of Cs
values are chosen in a logarithmic scale between 1e-4 and 1e4.
scoring : str or callable
A string (see :ref:`scoring_parameter`) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``. For a list of scoring functions
that can be used, look at :mod:`sklearn.metrics`.
fit_intercept : bool
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : dict or 'balanced'
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'liblinear', 'newton-cg', 'newton-cholesky', 'sag', 'saga'}
Decides which solver to use.
penalty : {'l1', 'l2', 'elasticnet'}
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
only supported by the 'saga' solver.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : {'auto', 'ovr', 'multinomial'}
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
random_state : int, RandomState instance
Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the
data. See :term:`Glossary <random_state>` for details.
max_squared_sum : float
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like of shape (n_samples,)
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
l1_ratio : float
The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
combination of L1 and L2.
score_params : dict
Parameters to pass to the `score` method of the underlying scorer.
Returns
-------
coefs : ndarray of shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray of shape (n_cs,)
Scores obtained for each Cs.
n_iter : ndarray of shape (n_cs,)
Actual number of iteration for each Cs.
"""
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
sw_train, sw_test = None, None
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
sw_train = sample_weight[train]
sw_test = sample_weight[test]
coefs, Cs, n_iter = _logistic_regression_path(
X_train,
y_train,
Cs=Cs,
l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
solver=solver,
max_iter=max_iter,
class_weight=class_weight,
pos_class=pos_class,
multi_class=multi_class,
tol=tol,
verbose=verbose,
dual=dual,
penalty=penalty,
intercept_scaling=intercept_scaling,
random_state=random_state,
check_input=False,
max_squared_sum=max_squared_sum,
sample_weight=sw_train,
)
log_reg = LogisticRegression(solver=solver, multi_class=multi_class)
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == "ovr":
log_reg.classes_ = np.array([-1, 1])
elif multi_class == "multinomial":
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError(
"multi_class should be either multinomial or ovr, got %d" % multi_class
)
if pos_class is not None:
mask = y_test == pos_class
y_test = np.ones(y_test.shape, dtype=np.float64)
y_test[~mask] = -1.0
scores = list()
scoring = get_scorer(scoring)
for w in coefs:
if multi_class == "ovr":
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.0
if scoring is None:
scores.append(log_reg.score(X_test, y_test, sample_weight=sw_test))
else:
score_params = score_params or {}
score_params = _check_method_params(X=X, params=score_params, indices=test)
scores.append(scoring(log_reg, X_test, y_test, **score_params))
return coefs, Cs, np.array(scores), n_iter
| _log_reg_scoring_path | Repo-Level |
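A simplified sketch of per-C scoring on a single CV split, using only the public estimator and `get_scorer`; the dataset, split, and Cs grid are assumptions.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import get_scorer

X, y = make_classification(n_samples=200, random_state=0)
train, test = np.arange(150), np.arange(150, 200)
Cs = np.logspace(-4, 4, 5)
scorer = get_scorer("accuracy")

scores = []
for C in Cs:
    clf = LogisticRegression(C=C, max_iter=1000).fit(X[train], y[train])
    scores.append(scorer(clf, X[test], y[test]))
print(np.array(scores).shape)  # (n_cs,), matching the documented return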
scikit-learn | 256 | sklearn/linear_model/_logistic.py | def _logistic_regression_path(
X,
y,
pos_class=None,
Cs=10,
fit_intercept=True,
max_iter=100,
tol=1e-4,
verbose=0,
solver="lbfgs",
coef=None,
class_weight=None,
dual=False,
penalty="l2",
intercept_scaling=1.0,
multi_class="auto",
random_state=None,
check_input=True,
max_squared_sum=None,
sample_weight=None,
l1_ratio=None,
n_threads=1,
):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Note that there will be no speedup with liblinear solver, since it does
not handle warm-starting.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Input data, target values.
pos_class : int, default=None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : int or array-like of shape (n_cs,), default=10
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
fit_intercept : bool, default=True
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int, default=100
Maximum number of iterations for the solver.
tol : float, default=1e-4
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int, default=0
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'liblinear', 'newton-cg', 'newton-cholesky', 'sag', 'saga'}, \
default='lbfgs'
Numerical solver to use.
coef : array-like of shape (n_features,), default=None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
dual : bool, default=False
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : {'l1', 'l2', 'elasticnet'}, default='l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
only supported by the 'saga' solver.
intercept_scaling : float, default=1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : {'ovr', 'multinomial', 'auto'}, default='auto'
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
and otherwise selects 'multinomial'.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
.. versionchanged:: 0.22
Default changed from 'ovr' to 'auto' in 0.22.
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the
data. See :term:`Glossary <random_state>` for details.
check_input : bool, default=True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default=None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like of shape (n_samples,), default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
l1_ratio : float, default=None
The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
combination of L1 and L2.
n_threads : int, default=1
Number of OpenMP threads to use.
Returns
-------
coefs : ndarray of shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept. For
``multi_class='multinomial'``, the shape is (n_classes, n_cs,
n_features) or (n_classes, n_cs, n_features + 1).
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array of shape (n_cs,)
Actual number of iteration for each Cs.
Notes
-----
You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
.. versionchanged:: 0.19
The "copy" parameter was removed.
"""
| /usr/src/app/target_test_cases/failed_tests__logistic_regression_path.txt | def _logistic_regression_path(
X,
y,
pos_class=None,
Cs=10,
fit_intercept=True,
max_iter=100,
tol=1e-4,
verbose=0,
solver="lbfgs",
coef=None,
class_weight=None,
dual=False,
penalty="l2",
intercept_scaling=1.0,
multi_class="auto",
random_state=None,
check_input=True,
max_squared_sum=None,
sample_weight=None,
l1_ratio=None,
n_threads=1,
):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Note that there will be no speedup with liblinear solver, since it does
not handle warm-starting.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Input data, target values.
pos_class : int, default=None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : int or array-like of shape (n_cs,), default=10
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
fit_intercept : bool, default=True
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int, default=100
Maximum number of iterations for the solver.
tol : float, default=1e-4
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int, default=0
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'liblinear', 'newton-cg', 'newton-cholesky', 'sag', 'saga'}, \
default='lbfgs'
Numerical solver to use.
coef : array-like of shape (n_features,), default=None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
dual : bool, default=False
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : {'l1', 'l2', 'elasticnet'}, default='l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
only supported by the 'saga' solver.
intercept_scaling : float, default=1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : {'ovr', 'multinomial', 'auto'}, default='auto'
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
and otherwise selects 'multinomial'.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
.. versionchanged:: 0.22
Default changed from 'ovr' to 'auto' in 0.22.
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the
data. See :term:`Glossary <random_state>` for details.
check_input : bool, default=True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default=None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like of shape (n_samples,), default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
l1_ratio : float, default=None
The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
combination of L1 and L2.
n_threads : int, default=1
Number of OpenMP threads to use.
Returns
-------
coefs : ndarray of shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept. For
``multi_class='multinomial'``, the shape is (n_classes, n_cs,
n_features) or (n_classes, n_cs, n_features + 1).
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array of shape (n_cs,)
Actual number of iteration for each Cs.
Notes
-----
You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
.. versionchanged:: 0.19
The "copy" parameter was removed.
"""
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
solver = _check_solver(solver, penalty, dual)
# Preprocessing.
if check_input:
X = check_array(
X,
accept_sparse="csr",
dtype=np.float64,
accept_large_sparse=solver not in ["liblinear", "sag", "saga"],
)
y = check_array(y, ensure_2d=False, dtype=None)
check_consistent_length(X, y)
n_samples, n_features = X.shape
classes = np.unique(y)
random_state = check_random_state(random_state)
multi_class = _check_multi_class(multi_class, solver, len(classes))
if pos_class is None and multi_class != "multinomial":
if classes.size > 2:
raise ValueError("To fit OvR, use the pos_class argument")
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
if sample_weight is not None or class_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype, copy=True)
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "balanced", then
# the class_weights are assigned after masking the labels with a OvR.
le = LabelEncoder()
if isinstance(class_weight, dict) or (
multi_class == "multinomial" and class_weight is not None
):
class_weight_ = compute_class_weight(class_weight, classes=classes, y=y)
sample_weight *= class_weight_[le.fit_transform(y)]
# For doing a ovr, we need to mask the labels first. For the
# multinomial case this is not necessary.
if multi_class == "ovr":
w0 = np.zeros(n_features + int(fit_intercept), dtype=X.dtype)
mask = y == pos_class
y_bin = np.ones(y.shape, dtype=X.dtype)
if solver == "liblinear":
mask_classes = np.array([-1, 1])
y_bin[~mask] = -1.0
else:
# HalfBinomialLoss, used for those solvers, represents y in [0, 1] instead
# of in [-1, 1].
mask_classes = np.array([0, 1])
y_bin[~mask] = 0.0
# for compute_class_weight
if class_weight == "balanced":
class_weight_ = compute_class_weight(
class_weight, classes=mask_classes, y=y_bin
)
sample_weight *= class_weight_[le.fit_transform(y_bin)]
else:
if solver in ["sag", "saga", "lbfgs", "newton-cg"]:
# SAG, lbfgs and newton-cg multinomial solvers need LabelEncoder,
# not LabelBinarizer, i.e. y as a 1d-array of integers.
# LabelEncoder also saves memory compared to LabelBinarizer, especially
# when n_classes is large.
le = LabelEncoder()
Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)
else:
# For liblinear solver, apply LabelBinarizer, i.e. y is one-hot encoded.
lbin = LabelBinarizer()
Y_multi = lbin.fit_transform(y)
if Y_multi.shape[1] == 1:
Y_multi = np.hstack([1 - Y_multi, Y_multi])
w0 = np.zeros(
(classes.size, n_features + int(fit_intercept)), order="F", dtype=X.dtype
)
# IMPORTANT NOTE:
# All solvers relying on LinearModelLoss need to scale the penalty with n_samples
# or the sum of sample weights because the implemented logistic regression
# objective here is (unfortunately)
# C * sum(pointwise_loss) + penalty
# instead of (as LinearModelLoss does)
# mean(pointwise_loss) + 1/C * penalty
if solver in ["lbfgs", "newton-cg", "newton-cholesky"]:
# This needs to be calculated after sample_weight is multiplied by
# class_weight. It is even tested that passing class_weight is equivalent to
# passing sample_weights according to class_weight.
sw_sum = n_samples if sample_weight is None else np.sum(sample_weight)
if coef is not None:
# it must work both giving the bias term and not
if multi_class == "ovr":
if coef.size not in (n_features, w0.size):
raise ValueError(
"Initialization coef is of shape %d, expected shape %d or %d"
% (coef.size, n_features, w0.size)
)
w0[: coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_classes = classes.size
if n_classes == 2:
n_classes = 1
if coef.shape[0] != n_classes or coef.shape[1] not in (
n_features,
n_features + 1,
):
raise ValueError(
"Initialization coef is of shape (%d, %d), expected "
"shape (%d, %d) or (%d, %d)"
% (
coef.shape[0],
coef.shape[1],
classes.size,
n_features,
classes.size,
n_features + 1,
)
)
if n_classes == 1:
w0[0, : coef.shape[1]] = -coef
w0[1, : coef.shape[1]] = coef
else:
w0[:, : coef.shape[1]] = coef
if multi_class == "multinomial":
if solver in ["lbfgs", "newton-cg"]:
# scipy.optimize.minimize and newton-cg accept only ravelled parameters,
# i.e. 1d-arrays. LinearModelLoss expects classes to be contiguous and
# reconstructs the 2d-array via w0.reshape((n_classes, -1), order="F").
# As w0 is F-contiguous, ravel(order="F") also avoids a copy.
w0 = w0.ravel(order="F")
loss = LinearModelLoss(
base_loss=HalfMultinomialLoss(n_classes=classes.size),
fit_intercept=fit_intercept,
)
target = Y_multi
if solver == "lbfgs":
func = loss.loss_gradient
elif solver == "newton-cg":
func = loss.loss
grad = loss.gradient
hess = loss.gradient_hessian_product # hess = [gradient, hessp]
warm_start_sag = {"coef": w0.T}
else:
target = y_bin
if solver == "lbfgs":
loss = LinearModelLoss(
base_loss=HalfBinomialLoss(), fit_intercept=fit_intercept
)
func = loss.loss_gradient
elif solver == "newton-cg":
loss = LinearModelLoss(
base_loss=HalfBinomialLoss(), fit_intercept=fit_intercept
)
func = loss.loss
grad = loss.gradient
hess = loss.gradient_hessian_product # hess = [gradient, hessp]
elif solver == "newton-cholesky":
loss = LinearModelLoss(
base_loss=HalfBinomialLoss(), fit_intercept=fit_intercept
)
warm_start_sag = {"coef": np.expand_dims(w0, axis=1)}
coefs = list()
n_iter = np.zeros(len(Cs), dtype=np.int32)
for i, C in enumerate(Cs):
if solver == "lbfgs":
l2_reg_strength = 1.0 / (C * sw_sum)
iprint = [-1, 50, 1, 100, 101][
np.searchsorted(np.array([0, 1, 2, 3]), verbose)
]
opt_res = optimize.minimize(
func,
w0,
method="L-BFGS-B",
jac=True,
args=(X, target, sample_weight, l2_reg_strength, n_threads),
options={
"maxiter": max_iter,
"maxls": 50, # default is 20
"iprint": iprint,
"gtol": tol,
"ftol": 64 * np.finfo(float).eps,
},
)
n_iter_i = _check_optimize_result(
solver,
opt_res,
max_iter,
extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG,
)
w0, loss = opt_res.x, opt_res.fun
elif solver == "newton-cg":
l2_reg_strength = 1.0 / (C * sw_sum)
args = (X, target, sample_weight, l2_reg_strength, n_threads)
w0, n_iter_i = _newton_cg(
grad_hess=hess,
func=func,
grad=grad,
x0=w0,
args=args,
maxiter=max_iter,
tol=tol,
verbose=verbose,
)
elif solver == "newton-cholesky":
l2_reg_strength = 1.0 / (C * sw_sum)
sol = NewtonCholeskySolver(
coef=w0,
linear_loss=loss,
l2_reg_strength=l2_reg_strength,
tol=tol,
max_iter=max_iter,
n_threads=n_threads,
verbose=verbose,
)
w0 = sol.solve(X=X, y=target, sample_weight=sample_weight)
n_iter_i = sol.iteration
elif solver == "liblinear":
(
coef_,
intercept_,
n_iter_i,
) = _fit_liblinear(
X,
target,
C,
fit_intercept,
intercept_scaling,
None,
penalty,
dual,
verbose,
max_iter,
tol,
random_state,
sample_weight=sample_weight,
)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
# n_iter_i is an array with one entry per class. However, `target` is always
# encoded in {-1, 1}, so we only take the first element of n_iter_i.
n_iter_i = n_iter_i.item()
elif solver in ["sag", "saga"]:
if multi_class == "multinomial":
target = target.astype(X.dtype, copy=False)
loss = "multinomial"
else:
loss = "log"
# alpha is for L2-norm, beta is for L1-norm
if penalty == "l1":
alpha = 0.0
beta = 1.0 / C
elif penalty == "l2":
alpha = 1.0 / C
beta = 0.0
else: # Elastic-Net penalty
alpha = (1.0 / C) * (1 - l1_ratio)
beta = (1.0 / C) * l1_ratio
w0, n_iter_i, warm_start_sag = sag_solver(
X,
target,
sample_weight,
loss,
alpha,
beta,
max_iter,
tol,
verbose,
random_state,
False,
max_squared_sum,
warm_start_sag,
is_saga=(solver == "saga"),
)
else:
raise ValueError(
"solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg', 'sag'}, got '%s' instead" % solver
)
if multi_class == "multinomial":
n_classes = max(2, classes.size)
if solver in ["lbfgs", "newton-cg"]:
multi_w0 = np.reshape(w0, (n_classes, -1), order="F")
else:
multi_w0 = w0
if n_classes == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0.copy())
else:
coefs.append(w0.copy())
n_iter[i] = n_iter_i
return np.array(coefs), np.array(Cs), n_iter
| _logistic_regression_path | Repo-Level |
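A minimal numeric sketch of the penalty rescaling noted inside the function above; all names are local to the sketch, and unit sample weights are assumed so that sw_sum equals n_samples. It checks that the legacy objective C * sum(loss) + 0.5 * ||w||^2 is exactly C * sw_sum times the LinearModelLoss objective mean(loss) + l2_reg_strength * 0.5 * ||w||^2 when l2_reg_strength = 1 / (C * sw_sum).
import numpy as np

rng = np.random.default_rng(0)
losses = rng.random(50)        # stand-in for the pointwise losses
w = rng.standard_normal(5)     # stand-in for the coefficient vector
C, sw_sum = 0.5, losses.size   # unit sample weights, so sw_sum == n_samples

legacy = C * losses.sum() + 0.5 * (w @ w)
rescaled = losses.mean() + (1.0 / (C * sw_sum)) * 0.5 * (w @ w)
assert np.isclose(legacy, C * sw_sum * rescaled)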
scikit-learn | 257 | sklearn/cluster/_kmeans.py | def _mini_batch_step(
X,
sample_weight,
centers,
centers_new,
weight_sums,
random_state,
random_reassign=False,
reassignment_ratio=0.01,
verbose=False,
n_threads=1,
):
"""Incremental update of the centers for the Minibatch K-Means algorithm.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The original data array. If sparse, must be in CSR format.
sample_weight : ndarray of shape (n_samples,)
The weights for each observation in `X`.
centers : ndarray of shape (n_clusters, n_features)
The cluster centers before the current iteration.
centers_new : ndarray of shape (n_clusters, n_features)
The cluster centers after the current iteration. Modified in-place.
weight_sums : ndarray of shape (n_clusters,)
The vector in which we keep track of the numbers of points in a
cluster. This array is modified in place.
random_state : RandomState instance
Determines random number generation for low count centers reassignment.
See :term:`Glossary <random_state>`.
random_reassign : boolean, default=False
If True, centers with very low counts are randomly reassigned
to observations.
reassignment_ratio : float, default=0.01
Controls the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more likely to be reassigned, which means that the
model will take longer to converge, but should converge to a
better clustering.
verbose : bool, default=False
Controls the verbosity.
n_threads : int, default=1
The number of OpenMP threads to use for the computation.
Returns
-------
inertia : float
Sum of squared distances of samples to their closest cluster center.
The inertia is computed after finding the labels and before updating
the centers.
"""
| /usr/src/app/target_test_cases/failed_tests__mini_batch_step.txt | def _mini_batch_step(
X,
sample_weight,
centers,
centers_new,
weight_sums,
random_state,
random_reassign=False,
reassignment_ratio=0.01,
verbose=False,
n_threads=1,
):
"""Incremental update of the centers for the Minibatch K-Means algorithm.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The original data array. If sparse, must be in CSR format.
sample_weight : ndarray of shape (n_samples,)
The weights for each observation in `X`.
centers : ndarray of shape (n_clusters, n_features)
The cluster centers before the current iteration.
centers_new : ndarray of shape (n_clusters, n_features)
The cluster centers after the current iteration. Modified in-place.
weight_sums : ndarray of shape (n_clusters,)
The vector in which we keep track of the numbers of points in a
cluster. This array is modified in place.
random_state : RandomState instance
Determines random number generation for low count centers reassignment.
See :term:`Glossary <random_state>`.
random_reassign : boolean, default=False
If True, centers with very low counts are randomly reassigned
to observations.
reassignment_ratio : float, default=0.01
Controls the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more likely to be reassigned, which means that the
model will take longer to converge, but should converge to a
better clustering.
verbose : bool, default=False
Controls the verbosity.
n_threads : int, default=1
The number of OpenMP threads to use for the computation.
Returns
-------
inertia : float
Sum of squared distances of samples to their closest cluster center.
The inertia is computed after finding the labels and before updating
the centers.
"""
# Perform label assignment to nearest centers
# For efficiency, it is better to run _mini_batch_step in a
# threadpool_limits context than to use _labels_inertia_threadpool_limit here
labels, inertia = _labels_inertia(X, sample_weight, centers, n_threads=n_threads)
# Update centers according to the labels
if sp.issparse(X):
_minibatch_update_sparse(
X, sample_weight, centers, centers_new, weight_sums, labels, n_threads
)
else:
_minibatch_update_dense(
X,
sample_weight,
centers,
centers_new,
weight_sums,
labels,
n_threads,
)
# Reassign clusters that have very low weight
if random_reassign and reassignment_ratio > 0:
to_reassign = weight_sums < reassignment_ratio * weight_sums.max()
# pick at most .5 * batch_size samples as new centers
if to_reassign.sum() > 0.5 * X.shape[0]:
indices_dont_reassign = np.argsort(weight_sums)[int(0.5 * X.shape[0]) :]
to_reassign[indices_dont_reassign] = False
n_reassigns = to_reassign.sum()
if n_reassigns:
# Pick new clusters amongst observations with uniform probability
new_centers = random_state.choice(
X.shape[0], replace=False, size=n_reassigns
)
if verbose:
print(f"[MiniBatchKMeans] Reassigning {n_reassigns} cluster centers.")
if sp.issparse(X):
assign_rows_csr(
X,
new_centers.astype(np.intp, copy=False),
np.where(to_reassign)[0].astype(np.intp, copy=False),
centers_new,
)
else:
centers_new[to_reassign] = X[new_centers]
# reset counts of reassigned centers, but don't reset them too small
# to avoid instant reassignment. This is a pretty dirty hack as it
# also modifies the learning rates.
weight_sums[to_reassign] = np.min(weight_sums[~to_reassign])
return inertia
| _mini_batch_step | File-Level |
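_mini_batch_step is private, so a hedged usage sketch goes through the public MiniBatchKMeans estimator, which calls it once per mini-batch; reassignment_ratio maps directly to the low-weight reassignment branch at the end of the function above.
import numpy as np
from sklearn.cluster import MiniBatchKMeans

X = np.random.default_rng(0).standard_normal((1000, 2))
mbk = MiniBatchKMeans(
    n_clusters=5,
    batch_size=256,
    reassignment_ratio=0.01,  # centers below this fraction of the max weight get reassigned
    random_state=0,
)
mbk.fit(X)
print(mbk.inertia_)  # sum of squared distances to the closest centers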
scikit-learn | 260 | sklearn/utils/optimize.py | def _newton_cg(
grad_hess,
func,
grad,
x0,
args=(),
tol=1e-4,
maxiter=100,
maxinner=200,
line_search=True,
warn=True,
verbose=0,
):
"""
Minimization of scalar function of one or more variables using the
Newton-CG algorithm.
Parameters
----------
grad_hess : callable
Should return the gradient and a callable returning the matvec product
of the Hessian.
func : callable
Should return the value of the function.
grad : callable
Should return the gradient. This is used by the line search
functions.
x0 : array of float
Initial guess.
args : tuple, default=()
Arguments passed to func_grad_hess, func and grad.
tol : float, default=1e-4
Stopping criterion. The iteration will stop when
``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
maxiter : int, default=100
Number of Newton iterations.
maxinner : int, default=200
Number of CG iterations.
line_search : bool, default=True
Whether to use a line search or not.
warn : bool, default=True
Whether to warn when the solver did not converge.
Returns
-------
xk : ndarray of float
Estimated minimum.
"""
| /usr/src/app/target_test_cases/failed_tests__newton_cg.txt | def _newton_cg(
grad_hess,
func,
grad,
x0,
args=(),
tol=1e-4,
maxiter=100,
maxinner=200,
line_search=True,
warn=True,
verbose=0,
):
"""
Minimization of scalar function of one or more variables using the
Newton-CG algorithm.
Parameters
----------
grad_hess : callable
Should return the gradient and a callable returning the matvec product
of the Hessian.
func : callable
Should return the value of the function.
grad : callable
Should return the gradient. This is used by the line search
functions.
x0 : array of float
Initial guess.
args : tuple, default=()
Arguments passed to func_grad_hess, func and grad.
tol : float, default=1e-4
Stopping criterion. The iteration will stop when
``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
maxiter : int, default=100
Number of Newton iterations.
maxinner : int, default=200
Number of CG iterations.
line_search : bool, default=True
Whether to use a line search or not.
warn : bool, default=True
Whether to warn when the solver did not converge.
Returns
-------
xk : ndarray of float
Estimated minimum.
"""
x0 = np.asarray(x0).flatten()
xk = np.copy(x0)
k = 0
if line_search:
old_fval = func(x0, *args)
old_old_fval = None
else:
old_fval = 0
is_verbose = verbose > 0
# Outer loop: our Newton iteration
while k < maxiter:
# Compute a search direction pk by applying the CG method to
# del2 f(xk) p = - fgrad f(xk) starting from 0.
fgrad, fhess_p = grad_hess(xk, *args)
absgrad = np.abs(fgrad)
max_absgrad = np.max(absgrad)
check = max_absgrad <= tol
if is_verbose:
print(f"Newton-CG iter = {k}")
print(" Check Convergence")
print(f" max |gradient| <= tol: {max_absgrad} <= {tol} {check}")
if check:
break
maggrad = np.sum(absgrad)
eta = min([0.5, np.sqrt(maggrad)])
termcond = eta * maggrad
# Inner loop: solve the Newton update by conjugate gradient, to
# avoid inverting the Hessian
xsupi = _cg(fhess_p, fgrad, maxiter=maxinner, tol=termcond, verbose=verbose)
alphak = 1.0
if line_search:
try:
alphak, fc, gc, old_fval, old_old_fval, gfkp1 = _line_search_wolfe12(
func,
grad,
xk,
xsupi,
fgrad,
old_fval,
old_old_fval,
verbose=verbose,
args=args,
)
except _LineSearchError:
warnings.warn("Line Search failed")
break
xk += alphak * xsupi # upcast if necessary
k += 1
if warn and k >= maxiter:
warnings.warn(
(
f"newton-cg failed to converge at loss = {old_fval}. Increase the"
" number of iterations."
),
ConvergenceWarning,
)
elif is_verbose:
print(f" Solver did converge at loss = {old_fval}.")
return xk, k
| _newton_cg | File-Level |
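A sketch of calling the private helper directly on a strictly convex quadratic f(x) = 0.5 * x'Ax - b'x, whose minimizer solves A x = b; grad_hess returns the gradient together with a Hessian-vector product callable, matching the signature documented above.
import numpy as np
from sklearn.utils.optimize import _newton_cg

A = np.array([[3.0, 1.0], [1.0, 2.0]])
b = np.array([1.0, -1.0])

func = lambda x: 0.5 * x @ A @ x - b @ x
grad = lambda x: A @ x - b
grad_hess = lambda x: (A @ x - b, lambda p: A @ p)  # (gradient, hessp)

xk, n_iter = _newton_cg(grad_hess, func, grad, x0=np.zeros(2), tol=1e-8)
# The exact minimizer solves A x = b.
assert np.allclose(xk, np.linalg.solve(A, b), atol=1e-6)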
scikit-learn | 263 | sklearn/inspection/_partial_dependence.py | def _partial_dependence_brute(
est, grid, features, X, response_method, sample_weight=None
):
"""Calculate partial dependence via the brute force method.
The brute method explicitly averages the predictions of an estimator over a
grid of feature values.
For each `grid` value, all the samples from `X` have their variables of
interest replaced by that specific `grid` value. The predictions are then made
and averaged across the samples.
This method is slower than the `'recursion'`
(:func:`~sklearn.inspection._partial_dependence._partial_dependence_recursion`)
version for estimators with this second option. However, with the `'brute'`
force method, the average will be done with the given `X` and not the `X`
used during training, as it is done in the `'recursion'` version. Therefore
the average can always accept `sample_weight` (even when the estimator was
fitted without).
Parameters
----------
est : BaseEstimator
A fitted estimator object implementing :term:`predict`,
:term:`predict_proba`, or :term:`decision_function`.
Multioutput-multiclass classifiers are not supported.
grid : array-like of shape (n_points, n_target_features)
The grid of feature values for which the partial dependence is calculated.
Note that `n_points` is the number of points in the grid and `n_target_features`
is the number of features you are doing partial dependence at.
features : array-like of {int, str}
The feature (e.g. `[0]`) or pair of interacting features
(e.g. `[(0, 1)]`) for which the partial dependence should be computed.
X : array-like of shape (n_samples, n_features)
`X` is used to generate values for the complement features. That is, for
each value in `grid`, the method will average the prediction of each
sample from `X` having that grid value for `features`.
response_method : {'auto', 'predict_proba', 'decision_function'}, \
default='auto'
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the target response. For regressors
this parameter is ignored and the response is always the output of
:term:`predict`. By default, :term:`predict_proba` is tried first
and we revert to :term:`decision_function` if it doesn't exist.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights are used to calculate weighted means when averaging the
model output. If `None`, then samples are equally weighted. Note that
`sample_weight` does not change the individual predictions.
Returns
-------
averaged_predictions : array-like of shape (n_targets, n_points)
The averaged predictions for the given `grid` of features values.
Note that `n_targets` is the number of targets (e.g. 1 for binary
classification, `n_tasks` for multi-output regression, and `n_classes` for
multiclass classification) and `n_points` is the number of points in the `grid`.
predictions : array-like
The predictions for the given `grid` of features values over the samples
from `X`. For non-multioutput regression and binary classification the
shape is `(n_instances, n_points)` and for multi-output regression and
multiclass classification the shape is `(n_targets, n_instances, n_points)`,
where `n_targets` is the number of targets (`n_tasks` for multi-output
regression, and `n_classes` for multiclass classification), `n_instances`
is the number of instances in `X`, and `n_points` is the number of points
in the `grid`.
"""
| /usr/src/app/target_test_cases/failed_tests__partial_dependence_brute.txt | def _partial_dependence_brute(
est, grid, features, X, response_method, sample_weight=None
):
"""Calculate partial dependence via the brute force method.
The brute method explicitly averages the predictions of an estimator over a
grid of feature values.
For each `grid` value, all the samples from `X` have their variables of
interest replaced by that specific `grid` value. The predictions are then made
and averaged across the samples.
This method is slower than the `'recursion'`
(:func:`~sklearn.inspection._partial_dependence._partial_dependence_recursion`)
version for estimators with this second option. However, with the `'brute'`
force method, the average will be done with the given `X` and not the `X`
used during training, as it is done in the `'recursion'` version. Therefore
the average can always accept `sample_weight` (even when the estimator was
fitted without).
Parameters
----------
est : BaseEstimator
A fitted estimator object implementing :term:`predict`,
:term:`predict_proba`, or :term:`decision_function`.
Multioutput-multiclass classifiers are not supported.
grid : array-like of shape (n_points, n_target_features)
The grid of feature values for which the partial dependence is calculated.
Note that `n_points` is the number of points in the grid and `n_target_features`
is the number of features you are doing partial dependence at.
features : array-like of {int, str}
The feature (e.g. `[0]`) or pair of interacting features
(e.g. `[(0, 1)]`) for which the partial dependence should be computed.
X : array-like of shape (n_samples, n_features)
`X` is used to generate values for the complement features. That is, for
each value in `grid`, the method will average the prediction of each
sample from `X` having that grid value for `features`.
response_method : {'auto', 'predict_proba', 'decision_function'}, \
default='auto'
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the target response. For regressors
this parameter is ignored and the response is always the output of
:term:`predict`. By default, :term:`predict_proba` is tried first
and we revert to :term:`decision_function` if it doesn't exist.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights are used to calculate weighted means when averaging the
model output. If `None`, then samples are equally weighted. Note that
`sample_weight` does not change the individual predictions.
Returns
-------
averaged_predictions : array-like of shape (n_targets, n_points)
The averaged predictions for the given `grid` of features values.
Note that `n_targets` is the number of targets (e.g. 1 for binary
classification, `n_tasks` for multi-output regression, and `n_classes` for
multiclass classification) and `n_points` is the number of points in the `grid`.
predictions : array-like
The predictions for the given `grid` of features values over the samples
from `X`. For non-multioutput regression and binary classification the
shape is `(n_instances, n_points)` and for multi-output regression and
multiclass classification the shape is `(n_targets, n_instances, n_points)`,
where `n_targets` is the number of targets (`n_tasks` for multi-output
regression, and `n_classes` for multiclass classification), `n_instances`
is the number of instances in `X`, and `n_points` is the number of points
in the `grid`.
"""
predictions = []
averaged_predictions = []
# define the prediction_method (predict, predict_proba, decision_function).
if is_regressor(est):
prediction_method = est.predict
else:
predict_proba = getattr(est, "predict_proba", None)
decision_function = getattr(est, "decision_function", None)
if response_method == "auto":
# try predict_proba, then decision_function if it doesn't exist
prediction_method = predict_proba or decision_function
else:
prediction_method = (
predict_proba
if response_method == "predict_proba"
else decision_function
)
if prediction_method is None:
if response_method == "auto":
raise ValueError(
"The estimator has no predict_proba and no "
"decision_function method."
)
elif response_method == "predict_proba":
raise ValueError("The estimator has no predict_proba method.")
else:
raise ValueError("The estimator has no decision_function method.")
X_eval = X.copy()
for new_values in grid:
for i, variable in enumerate(features):
_safe_assign(X_eval, new_values[i], column_indexer=variable)
try:
# Note: predictions is of shape
# (n_points,) for non-multioutput regressors
# (n_points, n_tasks) for multioutput regressors
# (n_points, 1) for the regressors in cross_decomposition (I think)
# (n_points, 2) for binary classification
# (n_points, n_classes) for multiclass classification
pred = prediction_method(X_eval)
predictions.append(pred)
# average over samples
averaged_predictions.append(np.average(pred, axis=0, weights=sample_weight))
except NotFittedError as e:
raise ValueError("'estimator' parameter must be a fitted estimator") from e
n_samples = X.shape[0]
# reshape to (n_targets, n_instances, n_points) where n_targets is:
# - 1 for non-multioutput regression and binary classification (shape is
# already correct in those cases)
# - n_tasks for multi-output regression
# - n_classes for multiclass classification.
predictions = np.array(predictions).T
if is_regressor(est) and predictions.ndim == 2:
# non-multioutput regression, shape is (n_instances, n_points,)
predictions = predictions.reshape(n_samples, -1)
elif is_classifier(est) and predictions.shape[0] == 2:
# Binary classification, shape is (2, n_instances, n_points).
# we output the effect of **positive** class
predictions = predictions[1]
predictions = predictions.reshape(n_samples, -1)
# reshape averaged_predictions to (n_targets, n_points) where n_targets is:
# - 1 for non-multioutput regression and binary classification (shape is
# already correct in those cases)
# - n_tasks for multi-output regression
# - n_classes for multiclass classification.
averaged_predictions = np.array(averaged_predictions).T
if is_regressor(est) and averaged_predictions.ndim == 1:
# non-multioutput regression, shape is (n_points,)
averaged_predictions = averaged_predictions.reshape(1, -1)
elif is_classifier(est) and averaged_predictions.shape[0] == 2:
# Binary classification, shape is (2, n_points).
# we output the effect of **positive** class
averaged_predictions = averaged_predictions[1]
averaged_predictions = averaged_predictions.reshape(1, -1)
return averaged_predictions, predictions
| _partial_dependence_brute | Repo-Level |
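A hedged sketch through the public wrapper: sklearn.inspection.partial_dependence dispatches to _partial_dependence_brute when method="brute", and the averaged predictions documented above come back under the "average" key of the returned Bunch.
import numpy as np
from sklearn.datasets import make_regression
from sklearn.ensemble import RandomForestRegressor
from sklearn.inspection import partial_dependence

X, y = make_regression(n_samples=200, n_features=4, random_state=0)
est = RandomForestRegressor(n_estimators=10, random_state=0).fit(X, y)
result = partial_dependence(est, X, features=[0], method="brute")
print(result["average"].shape)  # (1, n_grid_points) for a single target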
scikit-learn | 266 | sklearn/utils/extmath.py | def _randomized_eigsh(
M,
n_components,
*,
n_oversamples=10,
n_iter="auto",
power_iteration_normalizer="auto",
selection="module",
random_state=None,
):
"""Computes a truncated eigendecomposition using randomized methods
This method solves the fixed-rank approximation problem described in the
Halko et al paper.
The choice of which components to select can be tuned with the `selection`
parameter.
.. versionadded:: 0.24
Parameters
----------
M : ndarray or sparse matrix
Matrix to decompose, it should be real symmetric square or complex
hermitian
n_components : int
Number of eigenvalues and vectors to extract.
n_oversamples : int, default=10
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples. A smaller
number can improve speed but can negatively impact the quality of the
approximation of eigenvectors and eigenvalues. Users might wish
to increase this parameter up to `2*k - n_components` where k is the
effective rank, for large matrices, noisy problems, or matrices with
slowly decaying spectra, or to increase precision. See Halko
et al (pages 5, 23 and 26).
n_iter : int or 'auto', default='auto'
Number of power iterations. It can be used to deal with very noisy
problems. When 'auto', it is set to 4, unless `n_components` is small
(< .1 * min(X.shape)) in which case `n_iter` is set to 7.
This improves precision with few components. Note that in general
users should rather increase `n_oversamples` before increasing `n_iter`
as the principle of the randomized method is to avoid usage of these
more costly power iteration steps. When `n_components` is equal
to or greater than the effective matrix rank and the spectrum does not
present a slow decay, `n_iter=0` or `1` should even work fine in theory
(see Halko et al paper, page 9).
power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'
Whether the power iterations are normalized with step-by-step
QR factorization (the slowest but most accurate), 'none'
(the fastest but numerically unstable when `n_iter` is large, e.g.
typically 5 or larger), or 'LU' factorization (numerically stable
but can lose slightly in accuracy). The 'auto' mode applies no
normalization if `n_iter` <= 2 and switches to LU otherwise.
selection : {'value', 'module'}, default='module'
Strategy used to select the n components. When `selection` is `'value'`
(not yet implemented, will become the default when implemented), the
components corresponding to the n largest eigenvalues are returned.
When `selection` is `'module'`, the components corresponding to the n
eigenvalues with largest modules are returned.
random_state : int, RandomState instance, default=None
The seed of the pseudo random number generator to use when shuffling
the data, i.e. getting the random vectors to initialize the algorithm.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
eigendecomposition using randomized methods to speed up the computations.
This method is particularly fast on large matrices on which
you wish to extract only a small number of components. In order to
obtain further speed up, `n_iter` can be set <=2 (at the cost of
loss of precision). To increase the precision it is recommended to
increase `n_oversamples`, up to `2*k-n_components` where k is the
effective rank. Usually, `n_components` is chosen to be greater than k
so increasing `n_oversamples` up to `n_components` should be enough.
Strategy 'value': not implemented yet.
Algorithms 5.3, 5.4 and 5.5 in the Halko et al paper should provide good
candidates for a future implementation.
Strategy 'module':
The principle is that for diagonalizable matrices, the singular values and
eigenvalues are related: if t is an eigenvalue of A, then :math:`|t|` is a
singular value of A. This method relies on a randomized SVD to find the n
singular components corresponding to the n singular values with largest
modules, and then uses the signs of the singular vectors to find the true
sign of t: if the sign of left and right singular vectors are different
then the corresponding eigenvalue is negative.
Returns
-------
eigvals : 1D array of shape (n_components,) containing the `n_components`
eigenvalues selected (see ``selection`` parameter).
eigvecs : 2D array of shape (M.shape[0], n_components) containing the
`n_components` eigenvectors corresponding to the `eigvals`, in the
corresponding order. Note that this follows the `scipy.linalg.eigh`
convention.
See Also
--------
:func:`randomized_svd`
References
----------
* :arxiv:`"Finding structure with randomness:
Stochastic algorithms for constructing approximate matrix decompositions"
(Algorithm 4.3 for strategy 'module') <0909.4061>`
Halko, et al. (2009)
"""
| /usr/src/app/target_test_cases/failed_tests__randomized_eigsh.txt | def _randomized_eigsh(
M,
n_components,
*,
n_oversamples=10,
n_iter="auto",
power_iteration_normalizer="auto",
selection="module",
random_state=None,
):
"""Computes a truncated eigendecomposition using randomized methods
This method solves the fixed-rank approximation problem described in the
Halko et al paper.
The choice of which components to select can be tuned with the `selection`
parameter.
.. versionadded:: 0.24
Parameters
----------
M : ndarray or sparse matrix
Matrix to decompose, it should be real symmetric square or complex
hermitian
n_components : int
Number of eigenvalues and vectors to extract.
n_oversamples : int, default=10
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples. A smaller
number can improve speed but can negatively impact the quality of the
approximation of eigenvectors and eigenvalues. Users might wish
to increase this parameter up to `2*k - n_components` where k is the
effective rank, for large matrices, noisy problems, or matrices with
slowly decaying spectra, or to increase precision. See Halko
et al (pages 5, 23 and 26).
n_iter : int or 'auto', default='auto'
Number of power iterations. It can be used to deal with very noisy
problems. When 'auto', it is set to 4, unless `n_components` is small
(< .1 * min(X.shape)) in which case `n_iter` is set to 7.
This improves precision with few components. Note that in general
users should rather increase `n_oversamples` before increasing `n_iter`
as the principle of the randomized method is to avoid usage of these
more costly power iteration steps. When `n_components` is equal
to or greater than the effective matrix rank and the spectrum does not
present a slow decay, `n_iter=0` or `1` should even work fine in theory
(see Halko et al paper, page 9).
power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'
Whether the power iterations are normalized with step-by-step
QR factorization (the slowest but most accurate), 'none'
(the fastest but numerically unstable when `n_iter` is large, e.g.
typically 5 or larger), or 'LU' factorization (numerically stable
but can lose slightly in accuracy). The 'auto' mode applies no
normalization if `n_iter` <= 2 and switches to LU otherwise.
selection : {'value', 'module'}, default='module'
Strategy used to select the n components. When `selection` is `'value'`
(not yet implemented, will become the default when implemented), the
components corresponding to the n largest eigenvalues are returned.
When `selection` is `'module'`, the components corresponding to the n
eigenvalues with largest modules are returned.
random_state : int, RandomState instance, default=None
The seed of the pseudo random number generator to use when shuffling
the data, i.e. getting the random vectors to initialize the algorithm.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
eigendecomposition using randomized methods to speed up the computations.
This method is particularly fast on large matrices on which
you wish to extract only a small number of components. In order to
obtain further speed up, `n_iter` can be set <=2 (at the cost of
loss of precision). To increase the precision it is recommended to
increase `n_oversamples`, up to `2*k-n_components` where k is the
effective rank. Usually, `n_components` is chosen to be greater than k
so increasing `n_oversamples` up to `n_components` should be enough.
Strategy 'value': not implemented yet.
Algorithms 5.3, 5.4 and 5.5 in the Halko et al paper should provide good
candidates for a future implementation.
Strategy 'module':
The principle is that for diagonalizable matrices, the singular values and
eigenvalues are related: if t is an eigenvalue of A, then :math:`|t|` is a
singular value of A. This method relies on a randomized SVD to find the n
singular components corresponding to the n singular values with largest
modules, and then uses the signs of the singular vectors to find the true
sign of t: if the sign of left and right singular vectors are different
then the corresponding eigenvalue is negative.
Returns
-------
eigvals : 1D array of shape (n_components,) containing the `n_components`
eigenvalues selected (see ``selection`` parameter).
eigvecs : 2D array of shape (M.shape[0], n_components) containing the
`n_components` eigenvectors corresponding to the `eigvals`, in the
corresponding order. Note that this follows the `scipy.linalg.eigh`
convention.
See Also
--------
:func:`randomized_svd`
References
----------
* :arxiv:`"Finding structure with randomness:
Stochastic algorithms for constructing approximate matrix decompositions"
(Algorithm 4.3 for strategy 'module') <0909.4061>`
Halko, et al. (2009)
"""
if selection == "value": # pragma: no cover
# to do : an algorithm can be found in the Halko et al reference
raise NotImplementedError()
elif selection == "module":
# Note: no need for deterministic U and Vt (flip_sign=True),
# as we only use the dot product UVt afterwards
U, S, Vt = randomized_svd(
M,
n_components=n_components,
n_oversamples=n_oversamples,
n_iter=n_iter,
power_iteration_normalizer=power_iteration_normalizer,
flip_sign=False,
random_state=random_state,
)
eigvecs = U[:, :n_components]
eigvals = S[:n_components]
# Conversion of Singular values into Eigenvalues:
# For any eigenvalue t, the corresponding singular value is |t|.
# So if there is a negative eigenvalue t, the corresponding singular
# value will be -t, and the left (U) and right (V) singular vectors
# will have opposite signs.
# Fastest way: see <https://stackoverflow.com/a/61974002/7262247>
diag_VtU = np.einsum("ji,ij->j", Vt[:n_components, :], U[:, :n_components])
signs = np.sign(diag_VtU)
eigvals = eigvals * signs
else: # pragma: no cover
raise ValueError("Invalid `selection`: %r" % selection)
return eigvals, eigvecs
| _randomized_eigsh | File-Level |
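A small check of the 'module' strategy described above: build a symmetric matrix with one large-module negative eigenvalue and verify that its sign is recovered from the singular vectors (private helper; signature as shown above).
import numpy as np
from sklearn.utils.extmath import _randomized_eigsh

rng = np.random.default_rng(0)
Q, _ = np.linalg.qr(rng.standard_normal((6, 6)))
true_eigvals = np.array([-5.0, 4.0, 3.0, 1.0, 0.5, 0.1])
M = Q @ np.diag(true_eigvals) @ Q.T  # symmetric by construction

eigvals, eigvecs = _randomized_eigsh(M, n_components=2, random_state=0)
print(np.sort(eigvals))  # approximately [-5., 4.], the two largest modules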
scikit-learn | 273 | sklearn/calibration.py | def _sigmoid_calibration(
predictions, y, sample_weight=None, max_abs_prediction_threshold=30
):
"""Probability Calibration with sigmoid method (Platt 2000)
Parameters
----------
predictions : ndarray of shape (n_samples,)
The decision function or predict proba for the samples.
y : ndarray of shape (n_samples,)
The targets.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
max_abs_prediction_threshold : float, default=30
If the largest absolute value in `predictions` reaches this threshold,
the predictions are rescaled before fitting so that the optimization
stays well conditioned; the returned slope is scaled back accordingly.
Returns
-------
a : float
The slope.
b : float
The intercept.
References
----------
Platt, "Probabilistic Outputs for Support Vector Machines"
"""
| /usr/src/app/target_test_cases/failed_tests__sigmoid_calibration.txt | def _sigmoid_calibration(
predictions, y, sample_weight=None, max_abs_prediction_threshold=30
):
"""Probability Calibration with sigmoid method (Platt 2000)
Parameters
----------
predictions : ndarray of shape (n_samples,)
The decision function or predict proba for the samples.
y : ndarray of shape (n_samples,)
The targets.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
max_abs_prediction_threshold : float, default=30
If the largest absolute value in `predictions` reaches this threshold,
the predictions are rescaled before fitting so that the optimization
stays well conditioned; the returned slope is scaled back accordingly.
Returns
-------
a : float
The slope.
b : float
The intercept.
References
----------
Platt, "Probabilistic Outputs for Support Vector Machines"
"""
predictions = column_or_1d(predictions)
y = column_or_1d(y)
F = predictions # F follows Platt's notations
scale_constant = 1.0
max_prediction = np.max(np.abs(F))
# If the predictions have large values we scale them in order to bring
# them within a suitable range. This has no effect on the final
# (prediction) result because linear models like Logistic Regression
# without a penalty are invariant to multiplying the features by a
# constant.
if max_prediction >= max_abs_prediction_threshold:
scale_constant = max_prediction
# We rescale the features in a copy: inplace rescaling could confuse
# the caller and make the code harder to reason about.
F = F / scale_constant
# Bayesian priors (see Platt end of section 2.2):
# It corresponds to the number of samples, taking into account the
# `sample_weight`.
mask_negative_samples = y <= 0
if sample_weight is not None:
prior0 = (sample_weight[mask_negative_samples]).sum()
prior1 = (sample_weight[~mask_negative_samples]).sum()
else:
prior0 = float(np.sum(mask_negative_samples))
prior1 = y.shape[0] - prior0
T = np.zeros_like(y, dtype=predictions.dtype)
T[y > 0] = (prior1 + 1.0) / (prior1 + 2.0)
T[y <= 0] = 1.0 / (prior0 + 2.0)
bin_loss = HalfBinomialLoss()
def loss_grad(AB):
# .astype below is needed to ensure y_true and raw_prediction have the
# same dtype. With result = np.float64(0) * np.array([1, 2], dtype=np.float32)
# - in Numpy 2, result.dtype is float64
# - in Numpy<2, result.dtype is float32
raw_prediction = -(AB[0] * F + AB[1]).astype(dtype=predictions.dtype)
l, g = bin_loss.loss_gradient(
y_true=T,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
)
loss = l.sum()
# TODO: Remove casting to np.float64 when minimum supported SciPy is 1.11.2
# With SciPy >= 1.11.2, the LBFGS implementation will cast to float64
# https://github.com/scipy/scipy/pull/18825.
# Here we cast to float64 to support SciPy < 1.11.2
grad = np.asarray([-g @ F, -g.sum()], dtype=np.float64)
return loss, grad
AB0 = np.array([0.0, log((prior0 + 1.0) / (prior1 + 1.0))])
opt_result = minimize(
loss_grad,
AB0,
method="L-BFGS-B",
jac=True,
options={
"gtol": 1e-6,
"ftol": 64 * np.finfo(float).eps,
},
)
AB_ = opt_result.x
# The tuned multiplicative parameter is converted back to the original
# input feature scale. The offset parameter does not need rescaling since
# we did not rescale the outcome variable.
return AB_[0] / scale_constant, AB_[1]
| _sigmoid_calibration | Repo-Level |
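A hedged sketch of how the returned pair is used: the calibrated probability for a decision value f is expit(-(a * f + b)), matching the raw_prediction = -(AB[0] * F + AB[1]) construction in the loss above. The synthetic targets below are generated with a known link so that a should come out close to -1.5.
import numpy as np
from scipy.special import expit
from sklearn.calibration import _sigmoid_calibration

rng = np.random.default_rng(0)
f = 2.0 * rng.standard_normal(500)                   # synthetic decision values
y = (rng.random(500) < expit(1.5 * f)).astype(int)   # targets with known link

a, b = _sigmoid_calibration(f, y)
proba = expit(-(a * f + b))  # calibrated P(y=1 | f)
print(a, b)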
scikit-learn | 278 | sklearn/utils/_estimator_html_repr.py | def _write_label_html(
out,
name,
name_details,
name_caption=None,
doc_link_label=None,
outer_class="sk-label-container",
inner_class="sk-label",
checked=False,
doc_link="",
is_fitted_css_class="",
is_fitted_icon="",
):
"""Write labeled html with or without a dropdown with named details.
Parameters
----------
out : file-like object
The file to write the HTML representation to.
name : str
The label for the estimator. It corresponds either to the estimator class name
for a simple estimator or in the case of a `Pipeline` and `ColumnTransformer`,
it corresponds to the name of the step.
name_details : str
The details to show as content in the dropdown part of the toggleable label. It
can contain information such as non-default parameters or column information for
`ColumnTransformer`.
name_caption : str, default=None
The caption below the name. If `None`, no caption will be created.
doc_link_label : str, default=None
The label for the documentation link. If provided, the label would be
"Documentation for {doc_link_label}". Otherwise it will look for `name`.
outer_class : {"sk-label-container", "sk-item"}, default="sk-label-container"
The CSS class for the outer container.
inner_class : {"sk-label", "sk-estimator"}, default="sk-label"
The CSS class for the inner container.
checked : bool, default=False
Whether the dropdown is folded or not. With a single estimator, we intend to
unfold the content.
doc_link : str, default=""
The link to the documentation for the estimator. If an empty string, no link is
added to the diagram. This can be generated for an estimator if it uses the
`_HTMLDocumentationLinkMixin`.
is_fitted_css_class : {"", "fitted"}
The CSS class to indicate whether or not the estimator is fitted. The
empty string means that the estimator is not fitted and "fitted" means that the
estimator is fitted.
is_fitted_icon : str, default=""
The HTML representation to show the fitted information in the diagram. An empty
string means that no information is shown.
"""
| /usr/src/app/target_test_cases/failed_tests__write_label_html.txt | def _write_label_html(
out,
name,
name_details,
name_caption=None,
doc_link_label=None,
outer_class="sk-label-container",
inner_class="sk-label",
checked=False,
doc_link="",
is_fitted_css_class="",
is_fitted_icon="",
):
"""Write labeled html with or without a dropdown with named details.
Parameters
----------
out : file-like object
The file to write the HTML representation to.
name : str
The label for the estimator. It corresponds either to the estimator class name
for a simple estimator or in the case of a `Pipeline` and `ColumnTransformer`,
it corresponds to the name of the step.
name_details : str
The details to show as content in the dropdown part of the toggleable label. It
can contain information such as non-default parameters or column information for
`ColumnTransformer`.
name_caption : str, default=None
The caption below the name. If `None`, no caption will be created.
doc_link_label : str, default=None
The label for the documentation link. If provided, the label would be
"Documentation for {doc_link_label}". Otherwise it will look for `name`.
outer_class : {"sk-label-container", "sk-item"}, default="sk-label-container"
The CSS class for the outer container.
inner_class : {"sk-label", "sk-estimator"}, default="sk-label"
The CSS class for the inner container.
checked : bool, default=False
Whether the dropdown is folded or not. With a single estimator, we intend to
unfold the content.
doc_link : str, default=""
The link to the documentation for the estimator. If an empty string, no link is
added to the diagram. This can be generated for an estimator if it uses the
`_HTMLDocumentationLinkMixin`.
is_fitted_css_class : {"", "fitted"}
The CSS class to indicate whether or not the estimator is fitted. The
empty string means that the estimator is not fitted and "fitted" means that the
estimator is fitted.
is_fitted_icon : str, default=""
The HTML representation to show the fitted information in the diagram. An empty
string means that no information is shown.
"""
out.write(
f'<div class="{outer_class}"><div'
f' class="{inner_class} {is_fitted_css_class} sk-toggleable">'
)
name = html.escape(name)
if name_details is not None:
name_details = html.escape(str(name_details))
checked_str = "checked" if checked else ""
est_id = _ESTIMATOR_ID_COUNTER.get_id()
if doc_link:
doc_label = "<span>Online documentation</span>"
if doc_link_label is not None:
doc_label = f"<span>Documentation for {doc_link_label}</span>"
elif name is not None:
doc_label = f"<span>Documentation for {name}</span>"
doc_link = (
f'<a class="sk-estimator-doc-link {is_fitted_css_class}"'
f' rel="noreferrer" target="_blank" href="{doc_link}">?{doc_label}</a>'
)
name_caption_div = (
""
if name_caption is None
else f'<div class="caption">{html.escape(name_caption)}</div>'
)
name_caption_div = f"<div><div>{name}</div>{name_caption_div}</div>"
links_div = (
f"<div>{doc_link}{is_fitted_icon}</div>"
if doc_link or is_fitted_icon
else ""
)
label_html = (
f'<label for="{est_id}" class="sk-toggleable__label {is_fitted_css_class} '
f'sk-toggleable__label-arrow">{name_caption_div}{links_div}</label>'
)
fmt_str = (
f'<input class="sk-toggleable__control sk-hidden--visually" id="{est_id}" '
f'type="checkbox" {checked_str}>{label_html}<div '
f'class="sk-toggleable__content {is_fitted_css_class}"><pre>{name_details}'
"</pre></div> "
)
out.write(fmt_str)
else:
out.write(f"<label>{name}</label>")
out.write("</div></div>") # outer_class inner_class
| _write_label_html | Self-Contained |
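The helper writes straight into a file-like object, so a StringIO buffer is enough to inspect the generated fragment; a minimal sketch of the private API using the parameters documented above.
from io import StringIO
from sklearn.utils._estimator_html_repr import _write_label_html

out = StringIO()
_write_label_html(out, "StandardScaler", "StandardScaler()", checked=True)
fragment = out.getvalue()
assert "sk-toggleable" in fragment and "StandardScaler" in fragment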
scikit-learn | 285 | sklearn/utils/validation.py | def check_X_y(
X,
y,
accept_sparse=False,
*,
accept_large_sparse=True,
dtype="numeric",
order=None,
copy=False,
force_writeable=False,
force_all_finite="deprecated",
ensure_all_finite=None,
ensure_2d=True,
allow_nd=False,
multi_output=False,
ensure_min_samples=1,
ensure_min_features=1,
y_numeric=False,
estimator=None,
):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X to be 2D and y 1D. By
default, X is checked to be non-empty and containing only finite values.
Standard input checks are also applied to y, such as checking that y
does not have np.nan or np.inf targets. For multi-label y, set
multi_output=True to allow 2D and sparse y. If the dtype of X is
object, attempt converting to float, raising on failure.
Parameters
----------
X : {ndarray, list, sparse matrix}
Input data.
y : {ndarray, list, sparse matrix}
Labels.
accept_sparse : str, bool or list of str, default=False
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
accept_large_sparse : bool, default=True
If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by
accept_sparse, accept_large_sparse will cause it to be accepted only
if its indices are stored with a 32-bit dtype.
.. versionadded:: 0.20
dtype : 'numeric', type, list of type or None, default='numeric'
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion to the first type is only
performed if the dtype of the input is not in the list.
order : {'F', 'C'}, default=None
Whether an array will be forced to be fortran or c-style. If
`None`, then the input data's order is preserved when possible.
copy : bool, default=False
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_writeable : bool, default=False
Whether to force the output array to be writeable. If True, the returned array
is guaranteed to be writeable, which may require a copy. Otherwise the
writeability of the input array is preserved.
.. versionadded:: 1.6
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. This parameter
does not influence whether y can have np.inf, np.nan, pd.NA values.
The possibilities are:
- True: Force all values of X to be finite.
- False: accepts np.inf, np.nan, pd.NA in X.
- 'allow-nan': accepts only np.nan or pd.NA values in X. Values cannot
be infinite.
.. versionadded:: 0.20
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`
.. deprecated:: 1.6
`force_all_finite` was renamed to `ensure_all_finite` and will be removed
in 1.8.
ensure_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. This parameter
does not influence whether y can have np.inf, np.nan, pd.NA values.
The possibilities are:
- True: Force all values of X to be finite.
- False: accepts np.inf, np.nan, pd.NA in X.
- 'allow-nan': accepts only np.nan or pd.NA values in X. Values cannot
be infinite.
.. versionadded:: 1.6
`force_all_finite` was renamed to `ensure_all_finite`.
ensure_2d : bool, default=True
Whether to raise a value error if X is not 2D.
allow_nd : bool, default=False
Whether to allow X.ndim > 2.
multi_output : bool, default=False
Whether to allow 2D y (array or sparse matrix). If false, y will be
validated as a vector. y cannot have np.nan or np.inf values if
multi_output=True.
ensure_min_samples : int, default=1
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int, default=1
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : bool, default=False
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
estimator : str or estimator instance, default=None
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
y_converted : object
The converted and validated y.
Examples
--------
>>> from sklearn.utils.validation import check_X_y
>>> X = [[1, 2], [3, 4], [5, 6]]
>>> y = [1, 2, 3]
>>> X, y = check_X_y(X, y)
>>> X
array([[1, 2],
[3, 4],
[5, 6]])
>>> y
array([1, 2, 3])
"""
| /usr/src/app/target_test_cases/failed_tests_check_X_y.txt | def check_X_y(
X,
y,
accept_sparse=False,
*,
accept_large_sparse=True,
dtype="numeric",
order=None,
copy=False,
force_writeable=False,
force_all_finite="deprecated",
ensure_all_finite=None,
ensure_2d=True,
allow_nd=False,
multi_output=False,
ensure_min_samples=1,
ensure_min_features=1,
y_numeric=False,
estimator=None,
):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X to be 2D and y 1D. By
default, X is checked to be non-empty and containing only finite values.
Standard input checks are also applied to y, such as checking that y
does not have np.nan or np.inf targets. For multi-label y, set
multi_output=True to allow 2D and sparse y. If the dtype of X is
object, attempt converting to float, raising on failure.
Parameters
----------
X : {ndarray, list, sparse matrix}
Input data.
y : {ndarray, list, sparse matrix}
Labels.
accept_sparse : str, bool or list of str, default=False
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
accept_large_sparse : bool, default=True
If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by
accept_sparse, accept_large_sparse will cause it to be accepted only
if its indices are stored with a 32-bit dtype.
.. versionadded:: 0.20
dtype : 'numeric', type, list of type or None, default='numeric'
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion to the first type is only
performed if the dtype of the input is not in the list.
order : {'F', 'C'}, default=None
Whether an array will be forced to be fortran or c-style. If
`None`, then the input data's order is preserved when possible.
copy : bool, default=False
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_writeable : bool, default=False
Whether to force the output array to be writeable. If True, the returned array
is guaranteed to be writeable, which may require a copy. Otherwise the
writeability of the input array is preserved.
.. versionadded:: 1.6
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. This parameter
does not influence whether y can have np.inf, np.nan, pd.NA values.
The possibilities are:
- True: Force all values of X to be finite.
- False: accepts np.inf, np.nan, pd.NA in X.
- 'allow-nan': accepts only np.nan or pd.NA values in X. Values cannot
be infinite.
.. versionadded:: 0.20
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`
.. deprecated:: 1.6
`force_all_finite` was renamed to `ensure_all_finite` and will be removed
in 1.8.
ensure_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. This parameter
does not influence whether y can have np.inf, np.nan, pd.NA values.
The possibilities are:
- True: Force all values of X to be finite.
- False: accepts np.inf, np.nan, pd.NA in X.
- 'allow-nan': accepts only np.nan or pd.NA values in X. Values cannot
be infinite.
.. versionadded:: 1.6
`force_all_finite` was renamed to `ensure_all_finite`.
ensure_2d : bool, default=True
Whether to raise a value error if X is not 2D.
allow_nd : bool, default=False
Whether to allow X.ndim > 2.
multi_output : bool, default=False
Whether to allow 2D y (array or sparse matrix). If false, y will be
validated as a vector. y cannot have np.nan or np.inf values if
multi_output=True.
ensure_min_samples : int, default=1
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int, default=1
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : bool, default=False
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
estimator : str or estimator instance, default=None
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
y_converted : object
The converted and validated y.
Examples
--------
>>> from sklearn.utils.validation import check_X_y
>>> X = [[1, 2], [3, 4], [5, 6]]
>>> y = [1, 2, 3]
>>> X, y = check_X_y(X, y)
>>> X
array([[1, 2],
[3, 4],
[5, 6]])
>>> y
array([1, 2, 3])
"""
if y is None:
if estimator is None:
estimator_name = "estimator"
else:
estimator_name = _check_estimator_name(estimator)
raise ValueError(
f"{estimator_name} requires y to be passed, but the target y is None"
)
ensure_all_finite = _deprecate_force_all_finite(force_all_finite, ensure_all_finite)
X = check_array(
X,
accept_sparse=accept_sparse,
accept_large_sparse=accept_large_sparse,
dtype=dtype,
order=order,
copy=copy,
force_writeable=force_writeable,
ensure_all_finite=ensure_all_finite,
ensure_2d=ensure_2d,
allow_nd=allow_nd,
ensure_min_samples=ensure_min_samples,
ensure_min_features=ensure_min_features,
estimator=estimator,
input_name="X",
)
y = _check_y(y, multi_output=multi_output, y_numeric=y_numeric, estimator=estimator)
check_consistent_length(X, y)
return X, y
| check_X_y | Repo-Level |
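A hedged complement to the docstring example: check_X_y with a sparse X accepted via accept_sparse and a 2-D target allowed via multi_output, both of which are validated paths in the code above.
import numpy as np
from scipy import sparse
from sklearn.utils.validation import check_X_y

X = sparse.csr_matrix(np.arange(6.0).reshape(3, 2))
Y = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
X_checked, Y_checked = check_X_y(X, Y, accept_sparse="csr", multi_output=True)
print(X_checked.format, Y_checked.shape)  # csr (3, 2)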
scikit-learn | 286 | sklearn/utils/validation.py | def check_array(
array,
accept_sparse=False,
*,
accept_large_sparse=True,
dtype="numeric",
order=None,
copy=False,
force_writeable=False,
force_all_finite="deprecated",
ensure_all_finite=None,
ensure_non_negative=False,
ensure_2d=True,
allow_nd=False,
ensure_min_samples=1,
ensure_min_features=1,
estimator=None,
input_name="",
):
"""Input validation on an array, list, sparse matrix or similar.
By default, the input is checked to be a non-empty 2D array containing
only finite values. If the dtype of the array is object, attempt
converting to float, raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : str, bool or list/tuple of str, default=False
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
accept_large_sparse : bool, default=True
If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by
accept_sparse, accept_large_sparse=False will cause it to be accepted
only if its indices are stored with a 32-bit dtype.
.. versionadded:: 0.20
dtype : 'numeric', type, list of type or None, default='numeric'
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion to the first type is only
performed if the dtype of the input is not in the list.
order : {'F', 'C'} or None, default=None
Whether an array will be forced to be fortran or c-style.
When order is None (default), then if copy=False, nothing is ensured
about the memory layout of the output array; otherwise (copy=True)
the memory layout of the returned array is kept as close as possible
to the original array.
copy : bool, default=False
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_writeable : bool, default=False
Whether to force the output array to be writeable. If True, the returned array
is guaranteed to be writeable, which may require a copy. Otherwise the
writeability of the input array is preserved.
.. versionadded:: 1.6
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. The
possibilities are:
- True: Force all values of array to be finite.
- False: accepts np.inf, np.nan, pd.NA in array.
- 'allow-nan': accepts only np.nan and pd.NA values in array. Values
cannot be infinite.
.. versionadded:: 0.20
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`
.. deprecated:: 1.6
`force_all_finite` was renamed to `ensure_all_finite` and will be removed
in 1.8.
ensure_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. The
possibilities are:
- True: Force all values of array to be finite.
- False: accepts np.inf, np.nan, pd.NA in array.
- 'allow-nan': accepts only np.nan and pd.NA values in array. Values
cannot be infinite.
.. versionadded:: 1.6
`force_all_finite` was renamed to `ensure_all_finite`.
ensure_non_negative : bool, default=False
Make sure the array has only non-negative values. If True, an array that
contains negative values will raise a ValueError.
.. versionadded:: 1.6
ensure_2d : bool, default=True
Whether to raise a value error if array is not 2D.
allow_nd : bool, default=False
Whether to allow array.ndim > 2.
ensure_min_samples : int, default=1
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int, default=1
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
estimator : str or estimator instance, default=None
If passed, include the name of the estimator in warning messages.
input_name : str, default=""
The data name used to construct the error message. In particular
if `input_name` is "X" and the data has NaN values and
allow_nan is False, the error message will link to the imputer
documentation.
.. versionadded:: 1.1.0
Returns
-------
array_converted : object
The converted and validated array.
Examples
--------
>>> from sklearn.utils.validation import check_array
>>> X = [[1, 2, 3], [4, 5, 6]]
>>> X_checked = check_array(X)
>>> X_checked
array([[1, 2, 3], [4, 5, 6]])
"""
| /usr/src/app/target_test_cases/failed_tests_check_array.txt | def check_array(
array,
accept_sparse=False,
*,
accept_large_sparse=True,
dtype="numeric",
order=None,
copy=False,
force_writeable=False,
force_all_finite="deprecated",
ensure_all_finite=None,
ensure_non_negative=False,
ensure_2d=True,
allow_nd=False,
ensure_min_samples=1,
ensure_min_features=1,
estimator=None,
input_name="",
):
"""Input validation on an array, list, sparse matrix or similar.
By default, the input is checked to be a non-empty 2D array containing
only finite values. If the dtype of the array is object, attempt
converting to float, raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : str, bool or list/tuple of str, default=False
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
accept_large_sparse : bool, default=True
If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by
accept_sparse, accept_large_sparse=False will cause it to be accepted
only if its indices are stored with a 32-bit dtype.
.. versionadded:: 0.20
dtype : 'numeric', type, list of type or None, default='numeric'
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion to the first type is only
performed if the dtype of the input is not in the list.
order : {'F', 'C'} or None, default=None
Whether an array will be forced to be fortran or c-style.
When order is None (default), then if copy=False, nothing is ensured
about the memory layout of the output array; otherwise (copy=True)
the memory layout of the returned array is kept as close as possible
to the original array.
copy : bool, default=False
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_writeable : bool, default=False
Whether to force the output array to be writeable. If True, the returned array
is guaranteed to be writeable, which may require a copy. Otherwise the
writeability of the input array is preserved.
.. versionadded:: 1.6
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. The
possibilities are:
- True: Force all values of array to be finite.
- False: accepts np.inf, np.nan, pd.NA in array.
- 'allow-nan': accepts only np.nan and pd.NA values in array. Values
cannot be infinite.
.. versionadded:: 0.20
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`
.. deprecated:: 1.6
`force_all_finite` was renamed to `ensure_all_finite` and will be removed
in 1.8.
ensure_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. The
possibilities are:
- True: Force all values of array to be finite.
- False: accepts np.inf, np.nan, pd.NA in array.
- 'allow-nan': accepts only np.nan and pd.NA values in array. Values
cannot be infinite.
.. versionadded:: 1.6
`force_all_finite` was renamed to `ensure_all_finite`.
ensure_non_negative : bool, default=False
Make sure the array has only non-negative values. If True, an array that
contains negative values will raise a ValueError.
.. versionadded:: 1.6
ensure_2d : bool, default=True
Whether to raise a value error if array is not 2D.
allow_nd : bool, default=False
Whether to allow array.ndim > 2.
ensure_min_samples : int, default=1
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int, default=1
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
estimator : str or estimator instance, default=None
If passed, include the name of the estimator in warning messages.
input_name : str, default=""
The data name used to construct the error message. In particular
if `input_name` is "X" and the data has NaN values and
allow_nan is False, the error message will link to the imputer
documentation.
.. versionadded:: 1.1.0
Returns
-------
array_converted : object
The converted and validated array.
Examples
--------
>>> from sklearn.utils.validation import check_array
>>> X = [[1, 2, 3], [4, 5, 6]]
>>> X_checked = check_array(X)
>>> X_checked
array([[1, 2, 3], [4, 5, 6]])
"""
ensure_all_finite = _deprecate_force_all_finite(force_all_finite, ensure_all_finite)
if isinstance(array, np.matrix):
raise TypeError(
"np.matrix is not supported. Please convert to a numpy array with "
"np.asarray. For more information see: "
"https://numpy.org/doc/stable/reference/generated/numpy.matrix.html"
)
xp, is_array_api_compliant = get_namespace(array)
# store reference to original array to check if copy is needed when
# function returns
array_orig = array
# store whether originally we wanted numeric dtype
dtype_numeric = isinstance(dtype, str) and dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not is_array_api_compliant and not hasattr(dtype_orig, "kind"):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
# check if the object contains several dtypes (typically a pandas
# DataFrame), and store them. If not, store None.
dtypes_orig = None
pandas_requires_conversion = False
# track if we have a Series-like object to raise a better error message
type_if_series = None
if hasattr(array, "dtypes") and hasattr(array.dtypes, "__array__"):
# throw warning if columns are sparse. If all columns are sparse, then
# array.sparse exists and sparsity will be preserved (later).
with suppress(ImportError):
from pandas import SparseDtype
def is_sparse(dtype):
return isinstance(dtype, SparseDtype)
if not hasattr(array, "sparse") and array.dtypes.apply(is_sparse).any():
warnings.warn(
"pandas.DataFrame with sparse columns found."
"It will be converted to a dense numpy array."
)
dtypes_orig = list(array.dtypes)
pandas_requires_conversion = any(
_pandas_dtype_needs_early_conversion(i) for i in dtypes_orig
)
if all(isinstance(dtype_iter, np.dtype) for dtype_iter in dtypes_orig):
dtype_orig = np.result_type(*dtypes_orig)
elif pandas_requires_conversion and any(d == object for d in dtypes_orig):
# Force object if any of the dtypes is an object
dtype_orig = object
elif (_is_extension_array_dtype(array) or hasattr(array, "iloc")) and hasattr(
array, "dtype"
):
# array is a pandas series
type_if_series = type(array)
pandas_requires_conversion = _pandas_dtype_needs_early_conversion(array.dtype)
if isinstance(array.dtype, np.dtype):
dtype_orig = array.dtype
else:
# Set to None to let array.astype work out the best dtype
dtype_orig = None
if dtype_numeric:
if (
dtype_orig is not None
and hasattr(dtype_orig, "kind")
and dtype_orig.kind == "O"
):
# if input is object, convert to float.
dtype = xp.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if pandas_requires_conversion:
# pandas dataframe requires conversion earlier to handle extension dtypes with
# nans
# Use the original dtype for conversion if dtype is None
new_dtype = dtype_orig if dtype is None else dtype
array = array.astype(new_dtype)
# Since we converted here, we do not need to convert again later
dtype = None
if ensure_all_finite not in (True, False, "allow-nan"):
raise ValueError(
"ensure_all_finite should be a bool or 'allow-nan'. Got "
f"{ensure_all_finite!r} instead."
)
if dtype is not None and _is_numpy_namespace(xp):
# convert to dtype object to conform to Array API to be use `xp.isdtype` later
dtype = np.dtype(dtype)
estimator_name = _check_estimator_name(estimator)
context = " by %s" % estimator_name if estimator is not None else ""
# When all dataframe columns are sparse, convert to a sparse array
if hasattr(array, "sparse") and array.ndim > 1:
with suppress(ImportError):
from pandas import SparseDtype # noqa: F811
def is_sparse(dtype):
return isinstance(dtype, SparseDtype)
if array.dtypes.apply(is_sparse).all():
# DataFrame.sparse only supports `to_coo`
array = array.sparse.to_coo()
if array.dtype == np.dtype("object"):
unique_dtypes = set([dt.subtype.name for dt in array_orig.dtypes])
if len(unique_dtypes) > 1:
raise ValueError(
"Pandas DataFrame with mixed sparse extension arrays "
"generated a sparse matrix with object dtype which "
"can not be converted to a scipy sparse matrix."
"Sparse extension arrays should all have the same "
"numeric type."
)
if sp.issparse(array):
_ensure_no_complex_data(array)
array = _ensure_sparse_format(
array,
accept_sparse=accept_sparse,
dtype=dtype,
copy=copy,
ensure_all_finite=ensure_all_finite,
accept_large_sparse=accept_large_sparse,
estimator_name=estimator_name,
input_name=input_name,
)
if ensure_2d and array.ndim < 2:
raise ValueError(
f"Expected 2D input, got input with shape {array.shape}.\n"
"Reshape your data either using array.reshape(-1, 1) if "
"your data has a single feature or array.reshape(1, -1) "
"if it contains a single sample."
)
else:
# If np.array(..) gives ComplexWarning, then we convert the warning
# to an error. This is needed because specifying a non complex
# dtype to the function converts complex to real dtype,
# thereby passing the test made in the lines following the scope
# of warnings context manager.
with warnings.catch_warnings():
try:
warnings.simplefilter("error", ComplexWarning)
if dtype is not None and xp.isdtype(dtype, "integral"):
# Conversion float -> int should not contain NaN or
# inf (numpy#14412). We cannot use casting='safe' because
# then conversion float -> int would be disallowed.
array = _asarray_with_order(array, order=order, xp=xp)
if xp.isdtype(array.dtype, ("real floating", "complex floating")):
_assert_all_finite(
array,
allow_nan=False,
msg_dtype=dtype,
estimator_name=estimator_name,
input_name=input_name,
)
array = xp.astype(array, dtype, copy=False)
else:
array = _asarray_with_order(array, order=order, dtype=dtype, xp=xp)
except ComplexWarning as complex_warning:
raise ValueError(
"Complex data not supported\n{}\n".format(array)
) from complex_warning
# It is possible that the np.array(..) gave no warning. This happens
# when no dtype conversion happened, for example dtype = None. The
# result is that np.array(..) produces an array of complex dtype
# and we need to catch and raise exception for such cases.
_ensure_no_complex_data(array)
if ensure_2d:
# If input is scalar raise error
if array.ndim == 0:
raise ValueError(
"Expected 2D array, got scalar array instead:\narray={}.\n"
"Reshape your data either using array.reshape(-1, 1) if "
"your data has a single feature or array.reshape(1, -1) "
"if it contains a single sample.".format(array)
)
# If input is 1D raise error
if array.ndim == 1:
# If input is a Series-like object (eg. pandas Series or polars Series)
if type_if_series is not None:
msg = (
f"Expected a 2-dimensional container but got {type_if_series} "
"instead. Pass a DataFrame containing a single row (i.e. "
"single sample) or a single column (i.e. single feature) "
"instead."
)
else:
msg = (
f"Expected 2D array, got 1D array instead:\narray={array}.\n"
"Reshape your data either using array.reshape(-1, 1) if "
"your data has a single feature or array.reshape(1, -1) "
"if it contains a single sample."
)
raise ValueError(msg)
if dtype_numeric and hasattr(array.dtype, "kind") and array.dtype.kind in "USV":
raise ValueError(
"dtype='numeric' is not compatible with arrays of bytes/strings."
"Convert your data to numeric values explicitly instead."
)
if not allow_nd and array.ndim >= 3:
raise ValueError(
"Found array with dim %d. %s expected <= 2."
% (array.ndim, estimator_name)
)
if ensure_all_finite:
_assert_all_finite(
array,
input_name=input_name,
estimator_name=estimator_name,
allow_nan=ensure_all_finite == "allow-nan",
)
if copy:
if _is_numpy_namespace(xp):
# only make a copy if `array` and `array_orig` may share memory
if np.may_share_memory(array, array_orig):
array = _asarray_with_order(
array, dtype=dtype, order=order, copy=True, xp=xp
)
else:
# always make a copy for non-numpy arrays
array = _asarray_with_order(
array, dtype=dtype, order=order, copy=True, xp=xp
)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError(
"Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required%s."
% (n_samples, array.shape, ensure_min_samples, context)
)
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError(
"Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required%s."
% (n_features, array.shape, ensure_min_features, context)
)
if ensure_non_negative:
whom = input_name
if estimator_name:
whom += f" in {estimator_name}"
check_non_negative(array, whom)
if force_writeable:
# By default, array.copy() creates a C-ordered copy. We set order=K to
# preserve the order of the array.
copy_params = {"order": "K"} if not sp.issparse(array) else {}
array_data = array.data if sp.issparse(array) else array
flags = getattr(array_data, "flags", None)
if not getattr(flags, "writeable", True):
# This situation can only happen when copy=False, the array is read-only and
# a writeable output is requested. This is an ambiguous setting so we chose
# to always (except for one specific setting, see below) make a copy to
# ensure that the output is writeable, even if avoidable, to not overwrite
# the user's data by surprise.
if _is_pandas_df_or_series(array_orig):
try:
# In pandas >= 3, np.asarray(df), called earlier in check_array,
# returns a read-only intermediate array. It can be made writeable
# safely without copy because if the original DataFrame was backed
# by a read-only array, trying to change the flag would raise an
# error, in which case we make a copy.
array_data.flags.writeable = True
except ValueError:
array = array.copy(**copy_params)
else:
array = array.copy(**copy_params)
return array
| check_array | Repo-Level |
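For reference, a minimal usage sketch of check_array, assuming a recent scikit-learn release; exact dtypes and error wording vary by version and platform.
import numpy as np
from scipy import sparse
from sklearn.utils.validation import check_array

# A nested list is converted to a 2D ndarray; with dtype="numeric" the
# integer dtype is preserved because the input is not object-typed.
X = check_array([[1, 2], [3, 4]])
print(X.dtype, X.shape)  # e.g. int64 (2, 2)

# Sparse input is rejected unless accept_sparse allows it; a disallowed
# format is converted to the first listed format.
X_sp = sparse.csr_matrix(np.eye(3))
X_csc = check_array(X_sp, accept_sparse=["csc"])
print(X_csc.format)  # csc

# 1D input raises with a reshape hint because ensure_2d=True by default.
try:
    check_array(np.array([1.0, 2.0, 3.0]))
except ValueError as exc:
    print(type(exc).__name__)  # ValueError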
scikit-learn | 288 | sklearn/metrics/pairwise.py | def check_pairwise_arrays(
X,
Y,
*,
precomputed=False,
dtype="infer_float",
accept_sparse="csr",
force_all_finite="deprecated",
ensure_all_finite=None,
ensure_2d=True,
copy=False,
):
"""Set X and Y appropriately and checks inputs.
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats (or dtype if provided). Finally, the function
checks that the size of the second dimension of the two arrays is equal, or
the equivalent check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
precomputed : bool, default=False
True if X is to be treated as precomputed distances to the samples in
Y.
dtype : str, type, list of type or None, default="infer_float"
Data type required for X and Y. If "infer_float", the dtype will be an
appropriate float type selected by _return_float_dtype. If None, the
dtype of the input is preserved.
.. versionadded:: 0.18
accept_sparse : str, bool or list/tuple of str, default='csr'
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. The
possibilities are:
- True: Force all values of array to be finite.
- False: accepts np.inf, np.nan, pd.NA in array.
- 'allow-nan': accepts only np.nan and pd.NA values in array. Values
cannot be infinite.
.. versionadded:: 0.22
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`.
.. deprecated:: 1.6
`force_all_finite` was renamed to `ensure_all_finite` and will be removed
in 1.8.
ensure_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. The
possibilities are:
- True: Force all values of array to be finite.
- False: accepts np.inf, np.nan, pd.NA in array.
- 'allow-nan': accepts only np.nan and pd.NA values in array. Values
cannot be infinite.
.. versionadded:: 1.6
`force_all_finite` was renamed to `ensure_all_finite`.
ensure_2d : bool, default=True
Whether to raise an error when the input arrays are not 2-dimensional. Setting
this to `False` is necessary when using a custom metric with certain
non-numerical inputs (e.g. a list of strings).
.. versionadded:: 1.5
copy : bool, default=False
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
.. versionadded:: 0.22
Returns
-------
safe_X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
| /usr/src/app/target_test_cases/failed_tests_check_pairwise_arrays.txt | def check_pairwise_arrays(
X,
Y,
*,
precomputed=False,
dtype="infer_float",
accept_sparse="csr",
force_all_finite="deprecated",
ensure_all_finite=None,
ensure_2d=True,
copy=False,
):
"""Set X and Y appropriately and checks inputs.
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats (or dtype if provided). Finally, the function
checks that the size of the second dimension of the two arrays is equal, or
the equivalent check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
precomputed : bool, default=False
True if X is to be treated as precomputed distances to the samples in
Y.
dtype : str, type, list of type or None, default="infer_float"
Data type required for X and Y. If "infer_float", the dtype will be an
appropriate float type selected by _return_float_dtype. If None, the
dtype of the input is preserved.
.. versionadded:: 0.18
accept_sparse : str, bool or list/tuple of str, default='csr'
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. The
possibilities are:
- True: Force all values of array to be finite.
- False: accepts np.inf, np.nan, pd.NA in array.
- 'allow-nan': accepts only np.nan and pd.NA values in array. Values
cannot be infinite.
.. versionadded:: 0.22
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`.
.. deprecated:: 1.6
`force_all_finite` was renamed to `ensure_all_finite` and will be removed
in 1.8.
ensure_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. The
possibilities are:
- True: Force all values of array to be finite.
- False: accepts np.inf, np.nan, pd.NA in array.
- 'allow-nan': accepts only np.nan and pd.NA values in array. Values
cannot be infinite.
.. versionadded:: 1.6
`force_all_finite` was renamed to `ensure_all_finite`.
ensure_2d : bool, default=True
Whether to raise an error when the input arrays are not 2-dimensional. Setting
this to `False` is necessary when using a custom metric with certain
non-numerical inputs (e.g. a list of strings).
.. versionadded:: 1.5
copy : bool, default=False
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
.. versionadded:: 0.22
Returns
-------
safe_X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
ensure_all_finite = _deprecate_force_all_finite(force_all_finite, ensure_all_finite)
xp, _ = get_namespace(X, Y)
if any([issparse(X), issparse(Y)]) or _is_numpy_namespace(xp):
X, Y, dtype_float = _return_float_dtype(X, Y)
else:
dtype_float = _find_matching_floating_dtype(X, Y, xp=xp)
estimator = "check_pairwise_arrays"
if dtype == "infer_float":
dtype = dtype_float
if Y is X or Y is None:
X = Y = check_array(
X,
accept_sparse=accept_sparse,
dtype=dtype,
copy=copy,
ensure_all_finite=ensure_all_finite,
estimator=estimator,
ensure_2d=ensure_2d,
)
else:
X = check_array(
X,
accept_sparse=accept_sparse,
dtype=dtype,
copy=copy,
ensure_all_finite=ensure_all_finite,
estimator=estimator,
ensure_2d=ensure_2d,
)
Y = check_array(
Y,
accept_sparse=accept_sparse,
dtype=dtype,
copy=copy,
ensure_all_finite=ensure_all_finite,
estimator=estimator,
ensure_2d=ensure_2d,
)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError(
"Precomputed metric requires shape "
"(n_queries, n_indexed). Got (%d, %d) "
"for %d indexed." % (X.shape[0], X.shape[1], Y.shape[0])
)
elif ensure_2d and X.shape[1] != Y.shape[1]:
# Only check the number of features if 2d arrays are enforced. Otherwise,
# validation is left to the user for custom metrics.
raise ValueError(
"Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (X.shape[1], Y.shape[1])
)
return X, Y
| check_pairwise_arrays | Repo-Level |
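A short usage sketch of check_pairwise_arrays; the import from sklearn.metrics.pairwise matches the module path of the row above.
import numpy as np
from sklearn.metrics.pairwise import check_pairwise_arrays

X = np.array([[1, 2], [3, 4]])
safe_X, safe_Y = check_pairwise_arrays(X, None)
print(safe_X is safe_Y)  # True: Y is set as a pointer to X
print(safe_X.dtype)      # float64: integer input is upcast by "infer_float"

# With precomputed=True, shapes are validated as (n_queries, n_indexed).
D = np.zeros((2, 3))
Y = np.zeros((3, 2))
safe_D, safe_Y2 = check_pairwise_arrays(D, Y, precomputed=True)
print(safe_D.shape, safe_Y2.shape)  # (2, 3) (3, 2)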
scikit-learn | 293 | sklearn/cluster/_spectral.py | def discretize(
vectors, *, copy=True, max_svd_restarts=30, n_iter_max=20, random_state=None
):
"""Search for a partition matrix which is closest to the eigenvector embedding.
This implementation was proposed in [1]_.
Parameters
----------
vectors : array-like of shape (n_samples, n_clusters)
The embedding space of the samples.
copy : bool, default=True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, default=30
Maximum number of attempts to restart SVD if convergence fails.
n_iter_max : int, default=20
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached.
random_state : int, RandomState instance, default=None
Determines random number generation for rotation matrix initialization.
Use an int to make the randomness deterministic.
See :term:`Glossary <random_state>`.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
.. [1] `Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
<https://people.eecs.berkeley.edu/~jordan/courses/281B-spring04/readings/yu-shi.pdf>`_
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
"""
| /usr/src/app/target_test_cases/failed_tests_discretize.txt | def discretize(
vectors, *, copy=True, max_svd_restarts=30, n_iter_max=20, random_state=None
):
"""Search for a partition matrix which is closest to the eigenvector embedding.
This implementation was proposed in [1]_.
Parameters
----------
vectors : array-like of shape (n_samples, n_clusters)
The embedding space of the samples.
copy : bool, default=True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, default=30
Maximum number of attempts to restart SVD if convergence fails.
n_iter_max : int, default=20
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached.
random_state : int, RandomState instance, default=None
Determines random number generation for rotation matrix initialization.
Use an int to make the randomness deterministic.
See :term:`Glossary <random_state>`.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
.. [1] `Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
<https://people.eecs.berkeley.edu/~jordan/courses/281B-spring04/readings/yu-shi.pdf>`_
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
"""
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / np.linalg.norm(vectors[:, i])) * norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors**2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
# If there is an exception we try to randomize and rerun SVD again
# do this max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components),
)
t_svd = vectors_discrete.T * vectors
try:
U, S, Vh = np.linalg.svd(t_svd)
except LinAlgError:
svd_restarts += 1
print("SVD did not converge, randomizing and trying again")
break
ncut_value = 2.0 * (n_samples - S.sum())
if (abs(ncut_value - last_objective_value) < eps) or (n_iter > n_iter_max):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError("SVD did not converge")
return labels
| discretize | Repo-Level |
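A minimal sketch of discretize on a hand-built embedding; note that the import path sklearn.cluster._spectral is private and may change between releases.
import numpy as np
from sklearn.cluster._spectral import discretize

rng = np.random.RandomState(0)
n = 20
# Two clusters encoded as noisy indicator vectors in a 2-column embedding.
emb = np.vstack([
    np.column_stack([np.ones(n), np.zeros(n)]),
    np.column_stack([np.zeros(n), np.ones(n)]),
]) + 0.01 * rng.randn(2 * n, 2)

labels = discretize(emb, random_state=0)
# In this clean setting each block should collapse to a single label.
print(len(set(labels[:n])), len(set(labels[n:])))  # 1 1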
scikit-learn | 295 | sklearn/covariance/_robust_covariance.py | def fast_mcd(
X,
support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None,
):
"""Estimate the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, default=None
The proportion of points to be included in the support of the raw
MCD estimate. Default is `None`, which implies that the minimum
value of `support_fraction` will be used within the algorithm:
`(n_samples + n_features + 1) / (2 * n_samples)`. This parameter must be
in the range (0, 1).
cov_computation_method : callable, \
default=:func:`sklearn.covariance.empirical_covariance`
The function which will be used to compute the covariance.
Must return an array of shape (n_features, n_features).
random_state : int, RandomState instance or None, default=None
Determines the pseudo random number generator for shuffling the data.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
location : ndarray of shape (n_features,)
Robust location of the data.
covariance : ndarray of shape (n_features, n_features)
Robust covariance of the features.
support : ndarray of shape (n_samples,), dtype=bool
A mask of the observations that have been used to compute
the robust location and covariance estimates of the data set.
dist : ndarray of shape (n_samples,)
Mahalanobis distances of the observations, computed with the raw
location and covariance estimates.
Notes
-----
The FastMCD algorithm has been introduced by Rousseeuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
The principle is to compute robust estimates on random subsets before
pooling them into larger subsets, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [RouseeuwVan]_,
see the MinCovDet object.
References
----------
.. [RouseeuwVan] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
"""
| /usr/src/app/target_test_cases/failed_tests_fast_mcd.txt | def fast_mcd(
X,
support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None,
):
"""Estimate the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, default=None
The proportion of points to be included in the support of the raw
MCD estimate. Default is `None`, which implies that the minimum
value of `support_fraction` will be used within the algorithm:
`(n_samples + n_features + 1) / (2 * n_samples)`. This parameter must be
in the range (0, 1).
cov_computation_method : callable, \
default=:func:`sklearn.covariance.empirical_covariance`
The function which will be used to compute the covariance.
Must return an array of shape (n_features, n_features).
random_state : int, RandomState instance or None, default=None
Determines the pseudo random number generator for shuffling the data.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
location : ndarray of shape (n_features,)
Robust location of the data.
covariance : ndarray of shape (n_features, n_features)
Robust covariance of the features.
support : ndarray of shape (n_samples,), dtype=bool
A mask of the observations that have been used to compute
the robust location and covariance estimates of the data set.
dist : ndarray of shape (n_samples,)
Mahalanobis distances of the observations, computed with the raw
location and covariance estimates.
Notes
-----
The FastMCD algorithm has been introduced by Rousseeuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
The principle is to compute robust estimates on random subsets before
pooling them into larger subsets, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [RouseeuwVan]_,
see the MinCovDet object.
References
----------
.. [RouseeuwVan] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
"""
random_state = check_random_state(random_state)
X = check_array(X, ensure_min_samples=2, estimator="fast_mcd")
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[: (n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = (
0.5
* (X_sorted[n_support + halves_start] + X_sorted[halves_start]).mean()
)
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = linalg.pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = linalg.pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets * (n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features, n_features))
except MemoryError:
# The above is too big. Let's try with something much smaller
# (and less optimal)
n_best_tot = 10
all_best_covariances = np.zeros((n_best_tot, n_features, n_features))
n_best_sub = 2
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset,
h_subset,
n_trials,
select=n_best_sub,
n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state,
)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged * (n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = select_candidates(
X[selection],
h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state,
)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = select_candidates(
X,
n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state,
)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X,
n_support,
n_trials=n_trials,
select=n_best,
n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state,
)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X,
n_support,
n_trials=(locations_best, covariances_best),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state,
)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
| fast_mcd | Repo-Level |
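An illustrative call to fast_mcd on data with a few planted outliers; a sketch whose outputs depend on random_state.
import numpy as np
from sklearn.covariance import fast_mcd

rng = np.random.RandomState(42)
X = rng.randn(100, 2)  # inliers ~ N(0, I)
X[:10] += 10.0         # ten shifted outliers
location, covariance, support, dist = fast_mcd(X, random_state=42)
print(location.shape, covariance.shape)  # (2,) (2, 2)
print(support.sum())       # size of the raw MCD support (52 for these sizes)
print(support[:10].sum())  # the shifted points should rarely enter the support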
scikit-learn | 299 | sklearn/utils/sparsefuncs.py | def incr_mean_variance_axis(X, *, axis, last_mean, last_var, last_n, weights=None):
"""Compute incremental mean and variance along an axis on a CSR or CSC matrix.
last_mean, last_var are the statistics computed at the last step by this
function. Both must be initialized to 0-arrays of the proper size, i.e.
the number of features in X. last_n is the number of samples encountered
until now.
Parameters
----------
X : CSR or CSC sparse matrix of shape (n_samples, n_features)
Input data.
axis : {0, 1}
Axis along which the statistics should be computed.
last_mean : ndarray of shape (n_features,) or (n_samples,), dtype=floating
Array of means to update with the new data X.
Should be of shape (n_features,) if axis=0 or (n_samples,) if axis=1.
last_var : ndarray of shape (n_features,) or (n_samples,), dtype=floating
Array of variances to update with the new data X.
Should be of shape (n_features,) if axis=0 or (n_samples,) if axis=1.
last_n : float or ndarray of shape (n_features,) or (n_samples,), \
dtype=floating
Sum of the weights seen so far, excluding the current weights.
If not float, it should be of shape (n_features,) if
axis=0 or (n_samples,) if axis=1. If float it corresponds to
having same weights for all samples (or features).
weights : ndarray of shape (n_samples,) or (n_features,), default=None
If axis is set to 0 shape is (n_samples,) or
if axis is set to 1 shape is (n_features,).
If it is set to None, then samples are equally weighted.
.. versionadded:: 0.24
Returns
-------
means : ndarray of shape (n_features,) or (n_samples,), dtype=floating
Updated feature-wise means if axis = 0 or
sample-wise means if axis = 1.
variances : ndarray of shape (n_features,) or (n_samples,), dtype=floating
Updated feature-wise variances if axis = 0 or
sample-wise variances if axis = 1.
n : ndarray of shape (n_features,) or (n_samples,), dtype=integral
Updated number of seen samples per feature if axis=0
or number of seen features per sample if axis=1.
If weights is not None, n is a sum of the weights of the seen
samples or features instead of the actual number of seen
samples or features.
Notes
-----
NaNs are ignored in the algorithm.
Examples
--------
>>> from sklearn.utils import sparsefuncs
>>> from scipy import sparse
>>> import numpy as np
>>> indptr = np.array([0, 3, 4, 4, 4])
>>> indices = np.array([0, 1, 2, 2])
>>> data = np.array([8, 1, 2, 5])
>>> scale = np.array([2, 3, 2])
>>> csr = sparse.csr_matrix((data, indices, indptr))
>>> csr.todense()
matrix([[8, 1, 2],
[0, 0, 5],
[0, 0, 0],
[0, 0, 0]])
>>> sparsefuncs.incr_mean_variance_axis(
... csr, axis=0, last_mean=np.zeros(3), last_var=np.zeros(3), last_n=2
... )
(array([1.3..., 0.1..., 1.1...]), array([8.8..., 0.1..., 3.4...]),
array([6., 6., 6.]))
"""
| /usr/src/app/target_test_cases/failed_tests_incr_mean_variance_axis.txt | def incr_mean_variance_axis(X, *, axis, last_mean, last_var, last_n, weights=None):
"""Compute incremental mean and variance along an axis on a CSR or CSC matrix.
last_mean, last_var are the statistics computed at the last step by this
function. Both must be initialized to 0-arrays of the proper size, i.e.
the number of features in X. last_n is the number of samples encountered
until now.
Parameters
----------
X : CSR or CSC sparse matrix of shape (n_samples, n_features)
Input data.
axis : {0, 1}
Axis along which the statistics should be computed.
last_mean : ndarray of shape (n_features,) or (n_samples,), dtype=floating
Array of means to update with the new data X.
Should be of shape (n_features,) if axis=0 or (n_samples,) if axis=1.
last_var : ndarray of shape (n_features,) or (n_samples,), dtype=floating
Array of variances to update with the new data X.
Should be of shape (n_features,) if axis=0 or (n_samples,) if axis=1.
last_n : float or ndarray of shape (n_features,) or (n_samples,), \
dtype=floating
Sum of the weights seen so far, excluding the current weights.
If not float, it should be of shape (n_features,) if
axis=0 or (n_samples,) if axis=1. If float it corresponds to
having same weights for all samples (or features).
weights : ndarray of shape (n_samples,) or (n_features,), default=None
If axis is set to 0 shape is (n_samples,) or
if axis is set to 1 shape is (n_features,).
If it is set to None, then samples are equally weighted.
.. versionadded:: 0.24
Returns
-------
means : ndarray of shape (n_features,) or (n_samples,), dtype=floating
Updated feature-wise means if axis = 0 or
sample-wise means if axis = 1.
variances : ndarray of shape (n_features,) or (n_samples,), dtype=floating
Updated feature-wise variances if axis = 0 or
sample-wise variances if axis = 1.
n : ndarray of shape (n_features,) or (n_samples,), dtype=integral
Updated number of seen samples per feature if axis=0
or number of seen features per sample if axis=1.
If weights is not None, n is a sum of the weights of the seen
samples or features instead of the actual number of seen
samples or features.
Notes
-----
NaNs are ignored in the algorithm.
Examples
--------
>>> from sklearn.utils import sparsefuncs
>>> from scipy import sparse
>>> import numpy as np
>>> indptr = np.array([0, 3, 4, 4, 4])
>>> indices = np.array([0, 1, 2, 2])
>>> data = np.array([8, 1, 2, 5])
>>> scale = np.array([2, 3, 2])
>>> csr = sparse.csr_matrix((data, indices, indptr))
>>> csr.todense()
matrix([[8, 1, 2],
[0, 0, 5],
[0, 0, 0],
[0, 0, 0]])
>>> sparsefuncs.incr_mean_variance_axis(
... csr, axis=0, last_mean=np.zeros(3), last_var=np.zeros(3), last_n=2
... )
(array([1.3..., 0.1..., 1.1...]), array([8.8..., 0.1..., 3.4...]),
array([6., 6., 6.]))
"""
_raise_error_wrong_axis(axis)
if not (sp.issparse(X) and X.format in ("csc", "csr")):
_raise_typeerror(X)
if np.size(last_n) == 1:
last_n = np.full(last_mean.shape, last_n, dtype=last_mean.dtype)
if not (np.size(last_mean) == np.size(last_var) == np.size(last_n)):
raise ValueError("last_mean, last_var, last_n do not have the same shapes.")
if axis == 1:
if np.size(last_mean) != X.shape[0]:
raise ValueError(
"If axis=1, then last_mean, last_n, last_var should be of "
f"size n_samples {X.shape[0]} (Got {np.size(last_mean)})."
)
else: # axis == 0
if np.size(last_mean) != X.shape[1]:
raise ValueError(
"If axis=0, then last_mean, last_n, last_var should be of "
f"size n_features {X.shape[1]} (Got {np.size(last_mean)})."
)
X = X.T if axis == 1 else X
if weights is not None:
weights = _check_sample_weight(weights, X, dtype=X.dtype)
return _incr_mean_var_axis0(
X, last_mean=last_mean, last_var=last_var, last_n=last_n, weights=weights
)
| incr_mean_variance_axis | Repo-Level |
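A sketch of two-batch incremental statistics; it assumes the sibling helper mean_variance_axis from sklearn.utils.sparsefuncs for the reference values.
import numpy as np
from scipy import sparse
from sklearn.utils.sparsefuncs import incr_mean_variance_axis, mean_variance_axis

rng = np.random.RandomState(0)
X = sparse.random(8, 3, density=0.5, random_state=rng, format="csr")
X1, X2 = X[:5], X[5:]

# First batch: start from zero statistics and last_n=0.
mean, var, n = incr_mean_variance_axis(
    X1, axis=0, last_mean=np.zeros(3), last_var=np.zeros(3), last_n=0
)
# Second batch: feed the running statistics back in.
mean, var, n = incr_mean_variance_axis(
    X2, axis=0, last_mean=mean, last_var=var, last_n=n
)

ref_mean, ref_var = mean_variance_axis(X, axis=0)
print(np.allclose(mean, ref_mean), np.allclose(var, ref_var))  # True True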
scikit-learn | 301 | sklearn/cluster/_agglomerative.py | def linkage_tree(
X,
connectivity=None,
n_clusters=None,
linkage="complete",
affinity="euclidean",
return_distance=False,
):
"""Linkage agglomerative clustering based on a Feature matrix.
The inertia matrix uses a Heapq-based representation.
This is the structured version, that takes into account some topological
structure between samples.
Read more in the :ref:`User Guide <hierarchical_clustering>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Feature matrix representing `n_samples` samples to be clustered.
connectivity : sparse matrix, default=None
Connectivity matrix. Defines for each sample the neighboring samples
following a given structure of the data. The matrix is assumed to
be symmetric and only the upper triangular half is used.
Default is `None`, i.e, the Ward algorithm is unstructured.
n_clusters : int, default=None
Stop early the construction of the tree at `n_clusters`. This is
useful to decrease computation time if the number of clusters is
not small compared to the number of samples. In this case, the
complete tree is not computed, thus the 'children' output is of
limited use, and the 'parents' output should rather be used.
This option is valid only when specifying a connectivity matrix.
linkage : {"average", "complete", "single"}, default="complete"
Which linkage criteria to use. The linkage criterion determines which
distance to use between sets of observations.
- "average" uses the average of the distances of each observation of
the two sets.
- "complete" or maximum linkage uses the maximum distances between
all observations of the two sets.
- "single" uses the minimum of the distances between all
observations of the two sets.
affinity : str or callable, default='euclidean'
Which metric to use. Can be 'euclidean', 'manhattan', or any
distance known to paired distance (see metric.pairwise).
return_distance : bool, default=False
Whether or not to return the distances between the clusters.
Returns
-------
children : ndarray of shape (n_nodes-1, 2)
The children of each non-leaf node. Values less than `n_samples`
correspond to leaves of the tree which are the original samples.
A node `i` greater than or equal to `n_samples` is a non-leaf
node and has children `children_[i - n_samples]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_samples + i`.
n_connected_components : int
The number of connected components in the graph.
n_leaves : int
The number of leaves in the tree.
parents : ndarray of shape (n_nodes, ) or None
The parent of each node. Only returned when a connectivity matrix
is specified, otherwise 'None' is returned.
distances : ndarray of shape (n_nodes-1,)
Returned when `return_distance` is set to `True`.
distances[i] refers to the distance between children[i][0] and
children[i][1] when they are merged.
See Also
--------
ward_tree : Hierarchical clustering with ward linkage.
"""
| /usr/src/app/target_test_cases/failed_tests_linkage_tree.txt | def linkage_tree(
X,
connectivity=None,
n_clusters=None,
linkage="complete",
affinity="euclidean",
return_distance=False,
):
"""Linkage agglomerative clustering based on a Feature matrix.
The inertia matrix uses a Heapq-based representation.
This is the structured version, that takes into account some topological
structure between samples.
Read more in the :ref:`User Guide <hierarchical_clustering>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Feature matrix representing `n_samples` samples to be clustered.
connectivity : sparse matrix, default=None
Connectivity matrix. Defines for each sample the neighboring samples
following a given structure of the data. The matrix is assumed to
be symmetric and only the upper triangular half is used.
Default is `None`, i.e, the Ward algorithm is unstructured.
n_clusters : int, default=None
Stop early the construction of the tree at `n_clusters`. This is
useful to decrease computation time if the number of clusters is
not small compared to the number of samples. In this case, the
complete tree is not computed, thus the 'children' output is of
limited use, and the 'parents' output should rather be used.
This option is valid only when specifying a connectivity matrix.
linkage : {"average", "complete", "single"}, default="complete"
Which linkage criteria to use. The linkage criterion determines which
distance to use between sets of observations.
- "average" uses the average of the distances of each observation of
the two sets.
- "complete" or maximum linkage uses the maximum distances between
all observations of the two sets.
- "single" uses the minimum of the distances between all
observations of the two sets.
affinity : str or callable, default='euclidean'
Which metric to use. Can be 'euclidean', 'manhattan', or any
distance known to paired distance (see metric.pairwise).
return_distance : bool, default=False
Whether or not to return the distances between the clusters.
Returns
-------
children : ndarray of shape (n_nodes-1, 2)
The children of each non-leaf node. Values less than `n_samples`
correspond to leaves of the tree which are the original samples.
A node `i` greater than or equal to `n_samples` is a non-leaf
node and has children `children_[i - n_samples]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_samples + i`.
n_connected_components : int
The number of connected components in the graph.
n_leaves : int
The number of leaves in the tree.
parents : ndarray of shape (n_nodes, ) or None
The parent of each node. Only returned when a connectivity matrix
is specified, otherwise 'None' is returned.
distances : ndarray of shape (n_nodes-1,)
Returned when `return_distance` is set to `True`.
distances[i] refers to the distance between children[i][0] and
children[i][1] when they are merged.
See Also
--------
ward_tree : Hierarchical clustering with ward linkage.
"""
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (-1, 1))
n_samples, n_features = X.shape
linkage_choices = {
"complete": _hierarchical.max_merge,
"average": _hierarchical.average_merge,
"single": None,
} # Single linkage is handled differently
try:
join_func = linkage_choices[linkage]
except KeyError as e:
raise ValueError(
"Unknown linkage option, linkage should be one of %s, but %s was given"
% (linkage_choices.keys(), linkage)
) from e
if affinity == "cosine" and np.any(~np.any(X, axis=1)):
raise ValueError("Cosine affinity cannot be used when X contains zero vectors")
if connectivity is None:
from scipy.cluster import hierarchy # imports PIL
if n_clusters is not None:
warnings.warn(
(
"Partial build of the tree is implemented "
"only for structured clustering (i.e. with "
"explicit connectivity). The algorithm "
"will build the full tree and only "
"retain the lower branches required "
"for the specified number of clusters"
),
stacklevel=2,
)
if affinity == "precomputed":
# for the linkage function of hierarchy to work on precomputed
# data, provide as first argument an ndarray of the shape returned
# by sklearn.metrics.pairwise_distances.
if X.shape[0] != X.shape[1]:
raise ValueError(
f"Distance matrix should be square, got matrix of shape {X.shape}"
)
i, j = np.triu_indices(X.shape[0], k=1)
X = X[i, j]
elif affinity == "l2":
# Translate to something understood by scipy
affinity = "euclidean"
elif affinity in ("l1", "manhattan"):
affinity = "cityblock"
elif callable(affinity):
X = affinity(X)
i, j = np.triu_indices(X.shape[0], k=1)
X = X[i, j]
if (
linkage == "single"
and affinity != "precomputed"
and not callable(affinity)
and affinity in METRIC_MAPPING64
):
# We need the fast cythonized metric from neighbors
dist_metric = DistanceMetric.get_metric(affinity)
# The Cython routines used require contiguous arrays
X = np.ascontiguousarray(X, dtype=np.double)
mst = _hierarchical.mst_linkage_core(X, dist_metric)
# Sort edges of the min_spanning_tree by weight
mst = mst[np.argsort(mst.T[2], kind="mergesort"), :]
# Convert edge list into standard hierarchical clustering format
out = _hierarchical.single_linkage_label(mst)
else:
out = hierarchy.linkage(X, method=linkage, metric=affinity)
children_ = out[:, :2].astype(int, copy=False)
if return_distance:
distances = out[:, 2]
return children_, 1, n_samples, None, distances
return children_, 1, n_samples, None
connectivity, n_connected_components = _fix_connectivity(
X, connectivity, affinity=affinity
)
connectivity = connectivity.tocoo()
# Put the diagonal to zero
diag_mask = connectivity.row != connectivity.col
connectivity.row = connectivity.row[diag_mask]
connectivity.col = connectivity.col[diag_mask]
connectivity.data = connectivity.data[diag_mask]
del diag_mask
if affinity == "precomputed":
distances = X[connectivity.row, connectivity.col].astype(np.float64, copy=False)
else:
# FIXME We compute all the distances, while we could have only computed
# the "interesting" distances
distances = paired_distances(
X[connectivity.row], X[connectivity.col], metric=affinity
)
connectivity.data = distances
if n_clusters is None:
n_nodes = 2 * n_samples - 1
else:
assert n_clusters <= n_samples
n_nodes = 2 * n_samples - n_clusters
if linkage == "single":
return _single_linkage_tree(
connectivity,
n_samples,
n_nodes,
n_clusters,
n_connected_components,
return_distance,
)
if return_distance:
distances = np.empty(n_nodes - n_samples)
# create inertia heap and connection matrix
A = np.empty(n_nodes, dtype=object)
inertia = list()
# LIL seems to be the best format to access the rows quickly,
# without the numpy overhead of slicing CSR indices and data.
connectivity = connectivity.tolil()
# We are storing the graph in a list of IntFloatDict
for ind, (data, row) in enumerate(zip(connectivity.data, connectivity.rows)):
A[ind] = IntFloatDict(
np.asarray(row, dtype=np.intp), np.asarray(data, dtype=np.float64)
)
# We keep only the upper triangular for the heap
# Generator expressions are faster than arrays on the following
inertia.extend(
_hierarchical.WeightedEdge(d, ind, r) for r, d in zip(row, data) if r < ind
)
del connectivity
heapify(inertia)
# prepare the main fields
parent = np.arange(n_nodes, dtype=np.intp)
used_node = np.ones(n_nodes, dtype=np.intp)
children = []
# recursive merge loop
for k in range(n_samples, n_nodes):
# identify the merge
while True:
edge = heappop(inertia)
if used_node[edge.a] and used_node[edge.b]:
break
i = edge.a
j = edge.b
if return_distance:
# store distances
distances[k - n_samples] = edge.weight
parent[i] = parent[j] = k
children.append((i, j))
# Keep track of the number of elements per cluster
n_i = used_node[i]
n_j = used_node[j]
used_node[k] = n_i + n_j
used_node[i] = used_node[j] = False
# update the structure matrix A and the inertia matrix
# a clever 'min', or 'max' operation between A[i] and A[j]
coord_col = join_func(A[i], A[j], used_node, n_i, n_j)
for col, d in coord_col:
A[col].append(k, d)
# Here we use the information from coord_col (containing the
# distances) to update the heap
heappush(inertia, _hierarchical.WeightedEdge(d, k, col))
A[k] = coord_col
# Clear A[i] and A[j] to save memory
A[i] = A[j] = 0
# Separate leaves in children (empty lists up to now)
n_leaves = n_samples
# return numpy array for efficient caching
children = np.array(children)[:, ::-1]
if return_distance:
return children, n_connected_components, n_leaves, parent, distances
return children, n_connected_components, n_leaves, parent
| linkage_tree | Repo-Level |
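A minimal sketch of the scipy fallback path that linkage_tree takes for non-single linkage: scipy.cluster.hierarchy.linkage returns an (n_samples - 1, 4) matrix whose first two columns are the merged node indices and whose third column holds the merge distances, matching the children_/distances slicing above. The random data is illustrative only.
import numpy as np
from scipy.cluster import hierarchy

X = np.random.RandomState(0).rand(6, 3)  # illustrative observations
out = hierarchy.linkage(X, method="average", metric="euclidean")
children = out[:, :2].astype(int, copy=False)  # same slicing as above
distances = out[:, 2]
print(children.shape, distances.shape)  # (5, 2) (5,)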
scikit-learn | 303 | sklearn/datasets/_base.py | def load_diabetes(*, return_X_y=False, as_frame=False, scaled=True):
"""Load and return the diabetes dataset (regression).
==============   ==================
Samples total    442
Dimensionality   10
Features         real, -.2 < x < .2
Targets          integer 25 - 346
==============   ==================
.. note::
The meaning of each feature (i.e. `feature_names`) might be unclear
(especially for `ltg`) as the documentation of the original dataset is
not explicit. We provide information that seems correct with regard to
the scientific literature in this field of research.
Read more in the :ref:`User Guide <diabetes_dataset>`.
Parameters
----------
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
as_frame : bool, default=False
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric). The target is
a pandas DataFrame or Series depending on the number of target columns.
If `return_X_y` is True, then (`data`, `target`) will be pandas
DataFrames or Series as described below.
.. versionadded:: 0.23
scaled : bool, default=True
If True, the feature variables are mean centered and scaled by the
standard deviation times the square root of `n_samples`.
If False, raw data is returned for the feature variables.
.. versionadded:: 1.1
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : {ndarray, dataframe} of shape (442, 10)
The data matrix. If `as_frame=True`, `data` will be a pandas
DataFrame.
target: {ndarray, Series} of shape (442,)
The regression target. If `as_frame=True`, `target` will be
a pandas Series.
feature_names: list
The names of the dataset columns.
frame: DataFrame of shape (442, 11)
Only present when `as_frame=True`. DataFrame with `data` and
`target`.
.. versionadded:: 0.23
DESCR: str
The full description of the dataset.
data_filename: str
The path to the location of the data.
target_filename: str
The path to the location of the target.
(data, target) : tuple if ``return_X_y`` is True
Returns a tuple of two ndarrays of shape (n_samples, n_features)
A 2D array with each row representing one sample and each column
representing the features and/or target of a given sample.
.. versionadded:: 0.18
Examples
--------
>>> from sklearn.datasets import load_diabetes
>>> diabetes = load_diabetes()
>>> diabetes.target[:3]
array([151., 75., 141.])
>>> diabetes.data.shape
(442, 10)
"""
| /usr/src/app/target_test_cases/failed_tests_load_diabetes.txt | def load_diabetes(*, return_X_y=False, as_frame=False, scaled=True):
"""Load and return the diabetes dataset (regression).
==============   ==================
Samples total    442
Dimensionality   10
Features         real, -.2 < x < .2
Targets          integer 25 - 346
==============   ==================
.. note::
The meaning of each feature (i.e. `feature_names`) might be unclear
(especially for `ltg`) as the documentation of the original dataset is
not explicit. We provide information that seems correct with regard to
the scientific literature in this field of research.
Read more in the :ref:`User Guide <diabetes_dataset>`.
Parameters
----------
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
as_frame : bool, default=False
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric). The target is
a pandas DataFrame or Series depending on the number of target columns.
If `return_X_y` is True, then (`data`, `target`) will be pandas
DataFrames or Series as described below.
.. versionadded:: 0.23
scaled : bool, default=True
If True, the feature variables are mean centered and scaled by the
standard deviation times the square root of `n_samples`.
If False, raw data is returned for the feature variables.
.. versionadded:: 1.1
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : {ndarray, dataframe} of shape (442, 10)
The data matrix. If `as_frame=True`, `data` will be a pandas
DataFrame.
target: {ndarray, Series} of shape (442,)
The regression target. If `as_frame=True`, `target` will be
a pandas Series.
feature_names: list
The names of the dataset columns.
frame: DataFrame of shape (442, 11)
Only present when `as_frame=True`. DataFrame with `data` and
`target`.
.. versionadded:: 0.23
DESCR: str
The full description of the dataset.
data_filename: str
The path to the location of the data.
target_filename: str
The path to the location of the target.
(data, target) : tuple if ``return_X_y`` is True
Returns a tuple of two ndarrays of shape (n_samples, n_features)
A 2D array with each row representing one sample and each column
representing the features and/or target of a given sample.
.. versionadded:: 0.18
Examples
--------
>>> from sklearn.datasets import load_diabetes
>>> diabetes = load_diabetes()
>>> diabetes.target[:3]
array([151., 75., 141.])
>>> diabetes.data.shape
(442, 10)
"""
data_filename = "diabetes_data_raw.csv.gz"
target_filename = "diabetes_target.csv.gz"
data = load_gzip_compressed_csv_data(data_filename)
target = load_gzip_compressed_csv_data(target_filename)
if scaled:
data = scale(data, copy=False)
data /= data.shape[0] ** 0.5
fdescr = load_descr("diabetes.rst")
feature_names = ["age", "sex", "bmi", "bp", "s1", "s2", "s3", "s4", "s5", "s6"]
frame = None
target_columns = [
"target",
]
if as_frame:
frame, data, target = _convert_data_dataframe(
"load_diabetes", data, target, feature_names, target_columns
)
if return_X_y:
return data, target
return Bunch(
data=data,
target=target,
frame=frame,
DESCR=fdescr,
feature_names=feature_names,
data_filename=data_filename,
target_filename=target_filename,
data_module=DATA_MODULE,
)
| load_diabetes | Repo-Level |
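A short usage sketch for the as_frame and scaled options documented above (as_frame=True assumes pandas is installed):
from sklearn.datasets import load_diabetes

X, y = load_diabetes(return_X_y=True, as_frame=True, scaled=False)
print(list(X.columns))  # ['age', 'sex', 'bmi', 'bp', 's1', 's2', 's3', 's4', 's5', 's6']
print(X.shape, y.shape)  # (442, 10) (442,)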
scikit-learn | 305 | sklearn/datasets/_samples_generator.py | def make_blobs(
n_samples=100,
n_features=2,
*,
centers=None,
cluster_std=1.0,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=None,
return_centers=False,
):
"""Generate isotropic Gaussian blobs for clustering.
For an example of usage, see
:ref:`sphx_glr_auto_examples_datasets_plot_random_dataset.py`.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int or array-like, default=100
If int, it is the total number of points equally divided among
clusters.
If array-like, each element of the sequence indicates
the number of samples per cluster.
.. versionchanged:: v0.20
one can now pass an array-like to the ``n_samples`` parameter
n_features : int, default=2
The number of features for each sample.
centers : int or array-like of shape (n_centers, n_features), default=None
The number of centers to generate, or the fixed center locations.
If n_samples is an int and centers is None, 3 centers are generated.
If n_samples is array-like, centers must be
either None or an array of length equal to the length of n_samples.
cluster_std : float or array-like of float, default=1.0
The standard deviation of the clusters.
center_box : tuple of float (min, max), default=(-10.0, 10.0)
The bounding box for each cluster center when centers are
generated at random.
shuffle : bool, default=True
Shuffle the samples.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
return_centers : bool, default=False
If True, then return the centers of each cluster.
.. versionadded:: 0.23
Returns
-------
X : ndarray of shape (n_samples, n_features)
The generated samples.
y : ndarray of shape (n_samples,)
The integer labels for cluster membership of each sample.
centers : ndarray of shape (n_centers, n_features)
The centers of each cluster. Only returned if
``return_centers=True``.
See Also
--------
make_classification : A more intricate variant.
Examples
--------
>>> from sklearn.datasets import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
>>> X, y = make_blobs(n_samples=[3, 3, 4], centers=None, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 1, 2, 0, 2, 2, 2, 1, 1, 0])
"""
| /usr/src/app/target_test_cases/failed_tests_make_blobs.txt | def make_blobs(
n_samples=100,
n_features=2,
*,
centers=None,
cluster_std=1.0,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=None,
return_centers=False,
):
"""Generate isotropic Gaussian blobs for clustering.
For an example of usage, see
:ref:`sphx_glr_auto_examples_datasets_plot_random_dataset.py`.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int or array-like, default=100
If int, it is the total number of points equally divided among
clusters.
If array-like, each element of the sequence indicates
the number of samples per cluster.
.. versionchanged:: v0.20
one can now pass an array-like to the ``n_samples`` parameter
n_features : int, default=2
The number of features for each sample.
centers : int or array-like of shape (n_centers, n_features), default=None
The number of centers to generate, or the fixed center locations.
If n_samples is an int and centers is None, 3 centers are generated.
If n_samples is array-like, centers must be
either None or an array of length equal to the length of n_samples.
cluster_std : float or array-like of float, default=1.0
The standard deviation of the clusters.
center_box : tuple of float (min, max), default=(-10.0, 10.0)
The bounding box for each cluster center when centers are
generated at random.
shuffle : bool, default=True
Shuffle the samples.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
return_centers : bool, default=False
If True, then return the centers of each cluster.
.. versionadded:: 0.23
Returns
-------
X : ndarray of shape (n_samples, n_features)
The generated samples.
y : ndarray of shape (n_samples,)
The integer labels for cluster membership of each sample.
centers : ndarray of shape (n_centers, n_features)
The centers of each cluster. Only returned if
``return_centers=True``.
See Also
--------
make_classification : A more intricate variant.
Examples
--------
>>> from sklearn.datasets import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
>>> X, y = make_blobs(n_samples=[3, 3, 4], centers=None, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 1, 2, 0, 2, 2, 2, 1, 1, 0])
"""
generator = check_random_state(random_state)
if isinstance(n_samples, numbers.Integral):
# Set n_centers by looking at centers arg
if centers is None:
centers = 3
if isinstance(centers, numbers.Integral):
n_centers = centers
centers = generator.uniform(
center_box[0], center_box[1], size=(n_centers, n_features)
)
else:
centers = check_array(centers)
n_features = centers.shape[1]
n_centers = centers.shape[0]
else:
# Set n_centers by looking at [n_samples] arg
n_centers = len(n_samples)
if centers is None:
centers = generator.uniform(
center_box[0], center_box[1], size=(n_centers, n_features)
)
if not isinstance(centers, Iterable):
raise ValueError(
"Parameter `centers` must be array-like. Got {!r} instead".format(
centers
)
)
if len(centers) != n_centers:
raise ValueError(
"Length of `n_samples` not consistent with number of "
f"centers. Got n_samples = {n_samples} and centers = {centers}"
)
centers = check_array(centers)
n_features = centers.shape[1]
# stds: if cluster_std is given as list, it must be consistent
# with n_centers
if hasattr(cluster_std, "__len__") and len(cluster_std) != n_centers:
raise ValueError(
"Length of `clusters_std` not consistent with "
"number of centers. Got centers = {} "
"and cluster_std = {}".format(centers, cluster_std)
)
if isinstance(cluster_std, numbers.Real):
cluster_std = np.full(len(centers), cluster_std)
if isinstance(n_samples, Iterable):
n_samples_per_center = n_samples
else:
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
cum_sum_n_samples = np.cumsum(n_samples_per_center)
X = np.empty(shape=(sum(n_samples_per_center), n_features), dtype=np.float64)
y = np.empty(shape=(sum(n_samples_per_center),), dtype=int)
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
start_idx = cum_sum_n_samples[i - 1] if i > 0 else 0
end_idx = cum_sum_n_samples[i]
X[start_idx:end_idx] = generator.normal(
loc=centers[i], scale=std, size=(n, n_features)
)
y[start_idx:end_idx] = i
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if return_centers:
return X, y, centers
else:
return X, y
| make_blobs | Repo-Level |
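A sketch exercising the array-valued n_samples, per-cluster cluster_std, and return_centers options described above:
from sklearn.datasets import make_blobs

# Three clusters of different sizes and spreads; n_features defaults to 2.
X, y, centers = make_blobs(
    n_samples=[10, 20, 30],
    cluster_std=[0.5, 1.0, 2.0],
    random_state=0,
    return_centers=True,
)
print(X.shape, centers.shape)  # (60, 2) (3, 2)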
scikit-learn | 313 | sklearn/utils/multiclass.py | def type_of_target(y, input_name=""):
"""Determine the type of data indicated by the target.
Note that this type is the most specific type that can be inferred.
For example:
* ``binary`` is more specific but compatible with ``multiclass``.
* ``multiclass`` of integers is more specific but compatible with ``continuous``.
* ``multilabel-indicator`` is more specific but compatible with
``multiclass-multioutput``.
Parameters
----------
y : {array-like, sparse matrix}
Target values. If a sparse matrix, `y` is expected to be a
CSR/CSC matrix.
input_name : str, default=""
The data name used to construct the error message.
.. versionadded:: 1.1.0
Returns
-------
target_type : str
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> from sklearn.utils.multiclass import type_of_target
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multilabel-indicator'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
| /usr/src/app/target_test_cases/failed_tests_type_of_target.txt | def type_of_target(y, input_name=""):
"""Determine the type of data indicated by the target.
Note that this type is the most specific type that can be inferred.
For example:
* ``binary`` is more specific but compatible with ``multiclass``.
* ``multiclass`` of integers is more specific but compatible with ``continuous``.
* ``multilabel-indicator`` is more specific but compatible with
``multiclass-multioutput``.
Parameters
----------
y : {array-like, sparse matrix}
Target values. If a sparse matrix, `y` is expected to be a
CSR/CSC matrix.
input_name : str, default=""
The data name used to construct the error message.
.. versionadded:: 1.1.0
Returns
-------
target_type : str
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> from sklearn.utils.multiclass import type_of_target
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multilabel-indicator'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
xp, is_array_api_compliant = get_namespace(y)
valid = (
(isinstance(y, Sequence) or issparse(y) or hasattr(y, "__array__"))
and not isinstance(y, str)
or is_array_api_compliant
)
if not valid:
raise ValueError(
"Expected array-like (array or non-string sequence), got %r" % y
)
sparse_pandas = y.__class__.__name__ in ["SparseSeries", "SparseArray"]
if sparse_pandas:
raise ValueError("y cannot be class 'SparseSeries' or 'SparseArray'")
if is_multilabel(y):
return "multilabel-indicator"
# DeprecationWarning will be replaced by ValueError, see NEP 34
# https://numpy.org/neps/nep-0034-infer-dtype-is-object.html
# We therefore catch both deprecation (NumPy < 1.24) warning and
# value error (NumPy >= 1.24).
check_y_kwargs = dict(
accept_sparse=True,
allow_nd=True,
ensure_all_finite=False,
ensure_2d=False,
ensure_min_samples=0,
ensure_min_features=0,
)
with warnings.catch_warnings():
warnings.simplefilter("error", VisibleDeprecationWarning)
if not issparse(y):
try:
y = check_array(y, dtype=None, **check_y_kwargs)
except (VisibleDeprecationWarning, ValueError) as e:
if str(e).startswith("Complex data not supported"):
raise
# dtype=object should be provided explicitly for ragged arrays,
# see NEP 34
y = check_array(y, dtype=object, **check_y_kwargs)
try:
# TODO(1.7): Change to ValueError once byte labels are deprecated.
# labels in bytes format
first_row_or_val = y[[0], :] if issparse(y) else y[0]
if isinstance(first_row_or_val, bytes):
warnings.warn(
(
"Support for labels represented as bytes is deprecated in v1.5 and"
" will error in v1.7. Convert the labels to a string or integer"
" format."
),
FutureWarning,
)
# The old sequence of sequences format
if (
not hasattr(first_row_or_val, "__array__")
and isinstance(first_row_or_val, Sequence)
and not isinstance(first_row_or_val, str)
):
raise ValueError(
"You appear to be using a legacy multi-label data"
" representation. Sequence of sequences are no"
" longer supported; use a binary array or sparse"
" matrix instead - the MultiLabelBinarizer"
" transformer can convert to this format."
)
except IndexError:
pass
# Invalid inputs
if y.ndim not in (1, 2):
# Number of dimension greater than 2: [[[1, 2]]]
return "unknown"
if not min(y.shape):
# Empty ndarray: []/[[]]
if y.ndim == 1:
# 1-D empty array: []
return "binary" # []
# 2-D empty array: [[]]
return "unknown"
if not issparse(y) and y.dtype == object and not isinstance(y.flat[0], str):
# [obj_1] and not ["label_1"]
return "unknown"
# Check if multioutput
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# Check float and contains non-integer float values
if xp.isdtype(y.dtype, "real floating"):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
data = y.data if issparse(y) else y
if xp.any(data != xp.astype(data, int)):
_assert_all_finite(data, input_name=input_name)
return "continuous" + suffix
# Check multiclass
if issparse(first_row_or_val):
first_row_or_val = first_row_or_val.data
if cached_unique(y).shape[0] > 2 or (y.ndim == 2 and len(first_row_or_val) > 1):
# [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
return "multiclass" + suffix
else:
return "binary" # [1, 2] or [["a"], ["b"]]
| type_of_target | Repo-Level |
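The docstring notes that sparse CSR/CSC targets are accepted; a small sketch of the multilabel path:
import numpy as np
from scipy import sparse
from sklearn.utils.multiclass import type_of_target

# A 2-D indicator matrix with at most two unique values is classified
# as 'multilabel-indicator', whether sparse or dense.
y = sparse.csr_matrix(np.array([[0, 1, 1], [1, 0, 0]]))
print(type_of_target(y))  # 'multilabel-indicator'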
astropy | 0 | astropy/modeling/physical_models.py | def evaluate(self, x, temperature, scale):
"""Evaluate the model.
Parameters
----------
x : float, `~numpy.ndarray`, or `~astropy.units.Quantity` ['frequency']
Frequency at which to compute the blackbody. If no units are given,
this defaults to Hz (or AA if `scale` was initialized with units
equivalent to erg / (cm ** 2 * s * AA * sr)).
temperature : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
Temperature of the blackbody. If no units are given, this defaults
to Kelvin.
scale : float, `~numpy.ndarray`, or `~astropy.units.Quantity` ['dimensionless']
Desired scale for the blackbody.
Returns
-------
y : number or ndarray
Blackbody spectrum. The units are determined from the units of
``scale``.
.. note::
Use `numpy.errstate` to suppress Numpy warnings, if desired.
.. warning::
Output values might contain ``nan`` and ``inf``.
Raises
------
ValueError
Invalid temperature.
ZeroDivisionError
Wavelength is zero (when converting to frequency).
"""
| /usr/src/app/target_test_cases/failed_tests_BlackBody.evaluate.txt | def evaluate(self, x, temperature, scale):
"""Evaluate the model.
Parameters
----------
x : float, `~numpy.ndarray`, or `~astropy.units.Quantity` ['frequency']
Frequency at which to compute the blackbody. If no units are given,
this defaults to Hz (or AA if `scale` was initialized with units
equivalent to erg / (cm ** 2 * s * AA * sr)).
temperature : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
Temperature of the blackbody. If no units are given, this defaults
to Kelvin.
scale : float, `~numpy.ndarray`, or `~astropy.units.Quantity` ['dimensionless']
Desired scale for the blackbody.
Returns
-------
y : number or ndarray
Blackbody spectrum. The units are determined from the units of
``scale``.
.. note::
Use `numpy.errstate` to suppress Numpy warnings, if desired.
.. warning::
Output values might contain ``nan`` and ``inf``.
Raises
------
ValueError
Invalid temperature.
ZeroDivisionError
Wavelength is zero (when converting to frequency).
"""
if not isinstance(temperature, u.Quantity):
in_temp = u.Quantity(temperature, u.K)
else:
in_temp = temperature
if not isinstance(x, u.Quantity):
# then we assume it has input_units which depends on the
# requested output units (either Hz or AA)
in_x = u.Quantity(x, self.input_units["x"])
else:
in_x = x
# Convert to units for calculations, also force double precision
with u.add_enabled_equivalencies(u.spectral() + u.temperature()):
freq = u.Quantity(in_x, u.Hz, dtype=np.float64)
temp = u.Quantity(in_temp, u.K)
# Check if input values are physically possible
if np.any(temp < 0):
raise ValueError(f"Temperature should be positive: {temp}")
if not np.all(np.isfinite(freq)) or np.any(freq <= 0):
warnings.warn(
"Input contains invalid wavelength/frequency value(s)",
AstropyUserWarning,
)
log_boltz = const.h * freq / (const.k_B * temp)
boltzm1 = np.expm1(log_boltz)
# Calculate blackbody flux
bb_nu = 2.0 * const.h * freq**3 / (const.c**2 * boltzm1) / u.sr
if self.scale.unit is not None:
# Will be dimensionless at this point, but may not be dimensionless_unscaled
if not hasattr(scale, "unit"):
# during fitting, scale will be passed without units
# but we still need to convert from the input dimensionless
# to dimensionless unscaled
scale = scale * self.scale.unit
scale = scale.to(u.dimensionless_unscaled).value
# NOTE: scale is already stripped of any input units
y = scale * bb_nu.to(self._output_units, u.spectral_density(freq))
# If the temperature parameter has no unit, we should return a unitless
# value. This occurs for instance during fitting, since we drop the
# units temporarily.
if hasattr(temperature, "unit"):
return y
return y.value
| BlackBody.evaluate | Self-Contained |
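A quick sketch of calling the model with wavelength input, which the evaluate body above converts to frequency via the spectral equivalencies:
import astropy.units as u
from astropy.modeling.models import BlackBody

bb = BlackBody(temperature=5000 * u.K)  # default dimensionless scale
flux = bb(6563 * u.AA)  # wavelength in, converted internally to Hz
print(flux.unit)  # erg / (cm2 Hz s sr) with the default scale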
astropy | 2 | astropy/timeseries/periodograms/bls/core.py | def compute_stats(self, period, duration, transit_time):
"""Compute descriptive statistics for a given transit model.
These statistics are commonly used for vetting of transit candidates.
Parameters
----------
period : float or `~astropy.units.Quantity` ['time']
The period of the transits.
duration : float or `~astropy.units.Quantity` ['time']
The duration of the transit.
transit_time : float or `~astropy.units.Quantity` or `~astropy.time.Time`
The mid-transit time of a reference transit.
Returns
-------
stats : dict
A dictionary containing several descriptive statistics:
- ``depth``: The depth and uncertainty (as a tuple with two
values) on the depth for the fiducial model.
- ``depth_odd``: The depth and uncertainty on the depth for a
model where the period is twice the fiducial period.
- ``depth_even``: The depth and uncertainty on the depth for a
model where the period is twice the fiducial period and the
phase is offset by one orbital period.
- ``depth_half``: The depth and uncertainty for a model with a
period of half the fiducial period.
- ``depth_phased``: The depth and uncertainty for a model with the
fiducial period and the phase offset by half a period.
- ``harmonic_amplitude``: The amplitude of the best fit sinusoidal
model.
- ``harmonic_delta_log_likelihood``: The difference in log
likelihood between a sinusoidal model and the transit model.
If ``harmonic_delta_log_likelihood`` is greater than zero, the
sinusoidal model is preferred.
- ``transit_times``: The mid-transit time for each transit in the
baseline.
- ``per_transit_count``: An array with a count of the number of
data points in each unique transit included in the baseline.
- ``per_transit_log_likelihood``: An array with the value of the
log likelihood for each unique transit included in the
baseline.
"""
| /usr/src/app/target_test_cases/failed_tests_BoxLeastSquares.compute_stats.txt | def compute_stats(self, period, duration, transit_time):
"""Compute descriptive statistics for a given transit model.
These statistics are commonly used for vetting of transit candidates.
Parameters
----------
period : float or `~astropy.units.Quantity` ['time']
The period of the transits.
duration : float or `~astropy.units.Quantity` ['time']
The duration of the transit.
transit_time : float or `~astropy.units.Quantity` or `~astropy.time.Time`
The mid-transit time of a reference transit.
Returns
-------
stats : dict
A dictionary containing several descriptive statistics:
- ``depth``: The depth and uncertainty (as a tuple with two
values) on the depth for the fiducial model.
- ``depth_odd``: The depth and uncertainty on the depth for a
model where the period is twice the fiducial period.
- ``depth_even``: The depth and uncertainty on the depth for a
model where the period is twice the fiducial period and the
phase is offset by one orbital period.
- ``depth_half``: The depth and uncertainty for a model with a
period of half the fiducial period.
- ``depth_phased``: The depth and uncertainty for a model with the
fiducial period and the phase offset by half a period.
- ``harmonic_amplitude``: The amplitude of the best fit sinusoidal
model.
- ``harmonic_delta_log_likelihood``: The difference in log
likelihood between a sinusoidal model and the transit model.
If ``harmonic_delta_log_likelihood`` is greater than zero, the
sinusoidal model is preferred.
- ``transit_times``: The mid-transit time for each transit in the
baseline.
- ``per_transit_count``: An array with a count of the number of
data points in each unique transit included in the baseline.
- ``per_transit_log_likelihood``: An array with the value of the
log likelihood for each unique transit included in the
baseline.
"""
period, duration = self._validate_period_and_duration(period, duration)
transit_time = self._as_relative_time("transit_time", transit_time)
period = float(strip_units(period[0]))
duration = float(strip_units(duration[0]))
transit_time = float(strip_units(transit_time))
t = np.ascontiguousarray(strip_units(self._trel), dtype=np.float64)
y = np.ascontiguousarray(strip_units(self.y), dtype=np.float64)
if self.dy is None:
ivar = np.ones_like(y)
else:
ivar = (
1.0 / np.ascontiguousarray(strip_units(self.dy), dtype=np.float64) ** 2
)
# This is a helper function that computes the depth for several
# different hypothesized transit models with different parameters
def _compute_depth(m, y_out=None, var_out=None):
if np.any(m) and (var_out is None or np.isfinite(var_out)):
var_m = 1.0 / np.sum(ivar[m])
y_m = np.sum(y[m] * ivar[m]) * var_m
if y_out is None:
return y_m, var_m
return y_out - y_m, np.sqrt(var_m + var_out)
return 0.0, np.inf
# Compute the depth of the fiducial model and the two models at twice
# the period
hp = 0.5 * period
m_in = np.abs((t - transit_time + hp) % period - hp) < 0.5 * duration
m_out = ~m_in
m_odd = np.abs((t - transit_time) % (2 * period) - period) < 0.5 * duration
m_even = (
np.abs((t - transit_time + period) % (2 * period) - period) < 0.5 * duration
)
y_out, var_out = _compute_depth(m_out)
depth = _compute_depth(m_in, y_out, var_out)
depth_odd = _compute_depth(m_odd, y_out, var_out)
depth_even = _compute_depth(m_even, y_out, var_out)
y_in = y_out - depth[0]
# Compute the depth of the model at a phase of 0.5*period
m_phase = np.abs((t - transit_time) % period - hp) < 0.5 * duration
depth_phase = _compute_depth(m_phase, *_compute_depth((~m_phase) & m_out))
# Compute the depth of a model with a period of 0.5*period
m_half = (
np.abs((t - transit_time + 0.25 * period) % (0.5 * period) - 0.25 * period)
< 0.5 * duration
)
depth_half = _compute_depth(m_half, *_compute_depth(~m_half))
# Compute the number of points in each transit
transit_id = np.round((t[m_in] - transit_time) / period).astype(int)
transit_times = (
period * np.arange(transit_id.min(), transit_id.max() + 1) + transit_time
)
unique_ids, unique_counts = np.unique(transit_id, return_counts=True)
unique_ids -= np.min(transit_id)
transit_id -= np.min(transit_id)
counts = np.zeros(np.max(transit_id) + 1, dtype=int)
counts[unique_ids] = unique_counts
# Compute the per-transit log likelihood
ll = -0.5 * ivar[m_in] * ((y[m_in] - y_in) ** 2 - (y[m_in] - y_out) ** 2)
lls = np.zeros(len(counts))
for i in unique_ids:
lls[i] = np.sum(ll[transit_id == i])
full_ll = -0.5 * np.sum(ivar[m_in] * (y[m_in] - y_in) ** 2)
full_ll -= 0.5 * np.sum(ivar[m_out] * (y[m_out] - y_out) ** 2)
# Compute the log likelihood of a sine model
A = np.vstack(
(
np.sin(2 * np.pi * t / period),
np.cos(2 * np.pi * t / period),
np.ones_like(t),
)
).T
w = np.linalg.solve(np.dot(A.T, A * ivar[:, None]), np.dot(A.T, y * ivar))
mod = np.dot(A, w)
sin_ll = -0.5 * np.sum((y - mod) ** 2 * ivar)
# Format the results
y_unit = self._y_unit()
ll_unit = 1
if self.dy is None:
ll_unit = y_unit * y_unit
return dict(
transit_times=self._as_absolute_time_if_needed(
"transit_times", transit_times * self._t_unit()
),
per_transit_count=counts,
per_transit_log_likelihood=lls * ll_unit,
depth=(depth[0] * y_unit, depth[1] * y_unit),
depth_phased=(depth_phase[0] * y_unit, depth_phase[1] * y_unit),
depth_half=(depth_half[0] * y_unit, depth_half[1] * y_unit),
depth_odd=(depth_odd[0] * y_unit, depth_odd[1] * y_unit),
depth_even=(depth_even[0] * y_unit, depth_even[1] * y_unit),
harmonic_amplitude=np.sqrt(np.sum(w[:2] ** 2)) * y_unit,
harmonic_delta_log_likelihood=(sin_ll - full_ll) * ll_unit,
)
| BoxLeastSquares.compute_stats | File-Level |
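A minimal, self-contained sketch of vetting a candidate with compute_stats; the flat synthetic light curve is illustrative only:
import numpy as np
import astropy.units as u
from astropy.timeseries import BoxLeastSquares

rng = np.random.default_rng(42)
t = np.linspace(0, 20, 2000) * u.day
y = 1 + 1e-3 * rng.standard_normal(t.shape)  # unitless flux
model = BoxLeastSquares(t, y)
stats = model.compute_stats(period=3 * u.day, duration=0.2 * u.day,
                            transit_time=1 * u.day)
print(stats["depth"])  # (depth, uncertainty) tuple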
astropy | 6 | astropy/nddata/ccddata.py | def to_hdu(
self,
hdu_mask="MASK",
hdu_uncertainty="UNCERT",
hdu_flags=None,
wcs_relax=True,
key_uncertainty_type="UTYPE",
as_image_hdu=False,
hdu_psf="PSFIMAGE",
):
"""Creates an HDUList object from a CCDData object.
Parameters
----------
hdu_mask, hdu_uncertainty, hdu_flags, hdu_psf : str or None, optional
If it is a string append this attribute to the HDUList as
`~astropy.io.fits.ImageHDU` with the string as extension name.
Flags are not supported at this time. If ``None`` this attribute
is not appended.
Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty,
``'PSFIMAGE'`` for psf, and `None` for flags.
wcs_relax : bool
Value of the ``relax`` parameter to use in converting the WCS to a
FITS header using `~astropy.wcs.WCS.to_header`. The common
``CTYPE`` values ``RA---TAN-SIP`` and ``DEC--TAN-SIP`` require
``relax=True`` for the ``-SIP`` part of the ``CTYPE`` to be
preserved.
key_uncertainty_type : str, optional
The header key name for the class name of the uncertainty (if any)
that is used to store the uncertainty type in the uncertainty hdu.
Default is ``UTYPE``.
.. versionadded:: 3.1
as_image_hdu : bool
If this option is `True`, the first item of the returned
`~astropy.io.fits.HDUList` is a `~astropy.io.fits.ImageHDU`, instead
of the default `~astropy.io.fits.PrimaryHDU`.
Raises
------
ValueError
- If ``self.mask`` is set but not a `numpy.ndarray`.
- If ``self.uncertainty`` is set but not an astropy uncertainty type.
- If ``self.uncertainty`` is set but has a different unit than
``self.data``.
NotImplementedError
Saving flags is not supported.
Returns
-------
hdulist : `~astropy.io.fits.HDUList`
"""
| /usr/src/app/target_test_cases/failed_tests_CCDData.to_hdu.txt | def to_hdu(
self,
hdu_mask="MASK",
hdu_uncertainty="UNCERT",
hdu_flags=None,
wcs_relax=True,
key_uncertainty_type="UTYPE",
as_image_hdu=False,
hdu_psf="PSFIMAGE",
):
"""Creates an HDUList object from a CCDData object.
Parameters
----------
hdu_mask, hdu_uncertainty, hdu_flags, hdu_psf : str or None, optional
If it is a string append this attribute to the HDUList as
`~astropy.io.fits.ImageHDU` with the string as extension name.
Flags are not supported at this time. If ``None`` this attribute
is not appended.
Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty,
``'PSFIMAGE'`` for psf, and `None` for flags.
wcs_relax : bool
Value of the ``relax`` parameter to use in converting the WCS to a
FITS header using `~astropy.wcs.WCS.to_header`. The common
``CTYPE`` values ``RA---TAN-SIP`` and ``DEC--TAN-SIP`` require
``relax=True`` for the ``-SIP`` part of the ``CTYPE`` to be
preserved.
key_uncertainty_type : str, optional
The header key name for the class name of the uncertainty (if any)
that is used to store the uncertainty type in the uncertainty hdu.
Default is ``UTYPE``.
.. versionadded:: 3.1
as_image_hdu : bool
If this option is `True`, the first item of the returned
`~astropy.io.fits.HDUList` is a `~astropy.io.fits.ImageHDU`, instead
of the default `~astropy.io.fits.PrimaryHDU`.
Raises
------
ValueError
- If ``self.mask`` is set but not a `numpy.ndarray`.
- If ``self.uncertainty`` is set but not an astropy uncertainty type.
- If ``self.uncertainty`` is set but has a different unit than
``self.data``.
NotImplementedError
Saving flags is not supported.
Returns
-------
hdulist : `~astropy.io.fits.HDUList`
"""
if isinstance(self.header, fits.Header):
# Copy here so that we can modify the HDU header by adding WCS
# information without changing the header of the CCDData object.
header = self.header.copy()
else:
# Because _insert_in_metadata_fits_safe is written as a method
# we need to create a dummy CCDData instance to hold the FITS
# header we are constructing. This probably indicates that
# _insert_in_metadata_fits_safe should be rewritten in a more
# sensible way...
dummy_ccd = CCDData([1], meta=fits.Header(), unit="adu")
for k, v in self.header.items():
dummy_ccd._insert_in_metadata_fits_safe(k, v)
header = dummy_ccd.header
if self.unit is not u.dimensionless_unscaled:
header["bunit"] = self.unit.to_string()
if self.wcs:
# Simply extending the FITS header with the WCS can lead to
# duplicates of the WCS keywords; iterating over the WCS
# header should be safer.
#
# Turns out if I had read the io.fits.Header.extend docs more
# carefully, I would have realized that the keywords exist to
# avoid duplicates and preserve, as much as possible, the
# structure of the commentary cards.
#
# Note that until astropy/astropy#3967 is closed, the extend
# will fail if there are comment cards in the WCS header but
# not header.
wcs_header = self.wcs.to_header(relax=wcs_relax)
header.extend(wcs_header, useblanks=False, update=True)
if as_image_hdu:
hdus = [fits.ImageHDU(self.data, header)]
else:
hdus = [fits.PrimaryHDU(self.data, header)]
if hdu_mask and self.mask is not None:
# Always assuming that the mask is a np.ndarray (check that it has
# a 'shape').
if not hasattr(self.mask, "shape"):
raise ValueError("only a numpy.ndarray mask can be saved.")
# Convert boolean mask to uint since io.fits cannot handle bool.
hduMask = fits.ImageHDU(self.mask.astype(np.uint8), name=hdu_mask)
hdus.append(hduMask)
if hdu_uncertainty and self.uncertainty is not None:
# We need to save some kind of information which uncertainty was
# used so that loading the HDUList can infer the uncertainty type.
# No idea how this can be done so only allow StdDevUncertainty.
uncertainty_cls = self.uncertainty.__class__
if uncertainty_cls not in _known_uncertainties:
raise ValueError(
f"only uncertainties of type {_known_uncertainties} can be saved."
)
uncertainty_name = _unc_cls_to_name[uncertainty_cls]
hdr_uncertainty = fits.Header()
hdr_uncertainty[key_uncertainty_type] = uncertainty_name
# Assuming uncertainty is an StdDevUncertainty save just the array
# this might be problematic if the Uncertainty has a unit differing
# from the data so abort for different units. This is important for
# astropy > 1.2
if hasattr(self.uncertainty, "unit") and self.uncertainty.unit is not None:
if not _uncertainty_unit_equivalent_to_parent(
uncertainty_cls, self.uncertainty.unit, self.unit
):
raise ValueError(
"saving uncertainties with a unit that is not "
"equivalent to the unit from the data unit is not "
"supported."
)
hduUncert = fits.ImageHDU(
self.uncertainty.array, hdr_uncertainty, name=hdu_uncertainty
)
hdus.append(hduUncert)
if hdu_flags and self.flags:
raise NotImplementedError(
"adding the flags to a HDU is not supported at this time."
)
if hdu_psf and self.psf is not None:
# The PSF is an image, so write it as a separate ImageHDU.
hdu_psf = fits.ImageHDU(self.psf, name=hdu_psf)
hdus.append(hdu_psf)
hdulist = fits.HDUList(hdus)
return hdulist
| CCDData.to_hdu | File-Level |
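A short sketch showing the default extension names produced by the method above:
import numpy as np
from astropy.nddata import CCDData, StdDevUncertainty

ccd = CCDData(np.ones((5, 5)), unit="adu",
              mask=np.zeros((5, 5), dtype=bool),
              uncertainty=StdDevUncertainty(np.ones((5, 5))))
hdulist = ccd.to_hdu()
print([hdu.name for hdu in hdulist])  # ['PRIMARY', 'MASK', 'UNCERT']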
astropy | 24 | astropy/coordinates/sky_coordinate.py | def apply_space_motion(self, new_obstime=None, dt=None):
"""Compute the position to a new time using the velocities.
Compute the position of the source represented by this coordinate object
to a new time using the velocities stored in this object and assuming
linear space motion (including relativistic corrections). This is
sometimes referred to as an "epoch transformation".
The initial time before the evolution is taken from the ``obstime``
attribute of this coordinate. Note that this method currently does not
support evolving coordinates where the *frame* has an ``obstime`` frame
attribute, so the ``obstime`` is only used for storing the before and
after times, not actually as an attribute of the frame. Alternatively,
if ``dt`` is given, an ``obstime`` need not be provided at all.
Parameters
----------
new_obstime : `~astropy.time.Time`, optional
The time at which to evolve the position to. Requires that the
``obstime`` attribute be present on this frame.
dt : `~astropy.units.Quantity`, `~astropy.time.TimeDelta`, optional
An amount of time to evolve the position of the source. Cannot be
given at the same time as ``new_obstime``.
Returns
-------
new_coord : |SkyCoord|
A new coordinate object with the evolved location of this coordinate
at the new time. ``obstime`` will be set on this object to the new
time only if ``self`` also has ``obstime``.
"""
| /usr/src/app/target_test_cases/failed_tests_SkyCoord.apply_space_motion.txt | def apply_space_motion(self, new_obstime=None, dt=None):
"""Compute the position to a new time using the velocities.
Compute the position of the source represented by this coordinate object
to a new time using the velocities stored in this object and assuming
linear space motion (including relativistic corrections). This is
sometimes referred to as an "epoch transformation".
The initial time before the evolution is taken from the ``obstime``
attribute of this coordinate. Note that this method currently does not
support evolving coordinates where the *frame* has an ``obstime`` frame
attribute, so the ``obstime`` is only used for storing the before and
after times, not actually as an attribute of the frame. Alternatively,
if ``dt`` is given, an ``obstime`` need not be provided at all.
Parameters
----------
new_obstime : `~astropy.time.Time`, optional
The time at which to evolve the position to. Requires that the
``obstime`` attribute be present on this frame.
dt : `~astropy.units.Quantity`, `~astropy.time.TimeDelta`, optional
An amount of time to evolve the position of the source. Cannot be
given at the same time as ``new_obstime``.
Returns
-------
new_coord : |SkyCoord|
A new coordinate object with the evolved location of this coordinate
at the new time. ``obstime`` will be set on this object to the new
time only if ``self`` also has ``obstime``.
"""
from .builtin_frames.icrs import ICRS
if (new_obstime is None) == (dt is None):
raise ValueError(
"You must specify one of `new_obstime` or `dt`, but not both."
)
# Validate that we have velocity info
if "s" not in self.frame.data.differentials:
raise ValueError("SkyCoord requires velocity data to evolve the position.")
if "obstime" in self.frame.frame_attributes:
raise NotImplementedError(
"Updating the coordinates in a frame with explicit time dependence is"
" currently not supported. If you would like this functionality, please"
" open an issue on github:\nhttps://github.com/astropy/astropy"
)
if new_obstime is not None and self.obstime is None:
# If no obstime is already on this object, raise an error if a new
# obstime is passed: we need to know the time / epoch at which the
# the position / velocity were measured initially
raise ValueError(
"This object has no associated `obstime`. apply_space_motion() must"
" receive a time difference, `dt`, and not a new obstime."
)
# Compute t1 and t2, the times used in the starpm call, which *only*
# uses them to compute a delta-time
t1 = self.obstime
if dt is None:
# self.obstime is not None and new_obstime is not None b/c of above
# checks
t2 = new_obstime
else:
# new_obstime is definitely None b/c of the above checks
if t1 is None:
# MAGIC NUMBER: if the current SkyCoord object has no obstime,
# assume J2000 to do the dt offset. This is not actually used
# for anything except a delta-t in starpm, so it's OK that it's
# not necessarily the "real" obstime
t1 = Time("J2000")
new_obstime = None # we don't actually know the initial obstime
t2 = t1 + dt
else:
t2 = t1 + dt
new_obstime = t2
# starpm wants tdb time
t1 = t1.tdb
t2 = t2.tdb
# proper motion in RA should not include the cos(dec) term, see the
# erfa function eraStarpv, comment (4). So we convert to the regular
# spherical differentials.
icrsrep = self.icrs.represent_as(SphericalRepresentation, SphericalDifferential)
icrsvel = icrsrep.differentials["s"]
parallax_zero = False
try:
plx = icrsrep.distance.to_value(u.arcsecond, u.parallax())
except u.UnitConversionError: # No distance: set to 0 by convention
plx = 0.0
parallax_zero = True
try:
rv = icrsvel.d_distance.to_value(u.km / u.s)
except u.UnitConversionError: # No RV
rv = 0.0
starpm = erfa.pmsafe(
icrsrep.lon.radian,
icrsrep.lat.radian,
icrsvel.d_lon.to_value(u.radian / u.yr),
icrsvel.d_lat.to_value(u.radian / u.yr),
plx,
rv,
t1.jd1,
t1.jd2,
t2.jd1,
t2.jd2,
)
if parallax_zero:
new_distance = None
else:
new_distance = Distance(parallax=starpm[4] << u.arcsec)
icrs2 = ICRS(
ra=u.Quantity(starpm[0], u.radian, copy=COPY_IF_NEEDED),
dec=u.Quantity(starpm[1], u.radian, copy=COPY_IF_NEEDED),
pm_ra=u.Quantity(starpm[2], u.radian / u.yr, copy=COPY_IF_NEEDED),
pm_dec=u.Quantity(starpm[3], u.radian / u.yr, copy=COPY_IF_NEEDED),
distance=new_distance,
radial_velocity=u.Quantity(starpm[5], u.km / u.s, copy=COPY_IF_NEEDED),
differential_type=SphericalDifferential,
)
# Update the obstime of the returned SkyCoord, and need to carry along
# the frame attributes
frattrs = {
attrnm: getattr(self, attrnm) for attrnm in self._extra_frameattr_names
}
frattrs["obstime"] = new_obstime
result = self.__class__(icrs2, **frattrs).transform_to(self.frame)
# Without this the output might not have the right differential type.
# Not sure if this fixes the problem or just hides it. See #11932
result.differential_type = self.differential_type
return result
| SkyCoord.apply_space_motion | Self-Contained |
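A sketch of an epoch transformation driven only by proper motions; the code above treats a missing distance and radial velocity as zero:
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.time import Time

c = SkyCoord(ra=10 * u.deg, dec=20 * u.deg,
             pm_ra_cosdec=100 * u.mas / u.yr,
             pm_dec=-50 * u.mas / u.yr,
             obstime=Time("J2000"))
c_new = c.apply_space_motion(new_obstime=Time("J2020"))
print(c_new.obstime)  # J2020, carried onto the evolved coordinate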
astropy | 28 | astropy/coordinates/spectral_quantity.py | def to(self, unit, equivalencies=[], doppler_rest=None, doppler_convention=None):
"""
Return a new `~astropy.coordinates.SpectralQuantity` object with the specified unit.
By default, the ``spectral`` equivalency will be enabled, as well as
one of the Doppler equivalencies if converting to/from velocities.
Parameters
----------
unit : unit-like
An object that represents the unit to convert to. Must be
an `~astropy.units.UnitBase` object or a string parseable
by the `~astropy.units` package, and should be a spectral unit.
equivalencies : list of `~astropy.units.equivalencies.Equivalency`, optional
A list of equivalence pairs to try if the units are not
directly convertible (along with spectral).
See :ref:`astropy:unit_equivalencies`.
If not provided or ``[]``, spectral equivalencies will be used.
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
doppler_rest : `~astropy.units.Quantity` ['speed'], optional
The rest value used when converting to/from velocities. This will
also be set at an attribute on the output
`~astropy.coordinates.SpectralQuantity`.
doppler_convention : {'relativistic', 'optical', 'radio'}, optional
The Doppler convention used when converting to/from velocities.
This will also be set at an attribute on the output
`~astropy.coordinates.SpectralQuantity`.
Returns
-------
`SpectralQuantity`
New spectral coordinate object with data converted to the new unit.
"""
| /usr/src/app/target_test_cases/failed_tests_SpectralQuantity.to.txt | def to(self, unit, equivalencies=[], doppler_rest=None, doppler_convention=None):
"""
Return a new `~astropy.coordinates.SpectralQuantity` object with the specified unit.
By default, the ``spectral`` equivalency will be enabled, as well as
one of the Doppler equivalencies if converting to/from velocities.
Parameters
----------
unit : unit-like
An object that represents the unit to convert to. Must be
an `~astropy.units.UnitBase` object or a string parseable
by the `~astropy.units` package, and should be a spectral unit.
equivalencies : list of `~astropy.units.equivalencies.Equivalency`, optional
A list of equivalence pairs to try if the units are not
directly convertible (along with spectral).
See :ref:`astropy:unit_equivalencies`.
If not provided or ``[]``, spectral equivalencies will be used.
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
doppler_rest : `~astropy.units.Quantity` ['speed'], optional
The rest value used when converting to/from velocities. This will
also be set at an attribute on the output
`~astropy.coordinates.SpectralQuantity`.
doppler_convention : {'relativistic', 'optical', 'radio'}, optional
The Doppler convention used when converting to/from velocities.
This will also be set at an attribute on the output
`~astropy.coordinates.SpectralQuantity`.
Returns
-------
`SpectralQuantity`
New spectral coordinate object with data converted to the new unit.
"""
# Make sure units can be passed as strings
unit = Unit(unit)
# If equivalencies is explicitly set to None, we should just use the
# default Quantity.to with equivalencies also set to None
if equivalencies is None:
result = super().to(unit, equivalencies=None)
result = result.view(self.__class__)
result.__array_finalize__(self)
return result
# FIXME: need to consider case where doppler equivalency is passed in
# equivalencies list, or where the u.spectral equivalency is already passed
if doppler_rest is None:
doppler_rest = self._doppler_rest
if doppler_convention is None:
doppler_convention = self._doppler_convention
elif doppler_convention not in DOPPLER_CONVENTIONS:
raise ValueError(
"doppler_convention should be one of"
f" {'/'.join(sorted(DOPPLER_CONVENTIONS))}"
)
if self.unit.is_equivalent(KMS) and unit.is_equivalent(KMS):
# Special case: if the current and final units are both velocity,
# and either the rest value or the convention are different, we
# need to convert back to frequency temporarily.
if doppler_convention is not None and self._doppler_convention is None:
raise ValueError("Original doppler_convention not set")
if doppler_rest is not None and self._doppler_rest is None:
raise ValueError("Original doppler_rest not set")
if doppler_rest is None and doppler_convention is None:
result = super().to(unit, equivalencies=equivalencies)
result = result.view(self.__class__)
result.__array_finalize__(self)
return result
elif (doppler_rest is None) is not (doppler_convention is None):
raise ValueError(
"Either both or neither doppler_rest and doppler_convention should"
" be defined for velocity conversions"
)
vel_equiv1 = DOPPLER_CONVENTIONS[self._doppler_convention](
self._doppler_rest
)
freq = super().to(si.Hz, equivalencies=equivalencies + vel_equiv1)
vel_equiv2 = DOPPLER_CONVENTIONS[doppler_convention](doppler_rest)
result = freq.to(unit, equivalencies=equivalencies + vel_equiv2)
else:
additional_equivalencies = eq.spectral()
if self.unit.is_equivalent(KMS) or unit.is_equivalent(KMS):
if doppler_convention is None:
raise ValueError(
"doppler_convention not set, cannot convert to/from velocities"
)
if doppler_rest is None:
raise ValueError(
"doppler_rest not set, cannot convert to/from velocities"
)
additional_equivalencies = (
additional_equivalencies
+ DOPPLER_CONVENTIONS[doppler_convention](doppler_rest)
)
result = super().to(
unit, equivalencies=equivalencies + additional_equivalencies
)
# Since we have to explicitly specify when we want to keep this as a
# SpectralQuantity, we need to convert it back from a Quantity to
# a SpectralQuantity here. Note that we don't use __array_finalize__
# here since we might need to set the output doppler convention and
# rest based on the parameters passed to 'to'
result = result.view(self.__class__)
result.__array_finalize__(self)
result._doppler_convention = doppler_convention
result._doppler_rest = doppler_rest
return result
| SpectralQuantity.to | Self-Contained |
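A sketch of a wavelength-to-velocity conversion through the Doppler handling above (the H-beta rest wavelength is used purely as an example):
import astropy.units as u
from astropy.coordinates import SpectralQuantity

sq = SpectralQuantity(500 * u.nm)
vel = sq.to(u.km / u.s, doppler_rest=486.1 * u.nm,
            doppler_convention="optical")
print(vel)  # recession velocity implied by the optical convention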
astropy | 36 | astropy/timeseries/sampled.py | def fold(
self,
period=None,
epoch_time=None,
epoch_phase=0,
wrap_phase=None,
normalize_phase=False,
):
"""
Return a new `~astropy.timeseries.TimeSeries` folded with a period and
epoch.
Parameters
----------
period : `~astropy.units.Quantity` ['time']
The period to use for folding
epoch_time : `~astropy.time.Time`
The time to use as the reference epoch, at which the relative time
offset / phase will be ``epoch_phase``. Defaults to the first time
in the time series.
epoch_phase : float or `~astropy.units.Quantity` ['dimensionless', 'time']
Phase of ``epoch_time``. If ``normalize_phase`` is `True`, this
should be a dimensionless value, while if ``normalize_phase`` is
``False``, this should be a `~astropy.units.Quantity` with time
units. Defaults to 0.
wrap_phase : float or `~astropy.units.Quantity` ['dimensionless', 'time']
The value of the phase above which values are wrapped back by one
period. If ``normalize_phase`` is `True`, this should be a
dimensionless value, while if ``normalize_phase`` is ``False``,
this should be a `~astropy.units.Quantity` with time units.
Defaults to half the period, so that the resulting time series goes
from ``-period / 2`` to ``period / 2`` (if ``normalize_phase`` is
`False`) or -0.5 to 0.5 (if ``normalize_phase`` is `True`).
normalize_phase : bool
If `False` phase is returned as `~astropy.time.TimeDelta`,
otherwise as a dimensionless `~astropy.units.Quantity`.
Returns
-------
folded_timeseries : `~astropy.timeseries.TimeSeries`
The folded time series object with phase as the ``time`` column.
"""
| /usr/src/app/target_test_cases/failed_tests_TimeSeries.fold.txt | def fold(
self,
period=None,
epoch_time=None,
epoch_phase=0,
wrap_phase=None,
normalize_phase=False,
):
"""
Return a new `~astropy.timeseries.TimeSeries` folded with a period and
epoch.
Parameters
----------
period : `~astropy.units.Quantity` ['time']
The period to use for folding
epoch_time : `~astropy.time.Time`
The time to use as the reference epoch, at which the relative time
offset / phase will be ``epoch_phase``. Defaults to the first time
in the time series.
epoch_phase : float or `~astropy.units.Quantity` ['dimensionless', 'time']
Phase of ``epoch_time``. If ``normalize_phase`` is `True`, this
should be a dimensionless value, while if ``normalize_phase`` is
``False``, this should be a `~astropy.units.Quantity` with time
units. Defaults to 0.
wrap_phase : float or `~astropy.units.Quantity` ['dimensionless', 'time']
The value of the phase above which values are wrapped back by one
period. If ``normalize_phase`` is `True`, this should be a
dimensionless value, while if ``normalize_phase`` is ``False``,
this should be a `~astropy.units.Quantity` with time units.
Defaults to half the period, so that the resulting time series goes
from ``-period / 2`` to ``period / 2`` (if ``normalize_phase`` is
`False`) or -0.5 to 0.5 (if ``normalize_phase`` is `True`).
normalize_phase : bool
If `False` phase is returned as `~astropy.time.TimeDelta`,
otherwise as a dimensionless `~astropy.units.Quantity`.
Returns
-------
folded_timeseries : `~astropy.timeseries.TimeSeries`
The folded time series object with phase as the ``time`` column.
"""
if not isinstance(period, Quantity) or period.unit.physical_type != "time":
raise UnitsError("period should be a Quantity in units of time")
folded = self.copy()
if epoch_time is None:
epoch_time = self.time[0]
else:
epoch_time = Time(epoch_time)
period_sec = period.to_value(u.s)
if normalize_phase:
if (
isinstance(epoch_phase, Quantity)
and epoch_phase.unit.physical_type != "dimensionless"
):
raise UnitsError(
"epoch_phase should be a dimensionless Quantity "
"or a float when normalize_phase=True"
)
epoch_phase_sec = epoch_phase * period_sec
else:
if epoch_phase == 0:
epoch_phase_sec = 0.0
else:
if (
not isinstance(epoch_phase, Quantity)
or epoch_phase.unit.physical_type != "time"
):
raise UnitsError(
"epoch_phase should be a Quantity in units "
"of time when normalize_phase=False"
)
epoch_phase_sec = epoch_phase.to_value(u.s)
if wrap_phase is None:
wrap_phase = period_sec / 2
else:
if normalize_phase:
if isinstance(
wrap_phase, Quantity
) and not wrap_phase.unit.is_equivalent(u.one):
raise UnitsError(
"wrap_phase should be dimensionless when normalize_phase=True"
)
else:
if wrap_phase < 0 or wrap_phase > 1:
raise ValueError("wrap_phase should be between 0 and 1")
else:
wrap_phase = wrap_phase * period_sec
else:
if (
isinstance(wrap_phase, Quantity)
and wrap_phase.unit.physical_type == "time"
):
if wrap_phase < 0 or wrap_phase > period:
raise ValueError(
"wrap_phase should be between 0 and the period"
)
else:
wrap_phase = wrap_phase.to_value(u.s)
else:
raise UnitsError(
"wrap_phase should be a Quantity in units "
"of time when normalize_phase=False"
)
relative_time_sec = (
(self.time - epoch_time).sec + epoch_phase_sec + (period_sec - wrap_phase)
) % period_sec - (period_sec - wrap_phase)
folded_time = TimeDelta(relative_time_sec * u.s)
if normalize_phase:
folded_time = (folded_time / period).decompose()
period = period_sec = 1
with folded._delay_required_column_checks():
folded.remove_column("time")
folded.add_column(folded_time, name="time", index=0)
return folded
| TimeSeries.fold | Self-Contained |
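A minimal usage sketch for ``TimeSeries.fold`` (assumes astropy is installed; the times and period below are invented for illustration):

# Fold an evenly sampled series on a 0.2 day period; with the defaults the
# resulting "time" column holds TimeDelta phases in [-0.1, 0.1) days.
import numpy as np
from astropy import units as u
from astropy.time import Time
from astropy.timeseries import TimeSeries

ts = TimeSeries(time=Time(2459000.0 + np.linspace(0, 1, 11), format="jd"))
folded = ts.fold(period=0.2 * u.day)
print(folded["time"].to(u.day))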
astropy | 40 | astropy/timeseries/downsample.py | def aggregate_downsample(
time_series,
*,
time_bin_size=None,
time_bin_start=None,
time_bin_end=None,
n_bins=None,
aggregate_func=None,
):
"""
Downsample a time series by binning values into bins with a fixed size or
custom sizes, using a single function to combine the values in the bin.
Parameters
----------
time_series : :class:`~astropy.timeseries.TimeSeries`
The time series to downsample.
time_bin_size : `~astropy.units.Quantity` or `~astropy.time.TimeDelta` ['time'], optional
The time interval for the binned time series - this is either a scalar
value (in which case all time bins will be assumed to have the same
        duration) or an array of values (in which case each time bin can
have a different duration). If this argument is provided,
``time_bin_end`` should not be provided.
time_bin_start : `~astropy.time.Time` or iterable, optional
The start time for the binned time series - this can be either given
directly as a `~astropy.time.Time` array or as any iterable that
initializes the `~astropy.time.Time` class. This can also be a scalar
value if ``time_bin_size`` or ``time_bin_end`` is provided.
Defaults to the first time in the sampled time series.
time_bin_end : `~astropy.time.Time` or iterable, optional
The times of the end of each bin - this can be either given directly as
a `~astropy.time.Time` array or as any iterable that initializes the
`~astropy.time.Time` class. This can only be given if ``time_bin_start``
is provided or its default is used. If ``time_bin_end`` is scalar and
``time_bin_start`` is an array, time bins are assumed to be contiguous;
the end of each bin is the start of the next one, and ``time_bin_end`` gives
the end time for the last bin. If ``time_bin_end`` is an array and
``time_bin_start`` is scalar, bins will be contiguous. If both ``time_bin_end``
and ``time_bin_start`` are arrays, bins do not need to be contiguous.
If this argument is provided, ``time_bin_size`` should not be provided.
n_bins : int, optional
The number of bins to use. Defaults to the number needed to fit all
the original points. If both ``time_bin_start`` and ``time_bin_size``
        are provided and are scalar values, this determines the total number of bins
within that interval. If ``time_bin_start`` is an iterable, this
parameter will be ignored.
aggregate_func : callable, optional
The function to use for combining points in the same bin. Defaults
to np.nanmean.
Returns
-------
binned_time_series : :class:`~astropy.timeseries.BinnedTimeSeries`
The downsampled time series.
"""
| /usr/src/app/target_test_cases/failed_tests_aggregate_downsample.txt | def aggregate_downsample(
time_series,
*,
time_bin_size=None,
time_bin_start=None,
time_bin_end=None,
n_bins=None,
aggregate_func=None,
):
"""
Downsample a time series by binning values into bins with a fixed size or
custom sizes, using a single function to combine the values in the bin.
Parameters
----------
time_series : :class:`~astropy.timeseries.TimeSeries`
The time series to downsample.
time_bin_size : `~astropy.units.Quantity` or `~astropy.time.TimeDelta` ['time'], optional
The time interval for the binned time series - this is either a scalar
value (in which case all time bins will be assumed to have the same
        duration) or an array of values (in which case each time bin can
have a different duration). If this argument is provided,
``time_bin_end`` should not be provided.
time_bin_start : `~astropy.time.Time` or iterable, optional
The start time for the binned time series - this can be either given
directly as a `~astropy.time.Time` array or as any iterable that
initializes the `~astropy.time.Time` class. This can also be a scalar
value if ``time_bin_size`` or ``time_bin_end`` is provided.
Defaults to the first time in the sampled time series.
time_bin_end : `~astropy.time.Time` or iterable, optional
The times of the end of each bin - this can be either given directly as
a `~astropy.time.Time` array or as any iterable that initializes the
`~astropy.time.Time` class. This can only be given if ``time_bin_start``
is provided or its default is used. If ``time_bin_end`` is scalar and
``time_bin_start`` is an array, time bins are assumed to be contiguous;
the end of each bin is the start of the next one, and ``time_bin_end`` gives
the end time for the last bin. If ``time_bin_end`` is an array and
``time_bin_start`` is scalar, bins will be contiguous. If both ``time_bin_end``
and ``time_bin_start`` are arrays, bins do not need to be contiguous.
If this argument is provided, ``time_bin_size`` should not be provided.
n_bins : int, optional
The number of bins to use. Defaults to the number needed to fit all
the original points. If both ``time_bin_start`` and ``time_bin_size``
        are provided and are scalar values, this determines the total number of bins
within that interval. If ``time_bin_start`` is an iterable, this
parameter will be ignored.
aggregate_func : callable, optional
The function to use for combining points in the same bin. Defaults
to np.nanmean.
Returns
-------
binned_time_series : :class:`~astropy.timeseries.BinnedTimeSeries`
The downsampled time series.
"""
if not isinstance(time_series, TimeSeries):
raise TypeError("time_series should be a TimeSeries")
if time_bin_size is not None and not isinstance(
time_bin_size, (u.Quantity, TimeDelta)
):
raise TypeError("'time_bin_size' should be a Quantity or a TimeDelta")
if time_bin_start is not None and not isinstance(time_bin_start, (Time, TimeDelta)):
time_bin_start = Time(time_bin_start)
if time_bin_end is not None and not isinstance(time_bin_end, (Time, TimeDelta)):
time_bin_end = Time(time_bin_end)
# Use the table sorted by time
ts_sorted = time_series.iloc[:]
# If start time is not provided, it is assumed to be the start of the timeseries
if time_bin_start is None:
time_bin_start = ts_sorted.time[0]
# Total duration of the timeseries is needed for determining either
    # `time_bin_size` or `n_bins` in the case of scalar `time_bin_start`
if time_bin_start.isscalar:
time_duration = (ts_sorted.time[-1] - time_bin_start).sec
if time_bin_size is None and time_bin_end is None:
if time_bin_start.isscalar:
if n_bins is None:
raise TypeError(
"With single 'time_bin_start' either 'n_bins', "
"'time_bin_size' or time_bin_end' must be provided"
)
else:
                # `n_bins` defaults to the number needed to fit all points
time_bin_size = time_duration / n_bins * u.s
else:
time_bin_end = np.maximum(ts_sorted.time[-1], time_bin_start[-1])
if time_bin_start.isscalar:
if time_bin_size is not None:
if time_bin_size.isscalar:
# Determine the number of bins
if n_bins is None:
bin_size_sec = time_bin_size.to_value(u.s)
n_bins = int(np.ceil(time_duration / bin_size_sec))
elif time_bin_end is not None:
if not time_bin_end.isscalar:
# Convert start time to an array and populate using `time_bin_end`
scalar_start_time = time_bin_start
time_bin_start = time_bin_end.replicate(copy=True)
time_bin_start[0] = scalar_start_time
time_bin_start[1:] = time_bin_end[:-1]
# Check for overlapping bins, and warn if they are present
if time_bin_end is not None:
if (
not time_bin_end.isscalar
and not time_bin_start.isscalar
and np.any(time_bin_start[1:] < time_bin_end[:-1])
):
warnings.warn(
"Overlapping bins should be avoided since they "
"can lead to double-counting of data during binning.",
AstropyUserWarning,
)
binned = BinnedTimeSeries(
time_bin_size=time_bin_size,
time_bin_start=time_bin_start,
time_bin_end=time_bin_end,
n_bins=n_bins,
)
if aggregate_func is None:
aggregate_func = np.nanmean
# Start and end times of the binned timeseries
bin_start = binned.time_bin_start
bin_end = binned.time_bin_end
# Set `n_bins` to match the length of `time_bin_start` if
# `n_bins` is unspecified or if `time_bin_start` is an iterable
if n_bins is None or not time_bin_start.isscalar:
n_bins = len(bin_start)
# Find the subset of the table that is inside the union of all bins
# - output: `keep` a mask to create the subset
    # - use relative time in seconds (`np.longdouble`) in creating `keep` to speed up
# (`Time` object comparison is rather slow)
# - tiny sacrifice on precision (< 0.01ns on 64 bit platform)
rel_base = ts_sorted.time[0]
rel_bin_start = _to_relative_longdouble(bin_start, rel_base)
rel_bin_end = _to_relative_longdouble(bin_end, rel_base)
rel_ts_sorted_time = _to_relative_longdouble(ts_sorted.time, rel_base)
keep = (rel_ts_sorted_time >= rel_bin_start[0]) & (
rel_ts_sorted_time <= rel_bin_end[-1]
)
# Find out indices to be removed because of noncontiguous bins
#
# Only need to check when adjacent bins have gaps, i.e.,
# bin_start[ind + 1] > bin_end[ind]
# - see: https://github.com/astropy/astropy/issues/13058#issuecomment-1090846697
# on thoughts on how to reduce the number of times to loop
noncontiguous_bins_indices = np.where(rel_bin_start[1:] > rel_bin_end[:-1])[0]
for ind in noncontiguous_bins_indices:
delete_indices = np.where(
np.logical_and(
rel_ts_sorted_time > rel_bin_end[ind],
rel_ts_sorted_time < rel_bin_start[ind + 1],
)
)
keep[delete_indices] = False
rel_subset_time = rel_ts_sorted_time[keep]
# Figure out which bin each row falls in by sorting with respect
# to the bin end times
indices = np.searchsorted(rel_bin_end, rel_subset_time)
# For time == bin_start[i+1] == bin_end[i], let bin_start takes precedence
if len(indices) and np.all(rel_bin_start[1:] >= rel_bin_end[:-1]):
indices_start = np.searchsorted(
rel_subset_time, rel_bin_start[rel_bin_start <= rel_ts_sorted_time[-1]]
)
indices[indices_start] = np.arange(len(indices_start))
# Determine rows where values are defined
if len(indices):
groups = np.hstack([0, np.nonzero(np.diff(indices))[0] + 1])
else:
groups = np.array([])
# Find unique indices to determine which rows in the final time series
# will not be empty.
unique_indices = np.unique(indices)
# Add back columns
subset = ts_sorted[keep]
for colname in subset.colnames:
if colname == "time":
continue
values = subset[colname]
# FIXME: figure out how to avoid the following, if possible
if not isinstance(values, (np.ndarray, u.Quantity)):
warnings.warn(
"Skipping column {0} since it has a mix-in type", AstropyUserWarning
)
continue
if isinstance(values, u.Quantity):
data = u.Quantity(np.repeat(np.nan, n_bins), unit=values.unit)
data[unique_indices] = u.Quantity(
reduceat(values.value, groups, aggregate_func), values.unit, copy=False
)
else:
data = np.ma.zeros(n_bins, dtype=values.dtype)
data.mask = 1
data[unique_indices] = reduceat(values, groups, aggregate_func)
data.mask[unique_indices] = 0
binned[colname] = data
return binned
| aggregate_downsample | File-Level |
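A short, hedged sketch of fixed-size binning (the sample flux values are made up):

# Bin an 11-point series into 3-second bins; values in each bin are combined
# with the default aggregator, np.nanmean.
import numpy as np
from astropy import units as u
from astropy.time import Time
from astropy.timeseries import TimeSeries, aggregate_downsample

ts = TimeSeries(time=Time("2020-01-01") + np.arange(11) * u.s)
ts["flux"] = np.arange(11.0) * u.mJy
binned = aggregate_downsample(ts, time_bin_size=3 * u.s)
print(binned["flux"])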
astropy | 44 | astropy/io/fits/hdu/compressed/_tiled_compression.py | def compress_image_data(
image_data,
compression_type,
compressed_header,
compressed_coldefs,
):
"""
Compress the data in a `~astropy.io.fits.CompImageHDU`.
    The input HDU is expected to have an uncompressed numpy array as its
``.data`` attribute.
Parameters
----------
image_data : `~numpy.ndarray`
The image data to compress
compression_type : str
The compression algorithm
compressed_header : `~astropy.io.fits.Header`
The header of the compressed binary table
compressed_coldefs : `~astropy.io.fits.ColDefs`
The ColDefs object for the compressed binary table
Returns
-------
nbytes : `int`
The number of bytes of the heap.
heap : `bytes`
The bytes of the FITS table heap.
"""
| /usr/src/app/target_test_cases/failed_tests_compress_image_data.txt | def compress_image_data(
image_data,
compression_type,
compressed_header,
compressed_coldefs,
):
"""
Compress the data in a `~astropy.io.fits.CompImageHDU`.
    The input HDU is expected to have an uncompressed numpy array as its
``.data`` attribute.
Parameters
----------
image_data : `~numpy.ndarray`
The image data to compress
compression_type : str
The compression algorithm
compressed_header : `~astropy.io.fits.Header`
The header of the compressed binary table
compressed_coldefs : `~astropy.io.fits.ColDefs`
The ColDefs object for the compressed binary table
Returns
-------
nbytes : `int`
The number of bytes of the heap.
heap : `bytes`
The bytes of the FITS table heap.
"""
if not isinstance(image_data, np.ndarray):
raise TypeError("Image data must be a numpy.ndarray")
_check_compressed_header(compressed_header)
# TODO: This implementation is memory inefficient as it generates all the
# compressed bytes before forming them into the heap, leading to 2x the
# potential memory usage. Directly storing the compressed bytes into an
# expanding heap would fix this.
tile_shape = _tile_shape(compressed_header)
data_shape = _data_shape(compressed_header)
compressed_bytes = []
gzip_fallback = []
scales = []
zeros = []
zblank = None
noisebit = _get_compression_setting(compressed_header, "noisebit", 0)
settings = _header_to_settings(compressed_header)
for irow, tile_slices in _iter_array_tiles(data_shape, tile_shape):
tile_data = image_data[tile_slices]
settings = _update_tile_settings(settings, compression_type, tile_data.shape)
quantize = "ZSCALE" in compressed_coldefs.dtype.names
if tile_data.dtype.kind == "f" and quantize:
dither_method = DITHER_METHODS[
compressed_header.get("ZQUANTIZ", "NO_DITHER")
]
dither_seed = compressed_header.get("ZDITHER0", 0)
q = Quantize(
row=(irow + dither_seed) if dither_method != -1 else 0,
dither_method=dither_method,
quantize_level=noisebit,
bitpix=compressed_header["ZBITPIX"],
)
original_shape = tile_data.shape
# If there are any NaN values in the data, we should reset them to
# a value that will not affect the quantization (an already existing
# data value in the array) and we can then reset this after quantization
# to ZBLANK and set the appropriate header keyword
nan_mask = np.isnan(tile_data)
any_nan = np.any(nan_mask)
if any_nan:
# Note that we need to copy here to avoid modifying the input array.
tile_data = tile_data.copy()
if np.all(nan_mask):
tile_data[nan_mask] = 0
else:
tile_data[nan_mask] = np.nanmin(tile_data)
try:
tile_data, scale, zero = q.encode_quantized(tile_data)
except QuantizationFailedException:
if any_nan:
# reset NaN values since we will losslessly compress.
tile_data[nan_mask] = np.nan
scales.append(0)
zeros.append(0)
gzip_fallback.append(True)
else:
tile_data = np.asarray(tile_data).reshape(original_shape)
if any_nan:
if not tile_data.flags.writeable:
tile_data = tile_data.copy()
# For now, we just use the default ZBLANK value and assume
# this is the same for all tiles. We could generalize this
# to allow different ZBLANK values (for example if the data
# includes this value by chance) and to allow different values
# per tile, which is allowed by the FITS standard.
tile_data[nan_mask] = DEFAULT_ZBLANK
zblank = DEFAULT_ZBLANK
scales.append(scale)
zeros.append(zero)
gzip_fallback.append(False)
else:
scales.append(0)
zeros.append(0)
gzip_fallback.append(False)
if gzip_fallback[-1]:
cbytes = _compress_tile(tile_data, algorithm="GZIP_1")
else:
cbytes = _compress_tile(tile_data, algorithm=compression_type, **settings)
compressed_bytes.append(cbytes)
if zblank is not None:
compressed_header["ZBLANK"] = zblank
table = np.zeros(
len(compressed_bytes), dtype=compressed_coldefs.dtype.newbyteorder(">")
)
if "ZSCALE" in table.dtype.names:
table["ZSCALE"] = np.array(scales)
table["ZZERO"] = np.array(zeros)
for irow, cbytes in enumerate(compressed_bytes):
table["COMPRESSED_DATA"][irow, 0] = len(cbytes)
table["COMPRESSED_DATA"][:1, 1] = 0
table["COMPRESSED_DATA"][1:, 1] = np.cumsum(table["COMPRESSED_DATA"][:-1, 0])
for irow in range(len(compressed_bytes)):
if gzip_fallback[irow]:
table["GZIP_COMPRESSED_DATA"][irow] = table["COMPRESSED_DATA"][irow]
table["COMPRESSED_DATA"][irow] = 0
# For PLIO_1, the size of each heap element is a factor of two lower than
    # the real size - not clear if this is deliberate or a bug somewhere.
if compression_type == "PLIO_1":
table["COMPRESSED_DATA"][:, 0] //= 2
# For PLIO_1, it looks like the compressed data is always stored big endian
if compression_type == "PLIO_1":
for irow in range(len(compressed_bytes)):
if not gzip_fallback[irow]:
array = np.frombuffer(compressed_bytes[irow], dtype="i2")
if array.dtype.byteorder == "<" or (
array.dtype.byteorder == "=" and sys.byteorder == "little"
):
compressed_bytes[irow] = array.astype(">i2", copy=False).tobytes()
compressed_bytes = b"".join(compressed_bytes)
table_bytes = table.tobytes()
    heap = table_bytes + compressed_bytes
return len(compressed_bytes), np.frombuffer(heap, dtype=np.uint8)
| compress_image_data | Repo-Level |
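``compress_image_data`` is internal and needs real compressed-table headers and column definitions, so rather than call it, here is a standalone sketch of just the heap-descriptor bookkeeping shown above (the tile byte strings are invented):

# Each descriptor row stores (length, offset) of one compressed tile; offsets
# are the cumulative sum of the preceding tile lengths.
import numpy as np

tiles = [b"aaaa", b"bbbbbb", b"cc"]
desc = np.zeros((len(tiles), 2), dtype=">i4")
desc[:, 0] = [len(t) for t in tiles]
desc[1:, 1] = np.cumsum(desc[:-1, 0])
print(desc)  # [[ 4  0] [ 6  4] [ 2 10]]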
astropy | 49 | astropy/utils/data.py | def download_file(
remote_url,
cache=False,
show_progress=True,
timeout=None,
sources=None,
pkgname="astropy",
http_headers=None,
ssl_context=None,
allow_insecure=False,
):
"""Downloads a URL and optionally caches the result.
It returns the filename of a file containing the URL's contents.
If ``cache=True`` and the file is present in the cache, just
returns the filename; if the file had to be downloaded, add it
    to the cache. If ``cache="update"``, always download and add it
to the cache.
The cache is effectively a dictionary mapping URLs to files; by default the
file contains the contents of the URL that is its key, but in practice
these can be obtained from a mirror (using ``sources``) or imported from
the local filesystem (using `~import_file_to_cache` or
`~import_download_cache`). Regardless, each file is regarded as
representing the contents of a particular URL, and this URL should be used
to look them up or otherwise manipulate them.
The files in the cache directory are named according to a cryptographic
hash of their URLs (currently MD5, so hackers can cause collisions).
The modification times on these files normally indicate when they were
last downloaded from the Internet.
Parameters
----------
remote_url : str
The URL of the file to download
cache : bool or "update", optional
Whether to cache the contents of remote URLs. If "update",
always download the remote URL in case there is a new version
and store the result in the cache.
show_progress : bool, optional
Whether to display a progress bar during the download (default
is `True`). Regardless of this setting, the progress bar is only
displayed when outputting to a terminal.
timeout : float, optional
Timeout for remote requests in seconds (default is the configurable
`astropy.utils.data.Conf.remote_timeout`).
sources : list of str, optional
If provided, a list of URLs to try to obtain the file from. The
result will be stored under the original URL. The original URL
will *not* be tried unless it is in this list; this is to prevent
long waits for a primary server that is known to be inaccessible
at the moment. If an empty list is passed, then ``download_file``
will not attempt to connect to the Internet, that is, if the file
is not in the cache a KeyError will be raised.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
http_headers : dict or None
HTTP request headers to pass into ``urlopen`` if needed. (These headers
are ignored if the protocol for the ``name_or_obj``/``sources`` entry
is not a remote HTTP URL.) In the default case (None), the headers are
``User-Agent: some_value`` and ``Accept: */*``, where ``some_value``
is set by ``astropy.utils.data.conf.default_http_user_agent``.
ssl_context : dict, optional
Keyword arguments to pass to `ssl.create_default_context` when
        downloading from HTTPS or TLS+FTP sources. This can be used to provide
alternative paths to root CA certificates. Additionally, if the key
``'certfile'`` and optionally ``'keyfile'`` and ``'password'`` are
included, they are passed to `ssl.SSLContext.load_cert_chain`. This
can be used for performing SSL/TLS client certificate authentication
for servers that require it.
allow_insecure : bool, optional
Allow downloading files over a TLS/SSL connection even when the server
certificate verification failed. When set to `True` the potentially
insecure download is allowed to proceed, but an
`~astropy.utils.exceptions.AstropyWarning` is issued. If you are
frequently getting certificate verification warnings, consider
installing or upgrading `certifi`_ package, which provides frequently
updated certificates for common root CAs (i.e., a set similar to those
used by web browsers). If installed, Astropy will use it
automatically.
.. _certifi: https://pypi.org/project/certifi/
Returns
-------
local_path : str
        Returns the local path that the file was downloaded to.
Raises
------
urllib.error.URLError
Whenever there's a problem getting the remote file.
KeyError
When a file was requested from the cache but is missing and no
sources were provided to obtain it from the Internet.
Notes
-----
Because this function returns a filename, another process could run
`clear_download_cache` before you actually open the file, leaving
you with a filename that no longer points to a usable file.
"""
| /usr/src/app/target_test_cases/failed_tests_download_file.txt | def download_file(
remote_url,
cache=False,
show_progress=True,
timeout=None,
sources=None,
pkgname="astropy",
http_headers=None,
ssl_context=None,
allow_insecure=False,
):
"""Downloads a URL and optionally caches the result.
It returns the filename of a file containing the URL's contents.
If ``cache=True`` and the file is present in the cache, just
returns the filename; if the file had to be downloaded, add it
to the cache. If ``cache="update"`` always download and add it
to the cache.
The cache is effectively a dictionary mapping URLs to files; by default the
file contains the contents of the URL that is its key, but in practice
these can be obtained from a mirror (using ``sources``) or imported from
the local filesystem (using `~import_file_to_cache` or
`~import_download_cache`). Regardless, each file is regarded as
representing the contents of a particular URL, and this URL should be used
to look them up or otherwise manipulate them.
The files in the cache directory are named according to a cryptographic
hash of their URLs (currently MD5, so hackers can cause collisions).
The modification times on these files normally indicate when they were
last downloaded from the Internet.
Parameters
----------
remote_url : str
The URL of the file to download
cache : bool or "update", optional
Whether to cache the contents of remote URLs. If "update",
always download the remote URL in case there is a new version
and store the result in the cache.
show_progress : bool, optional
Whether to display a progress bar during the download (default
is `True`). Regardless of this setting, the progress bar is only
displayed when outputting to a terminal.
timeout : float, optional
Timeout for remote requests in seconds (default is the configurable
`astropy.utils.data.Conf.remote_timeout`).
sources : list of str, optional
If provided, a list of URLs to try to obtain the file from. The
result will be stored under the original URL. The original URL
will *not* be tried unless it is in this list; this is to prevent
long waits for a primary server that is known to be inaccessible
at the moment. If an empty list is passed, then ``download_file``
will not attempt to connect to the Internet, that is, if the file
is not in the cache a KeyError will be raised.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
http_headers : dict or None
HTTP request headers to pass into ``urlopen`` if needed. (These headers
are ignored if the protocol for the ``name_or_obj``/``sources`` entry
is not a remote HTTP URL.) In the default case (None), the headers are
``User-Agent: some_value`` and ``Accept: */*``, where ``some_value``
is set by ``astropy.utils.data.conf.default_http_user_agent``.
ssl_context : dict, optional
Keyword arguments to pass to `ssl.create_default_context` when
        downloading from HTTPS or TLS+FTP sources. This can be used to provide
alternative paths to root CA certificates. Additionally, if the key
``'certfile'`` and optionally ``'keyfile'`` and ``'password'`` are
included, they are passed to `ssl.SSLContext.load_cert_chain`. This
can be used for performing SSL/TLS client certificate authentication
for servers that require it.
allow_insecure : bool, optional
Allow downloading files over a TLS/SSL connection even when the server
certificate verification failed. When set to `True` the potentially
insecure download is allowed to proceed, but an
`~astropy.utils.exceptions.AstropyWarning` is issued. If you are
frequently getting certificate verification warnings, consider
installing or upgrading `certifi`_ package, which provides frequently
updated certificates for common root CAs (i.e., a set similar to those
used by web browsers). If installed, Astropy will use it
automatically.
.. _certifi: https://pypi.org/project/certifi/
Returns
-------
local_path : str
        Returns the local path that the file was downloaded to.
Raises
------
urllib.error.URLError
Whenever there's a problem getting the remote file.
KeyError
When a file was requested from the cache but is missing and no
sources were provided to obtain it from the Internet.
Notes
-----
Because this function returns a filename, another process could run
`clear_download_cache` before you actually open the file, leaving
you with a filename that no longer points to a usable file.
"""
if timeout is None:
timeout = conf.remote_timeout
if sources is None:
sources = [remote_url]
if http_headers is None:
http_headers = {"User-Agent": conf.default_http_user_agent, "Accept": "*/*"}
missing_cache = ""
url_key = remote_url
if cache:
try:
dldir = _get_download_cache_loc(pkgname)
except OSError as e:
cache = False
missing_cache = (
f"Cache directory cannot be read or created ({e}), "
"providing data in temporary file instead."
)
else:
if cache == "update":
pass
elif isinstance(cache, str):
raise ValueError(
f"Cache value '{cache}' was requested but "
"'update' is the only recognized string; "
"otherwise use a boolean"
)
else:
filename = os.path.join(dldir, _url_to_dirname(url_key), "contents")
if os.path.exists(filename):
return os.path.abspath(filename)
errors = {}
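    # Try each candidate source in turn; remember per-source failures so the
    # final exception can report all of them.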
for source_url in sources:
try:
f_name = _download_file_from_source(
source_url,
timeout=timeout,
show_progress=show_progress,
cache=cache,
remote_url=remote_url,
pkgname=pkgname,
http_headers=http_headers,
ssl_context=ssl_context,
allow_insecure=allow_insecure,
)
# Success!
break
except urllib.error.URLError as e:
# errno 8 is from SSL "EOF occurred in violation of protocol"
if (
hasattr(e, "reason")
and hasattr(e.reason, "errno")
and e.reason.errno == 8
):
e.reason.strerror = f"{e.reason.strerror}. requested URL: {remote_url}"
e.reason.args = (e.reason.errno, e.reason.strerror)
errors[source_url] = e
else: # No success
if not sources:
raise KeyError(
f"No sources listed and file {remote_url} not in cache! "
"Please include primary URL in sources if you want it to be "
"included as a valid source."
)
elif len(sources) == 1:
raise errors[sources[0]]
else:
raise urllib.error.URLError(
f"Unable to open any source! Exceptions were {errors}"
) from errors[sources[0]]
if cache:
try:
return import_file_to_cache(
url_key,
f_name,
remove_original=True,
replace=(cache == "update"),
pkgname=pkgname,
)
except PermissionError as e:
# Cache is readonly, we can't update it
missing_cache = (
f"Cache directory appears to be read-only ({e}), unable to import "
f"downloaded file, providing data in temporary file {f_name} "
"instead."
)
# FIXME: other kinds of cache problem can occur?
if missing_cache:
warn(CacheMissingWarning(missing_cache, f_name))
if conf.delete_temporary_downloads_at_exit:
_tempfilestodel.append(f_name)
return os.path.abspath(f_name)
| download_file | File-Level |
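A hedged usage sketch (the URL is a placeholder, not a real data product):

# Download a remote file once and reuse the cached copy on later calls.
from astropy.utils.data import download_file

local_path = download_file("https://example.com/sample.dat", cache=True)
with open(local_path, "rb") as f:
    print(f.read(64))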
astropy | 52 | astropy/coordinates/funcs.py | def get_constellation(coord, short_name=False, constellation_list="iau"):
"""
    Determines the constellation(s) containing the given coordinate object.
Parameters
----------
coord : coordinate-like
The object to determine the constellation of.
short_name : bool
If True, the returned names are the IAU-sanctioned abbreviated
names. Otherwise, full names for the constellations are used.
constellation_list : str
The set of constellations to use. Currently only ``'iau'`` is
supported, meaning the 88 "modern" constellations endorsed by the IAU.
Returns
-------
constellation : str or string array
        If ``coord`` contains a scalar coordinate, returns the name of the
constellation. If it is an array coordinate object, it returns an array
of names.
Notes
-----
To determine which constellation a point on the sky is in, this precesses
to B1875, and then uses the Delporte boundaries of the 88 modern
constellations, as tabulated by
`Roman 1987 <https://cdsarc.cds.unistra.fr/viz-bin/cat/VI/42>`_.
"""
| /usr/src/app/target_test_cases/failed_tests_get_constellation.txt | def get_constellation(coord, short_name=False, constellation_list="iau"):
"""
    Determines the constellation(s) containing the given coordinate object.
Parameters
----------
coord : coordinate-like
The object to determine the constellation of.
short_name : bool
If True, the returned names are the IAU-sanctioned abbreviated
names. Otherwise, full names for the constellations are used.
constellation_list : str
The set of constellations to use. Currently only ``'iau'`` is
supported, meaning the 88 "modern" constellations endorsed by the IAU.
Returns
-------
constellation : str or string array
        If ``coord`` contains a scalar coordinate, returns the name of the
constellation. If it is an array coordinate object, it returns an array
of names.
Notes
-----
To determine which constellation a point on the sky is in, this precesses
to B1875, and then uses the Delporte boundaries of the 88 modern
constellations, as tabulated by
`Roman 1987 <https://cdsarc.cds.unistra.fr/viz-bin/cat/VI/42>`_.
"""
if constellation_list != "iau":
raise ValueError("only 'iau' us currently supported for constellation_list")
# read the data files and cache them if they haven't been already
if not _constellation_data:
cdata = data.get_pkg_data_contents("data/constellation_data_roman87.dat")
ctable = ascii.read(cdata, names=["ral", "rau", "decl", "name"])
cnames = data.get_pkg_data_contents(
"data/constellation_names.dat", encoding="UTF8"
)
cnames_short_to_long = {
l[:3]: l[4:] for l in cnames.split("\n") if not l.startswith("#")
}
cnames_long = np.array([cnames_short_to_long[nm] for nm in ctable["name"]])
_constellation_data["ctable"] = ctable
_constellation_data["cnames_long"] = cnames_long
else:
ctable = _constellation_data["ctable"]
cnames_long = _constellation_data["cnames_long"]
isscalar = coord.isscalar
# if it is geocentric, we reproduce the frame but with the 1875 equinox,
# which is where the constellations are defined
# this yields a "dubious year" warning because ERFA considers the year 1875
# "dubious", probably because UTC isn't well-defined then and precession
# models aren't precisely calibrated back to then. But it's plenty
# sufficient for constellations
with warnings.catch_warnings():
warnings.simplefilter("ignore", erfa.ErfaWarning)
constel_coord = coord.transform_to(PrecessedGeocentric(equinox="B1875"))
if isscalar:
rah = constel_coord.ra.ravel().hour
decd = constel_coord.dec.ravel().deg
else:
rah = constel_coord.ra.hour
decd = constel_coord.dec.deg
constellidx = -np.ones(len(rah), dtype=int)
notided = constellidx == -1 # should be all
for i, row in enumerate(ctable):
msk = (row["ral"] < rah) & (rah < row["rau"]) & (decd > row["decl"])
constellidx[notided & msk] = i
notided = constellidx == -1
if np.sum(notided) == 0:
break
else:
raise ValueError(
f"Could not find constellation for coordinates {constel_coord[notided]}"
)
if short_name:
names = ctable["name"][constellidx]
else:
names = cnames_long[constellidx]
if isscalar:
return names[0]
else:
return names
| get_constellation | Self-Contained |
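A quick sanity check (Rigel lies in Orion, so both name forms are predictable):

from astropy.coordinates import SkyCoord, get_constellation

rigel = SkyCoord("05h14m32.3s", "-08d12m06s")
print(get_constellation(rigel))                   # 'Orion'
print(get_constellation(rigel, short_name=True))  # 'Ori'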
astropy | 67 | astropy/nddata/utils.py | def overlap_slices(large_array_shape, small_array_shape, position, mode="partial"):
"""
Get slices for the overlapping part of a small and a large array.
Given a certain position of the center of the small array, with
respect to the large array, tuples of slices are returned which can be
used to extract, add or subtract the small array at the given
position. This function takes care of the correct behavior at the
    boundaries, where the small array is cut off appropriately.
Integer positions are at the pixel centers.
Parameters
----------
large_array_shape : tuple of int or int
The shape of the large array (for 1D arrays, this can be an
`int`).
small_array_shape : int or tuple thereof
The shape of the small array (for 1D arrays, this can be an
`int`). See the ``mode`` keyword for additional details.
position : number or tuple thereof
The position of the small array's center with respect to the
large array. The pixel coordinates should be in the same order
as the array shape. Integer positions are at the pixel centers.
For any axis where ``small_array_shape`` is even, the position
is rounded up, e.g. extracting two elements with a center of
``1`` will define the extracted region as ``[0, 1]``.
mode : {'partial', 'trim', 'strict'}, optional
In ``'partial'`` mode, a partial overlap of the small and the
large array is sufficient. The ``'trim'`` mode is similar to
the ``'partial'`` mode, but ``slices_small`` will be adjusted to
return only the overlapping elements. In the ``'strict'`` mode,
the small array has to be fully contained in the large array,
otherwise an `~astropy.nddata.utils.PartialOverlapError` is
raised. In all modes, non-overlapping arrays will raise a
`~astropy.nddata.utils.NoOverlapError`.
Returns
-------
slices_large : tuple of slice
A tuple of slice objects for each axis of the large array, such
that ``large_array[slices_large]`` extracts the region of the
large array that overlaps with the small array.
slices_small : tuple of slice
A tuple of slice objects for each axis of the small array, such
that ``small_array[slices_small]`` extracts the region that is
inside the large array.
"""
| /usr/src/app/target_test_cases/failed_tests_overlap_slices.txt | def overlap_slices(large_array_shape, small_array_shape, position, mode="partial"):
"""
Get slices for the overlapping part of a small and a large array.
Given a certain position of the center of the small array, with
respect to the large array, tuples of slices are returned which can be
used to extract, add or subtract the small array at the given
position. This function takes care of the correct behavior at the
    boundaries, where the small array is cut off appropriately.
Integer positions are at the pixel centers.
Parameters
----------
large_array_shape : tuple of int or int
The shape of the large array (for 1D arrays, this can be an
`int`).
small_array_shape : int or tuple thereof
The shape of the small array (for 1D arrays, this can be an
`int`). See the ``mode`` keyword for additional details.
position : number or tuple thereof
The position of the small array's center with respect to the
large array. The pixel coordinates should be in the same order
as the array shape. Integer positions are at the pixel centers.
For any axis where ``small_array_shape`` is even, the position
is rounded up, e.g. extracting two elements with a center of
``1`` will define the extracted region as ``[0, 1]``.
mode : {'partial', 'trim', 'strict'}, optional
In ``'partial'`` mode, a partial overlap of the small and the
large array is sufficient. The ``'trim'`` mode is similar to
the ``'partial'`` mode, but ``slices_small`` will be adjusted to
return only the overlapping elements. In the ``'strict'`` mode,
the small array has to be fully contained in the large array,
otherwise an `~astropy.nddata.utils.PartialOverlapError` is
raised. In all modes, non-overlapping arrays will raise a
`~astropy.nddata.utils.NoOverlapError`.
Returns
-------
slices_large : tuple of slice
A tuple of slice objects for each axis of the large array, such
that ``large_array[slices_large]`` extracts the region of the
large array that overlaps with the small array.
slices_small : tuple of slice
A tuple of slice objects for each axis of the small array, such
that ``small_array[slices_small]`` extracts the region that is
inside the large array.
"""
if mode not in ["partial", "trim", "strict"]:
raise ValueError('Mode can be only "partial", "trim", or "strict".')
if np.isscalar(small_array_shape):
small_array_shape = (small_array_shape,)
if np.isscalar(large_array_shape):
large_array_shape = (large_array_shape,)
if np.isscalar(position):
position = (position,)
if any(~np.isfinite(position)):
raise ValueError("Input position contains invalid values (NaNs or infs).")
if len(small_array_shape) != len(large_array_shape):
raise ValueError(
'"large_array_shape" and "small_array_shape" must '
"have the same number of dimensions."
)
if len(small_array_shape) != len(position):
raise ValueError(
'"position" must have the same number of dimensions as "small_array_shape".'
)
# define the min/max pixel indices
indices_min = [
int(np.ceil(pos - (small_shape / 2.0)))
for (pos, small_shape) in zip(position, small_array_shape)
]
indices_max = [
int(np.ceil(pos + (small_shape / 2.0)))
for (pos, small_shape) in zip(position, small_array_shape)
]
for e_max in indices_max:
if e_max < 0 or (e_max == 0 and small_array_shape != (0, 0)):
raise NoOverlapError("Arrays do not overlap.")
for e_min, large_shape in zip(indices_min, large_array_shape):
if e_min >= large_shape:
raise NoOverlapError("Arrays do not overlap.")
if mode == "strict":
for e_min in indices_min:
if e_min < 0:
raise PartialOverlapError("Arrays overlap only partially.")
for e_max, large_shape in zip(indices_max, large_array_shape):
if e_max > large_shape:
raise PartialOverlapError("Arrays overlap only partially.")
# Set up slices
slices_large = tuple(
slice(max(0, indices_min), min(large_shape, indices_max))
for (indices_min, indices_max, large_shape) in zip(
indices_min, indices_max, large_array_shape
)
)
if mode == "trim":
slices_small = tuple(slice(0, slc.stop - slc.start) for slc in slices_large)
else:
slices_small = tuple(
slice(
max(0, -indices_min),
min(large_shape - indices_min, indices_max - indices_min),
)
for (indices_min, indices_max, large_shape) in zip(
indices_min, indices_max, large_array_shape
)
)
return slices_large, slices_small
| overlap_slices | Self-Contained |
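A worked example for the fully contained case (plain numpy input, astropy assumed installed):

# Slices for a 3x3 cutout centered at pixel (1, 1) of a 5x5 array.
import numpy as np
from astropy.nddata import overlap_slices

large = np.arange(25).reshape(5, 5)
slc_lg, slc_sm = overlap_slices(large.shape, (3, 3), (1, 1))
print(slc_lg)         # (slice(0, 3, None), slice(0, 3, None))
print(large[slc_lg])  # the upper-left 3x3 block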
astropy | 68 | astropy/io/votable/table.py | def parse(
source,
columns=None,
invalid="exception",
verify=None,
chunk_size=tree.DEFAULT_CHUNK_SIZE,
table_number=None,
table_id=None,
filename=None,
unit_format=None,
datatype_mapping=None,
_debug_python_based_parser=False,
):
"""
Parses a VOTABLE_ xml file (or file-like object), and returns a
`~astropy.io.votable.tree.VOTableFile` object.
Parameters
----------
source : path-like or file-like
Path or file-like object containing a VOTABLE_ xml file.
If file, must be readable.
columns : sequence of str, optional
List of field names to include in the output. The default is
to include all fields.
invalid : str, optional
One of the following values:
- 'exception': throw an exception when an invalid value is
encountered (default)
- 'mask': mask out invalid values
verify : {'ignore', 'warn', 'exception'}, optional
When ``'exception'``, raise an error when the file violates the spec,
otherwise either issue a warning (``'warn'``) or silently continue
(``'ignore'``). Warnings may be controlled using the standard Python
mechanisms. See the `warnings` module in the Python standard library
for more information. When not provided, uses the configuration setting
``astropy.io.votable.verify``, which defaults to 'ignore'.
.. versionchanged:: 4.0
``verify`` replaces the ``pedantic`` argument, which will be
deprecated in future.
.. versionchanged:: 5.0
The ``pedantic`` argument is deprecated.
.. versionchanged:: 6.0
The ``pedantic`` argument is removed.
chunk_size : int, optional
The number of rows to read before converting to an array.
Higher numbers are likely to be faster, but will consume more
memory.
table_number : int, optional
        The number of the table in the file to read in. If `None`, all
tables will be read. If a number, 0 refers to the first table
in the file, and only that numbered table will be parsed and
read in. Should not be used with ``table_id``.
table_id : str, optional
The ID of the table in the file to read in. Should not be
used with ``table_number``.
filename : str, optional
A filename, URL or other identifier to use in error messages.
If *filename* is None and *source* is a string (i.e. a path),
then *source* will be used as a filename for error messages.
Therefore, *filename* is only required when source is a
file-like object.
unit_format : str, astropy.units.format.Base subclass or None, optional
The unit format to use when parsing unit attributes. If a
string, must be the name of a unit formatter. The built-in
formats include ``generic``, ``fits``, ``cds``, and
``vounit``. A custom formatter may be provided by passing a
`~astropy.units.format.Base` subclass. If `None` (default),
the unit format to use will be the one specified by the
VOTable specification (which is ``cds`` up to version 1.3 of
VOTable, and ``vounit`` in more recent versions of the spec).
datatype_mapping : dict, optional
A mapping of datatype names (`str`) to valid VOTable datatype names
(str). For example, if the file being read contains the datatype
"unsignedInt" (an invalid datatype in VOTable), include the mapping
``{"unsignedInt": "long"}``.
Returns
-------
votable : `~astropy.io.votable.tree.VOTableFile` object
See Also
--------
astropy.io.votable.exceptions : The exceptions this function may raise.
"""
| /usr/src/app/target_test_cases/failed_tests_parse.txt | def parse(
source,
columns=None,
invalid="exception",
verify=None,
chunk_size=tree.DEFAULT_CHUNK_SIZE,
table_number=None,
table_id=None,
filename=None,
unit_format=None,
datatype_mapping=None,
_debug_python_based_parser=False,
):
"""
Parses a VOTABLE_ xml file (or file-like object), and returns a
`~astropy.io.votable.tree.VOTableFile` object.
Parameters
----------
source : path-like or file-like
Path or file-like object containing a VOTABLE_ xml file.
If file, must be readable.
columns : sequence of str, optional
List of field names to include in the output. The default is
to include all fields.
invalid : str, optional
One of the following values:
- 'exception': throw an exception when an invalid value is
encountered (default)
- 'mask': mask out invalid values
verify : {'ignore', 'warn', 'exception'}, optional
When ``'exception'``, raise an error when the file violates the spec,
otherwise either issue a warning (``'warn'``) or silently continue
(``'ignore'``). Warnings may be controlled using the standard Python
mechanisms. See the `warnings` module in the Python standard library
for more information. When not provided, uses the configuration setting
``astropy.io.votable.verify``, which defaults to 'ignore'.
.. versionchanged:: 4.0
``verify`` replaces the ``pedantic`` argument, which will be
deprecated in future.
.. versionchanged:: 5.0
The ``pedantic`` argument is deprecated.
.. versionchanged:: 6.0
The ``pedantic`` argument is removed.
chunk_size : int, optional
The number of rows to read before converting to an array.
Higher numbers are likely to be faster, but will consume more
memory.
table_number : int, optional
        The number of the table in the file to read in. If `None`, all
tables will be read. If a number, 0 refers to the first table
in the file, and only that numbered table will be parsed and
read in. Should not be used with ``table_id``.
table_id : str, optional
The ID of the table in the file to read in. Should not be
used with ``table_number``.
filename : str, optional
A filename, URL or other identifier to use in error messages.
If *filename* is None and *source* is a string (i.e. a path),
then *source* will be used as a filename for error messages.
Therefore, *filename* is only required when source is a
file-like object.
unit_format : str, astropy.units.format.Base subclass or None, optional
The unit format to use when parsing unit attributes. If a
string, must be the name of a unit formatter. The built-in
formats include ``generic``, ``fits``, ``cds``, and
``vounit``. A custom formatter may be provided by passing a
`~astropy.units.format.Base` subclass. If `None` (default),
the unit format to use will be the one specified by the
VOTable specification (which is ``cds`` up to version 1.3 of
VOTable, and ``vounit`` in more recent versions of the spec).
datatype_mapping : dict, optional
A mapping of datatype names (`str`) to valid VOTable datatype names
(str). For example, if the file being read contains the datatype
"unsignedInt" (an invalid datatype in VOTable), include the mapping
``{"unsignedInt": "long"}``.
Returns
-------
votable : `~astropy.io.votable.tree.VOTableFile` object
See Also
--------
astropy.io.votable.exceptions : The exceptions this function may raise.
"""
from . import VERIFY_OPTIONS, conf
invalid = invalid.lower()
if invalid not in ("exception", "mask"):
raise ValueError(
"accepted values of ``invalid`` are: ``'exception'`` or ``'mask'``."
)
if verify is None:
verify = conf.verify
elif verify not in VERIFY_OPTIONS:
raise ValueError(f"verify should be one of {'/'.join(VERIFY_OPTIONS)}")
if datatype_mapping is None:
datatype_mapping = {}
config = {
"columns": columns,
"invalid": invalid,
"verify": verify,
"chunk_size": chunk_size,
"table_number": table_number,
"filename": filename,
"unit_format": unit_format,
"datatype_mapping": datatype_mapping,
}
if isinstance(source, str):
source = os.path.expanduser(source)
if filename is None and isinstance(source, str):
config["filename"] = source
with iterparser.get_xml_iterator(
source, _debug_python_based_parser=_debug_python_based_parser
) as iterator:
return tree.VOTableFile(config=config, pos=(1, 1)).parse(iterator, config)
| parse | Self-Contained |
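A hedged sketch ("catalog.xml" is a placeholder path to a VOTable file on disk):

from astropy.io.votable import parse

votable = parse("catalog.xml", verify="warn")
table = votable.get_first_table()
print(table.array[:5])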
astropy | 72 | astropy/io/fits/convenience.py | def printdiff(inputa, inputb, *args, **kwargs):
"""
Compare two parts of a FITS file, including entire FITS files,
FITS `HDUList` objects and FITS ``HDU`` objects.
Parameters
----------
inputa : str, `HDUList` object, or ``HDU`` object
The filename of a FITS file, `HDUList`, or ``HDU``
object to compare to ``inputb``.
inputb : str, `HDUList` object, or ``HDU`` object
The filename of a FITS file, `HDUList`, or ``HDU``
object to compare to ``inputa``.
ext, extname, extver
Additional positional arguments are for HDU specification if your
inputs are string filenames (will not work if
``inputa`` and ``inputb`` are ``HDU`` objects or `HDUList` objects).
They are flexible and are best illustrated by examples. In addition
to using these arguments positionally you can directly call the
keyword parameters ``ext``, ``extname``.
By HDU number::
printdiff('inA.fits', 'inB.fits', 0) # the primary HDU
printdiff('inA.fits', 'inB.fits', 2) # the second extension HDU
printdiff('inA.fits', 'inB.fits', ext=2) # the second extension HDU
By name, i.e., ``EXTNAME`` value (if unique). ``EXTNAME`` values are
    not case sensitive::
printdiff('inA.fits', 'inB.fits', 'sci')
printdiff('inA.fits', 'inB.fits', extname='sci') # equivalent
By combination of ``EXTNAME`` and ``EXTVER`` as separate
arguments or as a tuple::
printdiff('inA.fits', 'inB.fits', 'sci', 2) # EXTNAME='SCI'
# & EXTVER=2
printdiff('inA.fits', 'inB.fits', extname='sci', extver=2)
# equivalent
printdiff('inA.fits', 'inB.fits', ('sci', 2)) # equivalent
Ambiguous or conflicting specifications will raise an exception::
printdiff('inA.fits', 'inB.fits',
ext=('sci', 1), extname='err', extver=2)
**kwargs
Any additional keyword arguments to be passed to
`~astropy.io.fits.FITSDiff`.
Notes
-----
The primary use for the `printdiff` function is to allow quick print out
of a FITS difference report and will write to ``sys.stdout``.
To save the diff report to a file please use `~astropy.io.fits.FITSDiff`
directly.
"""
| /usr/src/app/target_test_cases/failed_tests_printdiff.txt | def printdiff(inputa, inputb, *args, **kwargs):
"""
Compare two parts of a FITS file, including entire FITS files,
FITS `HDUList` objects and FITS ``HDU`` objects.
Parameters
----------
inputa : str, `HDUList` object, or ``HDU`` object
The filename of a FITS file, `HDUList`, or ``HDU``
object to compare to ``inputb``.
inputb : str, `HDUList` object, or ``HDU`` object
The filename of a FITS file, `HDUList`, or ``HDU``
object to compare to ``inputa``.
ext, extname, extver
Additional positional arguments are for HDU specification if your
inputs are string filenames (will not work if
``inputa`` and ``inputb`` are ``HDU`` objects or `HDUList` objects).
They are flexible and are best illustrated by examples. In addition
to using these arguments positionally you can directly call the
keyword parameters ``ext``, ``extname``.
By HDU number::
printdiff('inA.fits', 'inB.fits', 0) # the primary HDU
printdiff('inA.fits', 'inB.fits', 2) # the second extension HDU
printdiff('inA.fits', 'inB.fits', ext=2) # the second extension HDU
By name, i.e., ``EXTNAME`` value (if unique). ``EXTNAME`` values are
    not case sensitive::
printdiff('inA.fits', 'inB.fits', 'sci')
printdiff('inA.fits', 'inB.fits', extname='sci') # equivalent
By combination of ``EXTNAME`` and ``EXTVER`` as separate
arguments or as a tuple::
printdiff('inA.fits', 'inB.fits', 'sci', 2) # EXTNAME='SCI'
# & EXTVER=2
printdiff('inA.fits', 'inB.fits', extname='sci', extver=2)
# equivalent
printdiff('inA.fits', 'inB.fits', ('sci', 2)) # equivalent
Ambiguous or conflicting specifications will raise an exception::
printdiff('inA.fits', 'inB.fits',
ext=('sci', 1), extname='err', extver=2)
**kwargs
Any additional keyword arguments to be passed to
`~astropy.io.fits.FITSDiff`.
Notes
-----
The primary use for the `printdiff` function is to allow quick print out
of a FITS difference report and will write to ``sys.stdout``.
To save the diff report to a file please use `~astropy.io.fits.FITSDiff`
directly.
"""
# Pop extension keywords
extension = {
key: kwargs.pop(key) for key in ["ext", "extname", "extver"] if key in kwargs
}
has_extensions = args or extension
if isinstance(inputa, str) and has_extensions:
# Use handy _getext to interpret any ext keywords, but
# will need to close a if fails
modea, closeda = _get_file_mode(inputa)
modeb, closedb = _get_file_mode(inputb)
hdulista, extidxa = _getext(inputa, modea, *args, **extension)
# Have to close a if b doesn't make it
try:
hdulistb, extidxb = _getext(inputb, modeb, *args, **extension)
except Exception:
hdulista.close(closed=closeda)
raise
try:
hdua = hdulista[extidxa]
hdub = hdulistb[extidxb]
# See below print for note
print(HDUDiff(hdua, hdub, **kwargs).report())
finally:
hdulista.close(closed=closeda)
hdulistb.close(closed=closedb)
# If input is not a string, can feed HDU objects or HDUList directly,
# but can't currently handle extensions
elif isinstance(inputa, _ValidHDU) and has_extensions:
raise ValueError("Cannot use extension keywords when providing an HDU object.")
elif isinstance(inputa, _ValidHDU) and not has_extensions:
print(HDUDiff(inputa, inputb, **kwargs).report())
elif isinstance(inputa, HDUList) and has_extensions:
raise NotImplementedError(
"Extension specification with HDUList objects not implemented."
)
# This function is EXCLUSIVELY for printing the diff report to screen
# in a one-liner call, hence the use of print instead of logging
else:
print(FITSDiff(inputa, inputb, **kwargs).report())
| printdiff | Repo-Level |
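A hedged sketch (the filenames are placeholders for two FITS files on disk):

from astropy.io import fits

fits.printdiff("inA.fits", "inB.fits")         # whole-file diff to stdout
fits.printdiff("inA.fits", "inB.fits", ext=0)  # primary HDUs only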
astropy | 74 | astropy/stats/sigma_clipping.py | def sigma_clipped_stats(
data: ArrayLike,
mask: NDArray | None = None,
mask_value: float | None = None,
sigma: float | None = 3.0,
sigma_lower: float | None = None,
sigma_upper: float | None = None,
maxiters: int | None = 5,
cenfunc: Literal["median", "mean"] | Callable | None = "median",
stdfunc: Literal["std", "mad_std"] | Callable | None = "std",
std_ddof: int | None = 0,
axis: int | tuple[int, ...] | None = None,
grow: float | Literal[False] | None = False,
) -> tuple[float, float, float]:
"""
Calculate sigma-clipped statistics on the provided data.
Parameters
----------
data : array-like or `~numpy.ma.MaskedArray`
Data array or object that can be converted to an array.
mask : `numpy.ndarray` (bool), optional
A boolean mask with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Masked pixels are excluded when computing the statistics.
mask_value : float, optional
A data value (e.g., ``0.0``) that is ignored when computing the
statistics. ``mask_value`` will be masked in addition to any
input ``mask``.
sigma : float, optional
The number of standard deviations to use for both the lower
and upper clipping limit. These limits are overridden by
``sigma_lower`` and ``sigma_upper``, if input. The default is 3.
sigma_lower : float or None, optional
The number of standard deviations to use as the lower bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
sigma_upper : float or None, optional
The number of standard deviations to use as the upper bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
maxiters : int or None, optional
The maximum number of sigma-clipping iterations to perform or
`None` to clip until convergence is achieved (i.e., iterate
until the last iteration clips nothing). If convergence is
achieved prior to ``maxiters`` iterations, the clipping
iterations will stop. The default is 5.
cenfunc : {'median', 'mean'} or callable, optional
The statistic or callable function/object used to compute
the center value for the clipping. If using a callable
function/object and the ``axis`` keyword is used, then it must
be able to ignore NaNs (e.g., `numpy.nanmean`) and it must have
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'median'``.
stdfunc : {'std', 'mad_std'} or callable, optional
The statistic or callable function/object used to compute the
standard deviation about the center value. If using a callable
function/object and the ``axis`` keyword is used, then it must
be able to ignore NaNs (e.g., `numpy.nanstd`) and it must have
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'std'``.
std_ddof : int, optional
The delta degrees of freedom for the standard deviation
calculation. The divisor used in the calculation is ``N -
std_ddof``, where ``N`` represents the number of elements. The
default is 0.
axis : None or int or tuple of int, optional
The axis or axes along which to sigma clip the data. If `None`,
then the flattened data will be used. ``axis`` is passed to the
``cenfunc`` and ``stdfunc``. The default is `None`.
grow : float or `False`, optional
Radius within which to mask the neighbouring pixels of those
that fall outwith the clipping limits (only applied along
``axis``, if specified). As an example, for a 2D image a value
of 1 will mask the nearest pixels in a cross pattern around each
deviant pixel, while 1.5 will also reject the nearest diagonal
neighbours and so on.
Notes
-----
The best performance will typically be obtained by setting
``cenfunc`` and ``stdfunc`` to one of the built-in functions
specified as a string. If one of the options is set to a string
while the other has a custom callable, you may in some cases see
better performance if you have the `bottleneck`_ package installed.
.. _bottleneck: https://github.com/pydata/bottleneck
Returns
-------
mean, median, stddev : float
The mean, median, and standard deviation of the sigma-clipped
data.
See Also
--------
SigmaClip, sigma_clip
"""
| /usr/src/app/target_test_cases/failed_tests_sigma_clipped_stats.txt | def sigma_clipped_stats(
data: ArrayLike,
mask: NDArray | None = None,
mask_value: float | None = None,
sigma: float | None = 3.0,
sigma_lower: float | None = None,
sigma_upper: float | None = None,
maxiters: int | None = 5,
cenfunc: Literal["median", "mean"] | Callable | None = "median",
stdfunc: Literal["std", "mad_std"] | Callable | None = "std",
std_ddof: int | None = 0,
axis: int | tuple[int, ...] | None = None,
grow: float | Literal[False] | None = False,
) -> tuple[float, float, float]:
"""
Calculate sigma-clipped statistics on the provided data.
Parameters
----------
data : array-like or `~numpy.ma.MaskedArray`
Data array or object that can be converted to an array.
mask : `numpy.ndarray` (bool), optional
A boolean mask with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Masked pixels are excluded when computing the statistics.
mask_value : float, optional
A data value (e.g., ``0.0``) that is ignored when computing the
statistics. ``mask_value`` will be masked in addition to any
input ``mask``.
sigma : float, optional
The number of standard deviations to use for both the lower
and upper clipping limit. These limits are overridden by
``sigma_lower`` and ``sigma_upper``, if input. The default is 3.
sigma_lower : float or None, optional
The number of standard deviations to use as the lower bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
sigma_upper : float or None, optional
The number of standard deviations to use as the upper bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
maxiters : int or None, optional
The maximum number of sigma-clipping iterations to perform or
`None` to clip until convergence is achieved (i.e., iterate
until the last iteration clips nothing). If convergence is
achieved prior to ``maxiters`` iterations, the clipping
iterations will stop. The default is 5.
cenfunc : {'median', 'mean'} or callable, optional
The statistic or callable function/object used to compute
the center value for the clipping. If using a callable
function/object and the ``axis`` keyword is used, then it must
be able to ignore NaNs (e.g., `numpy.nanmean`) and it must have
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'median'``.
stdfunc : {'std', 'mad_std'} or callable, optional
The statistic or callable function/object used to compute the
standard deviation about the center value. If using a callable
function/object and the ``axis`` keyword is used, then it must
be able to ignore NaNs (e.g., `numpy.nanstd`) and it must have
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'std'``.
std_ddof : int, optional
The delta degrees of freedom for the standard deviation
calculation. The divisor used in the calculation is ``N -
std_ddof``, where ``N`` represents the number of elements. The
default is 0.
axis : None or int or tuple of int, optional
The axis or axes along which to sigma clip the data. If `None`,
then the flattened data will be used. ``axis`` is passed to the
``cenfunc`` and ``stdfunc``. The default is `None`.
grow : float or `False`, optional
Radius within which to mask the neighbouring pixels of those
that fall outwith the clipping limits (only applied along
``axis``, if specified). As an example, for a 2D image a value
of 1 will mask the nearest pixels in a cross pattern around each
deviant pixel, while 1.5 will also reject the nearest diagonal
neighbours and so on.
Notes
-----
The best performance will typically be obtained by setting
``cenfunc`` and ``stdfunc`` to one of the built-in functions
specified as a string. If one of the options is set to a string
while the other has a custom callable, you may in some cases see
better performance if you have the `bottleneck`_ package installed.
.. _bottleneck: https://github.com/pydata/bottleneck
Returns
-------
mean, median, stddev : float
The mean, median, and standard deviation of the sigma-clipped
data.
See Also
--------
SigmaClip, sigma_clip
"""
if mask is not None:
data = np.ma.MaskedArray(data, mask)
if mask_value is not None:
data = np.ma.masked_values(data, mask_value)
if isinstance(data, np.ma.MaskedArray) and data.mask.all():
return np.ma.masked, np.ma.masked, np.ma.masked
sigclip = SigmaClip(
sigma=sigma,
sigma_lower=sigma_lower,
sigma_upper=sigma_upper,
maxiters=maxiters,
cenfunc=cenfunc,
stdfunc=stdfunc,
grow=grow,
)
data_clipped = sigclip(
data, axis=axis, masked=False, return_bounds=False, copy=True
)
mean = nanmean(data_clipped, axis=axis)
median = nanmedian(data_clipped, axis=axis)
std = nanstd(data_clipped, ddof=std_ddof, axis=axis)
return mean, median, std
| sigma_clipped_stats | Self-Contained |
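A minimal usage sketch for the row above (illustrative, not part of the dataset), assuming astropy and numpy are installed; the injected outlier values are arbitrary:

import numpy as np
from astropy.stats import sigma_clipped_stats

rng = np.random.default_rng(42)
data = rng.normal(loc=10.0, scale=2.0, size=1000)
data[:10] = 1e4  # inject a few strong outliers

# Values beyond 3 sigma are iteratively clipped before the statistics
# are computed, so the results stay close to loc=10.0 and scale=2.0.
mean, median, std = sigma_clipped_stats(data, sigma=3.0, maxiters=5)
print(mean, median, std)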
astropy | 77 | astropy/io/fits/convenience.py | def table_to_hdu(table, character_as_bytes=False):
"""
Convert an `~astropy.table.Table` object to a FITS
`~astropy.io.fits.BinTableHDU`.
Parameters
----------
table : astropy.table.Table
The table to convert.
character_as_bytes : bool
Whether to return bytes for string columns when accessed from the HDU.
By default this is `False` and (unicode) strings are returned, but for
large tables this may use up a lot of memory.
Returns
-------
table_hdu : `~astropy.io.fits.BinTableHDU`
The FITS binary table HDU.
"""
| /usr/src/app/target_test_cases/failed_tests_table_to_hdu.txt | def table_to_hdu(table, character_as_bytes=False):
"""
Convert an `~astropy.table.Table` object to a FITS
`~astropy.io.fits.BinTableHDU`.
Parameters
----------
table : astropy.table.Table
The table to convert.
character_as_bytes : bool
Whether to return bytes for string columns when accessed from the HDU.
By default this is `False` and (unicode) strings are returned, but for
large tables this may use up a lot of memory.
Returns
-------
table_hdu : `~astropy.io.fits.BinTableHDU`
The FITS binary table HDU.
"""
# Avoid circular imports
from .column import python_to_tdisp
from .connect import REMOVE_KEYWORDS, is_column_keyword
# Header to store Time related metadata
hdr = None
# Not all tables with mixin columns are supported
if table.has_mixin_columns:
# Import is done here, in order to avoid it at build time as erfa is not
# yet available then.
from astropy.table.column import BaseColumn
from astropy.time import Time
from astropy.units import Quantity
from .fitstime import time_to_fits
# Only those columns which are instances of BaseColumn, Quantity or Time can
# be written
unsupported_cols = table.columns.not_isinstance((BaseColumn, Quantity, Time))
if unsupported_cols:
unsupported_names = [col.info.name for col in unsupported_cols]
raise ValueError(
f"cannot write table with mixin column(s) {unsupported_names}"
)
time_cols = table.columns.isinstance(Time)
if time_cols:
table, hdr = time_to_fits(table)
# Create a new HDU object
tarray = table.as_array()
if isinstance(tarray, np.ma.MaskedArray):
# Fill masked values carefully:
# float column's default mask value needs to be NaN and
# string column's default mask should be an empty string.
# Note: getting the fill value for the structured array is
# more reliable than for individual columns for string entries.
# (no 'N/A' for a single-element string, where it should be 'N').
default_fill_value = np.ma.default_fill_value(tarray.dtype)
for colname, (coldtype, _) in tarray.dtype.fields.items():
if np.all(tarray.fill_value[colname] == default_fill_value[colname]):
# Since multi-element columns with dtypes such as '2f8' have
# a subdtype, we should look up the type of column on that.
coltype = (
coldtype.subdtype[0].type if coldtype.subdtype else coldtype.type
)
if issubclass(coltype, np.complexfloating):
tarray.fill_value[colname] = complex(np.nan, np.nan)
elif issubclass(coltype, np.inexact):
tarray.fill_value[colname] = np.nan
elif issubclass(coltype, np.character):
tarray.fill_value[colname] = ""
# TODO: it might be better to construct the FITS table directly from
# the Table columns, rather than go via a structured array.
table_hdu = BinTableHDU.from_columns(
tarray.filled(), header=hdr, character_as_bytes=character_as_bytes
)
for col in table_hdu.columns:
# Binary FITS tables support TNULL *only* for integer data columns
# TODO: Determine a schema for handling non-integer masked columns
# with non-default fill values in FITS (if at all possible).
# Be careful that we do not set null for columns that were not masked!
int_formats = ("B", "I", "J", "K")
if (
col.format in int_formats or col.format.p_format in int_formats
) and hasattr(table[col.name], "mask"):
fill_value = tarray[col.name].fill_value
col.null = fill_value.astype(int)
else:
table_hdu = BinTableHDU.from_columns(
tarray, header=hdr, character_as_bytes=character_as_bytes
)
# Set units and format display for output HDU
for col in table_hdu.columns:
if table[col.name].info.format is not None:
# check for boolean types, special format case
logical = table[col.name].info.dtype == bool
tdisp_format = python_to_tdisp(
table[col.name].info.format, logical_dtype=logical
)
if tdisp_format is not None:
col.disp = tdisp_format
unit = table[col.name].unit
if unit is not None:
# Local imports to avoid importing units when it is not required,
# e.g. for command-line scripts
from astropy.units import Unit
from astropy.units.format.fits import UnitScaleError
try:
col.unit = unit.to_string(format="fits")
except UnitScaleError:
scale = unit.scale
raise UnitScaleError(
f"The column '{col.name}' could not be stored in FITS "
f"format because it has a scale '({str(scale)})' that "
"is not recognized by the FITS standard. Either scale "
"the data or change the units."
)
except ValueError:
# Warn that the unit is lost, but let the details depend on
# whether the column was serialized (because it was a
# quantity), since then the unit can be recovered by astropy.
warning = (
f"The unit '{unit.to_string()}' could not be saved in "
"native FITS format "
)
if any(
"SerializedColumn" in item and "name: " + col.name in item
for item in table.meta.get("comments", [])
):
warning += (
"and hence will be lost to non-astropy fits readers. "
"Within astropy, the unit can roundtrip using QTable, "
"though one has to enable the unit before reading."
)
else:
warning += (
"and cannot be recovered in reading. It can roundtrip "
"within astropy by using QTable both to write and read "
"back, though one has to enable the unit before reading."
)
warnings.warn(warning, AstropyUserWarning)
else:
# Try creating a Unit to issue a warning if the unit is not
# FITS compliant
Unit(col.unit, format="fits", parse_strict="warn")
# Column-specific override keywords for coordinate columns
coord_meta = table.meta.pop("__coordinate_columns__", {})
for col_name, col_info in coord_meta.items():
col = table_hdu.columns[col_name]
# Set the column coordinate attributes from data saved earlier.
# Note: have to set these, even if we have no data.
for attr in "coord_type", "coord_unit":
setattr(col, attr, col_info.get(attr, None))
trpos = col_info.get("time_ref_pos", None)
if trpos is not None:
col.time_ref_pos = trpos
for key, value in table.meta.items():
if is_column_keyword(key.upper()) or key.upper() in REMOVE_KEYWORDS:
warnings.warn(
f"Meta-data keyword {key} will be ignored since it conflicts "
"with a FITS reserved keyword",
AstropyUserWarning,
)
continue
# Convert to FITS format
if key == "comments":
key = "comment"
if isinstance(value, list):
for item in value:
try:
table_hdu.header.append((key, item))
except ValueError:
warnings.warn(
f"Attribute `{key}` of type {type(value)} cannot be "
"added to FITS Header - skipping",
AstropyUserWarning,
)
else:
try:
table_hdu.header[key] = value
except ValueError:
warnings.warn(
f"Attribute `{key}` of type {type(value)} cannot be "
"added to FITS Header - skipping",
AstropyUserWarning,
)
return table_hdu
| table_to_hdu | Repo-Level |
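A short sketch of how the function above is typically called (illustrative, not part of the dataset row); the column names and the 'Jy' unit are assumptions:

import numpy as np
from astropy.table import Table
from astropy.io.fits import table_to_hdu

t = Table({"flux": np.array([1.0, 2.5, 3.0]), "name": ["a", "b", "c"]})
t["flux"].unit = "Jy"  # serialized as the TUNITn keyword

hdu = table_to_hdu(t)
print(hdu.columns)  # FITS column definitions for 'flux' and 'name'
hdu.writeto("catalog.fits", overwrite=True)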
astropy | 78 | astropy/io/fits/fitstime.py | def time_to_fits(table):
"""
Replace Time columns in a Table with non-mixin columns containing
each element as a vector of two doubles (jd1, jd2) and return a FITS
header with appropriate time coordinate keywords.
jd = jd1 + jd2 represents time in the Julian Date format with
high-precision.
Parameters
----------
table : `~astropy.table.Table`
The table whose Time columns are to be replaced.
Returns
-------
table : `~astropy.table.Table`
The table with replaced Time columns
hdr : `~astropy.io.fits.header.Header`
Header containing global time reference frame FITS keywords
"""
| /usr/src/app/target_test_cases/failed_tests_time_to_fits.txt | def time_to_fits(table):
"""
Replace Time columns in a Table with non-mixin columns containing
each element as a vector of two doubles (jd1, jd2) and return a FITS
header with appropriate time coordinate keywords.
jd = jd1 + jd2 represents time in the Julian Date format with
high-precision.
Parameters
----------
table : `~astropy.table.Table`
The table whose Time columns are to be replaced.
Returns
-------
table : `~astropy.table.Table`
The table with replaced Time columns
hdr : `~astropy.io.fits.header.Header`
Header containing global time reference frame FITS keywords
"""
# Make a light copy of table (to the extent possible) and clear any indices along
# the way. Indices are not serialized and cause problems later, but they are not
# needed here so just drop. For Column subclasses take advantage of copy() method,
# but for others it is required to actually copy the data if there are attached
# indices. See #8077 and #9009 for further discussion.
new_cols = []
for col in table.itercols():
if isinstance(col, Column):
new_col = col.copy(copy_data=False) # Also drops any indices
else:
new_col = col_copy(col, copy_indices=False) if col.info.indices else col
new_cols.append(new_col)
newtable = table.__class__(new_cols, copy=False)
newtable.meta = table.meta
# Global time coordinate frame keywords
hdr = Header(
[
Card(keyword=key, value=val[0], comment=val[1])
for key, val in GLOBAL_TIME_INFO.items()
]
)
# Store coordinate column-specific metadata
newtable.meta["__coordinate_columns__"] = defaultdict(OrderedDict)
coord_meta = newtable.meta["__coordinate_columns__"]
time_cols = table.columns.isinstance(Time)
# Geocentric location
location = None
for col in time_cols:
# By default, Time objects are written in full precision, i.e. we store both
# jd1 and jd2 (serialize_method['fits'] = 'jd1_jd2'). Formatted values for
# Time can be stored if the user explicitly chooses to do so.
col_cls = MaskedColumn if col.masked else Column
if col.info.serialize_method["fits"] == "formatted_value":
newtable.replace_column(col.info.name, col_cls(col.value))
continue
# The following is necessary to deal with multi-dimensional ``Time`` objects
# (i.e. where Time.shape is non-trivial).
# Note: easier would be np.stack([col.jd1, col.jd2], axis=-1), but that
# fails for np.ma.MaskedArray, as it returns just the data, ignoring the mask.
jd12 = np.empty_like(col.jd1, shape=col.jd1.shape + (2,))
jd12[..., 0] = col.jd1
jd12[..., 1] = col.jd2
newtable.replace_column(col.info.name, col_cls(jd12, unit="d"))
# Time column-specific override keywords
coord_meta[col.info.name]["coord_type"] = col.scale.upper()
coord_meta[col.info.name]["coord_unit"] = "d"
# Time column reference position
if col.location is None:
coord_meta[col.info.name]["time_ref_pos"] = None
if location is not None:
warnings.warn(
(
f'Time Column "{col.info.name}" has no specified location, '
"but global Time Position is present, which will be the "
"default for this column in FITS specification."
),
AstropyUserWarning,
)
else:
coord_meta[col.info.name]["time_ref_pos"] = "TOPOCENTER"
# Compatibility of Time Scales and Reference Positions
if col.scale in BARYCENTRIC_SCALES:
warnings.warn(
(
f'Earth Location "TOPOCENTER" for Time Column "{col.info.name}" '
f'is incompatible with scale "{col.scale.upper()}".'
),
AstropyUserWarning,
)
if location is None:
# Set global geocentric location
location = col.location
if location.size > 1:
for dim in ("x", "y", "z"):
newtable.add_column(
Column(getattr(location, dim).to_value(u.m)),
name=f"OBSGEO-{dim.upper()}",
)
else:
hdr.extend(
[
Card(
keyword=f"OBSGEO-{dim.upper()}",
value=getattr(location, dim).to_value(u.m),
)
for dim in ("x", "y", "z")
]
)
elif np.any(location != col.location):
raise ValueError(
"Multiple Time Columns with different geocentric "
f"observatory locations ({location}, {col.location}) encountered."
"This is not supported by the FITS standard."
)
return newtable, hdr
| time_to_fits | Self-Contained |
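An illustrative sketch of the jd1/jd2 replacement described above; time_to_fits is an internal helper normally invoked via table_to_hdu, and the column name here is an assumption:

from astropy.table import Table
from astropy.time import Time
from astropy.io.fits.fitstime import time_to_fits

t = Table({"obs": Time(["2020-01-01T00:00:00", "2020-01-02T00:00:00"])})
newtable, hdr = time_to_fits(t)

print(newtable["obs"].shape)  # (2, 2): each element stored as (jd1, jd2)
print(newtable["obs"].unit)   # 'd' (Julian Date pairs)
print(repr(hdr))              # global time reference keywords, e.g. TIMESYS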
flask | 7 | src/flask/app.py | def make_response(self, rv: ft.ResponseReturnValue) -> Response:
"""Convert the return value from a view function to an instance of
:attr:`response_class`.
:param rv: the return value from the view function. The view function
must return a response. Returning ``None``, or the view ending
without returning, is not allowed. The following types are allowed
for ``view_rv``:
``str``
A response object is created with the string encoded to UTF-8
as the body.
``bytes``
A response object is created with the bytes as the body.
``dict``
A dictionary that will be jsonify'd before being returned.
``list``
A list that will be jsonify'd before being returned.
``generator`` or ``iterator``
A generator that returns ``str`` or ``bytes`` to be
streamed as the response.
``tuple``
Either ``(body, status, headers)``, ``(body, status)``, or
``(body, headers)``, where ``body`` is any of the other types
allowed here, ``status`` is a string or an integer, and
``headers`` is a dictionary or a list of ``(key, value)``
tuples. If ``body`` is a :attr:`response_class` instance,
``status`` overwrites the existing value and ``headers`` are
extended.
:attr:`response_class`
The object is returned unchanged.
other :class:`~werkzeug.wrappers.Response` class
The object is coerced to :attr:`response_class`.
:func:`callable`
The function is called as a WSGI application. The result is
used to create a response object.
.. versionchanged:: 2.2
A generator will be converted to a streaming response.
A list will be converted to a JSON response.
.. versionchanged:: 1.1
A dict will be converted to a JSON response.
.. versionchanged:: 0.9
Previously a tuple was interpreted as the arguments for the
response object.
"""
| /usr/src/app/target_test_cases/failed_tests_app.Flask.make_response.txt | def make_response(self, rv: ft.ResponseReturnValue) -> Response:
"""Convert the return value from a view function to an instance of
:attr:`response_class`.
:param rv: the return value from the view function. The view function
must return a response. Returning ``None``, or the view ending
without returning, is not allowed. The following types are allowed
for ``view_rv``:
``str``
A response object is created with the string encoded to UTF-8
as the body.
``bytes``
A response object is created with the bytes as the body.
``dict``
A dictionary that will be jsonify'd before being returned.
``list``
A list that will be jsonify'd before being returned.
``generator`` or ``iterator``
A generator that returns ``str`` or ``bytes`` to be
streamed as the response.
``tuple``
Either ``(body, status, headers)``, ``(body, status)``, or
``(body, headers)``, where ``body`` is any of the other types
allowed here, ``status`` is a string or an integer, and
``headers`` is a dictionary or a list of ``(key, value)``
tuples. If ``body`` is a :attr:`response_class` instance,
``status`` overwrites the existing value and ``headers`` are
extended.
:attr:`response_class`
The object is returned unchanged.
other :class:`~werkzeug.wrappers.Response` class
The object is coerced to :attr:`response_class`.
:func:`callable`
The function is called as a WSGI application. The result is
used to create a response object.
.. versionchanged:: 2.2
A generator will be converted to a streaming response.
A list will be converted to a JSON response.
.. versionchanged:: 1.1
A dict will be converted to a JSON response.
.. versionchanged:: 0.9
Previously a tuple was interpreted as the arguments for the
response object.
"""
status = headers = None
# unpack tuple returns
if isinstance(rv, tuple):
len_rv = len(rv)
# a 3-tuple is unpacked directly
if len_rv == 3:
rv, status, headers = rv # type: ignore[misc]
# decide if a 2-tuple has status or headers
elif len_rv == 2:
if isinstance(rv[1], (Headers, dict, tuple, list)):
rv, headers = rv
else:
rv, status = rv # type: ignore[assignment,misc]
# other sized tuples are not allowed
else:
raise TypeError(
"The view function did not return a valid response tuple."
" The tuple must have the form (body, status, headers),"
" (body, status), or (body, headers)."
)
# the body must not be None
if rv is None:
raise TypeError(
f"The view function for {request.endpoint!r} did not"
" return a valid response. The function either returned"
" None or ended without a return statement."
)
# make sure the body is an instance of the response class
if not isinstance(rv, self.response_class):
if isinstance(rv, (str, bytes, bytearray)) or isinstance(rv, cabc.Iterator):
# let the response class set the status and headers instead of
# waiting to do it manually, so that the class can handle any
# special logic
rv = self.response_class(
rv,
status=status,
headers=headers, # type: ignore[arg-type]
)
status = headers = None
elif isinstance(rv, (dict, list)):
rv = self.json.response(rv)
elif isinstance(rv, BaseResponse) or callable(rv):
# evaluate a WSGI callable, or coerce a different response
# class to the correct type
try:
rv = self.response_class.force_type(
rv, # type: ignore[arg-type]
request.environ,
)
except TypeError as e:
raise TypeError(
f"{e}\nThe view function did not return a valid"
" response. The return type must be a string,"
" dict, list, tuple with headers or status,"
" Response instance, or WSGI callable, but it"
f" was a {type(rv).__name__}."
).with_traceback(sys.exc_info()[2]) from None
else:
raise TypeError(
"The view function did not return a valid"
" response. The return type must be a string,"
" dict, list, tuple with headers or status,"
" Response instance, or WSGI callable, but it was a"
f" {type(rv).__name__}."
)
rv = t.cast(Response, rv)
# prefer the status if it was provided
if status is not None:
if isinstance(status, (str, bytes, bytearray)):
rv.status = status
else:
rv.status_code = status
# extend existing headers with provided headers
if headers:
rv.headers.update(headers) # type: ignore[arg-type]
return rv
| app.Flask.make_response | Self-Contained |
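A small sketch exercising the tuple form described above (illustrative; the header name is an assumption). The dict body becomes a JSON response, and the status and headers from the tuple are applied on top:

from flask import Flask

app = Flask(__name__)

with app.test_request_context("/"):
    resp = app.make_response(({"name": "ada"}, 201, {"X-Demo": "1"}))
    print(resp.status_code)        # 201
    print(resp.headers["X-Demo"])  # 1
    print(resp.get_json())         # {'name': 'ada'}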
flask | 10 | src/flask/app.py | def run(
self,
host: str | None = None,
port: int | None = None,
debug: bool | None = None,
load_dotenv: bool = True,
**options: t.Any,
) -> None:
"""Runs the application on a local development server.
Do not use ``run()`` in a production setting. It is not intended to
meet security and performance requirements for a production server.
Instead, see :doc:`/deploying/index` for WSGI server recommendations.
If the :attr:`debug` flag is set the server will automatically reload
for code changes and show a debugger in case an exception happened.
If you want to run the application in debug mode, but disable the
code execution on the interactive debugger, you can pass
``use_evalex=False`` as parameter. This will keep the debugger's
traceback screen active, but disable code execution.
It is not recommended to use this function for development with
automatic reloading as this is badly supported. Instead you should
be using the :command:`flask` command line script's ``run`` support.
.. admonition:: Keep in Mind
Flask will suppress any server error with a generic error page
unless it is in debug mode. As such to enable just the
interactive debugger without the code reloading, you have to
invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.
Setting ``use_debugger`` to ``True`` without being in debug mode
won't catch any exceptions because there won't be any to
catch.
:param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to
have the server available externally as well. Defaults to
``'127.0.0.1'`` or the host in the ``SERVER_NAME`` config variable
if present.
:param port: the port of the webserver. Defaults to ``5000`` or the
port defined in the ``SERVER_NAME`` config variable if present.
:param debug: if given, enable or disable debug mode. See
:attr:`debug`.
:param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`
files to set environment variables. Will also change the working
directory to the directory containing the first file found.
:param options: the options to be forwarded to the underlying Werkzeug
server. See :func:`werkzeug.serving.run_simple` for more
information.
.. versionchanged:: 1.0
If installed, python-dotenv will be used to load environment
variables from :file:`.env` and :file:`.flaskenv` files.
The :envvar:`FLASK_DEBUG` environment variable will override :attr:`debug`.
Threaded mode is enabled by default.
.. versionchanged:: 0.10
The default port is now picked from the ``SERVER_NAME``
variable.
"""
| /usr/src/app/target_test_cases/failed_tests_app.Flask.run.txt | def run(
self,
host: str | None = None,
port: int | None = None,
debug: bool | None = None,
load_dotenv: bool = True,
**options: t.Any,
) -> None:
"""Runs the application on a local development server.
Do not use ``run()`` in a production setting. It is not intended to
meet security and performance requirements for a production server.
Instead, see :doc:`/deploying/index` for WSGI server recommendations.
If the :attr:`debug` flag is set the server will automatically reload
for code changes and show a debugger in case an exception happened.
If you want to run the application in debug mode, but disable the
code execution on the interactive debugger, you can pass
``use_evalex=False`` as parameter. This will keep the debugger's
traceback screen active, but disable code execution.
It is not recommended to use this function for development with
automatic reloading as this is badly supported. Instead you should
be using the :command:`flask` command line script's ``run`` support.
.. admonition:: Keep in Mind
Flask will suppress any server error with a generic error page
unless it is in debug mode. As such to enable just the
interactive debugger without the code reloading, you have to
invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.
Setting ``use_debugger`` to ``True`` without being in debug mode
won't catch any exceptions because there won't be any to
catch.
:param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to
have the server available externally as well. Defaults to
``'127.0.0.1'`` or the host in the ``SERVER_NAME`` config variable
if present.
:param port: the port of the webserver. Defaults to ``5000`` or the
port defined in the ``SERVER_NAME`` config variable if present.
:param debug: if given, enable or disable debug mode. See
:attr:`debug`.
:param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`
files to set environment variables. Will also change the working
directory to the directory containing the first file found.
:param options: the options to be forwarded to the underlying Werkzeug
server. See :func:`werkzeug.serving.run_simple` for more
information.
.. versionchanged:: 1.0
If installed, python-dotenv will be used to load environment
variables from :file:`.env` and :file:`.flaskenv` files.
The :envvar:`FLASK_DEBUG` environment variable will override :attr:`debug`.
Threaded mode is enabled by default.
.. versionchanged:: 0.10
The default port is now picked from the ``SERVER_NAME``
variable.
"""
# Ignore this call so that it doesn't start another server if
# the 'flask run' command is used.
if os.environ.get("FLASK_RUN_FROM_CLI") == "true":
if not is_running_from_reloader():
click.secho(
" * Ignoring a call to 'app.run()' that would block"
" the current 'flask' CLI command.\n"
" Only call 'app.run()' in an 'if __name__ =="
' "__main__"\' guard.',
fg="red",
)
return
if get_load_dotenv(load_dotenv):
cli.load_dotenv()
# if set, env var overrides existing value
if "FLASK_DEBUG" in os.environ:
self.debug = get_debug_flag()
# debug passed to method overrides all other sources
if debug is not None:
self.debug = bool(debug)
server_name = self.config.get("SERVER_NAME")
sn_host = sn_port = None
if server_name:
sn_host, _, sn_port = server_name.partition(":")
if not host:
if sn_host:
host = sn_host
else:
host = "127.0.0.1"
if port or port == 0:
port = int(port)
elif sn_port:
port = int(sn_port)
else:
port = 5000
options.setdefault("use_reloader", self.debug)
options.setdefault("use_debugger", self.debug)
options.setdefault("threaded", True)
cli.show_server_banner(self.debug, self.name)
from werkzeug.serving import run_simple
try:
run_simple(t.cast(str, host), port, self, **options)
finally:
# reset the first request information if the development server
# exited normally. This makes it possible to restart the server
# without the reloader from an interactive shell.
self._got_first_request = False
| app.Flask.run | Repo-Level |
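A minimal development-server sketch following the docstring's guidance (illustrative only; use a production WSGI server for deployment):

from flask import Flask

app = Flask(__name__)

@app.route("/")
def index():
    return "hello"

if __name__ == "__main__":
    # host/port fall back to 127.0.0.1:5000 (or SERVER_NAME) when omitted;
    # debug=True turns on the reloader and the interactive debugger.
    app.run(host="127.0.0.1", port=5000, debug=True)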
flask | 13 | src/flask/app.py | def url_for(
self,
/,
endpoint: str,
*,
_anchor: str | None = None,
_method: str | None = None,
_scheme: str | None = None,
_external: bool | None = None,
**values: t.Any,
) -> str:
"""Generate a URL to the given endpoint with the given values.
This is called by :func:`flask.url_for`, and can be called
directly as well.
An *endpoint* is the name of a URL rule, usually added with
:meth:`@app.route() <route>`, and usually the same name as the
view function. A route defined in a :class:`~flask.Blueprint`
will prepend the blueprint's name separated by a ``.`` to the
endpoint.
In some cases, such as email messages, you want URLs to include
the scheme and domain, like ``https://example.com/hello``. When
not in an active request, URLs will be external by default, but
this requires setting :data:`SERVER_NAME` so Flask knows what
domain to use. :data:`APPLICATION_ROOT` and
:data:`PREFERRED_URL_SCHEME` should also be configured as
needed. This config is only used when not in an active request.
Functions can be decorated with :meth:`url_defaults` to modify
keyword arguments before the URL is built.
If building fails for some reason, such as an unknown endpoint
or incorrect values, the app's :meth:`handle_url_build_error`
method is called. If that returns a string, that is returned,
otherwise a :exc:`~werkzeug.routing.BuildError` is raised.
:param endpoint: The endpoint name associated with the URL to
generate. If this starts with a ``.``, the current blueprint
name (if any) will be used.
:param _anchor: If given, append this as ``#anchor`` to the URL.
:param _method: If given, generate the URL associated with this
method for the endpoint.
:param _scheme: If given, the URL will have this scheme if it
is external.
:param _external: If given, prefer the URL to be internal
(False) or require it to be external (True). External URLs
include the scheme and domain. When not in an active
request, URLs are external by default.
:param values: Values to use for the variable parts of the URL
rule. Unknown keys are appended as query string arguments,
like ``?a=b&c=d``.
.. versionadded:: 2.2
Moved from ``flask.url_for``, which calls this method.
"""
| /usr/src/app/target_test_cases/failed_tests_app.Flask.url_for.txt | def url_for(
self,
/,
endpoint: str,
*,
_anchor: str | None = None,
_method: str | None = None,
_scheme: str | None = None,
_external: bool | None = None,
**values: t.Any,
) -> str:
"""Generate a URL to the given endpoint with the given values.
This is called by :func:`flask.url_for`, and can be called
directly as well.
An *endpoint* is the name of a URL rule, usually added with
:meth:`@app.route() <route>`, and usually the same name as the
view function. A route defined in a :class:`~flask.Blueprint`
will prepend the blueprint's name separated by a ``.`` to the
endpoint.
In some cases, such as email messages, you want URLs to include
the scheme and domain, like ``https://example.com/hello``. When
not in an active request, URLs will be external by default, but
this requires setting :data:`SERVER_NAME` so Flask knows what
domain to use. :data:`APPLICATION_ROOT` and
:data:`PREFERRED_URL_SCHEME` should also be configured as
needed. This config is only used when not in an active request.
Functions can be decorated with :meth:`url_defaults` to modify
keyword arguments before the URL is built.
If building fails for some reason, such as an unknown endpoint
or incorrect values, the app's :meth:`handle_url_build_error`
method is called. If that returns a string, that is returned,
otherwise a :exc:`~werkzeug.routing.BuildError` is raised.
:param endpoint: The endpoint name associated with the URL to
generate. If this starts with a ``.``, the current blueprint
name (if any) will be used.
:param _anchor: If given, append this as ``#anchor`` to the URL.
:param _method: If given, generate the URL associated with this
method for the endpoint.
:param _scheme: If given, the URL will have this scheme if it
is external.
:param _external: If given, prefer the URL to be internal
(False) or require it to be external (True). External URLs
include the scheme and domain. When not in an active
request, URLs are external by default.
:param values: Values to use for the variable parts of the URL
rule. Unknown keys are appended as query string arguments,
like ``?a=b&c=d``.
.. versionadded:: 2.2
Moved from ``flask.url_for``, which calls this method.
"""
req_ctx = _cv_request.get(None)
if req_ctx is not None:
url_adapter = req_ctx.url_adapter
blueprint_name = req_ctx.request.blueprint
# If the endpoint starts with "." and the request matches a
# blueprint, the endpoint is relative to the blueprint.
if endpoint[:1] == ".":
if blueprint_name is not None:
endpoint = f"{blueprint_name}{endpoint}"
else:
endpoint = endpoint[1:]
# When in a request, generate a URL without scheme and
# domain by default, unless a scheme is given.
if _external is None:
_external = _scheme is not None
else:
app_ctx = _cv_app.get(None)
# If called by helpers.url_for, an app context is active,
# use its url_adapter. Otherwise, app.url_for was called
# directly, build an adapter.
if app_ctx is not None:
url_adapter = app_ctx.url_adapter
else:
url_adapter = self.create_url_adapter(None)
if url_adapter is None:
raise RuntimeError(
"Unable to build URLs outside an active request"
" without 'SERVER_NAME' configured. Also configure"
" 'APPLICATION_ROOT' and 'PREFERRED_URL_SCHEME' as"
" needed."
)
# When outside a request, generate a URL with scheme and
# domain by default.
if _external is None:
_external = True
# It is an error to set _scheme when _external=False, in order
# to avoid accidental insecure URLs.
if _scheme is not None and not _external:
raise ValueError("When specifying '_scheme', '_external' must be True.")
self.inject_url_defaults(endpoint, values)
try:
rv = url_adapter.build( # type: ignore[union-attr]
endpoint,
values,
method=_method,
url_scheme=_scheme,
force_external=_external,
)
except BuildError as error:
values.update(
_anchor=_anchor, _method=_method, _scheme=_scheme, _external=_external
)
return self.handle_url_build_error(error, endpoint, values)
if _anchor is not None:
_anchor = _url_quote(_anchor, safe="%!#$&'()*+,/:;=?@")
rv = f"{rv}#{_anchor}"
return rv
| app.Flask.url_for | File-Level |
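A sketch of the two modes described above (illustrative; the endpoint and domain are assumptions). Inside a request, URLs are internal by default; outside one they are external and need SERVER_NAME:

from flask import Flask

app = Flask(__name__)

@app.route("/user/<name>")
def profile(name):
    return name

with app.test_request_context("/"):
    # unknown kwargs become the query string, _anchor the fragment
    print(app.url_for("profile", name="ada", page=2, _anchor="top"))
    # -> /user/ada?page=2#top

app.config["SERVER_NAME"] = "example.com"
with app.app_context():
    print(app.url_for("profile", name="ada"))
    # -> http://example.com/user/ada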
more-itertools | 13 | more_itertools/more.py | def distinct_permutations(iterable, r=None):
"""Yield successive distinct permutations of the elements in *iterable*.
>>> sorted(distinct_permutations([1, 0, 1]))
[(0, 1, 1), (1, 0, 1), (1, 1, 0)]
Equivalent to yielding from ``set(permutations(iterable))``, except
duplicates are not generated and thrown away. For larger input sequences
this is much more efficient.
Duplicate permutations arise when there are duplicated elements in the
input iterable. The number of items returned is
`n! / (x_1! * x_2! * ... * x_n!)`, where `n` is the total number of
items input, and each `x_i` is the count of a distinct item in the input
sequence.
If *r* is given, only the *r*-length permutations are yielded.
>>> sorted(distinct_permutations([1, 0, 1], r=2))
[(0, 1), (1, 0), (1, 1)]
>>> sorted(distinct_permutations(range(3), r=2))
[(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]
*iterable* need not be sortable, but note that using equal (``x == y``)
but non-identical (``id(x) != id(y)``) elements may produce surprising
behavior. For example, ``1`` and ``True`` are equal but non-identical:
>>> list(distinct_permutations([1, True, '3'])) # doctest: +SKIP
[
(1, True, '3'),
(1, '3', True),
('3', 1, True)
]
>>> list(distinct_permutations([1, 2, '3'])) # doctest: +SKIP
[
(1, 2, '3'),
(1, '3', 2),
(2, 1, '3'),
(2, '3', 1),
('3', 1, 2),
('3', 2, 1)
]
"""
| /usr/src/app/target_test_cases/failed_tests_more.distinct_permutations.txt | def distinct_permutations(iterable, r=None):
"""Yield successive distinct permutations of the elements in *iterable*.
>>> sorted(distinct_permutations([1, 0, 1]))
[(0, 1, 1), (1, 0, 1), (1, 1, 0)]
Equivalent to yielding from ``set(permutations(iterable))``, except
duplicates are not generated and thrown away. For larger input sequences
this is much more efficient.
Duplicate permutations arise when there are duplicated elements in the
input iterable. The number of items returned is
`n! / (x_1! * x_2! * ... * x_n!)`, where `n` is the total number of
items input, and each `x_i` is the count of a distinct item in the input
sequence.
If *r* is given, only the *r*-length permutations are yielded.
>>> sorted(distinct_permutations([1, 0, 1], r=2))
[(0, 1), (1, 0), (1, 1)]
>>> sorted(distinct_permutations(range(3), r=2))
[(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]
*iterable* need not be sortable, but note that using equal (``x == y``)
but non-identical (``id(x) != id(y)``) elements may produce surprising
behavior. For example, ``1`` and ``True`` are equal but non-identical:
>>> list(distinct_permutations([1, True, '3'])) # doctest: +SKIP
[
(1, True, '3'),
(1, '3', True),
('3', 1, True)
]
>>> list(distinct_permutations([1, 2, '3'])) # doctest: +SKIP
[
(1, 2, '3'),
(1, '3', 2),
(2, 1, '3'),
(2, '3', 1),
('3', 1, 2),
('3', 2, 1)
]
"""
# Algorithm: https://w.wiki/Qai
def _full(A):
while True:
# Yield the permutation we have
yield tuple(A)
# Find the largest index i such that A[i] < A[i + 1]
for i in range(size - 2, -1, -1):
if A[i] < A[i + 1]:
break
# If no such index exists, this permutation is the last one
else:
return
# Find the largest index j greater than i such that A[i] < A[j]
for j in range(size - 1, i, -1):
if A[i] < A[j]:
break
# Swap the value of A[i] with that of A[j], then reverse the
# sequence from A[i + 1] to form the new permutation
A[i], A[j] = A[j], A[i]
A[i + 1 :] = A[: i - size : -1] # A[i + 1:][::-1]
# Algorithm: modified from the above
def _partial(A, r):
# Split A into the first r items and the last r items
head, tail = A[:r], A[r:]
right_head_indexes = range(r - 1, -1, -1)
left_tail_indexes = range(len(tail))
while True:
# Yield the permutation we have
yield tuple(head)
# Starting from the right, find the first index of the head with
# value smaller than the maximum value of the tail - call it i.
pivot = tail[-1]
for i in right_head_indexes:
if head[i] < pivot:
break
pivot = head[i]
else:
return
# Starting from the left, find the first value of the tail
# with a value greater than head[i] and swap.
for j in left_tail_indexes:
if tail[j] > head[i]:
head[i], tail[j] = tail[j], head[i]
break
# If we didn't find one, start from the right and find the first
# index of the head with a value greater than head[i] and swap.
else:
for j in right_head_indexes:
if head[j] > head[i]:
head[i], head[j] = head[j], head[i]
break
# Reverse head[i + 1:] and swap it with tail[:r - (i + 1)]
tail += head[: i - r : -1] # head[i + 1:][::-1]
i += 1
head[i:], tail[:] = tail[: r - i], tail[r - i :]
items = list(iterable)
try:
items.sort()
sortable = True
except TypeError:
sortable = False
indices_dict = defaultdict(list)
for item in items:
indices_dict[items.index(item)].append(item)
indices = [items.index(item) for item in items]
indices.sort()
equivalent_items = {k: cycle(v) for k, v in indices_dict.items()}
def permuted_items(permuted_indices):
return tuple(
next(equivalent_items[index]) for index in permuted_indices
)
size = len(items)
if r is None:
r = size
# functools.partial(_partial, ... )
algorithm = _full if (r == size) else partial(_partial, r=r)
if 0 < r <= size:
if sortable:
return algorithm(items)
else:
return (
permuted_items(permuted_indices)
for permuted_indices in algorithm(indices)
)
return iter(() if r else ((),))
| more.distinct_permutations | File-Level |
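A quick usage sketch matching the doctests above (illustrative):

from more_itertools import distinct_permutations

# 3 items with one duplicate: 3!/2! = 3 distinct permutations, not 6.
print(sorted(distinct_permutations([1, 0, 1])))
# [(0, 1, 1), (1, 0, 1), (1, 1, 0)]

# r-length permutations of a string's characters
print(sorted(distinct_permutations("aab", r=2)))
# [('a', 'a'), ('a', 'b'), ('b', 'a')]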
plotly.py | 1 | packages/python/plotly/plotly/figure_factory/_bullet.py | def create_bullet(
data,
markers=None,
measures=None,
ranges=None,
subtitles=None,
titles=None,
orientation="h",
range_colors=("rgb(200, 200, 200)", "rgb(245, 245, 245)"),
measure_colors=("rgb(31, 119, 180)", "rgb(176, 196, 221)"),
horizontal_spacing=None,
vertical_spacing=None,
scatter_options={},
**layout_options,
):
"""
**deprecated**, use instead the plotly.graph_objects trace
:class:`plotly.graph_objects.Indicator`.
:param (pd.DataFrame | list | tuple) data: either a list/tuple of
dictionaries or a pandas DataFrame.
:param (str) markers: the column name or dictionary key for the markers in
each subplot.
:param (str) measures: the column name or dictionary key for the measure
bars in each subplot. This bar usually represents the quantitative
measure of performance, usually a list of two values [a, b] and are
the blue bars in the foreground of each subplot by default.
:param (str) ranges: the column name or dictionary key for the qualitative
ranges of performance, usually a 3-item list [bad, okay, good]. They
correspond to the grey bars in the background of each chart.
:param (str) subtitles: the column name or dictionary key for the subtitle
of each subplot chart. The subplots are displayed right underneath
each title.
:param (str) titles: the column name or dictionary key for the main label
of each subplot chart.
:param (str) orientation: if 'h', the bars are placed horizontally as
rows. If 'v' the bars are placed vertically in the chart.
:param (list) range_colors: a tuple of two colors between which all
the rectangles for the range are drawn. These rectangles are meant to
be qualitative indicators against which the marker and measure bars
are compared.
Default=('rgb(200, 200, 200)', 'rgb(245, 245, 245)')
:param (list) measure_colors: a tuple of two colors which is used to color
the thin quantitative bars in the bullet chart.
Default=('rgb(31, 119, 180)', 'rgb(176, 196, 221)')
:param (float) horizontal_spacing: see the 'horizontal_spacing' param in
plotly.tools.make_subplots. Ranges between 0 and 1.
:param (float) vertical_spacing: see the 'vertical_spacing' param in
plotly.tools.make_subplots. Ranges between 0 and 1.
:param (dict) scatter_options: describes attributes for the scatter trace
in each subplot such as name and marker size. Call
help(plotly.graph_objs.Scatter) for more information on valid params.
:param layout_options: describes attributes for the layout of the figure
such as title, height and width. Call help(plotly.graph_objs.Layout)
for more information on valid params.
Example 1: Use a Dictionary
>>> import plotly.figure_factory as ff
>>> data = [
... {"label": "revenue", "sublabel": "us$, in thousands",
... "range": [150, 225, 300], "performance": [220,270], "point": [250]},
... {"label": "Profit", "sublabel": "%", "range": [20, 25, 30],
... "performance": [21, 23], "point": [26]},
... {"label": "Order Size", "sublabel":"US$, average","range": [350, 500, 600],
... "performance": [100,320],"point": [550]},
... {"label": "New Customers", "sublabel": "count", "range": [1400, 2000, 2500],
... "performance": [1000, 1650],"point": [2100]},
... {"label": "Satisfaction", "sublabel": "out of 5","range": [3.5, 4.25, 5],
... "performance": [3.2, 4.7], "point": [4.4]}
... ]
>>> fig = ff.create_bullet(
... data, titles='label', subtitles='sublabel', markers='point',
... measures='performance', ranges='range', orientation='h',
... title='my simple bullet chart'
... )
>>> fig.show()
Example 2: Use a DataFrame with Custom Colors
>>> import plotly.figure_factory as ff
>>> import pandas as pd
>>> data = pd.read_json('https://cdn.rawgit.com/plotly/datasets/master/BulletData.json')
>>> fig = ff.create_bullet(
... data, titles='title', markers='markers', measures='measures',
... orientation='v', measure_colors=['rgb(14, 52, 75)', 'rgb(31, 141, 127)'],
... scatter_options={'marker': {'symbol': 'circle'}}, width=700)
>>> fig.show()
"""
| /usr/src/app/target_test_cases/failed_tests__bullet.create_bullet.txt | def create_bullet(
data,
markers=None,
measures=None,
ranges=None,
subtitles=None,
titles=None,
orientation="h",
range_colors=("rgb(200, 200, 200)", "rgb(245, 245, 245)"),
measure_colors=("rgb(31, 119, 180)", "rgb(176, 196, 221)"),
horizontal_spacing=None,
vertical_spacing=None,
scatter_options={},
**layout_options,
):
"""
**deprecated**, use instead the plotly.graph_objects trace
:class:`plotly.graph_objects.Indicator`.
:param (pd.DataFrame | list | tuple) data: either a list/tuple of
dictionaries or a pandas DataFrame.
:param (str) markers: the column name or dictionary key for the markers in
each subplot.
:param (str) measures: the column name or dictionary key for the measure
bars in each subplot. This bar usually represents the quantitative
measure of performance, usually a list of two values [a, b] and are
the blue bars in the foreground of each subplot by default.
:param (str) ranges: the column name or dictionary key for the qualitative
ranges of performance, usually a 3-item list [bad, okay, good]. They
correspond to the grey bars in the background of each chart.
:param (str) subtitles: the column name or dictionary key for the subtitle
of each subplot chart. The subplots are displayed right underneath
each title.
:param (str) titles: the column name or dictionary key for the main label
of each subplot chart.
:param (str) orientation: if 'h', the bars are placed horizontally as
rows. If 'v' the bars are placed vertically in the chart.
:param (list) range_colors: a tuple of two colors between which all
the rectangles for the range are drawn. These rectangles are meant to
be qualitative indicators against which the marker and measure bars
are compared.
Default=('rgb(200, 200, 200)', 'rgb(245, 245, 245)')
:param (list) measure_colors: a tuple of two colors which is used to color
the thin quantitative bars in the bullet chart.
Default=('rgb(31, 119, 180)', 'rgb(176, 196, 221)')
:param (float) horizontal_spacing: see the 'horizontal_spacing' param in
plotly.tools.make_subplots. Ranges between 0 and 1.
:param (float) vertical_spacing: see the 'vertical_spacing' param in
plotly.tools.make_subplots. Ranges between 0 and 1.
:param (dict) scatter_options: describes attributes for the scatter trace
in each subplot such as name and marker size. Call
help(plotly.graph_objs.Scatter) for more information on valid params.
:param layout_options: describes attributes for the layout of the figure
such as title, height and width. Call help(plotly.graph_objs.Layout)
for more information on valid params.
Example 1: Use a Dictionary
>>> import plotly.figure_factory as ff
>>> data = [
... {"label": "revenue", "sublabel": "us$, in thousands",
... "range": [150, 225, 300], "performance": [220,270], "point": [250]},
... {"label": "Profit", "sublabel": "%", "range": [20, 25, 30],
... "performance": [21, 23], "point": [26]},
... {"label": "Order Size", "sublabel":"US$, average","range": [350, 500, 600],
... "performance": [100,320],"point": [550]},
... {"label": "New Customers", "sublabel": "count", "range": [1400, 2000, 2500],
... "performance": [1000, 1650],"point": [2100]},
... {"label": "Satisfaction", "sublabel": "out of 5","range": [3.5, 4.25, 5],
... "performance": [3.2, 4.7], "point": [4.4]}
... ]
>>> fig = ff.create_bullet(
... data, titles='label', subtitles='sublabel', markers='point',
... measures='performance', ranges='range', orientation='h',
... title='my simple bullet chart'
... )
>>> fig.show()
Example 2: Use a DataFrame with Custom Colors
>>> import plotly.figure_factory as ff
>>> import pandas as pd
>>> data = pd.read_json('https://cdn.rawgit.com/plotly/datasets/master/BulletData.json')
>>> fig = ff.create_bullet(
... data, titles='title', markers='markers', measures='measures',
... orientation='v', measure_colors=['rgb(14, 52, 75)', 'rgb(31, 141, 127)'],
... scatter_options={'marker': {'symbol': 'circle'}}, width=700)
>>> fig.show()
"""
# validate df
if not pd:
raise ImportError("'pandas' must be installed for this figure factory.")
if utils.is_sequence(data):
if not all(isinstance(item, dict) for item in data):
raise exceptions.PlotlyError(
"Every entry of the data argument list, tuple, etc must "
"be a dictionary."
)
elif not isinstance(data, pd.DataFrame):
raise exceptions.PlotlyError(
"You must input a pandas DataFrame, or a list of dictionaries."
)
# make DataFrame from data with correct column headers
col_names = ["titles", "subtitle", "markers", "measures", "ranges"]
if utils.is_sequence(data):
df = pd.DataFrame(
[
[d[titles] for d in data] if titles else [""] * len(data),
[d[subtitles] for d in data] if subtitles else [""] * len(data),
[d[markers] for d in data] if markers else [[]] * len(data),
[d[measures] for d in data] if measures else [[]] * len(data),
[d[ranges] for d in data] if ranges else [[]] * len(data),
],
index=col_names,
)
elif isinstance(data, pd.DataFrame):
df = pd.DataFrame(
[
data[titles].tolist() if titles else [""] * len(data),
data[subtitles].tolist() if subtitles else [""] * len(data),
data[markers].tolist() if markers else [[]] * len(data),
data[measures].tolist() if measures else [[]] * len(data),
data[ranges].tolist() if ranges else [[]] * len(data),
],
index=col_names,
)
df = pd.DataFrame.transpose(df)
# make sure 'ranges', 'measures' and 'markers' entries are not NaN or None
for needed_key in ["ranges", "measures", "markers"]:
for idx, r in enumerate(df[needed_key]):
try:
r_is_nan = math.isnan(r)
if r_is_nan or r is None:
df[needed_key][idx] = []
except TypeError:
pass
# validate custom colors
for colors_list in [range_colors, measure_colors]:
if colors_list:
if len(colors_list) != 2:
raise exceptions.PlotlyError(
"Both 'range_colors' or 'measure_colors' must be a list "
"of two valid colors."
)
clrs.validate_colors(colors_list)
colors_list = clrs.convert_colors_to_same_type(colors_list, "rgb")[0]
# default scatter options
default_scatter = {
"marker": {"size": 12, "symbol": "diamond-tall", "color": "rgb(0, 0, 0)"}
}
if scatter_options == {}:
scatter_options.update(default_scatter)
else:
# add default options to scatter_options if they are not present
for k in default_scatter["marker"]:
if k not in scatter_options["marker"]:
scatter_options["marker"][k] = default_scatter["marker"][k]
fig = _bullet(
df,
markers,
measures,
ranges,
subtitles,
titles,
orientation,
range_colors,
measure_colors,
horizontal_spacing,
vertical_spacing,
scatter_options,
layout_options,
)
return fig
| _bullet.create_bullet | File-Level |
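Since the docstring marks create_bullet as deprecated in favor of plotly.graph_objects.Indicator, here is a hedged sketch of the suggested replacement; the values mirror the "revenue" row in Example 1 and are purely illustrative:

import plotly.graph_objects as go

fig = go.Figure(go.Indicator(
    mode="number+gauge+delta",
    value=220,
    delta={"reference": 200},
    gauge={
        "shape": "bullet",
        "axis": {"range": [None, 300]},
        # qualitative ranges drawn behind the measure bar
        "steps": [
            {"range": [0, 150], "color": "rgb(200, 200, 200)"},
            {"range": [150, 225], "color": "rgb(245, 245, 245)"},
        ],
        # the marker ("point") from the bullet-chart data
        "threshold": {"line": {"color": "black", "width": 2}, "value": 250},
    },
    title={"text": "revenue"},
))
fig.show()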
plotly.py | 4 | packages/python/plotly/plotly/graph_objs/histogram2dcontour/_contours.py | def __init__(
self,
arg=None,
coloring=None,
end=None,
labelfont=None,
labelformat=None,
operation=None,
showlabels=None,
showlines=None,
size=None,
start=None,
type=None,
value=None,
**kwargs,
):
"""
Construct a new Contours object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.histogram2dcontour.Contours`
coloring
Determines the coloring method showing the contour
values. If "fill", coloring is done evenly between each
contour level If "heatmap", a heatmap gradient coloring
is applied between each contour level. If "lines",
coloring is done on the contour lines. If "none", no
coloring is applied on this trace.
end
Sets the end contour level value. Must be more than
`contours.start`
labelfont
Sets the font used for labeling the contour levels. The
default color comes from the lines, if shown. The
default family and size come from `layout.font`.
labelformat
Sets the contour label formatting rule using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
operation
Sets the constraint operation. "=" keeps regions equal
to `value` "<" and "<=" keep regions less than `value`
">" and ">=" keep regions greater than `value` "[]",
"()", "[)", and "(]" keep regions inside `value[0]` to
`value[1]` "][", ")(", "](", ")[" keep regions outside
`value[0]` to value[1]` Open vs. closed intervals make
no difference to constraint display, but all versions
are allowed for consistency with filter transforms.
showlabels
Determines whether to label the contour lines with
their values.
showlines
Determines whether or not the contour lines are drawn.
Has an effect only if `contours.coloring` is set to
"fill".
size
Sets the step between each contour level. Must be
positive.
start
Sets the starting contour level value. Must be less
than `contours.end`
type
If `levels`, the data is represented as a contour plot
with multiple levels displayed. If `constraint`, the
data is represented as constraints with the invalid
region shaded as specified by the `operation` and
`value` parameters.
value
Sets the value or values of the constraint boundary.
When `operation` is set to one of the comparison values
(=,<,>=,>,<=) "value" is expected to be a number. When
`operation` is set to one of the interval values
([],(),[),(],][,)(,](,)[) "value" is expected to be an
array of two numbers where the first is the lower bound
and the second is the upper bound.
Returns
-------
Contours
"""
| /usr/src/app/target_test_cases/failed_tests__contours.Contours.__init__.txt | def __init__(
self,
arg=None,
coloring=None,
end=None,
labelfont=None,
labelformat=None,
operation=None,
showlabels=None,
showlines=None,
size=None,
start=None,
type=None,
value=None,
**kwargs,
):
"""
Construct a new Contours object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.histogram2dcontour.Contours`
coloring
Determines the coloring method showing the contour
values. If "fill", coloring is done evenly between each
contour level If "heatmap", a heatmap gradient coloring
is applied between each contour level. If "lines",
coloring is done on the contour lines. If "none", no
coloring is applied on this trace.
end
Sets the end contour level value. Must be more than
`contours.start`
labelfont
Sets the font used for labeling the contour levels. The
default color comes from the lines, if shown. The
default family and size come from `layout.font`.
labelformat
Sets the contour label formatting rule using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
operation
Sets the constraint operation. "=" keeps regions equal
to `value` "<" and "<=" keep regions less than `value`
">" and ">=" keep regions greater than `value` "[]",
"()", "[)", and "(]" keep regions inside `value[0]` to
`value[1]` "][", ")(", "](", ")[" keep regions outside
`value[0]` to value[1]` Open vs. closed intervals make
no difference to constraint display, but all versions
are allowed for consistency with filter transforms.
showlabels
Determines whether to label the contour lines with
their values.
showlines
Determines whether or not the contour lines are drawn.
Has an effect only if `contours.coloring` is set to
"fill".
size
Sets the step between each contour level. Must be
positive.
start
Sets the starting contour level value. Must be less
than `contours.end`
type
If `levels`, the data is represented as a contour plot
with multiple levels displayed. If `constraint`, the
data is represented as constraints with the invalid
region shaded as specified by the `operation` and
`value` parameters.
value
Sets the value or values of the constraint boundary.
When `operation` is set to one of the comparison values
(=,<,>=,>,<=) "value" is expected to be a number. When
`operation` is set to one of the interval values
([],(),[),(],][,)(,](,)[) "value" is expected to be an
array of two numbers where the first is the lower bound
and the second is the upper bound.
Returns
-------
Contours
"""
super(Contours, self).__init__("contours")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.histogram2dcontour.Contours
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram2dcontour.Contours`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("coloring", None)
_v = coloring if coloring is not None else _v
if _v is not None:
self["coloring"] = _v
_v = arg.pop("end", None)
_v = end if end is not None else _v
if _v is not None:
self["end"] = _v
_v = arg.pop("labelfont", None)
_v = labelfont if labelfont is not None else _v
if _v is not None:
self["labelfont"] = _v
_v = arg.pop("labelformat", None)
_v = labelformat if labelformat is not None else _v
if _v is not None:
self["labelformat"] = _v
_v = arg.pop("operation", None)
_v = operation if operation is not None else _v
if _v is not None:
self["operation"] = _v
_v = arg.pop("showlabels", None)
_v = showlabels if showlabels is not None else _v
if _v is not None:
self["showlabels"] = _v
_v = arg.pop("showlines", None)
_v = showlines if showlines is not None else _v
if _v is not None:
self["showlines"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
_v = arg.pop("start", None)
_v = start if start is not None else _v
if _v is not None:
self["start"] = _v
_v = arg.pop("type", None)
_v = type if type is not None else _v
if _v is not None:
self["type"] = _v
_v = arg.pop("value", None)
_v = value if value is not None else _v
if _v is not None:
self["value"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| _contours.Contours.__init__ | Self-Contained |
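The constructor above follows the generated graph_objs pattern: explicit keyword arguments take precedence over matching keys popped from `arg`, and anything left over is routed through `_process_kwargs`. A minimal usage sketch (the x/y samples are invented for illustration; a plain dict is accepted wherever a Contours instance is expected and is funneled through this constructor):

import plotly.graph_objects as go

# Made-up sample points; any paired numeric sequences would do.
x = [1, 2, 2, 3, 3, 3, 4, 4, 5]
y = [2, 1, 3, 2, 4, 3, 5, 4, 3]

fig = go.Figure(
    go.Histogram2dContour(
        x=x,
        y=y,
        # The dict below is validated and coerced by the Contours
        # constructor shown above.
        contours=dict(coloring="heatmap", showlabels=True, start=0, end=4, size=1),
    )
)
fig.show()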
plotly.py | 5 | packages/python/plotly/plotly/figure_factory/_county_choropleth.py | def create_choropleth(
fips,
values,
scope=["usa"],
binning_endpoints=None,
colorscale=None,
order=None,
simplify_county=0.02,
simplify_state=0.02,
asp=None,
show_hover=True,
show_state_data=True,
state_outline=None,
county_outline=None,
centroid_marker=None,
round_legend_values=False,
exponent_format=False,
legend_title="",
**layout_options,
):
"""
**deprecated**, use instead
:func:`plotly.express.choropleth` with custom GeoJSON.
This function also requires `shapely`, `geopandas` and `plotly-geo` to be installed.
Returns figure for county choropleth. Uses data from package_data.
:param (list) fips: list of FIPS values which correspond to the
concatenation of state and county ids. An example is '01001'.
:param (list) values: list of numbers/strings which correspond to the
fips list. These are the values that will determine how the counties
are colored.
:param (list) scope: list of states and/or state abbreviations. Fits
all states in the camera tightly. Selecting ['usa'] is the equivalent
of appending all 50 states into your scope list. Selecting only 'usa'
does not include 'Alaska', 'Puerto Rico', 'American Samoa',
'Commonwealth of the Northern Mariana Islands', 'Guam',
'United States Virgin Islands'. These must be added manually to the
list.
Default = ['usa']
:param (list) binning_endpoints: ascending numbers which implicitly define
real number intervals which are used as bins. The colorscale used must
have the same number of colors as the number of bins and this will
result in a categorical colormap.
:param (list) colorscale: a list of colors with length equal to the
number of categories of colors. The length must match either all
unique numbers in the 'values' list or if endpoints is being used, the
number of categories created by the endpoints.\n
For example, if binning_endpoints = [4, 6, 8], then there are 4 bins:
[-inf, 4), [4, 6), [6, 8), [8, inf)
:param (list) order: a list of the unique categories (numbers/bins) in any
desired order. This is helpful if you want to order string values to
a chosen colorscale.
:param (float) simplify_county: determines the simplification factor
for the counties. The larger the number, the fewer vertices and edges
each polygon has. See
http://toblerity.org/shapely/manual.html#object.simplify for more
information.
Default = 0.02
:param (float) simplify_state: simplifies the state outline polygon.
See http://toblerity.org/shapely/manual.html#object.simplify for more
information.
Default = 0.02
:param (float) asp: the width-to-height aspect ratio for the camera.
Default = 2.5
:param (bool) show_hover: show county hover and centroid info
:param (bool) show_state_data: reveals state boundary lines
:param (dict) state_outline: dict of attributes of the state outline
including width and color. See
https://plot.ly/python/reference/#scatter-marker-line for all valid
params
:param (dict) county_outline: dict of attributes of the county outline
including width and color. See
https://plot.ly/python/reference/#scatter-marker-line for all valid
params
:param (dict) centroid_marker: dict of attributes of the centroid marker.
The centroid markers are invisible by default and appear visible on
selection. See https://plot.ly/python/reference/#scatter-marker for
all valid params
:param (bool) round_legend_values: automatically round the numbers that
appear in the legend to the nearest integer.
Default = False
:param (bool) exponent_format: if set to True, puts numbers in the K, M,
B number format. For example 4000.0 becomes 4.0K
Default = False
:param (str) legend_title: title that appears above the legend
:param **layout_options: a **kwargs argument for all layout parameters
Example 1: Florida::
import plotly.figure_factory as ff
import numpy as np
import pandas as pd
df_sample = pd.read_csv(
'https://raw.githubusercontent.com/plotly/datasets/master/minoritymajority.csv'
)
df_sample_r = df_sample[df_sample['STNAME'] == 'Florida']
values = df_sample_r['TOT_POP'].tolist()
fips = df_sample_r['FIPS'].tolist()
binning_endpoints = list(np.mgrid[min(values):max(values):4j])
colorscale = ["#030512","#1d1d3b","#323268","#3d4b94","#3e6ab0",
"#4989bc","#60a7c7","#85c5d3","#b7e0e4","#eafcfd"]
fig = ff.create_choropleth(
fips=fips, values=values, scope=['Florida'], show_state_data=True,
colorscale=colorscale, binning_endpoints=binning_endpoints,
round_legend_values=True, plot_bgcolor='rgb(229,229,229)',
paper_bgcolor='rgb(229,229,229)', legend_title='Florida Population',
county_outline={'color': 'rgb(255,255,255)', 'width': 0.5},
exponent_format=True,
)
Example 2: New England::
import plotly.figure_factory as ff
import pandas as pd
NE_states = ['Connecticut', 'Maine', 'Massachusetts',
'New Hampshire', 'Rhode Island']
df_sample = pd.read_csv(
'https://raw.githubusercontent.com/plotly/datasets/master/minoritymajority.csv'
)
df_sample_r = df_sample[df_sample['STNAME'].isin(NE_states)]
colorscale = ['rgb(68.0, 1.0, 84.0)',
'rgb(66.0, 64.0, 134.0)',
'rgb(38.0, 130.0, 142.0)',
'rgb(63.0, 188.0, 115.0)',
'rgb(216.0, 226.0, 25.0)']
values = df_sample_r['TOT_POP'].tolist()
fips = df_sample_r['FIPS'].tolist()
fig = ff.create_choropleth(
fips=fips, values=values, scope=NE_states, show_state_data=True
)
fig.show()
Example 3: California and Surrounding States::
import plotly.figure_factory as ff
import pandas as pd
df_sample = pd.read_csv(
'https://raw.githubusercontent.com/plotly/datasets/master/minoritymajority.csv'
)
df_sample_r = df_sample[df_sample['STNAME'] == 'California']
values = df_sample_r['TOT_POP'].tolist()
fips = df_sample_r['FIPS'].tolist()
colorscale = [
'rgb(193, 193, 193)',
'rgb(239,239,239)',
'rgb(195, 196, 222)',
'rgb(144,148,194)',
'rgb(101,104,168)',
'rgb(65, 53, 132)'
]
fig = ff.create_choropleth(
fips=fips, values=values, colorscale=colorscale,
scope=['CA', 'AZ', 'Nevada', 'Oregon', 'Idaho'],
binning_endpoints=[14348, 63983, 134827, 426762, 2081313],
county_outline={'color': 'rgb(255,255,255)', 'width': 0.5},
legend_title='California Counties',
title='California and Nearby States'
)
fig.show()
Example 4: USA::
import plotly.figure_factory as ff
import numpy as np
import pandas as pd
df_sample = pd.read_csv(
'https://raw.githubusercontent.com/plotly/datasets/master/laucnty16.csv'
)
df_sample['State FIPS Code'] = df_sample['State FIPS Code'].apply(
lambda x: str(x).zfill(2)
)
df_sample['County FIPS Code'] = df_sample['County FIPS Code'].apply(
lambda x: str(x).zfill(3)
)
df_sample['FIPS'] = (
df_sample['State FIPS Code'] + df_sample['County FIPS Code']
)
colorscale = ["#f7fbff", "#ebf3fb", "#deebf7", "#d2e3f3", "#c6dbef",
"#b3d2e9", "#9ecae1", "#85bcdb", "#6baed6", "#57a0ce",
"#4292c6", "#3082be", "#2171b5", "#1361a9", "#08519c",
"#0b4083", "#08306b"]
binning_endpoints = list(np.linspace(1, 12, len(colorscale) - 1))
fips = df_sample['FIPS']
values = df_sample['Unemployment Rate (%)']
fig = ff.create_choropleth(
fips=fips, values=values, scope=['usa'],
binning_endpoints=binning_endpoints, colorscale=colorscale,
show_hover=True, centroid_marker={'opacity': 0},
asp=2.9, title='USA by Unemployment %',
legend_title='Unemployment %'
)
fig.show()
"""
| /usr/src/app/target_test_cases/failed_tests__county_choropleth.create_choropleth.txt | def create_choropleth(
fips,
values,
scope=["usa"],
binning_endpoints=None,
colorscale=None,
order=None,
simplify_county=0.02,
simplify_state=0.02,
asp=None,
show_hover=True,
show_state_data=True,
state_outline=None,
county_outline=None,
centroid_marker=None,
round_legend_values=False,
exponent_format=False,
legend_title="",
**layout_options,
):
"""
**deprecated**, use instead
:func:`plotly.express.choropleth` with custom GeoJSON.
This function also requires `shapely`, `geopandas` and `plotly-geo` to be installed.
Returns figure for county choropleth. Uses data from package_data.
:param (list) fips: list of FIPS values which correspond to the
concatenation of state and county ids. An example is '01001'.
:param (list) values: list of numbers/strings which correspond to the
fips list. These are the values that will determine how the counties
are colored.
:param (list) scope: list of states and/or state abbreviations. Fits
all states in the camera tightly. Selecting ['usa'] is the equivalent
of appending all 50 states into your scope list. Selecting only 'usa'
does not include 'Alaska', 'Puerto Rico', 'American Samoa',
'Commonwealth of the Northern Mariana Islands', 'Guam',
'United States Virgin Islands'. These must be added manually to the
list.
Default = ['usa']
:param (list) binning_endpoints: ascending numbers which implicitly define
real number intervals which are used as bins. The colorscale used must
have the same number of colors as the number of bins and this will
result in a categorical colormap.
:param (list) colorscale: a list of colors with length equal to the
number of categories of colors. The length must match either all
unique numbers in the 'values' list or if endpoints is being used, the
number of categories created by the endpoints.\n
For example, if binning_endpoints = [4, 6, 8], then there are 4 bins:
[-inf, 4), [4, 6), [6, 8), [8, inf)
:param (list) order: a list of the unique categories (numbers/bins) in any
desired order. This is helpful if you want to order string values to
a chosen colorscale.
:param (float) simplify_county: determines the simplification factor
for the counties. The larger the number, the fewer vertices and edges
each polygon has. See
http://toblerity.org/shapely/manual.html#object.simplify for more
information.
Default = 0.02
:param (float) simplify_state: simplifies the state outline polygon.
See http://toblerity.org/shapely/manual.html#object.simplify for more
information.
Default = 0.02
:param (float) asp: the width-to-height aspect ratio for the camera.
Default = 2.5
:param (bool) show_hover: show county hover and centroid info
:param (bool) show_state_data: reveals state boundary lines
:param (dict) state_outline: dict of attributes of the state outline
including width and color. See
https://plot.ly/python/reference/#scatter-marker-line for all valid
params
:param (dict) county_outline: dict of attributes of the county outline
including width and color. See
https://plot.ly/python/reference/#scatter-marker-line for all valid
params
:param (dict) centroid_marker: dict of attributes of the centroid marker.
The centroid markers are invisible by default and appear visible on
selection. See https://plot.ly/python/reference/#scatter-marker for
all valid params
:param (bool) round_legend_values: automatically round the numbers that
appear in the legend to the nearest integer.
Default = False
:param (bool) exponent_format: if set to True, puts numbers in the K, M,
B number format. For example 4000.0 becomes 4.0K
Default = False
:param (str) legend_title: title that appears above the legend
:param **layout_options: a **kwargs argument for all layout parameters
Example 1: Florida::
import plotly.figure_factory as ff
import numpy as np
import pandas as pd
df_sample = pd.read_csv(
'https://raw.githubusercontent.com/plotly/datasets/master/minoritymajority.csv'
)
df_sample_r = df_sample[df_sample['STNAME'] == 'Florida']
values = df_sample_r['TOT_POP'].tolist()
fips = df_sample_r['FIPS'].tolist()
binning_endpoints = list(np.mgrid[min(values):max(values):4j])
colorscale = ["#030512","#1d1d3b","#323268","#3d4b94","#3e6ab0",
"#4989bc","#60a7c7","#85c5d3","#b7e0e4","#eafcfd"]
fig = ff.create_choropleth(
fips=fips, values=values, scope=['Florida'], show_state_data=True,
colorscale=colorscale, binning_endpoints=binning_endpoints,
round_legend_values=True, plot_bgcolor='rgb(229,229,229)',
paper_bgcolor='rgb(229,229,229)', legend_title='Florida Population',
county_outline={'color': 'rgb(255,255,255)', 'width': 0.5},
exponent_format=True,
)
Example 2: New England::
import plotly.figure_factory as ff
import pandas as pd
NE_states = ['Connecticut', 'Maine', 'Massachusetts',
'New Hampshire', 'Rhode Island']
df_sample = pd.read_csv(
'https://raw.githubusercontent.com/plotly/datasets/master/minoritymajority.csv'
)
df_sample_r = df_sample[df_sample['STNAME'].isin(NE_states)]
colorscale = ['rgb(68.0, 1.0, 84.0)',
'rgb(66.0, 64.0, 134.0)',
'rgb(38.0, 130.0, 142.0)',
'rgb(63.0, 188.0, 115.0)',
'rgb(216.0, 226.0, 25.0)']
values = df_sample_r['TOT_POP'].tolist()
fips = df_sample_r['FIPS'].tolist()
fig = ff.create_choropleth(
fips=fips, values=values, scope=NE_states, show_state_data=True
)
fig.show()
Example 3: California and Surrounding States::
import plotly.figure_factory as ff
import pandas as pd
df_sample = pd.read_csv(
'https://raw.githubusercontent.com/plotly/datasets/master/minoritymajority.csv'
)
df_sample_r = df_sample[df_sample['STNAME'] == 'California']
values = df_sample_r['TOT_POP'].tolist()
fips = df_sample_r['FIPS'].tolist()
colorscale = [
'rgb(193, 193, 193)',
'rgb(239,239,239)',
'rgb(195, 196, 222)',
'rgb(144,148,194)',
'rgb(101,104,168)',
'rgb(65, 53, 132)'
]
fig = ff.create_choropleth(
fips=fips, values=values, colorscale=colorscale,
scope=['CA', 'AZ', 'Nevada', 'Oregon', 'Idaho'],
binning_endpoints=[14348, 63983, 134827, 426762, 2081313],
county_outline={'color': 'rgb(255,255,255)', 'width': 0.5},
legend_title='California Counties',
title='California and Nearby States'
)
fig.show()
Example 4: USA::
import plotly.figure_factory as ff
import numpy as np
import pandas as pd
df_sample = pd.read_csv(
'https://raw.githubusercontent.com/plotly/datasets/master/laucnty16.csv'
)
df_sample['State FIPS Code'] = df_sample['State FIPS Code'].apply(
lambda x: str(x).zfill(2)
)
df_sample['County FIPS Code'] = df_sample['County FIPS Code'].apply(
lambda x: str(x).zfill(3)
)
df_sample['FIPS'] = (
df_sample['State FIPS Code'] + df_sample['County FIPS Code']
)
colorscale = ["#f7fbff", "#ebf3fb", "#deebf7", "#d2e3f3", "#c6dbef",
"#b3d2e9", "#9ecae1", "#85bcdb", "#6baed6", "#57a0ce",
"#4292c6", "#3082be", "#2171b5", "#1361a9", "#08519c",
"#0b4083", "#08306b"]
binning_endpoints = list(np.linspace(1, 12, len(colorscale) - 1))
fips = df_sample['FIPS']
values = df_sample['Unemployment Rate (%)']
fig = ff.create_choropleth(
fips=fips, values=values, scope=['usa'],
binning_endpoints=binning_endpoints, colorscale=colorscale,
show_hover=True, centroid_marker={'opacity': 0},
asp=2.9, title='USA by Unemployment %',
legend_title='Unemployment %'
)
fig.show()
"""
# ensure optional modules imported
if not _plotly_geo:
raise ValueError(
"""
The create_choropleth figure factory requires the plotly-geo package.
Install using pip with:
$ pip install plotly-geo
Or, install using conda with
$ conda install -c plotly plotly-geo
"""
)
if not gp or not shapefile or not shapely:
raise ImportError(
"geopandas, pyshp and shapely must be installed for this figure "
"factory.\n\nRun the following commands to install the correct "
"versions of the following modules:\n\n"
"```\n"
"$ pip install geopandas==0.3.0\n"
"$ pip install pyshp==1.2.10\n"
"$ pip install shapely==1.6.3\n"
"```\n"
"If you are using Windows, follow this post to properly "
"install geopandas and dependencies:"
"http://geoffboeing.com/2014/09/using-geopandas-windows/\n\n"
"If you are using Anaconda, do not use PIP to install the "
"packages above. Instead use conda to install them:\n\n"
"```\n"
"$ conda install plotly\n"
"$ conda install geopandas\n"
"```"
)
df, df_state = _create_us_counties_df(st_to_state_name_dict, state_to_st_dict)
fips_polygon_map = dict(zip(df["FIPS"].tolist(), df["geometry"].tolist()))
if not state_outline:
state_outline = {"color": "rgb(240, 240, 240)", "width": 1}
if not county_outline:
county_outline = {"color": "rgb(0, 0, 0)", "width": 0}
if not centroid_marker:
centroid_marker = {"size": 3, "color": "white", "opacity": 1}
# ensure centroid markers appear on selection
if "opacity" not in centroid_marker:
centroid_marker.update({"opacity": 1})
if len(fips) != len(values):
raise PlotlyError("fips and values must be the same length")
# make fips, values into lists
if isinstance(fips, pd.core.series.Series):
fips = fips.tolist()
if isinstance(values, pd.core.series.Series):
values = values.tolist()
# make fips numeric
fips = map(lambda x: int(x), fips)
if binning_endpoints:
intervals = utils.endpts_to_intervals(binning_endpoints)
LEVELS = _intervals_as_labels(intervals, round_legend_values, exponent_format)
else:
if not order:
LEVELS = sorted(list(set(values)))
else:
# check if order is permutation
# of unique color col values
same_sets = set(values) == set(order)
no_duplicates = not any(order.count(x) > 1 for x in order)
if same_sets and no_duplicates:
LEVELS = order
else:
raise PlotlyError(
"if you are using a custom order of unique values from "
"your color column, you must: have all the unique values "
"in your order and have no duplicate items"
)
if not colorscale:
colorscale = []
viridis_colors = clrs.colorscale_to_colors(clrs.PLOTLY_SCALES["Viridis"])
viridis_colors = clrs.color_parser(viridis_colors, clrs.hex_to_rgb)
viridis_colors = clrs.color_parser(viridis_colors, clrs.label_rgb)
viri_len = len(viridis_colors) + 1
viri_intervals = utils.endpts_to_intervals(list(np.linspace(0, 1, viri_len)))[
1:-1
]
for L in np.linspace(0, 1, len(LEVELS)):
for idx, inter in enumerate(viri_intervals):
if L == 0:
break
elif inter[0] < L <= inter[1]:
break
intermed = (L - viri_intervals[idx][0]) / (
viri_intervals[idx][1] - viri_intervals[idx][0]
)
float_color = clrs.find_intermediate_color(
viridis_colors[idx], viridis_colors[idx], intermed, colortype="rgb"
)
# make R,G,B into int values
float_color = clrs.unlabel_rgb(float_color)
float_color = clrs.unconvert_from_RGB_255(float_color)
int_rgb = clrs.convert_to_RGB_255(float_color)
int_rgb = clrs.label_rgb(int_rgb)
colorscale.append(int_rgb)
if len(colorscale) < len(LEVELS):
raise PlotlyError(
"You have {} LEVELS. Your number of colors in 'colorscale' must "
"be at least the number of LEVELS: {}. If you are "
"using 'binning_endpoints' then 'colorscale' must have at "
"least len(binning_endpoints) + 2 colors".format(
len(LEVELS), min(LEVELS, LEVELS[:20])
)
)
color_lookup = dict(zip(LEVELS, colorscale))
x_traces = dict(zip(LEVELS, [[] for i in range(len(LEVELS))]))
y_traces = dict(zip(LEVELS, [[] for i in range(len(LEVELS))]))
# scope
if isinstance(scope, str):
raise PlotlyError("'scope' must be a list/tuple/sequence")
scope_names = []
extra_states = [
"Alaska",
"Commonwealth of the Northern Mariana Islands",
"Puerto Rico",
"Guam",
"United States Virgin Islands",
"American Samoa",
]
for state in scope:
if state.lower() == "usa":
scope_names = df["STATE_NAME"].unique()
scope_names = list(scope_names)
for ex_st in extra_states:
try:
scope_names.remove(ex_st)
except ValueError:
pass
else:
if state in st_to_state_name_dict.keys():
state = st_to_state_name_dict[state]
scope_names.append(state)
df_state = df_state[df_state["STATE_NAME"].isin(scope_names)]
plot_data = []
x_centroids = []
y_centroids = []
centroid_text = []
fips_not_in_shapefile = []
if not binning_endpoints:
for index, f in enumerate(fips):
level = values[index]
try:
fips_polygon_map[f].type
(
x_traces,
y_traces,
x_centroids,
y_centroids,
centroid_text,
) = _calculations(
df,
fips,
values,
index,
f,
simplify_county,
level,
x_centroids,
y_centroids,
centroid_text,
x_traces,
y_traces,
fips_polygon_map,
)
except KeyError:
fips_not_in_shapefile.append(f)
else:
for index, f in enumerate(fips):
for j, inter in enumerate(intervals):
if inter[0] < values[index] <= inter[1]:
break
level = LEVELS[j]
try:
fips_polygon_map[f].type
(
x_traces,
y_traces,
x_centroids,
y_centroids,
centroid_text,
) = _calculations(
df,
fips,
values,
index,
f,
simplify_county,
level,
x_centroids,
y_centroids,
centroid_text,
x_traces,
y_traces,
fips_polygon_map,
)
except KeyError:
fips_not_in_shapefile.append(f)
if len(fips_not_in_shapefile) > 0:
msg = (
"Unrecognized FIPS Values\n\nWhoops! It looks like you are "
"trying to pass at least one FIPS value that is not in "
"our shapefile of FIPS and data for the counties. Your "
"choropleth will still show up but these counties cannot "
"be shown.\nUnrecognized FIPS are: {}".format(fips_not_in_shapefile)
)
warnings.warn(msg)
x_states = []
y_states = []
for index, row in df_state.iterrows():
if df_state["geometry"][index].type == "Polygon":
x = row.geometry.simplify(simplify_state).exterior.xy[0].tolist()
y = row.geometry.simplify(simplify_state).exterior.xy[1].tolist()
x_states = x_states + x
y_states = y_states + y
elif df_state["geometry"][index].type == "MultiPolygon":
x = [
poly.simplify(simplify_state).exterior.xy[0].tolist()
for poly in df_state["geometry"][index].geoms
]
y = [
poly.simplify(simplify_state).exterior.xy[1].tolist()
for poly in df_state["geometry"][index].geoms
]
for segment in range(len(x)):
x_states = x_states + x[segment]
y_states = y_states + y[segment]
x_states.append(np.nan)
y_states.append(np.nan)
x_states.append(np.nan)
y_states.append(np.nan)
for lev in LEVELS:
county_data = dict(
type="scatter",
mode="lines",
x=x_traces[lev],
y=y_traces[lev],
line=county_outline,
fill="toself",
fillcolor=color_lookup[lev],
name=lev,
hoverinfo="none",
)
plot_data.append(county_data)
if show_hover:
hover_points = dict(
type="scatter",
showlegend=False,
legendgroup="centroids",
x=x_centroids,
y=y_centroids,
text=centroid_text,
name="US Counties",
mode="markers",
marker={"color": "white", "opacity": 0},
hoverinfo="text",
)
centroids_on_select = dict(
selected=dict(marker=centroid_marker),
unselected=dict(marker=dict(opacity=0)),
)
hover_points.update(centroids_on_select)
plot_data.append(hover_points)
if show_state_data:
state_data = dict(
type="scatter",
legendgroup="States",
line=state_outline,
x=x_states,
y=y_states,
hoverinfo="text",
showlegend=False,
mode="lines",
)
plot_data.append(state_data)
DEFAULT_LAYOUT = dict(
hovermode="closest",
xaxis=dict(
autorange=False,
range=USA_XRANGE,
showgrid=False,
zeroline=False,
fixedrange=True,
showticklabels=False,
),
yaxis=dict(
autorange=False,
range=USA_YRANGE,
showgrid=False,
zeroline=False,
fixedrange=True,
showticklabels=False,
),
margin=dict(t=40, b=20, r=20, l=20),
width=900,
height=450,
dragmode="select",
legend=dict(traceorder="reversed", xanchor="right", yanchor="top", x=1, y=1),
annotations=[],
)
fig = dict(data=plot_data, layout=DEFAULT_LAYOUT)
fig["layout"].update(layout_options)
fig["layout"]["annotations"].append(
dict(
x=1,
y=1.05,
xref="paper",
yref="paper",
xanchor="right",
showarrow=False,
text="<b>" + legend_title + "</b>",
)
)
if len(scope) == 1 and scope[0].lower() == "usa":
xaxis_range_low = -125.0
xaxis_range_high = -55.0
yaxis_range_low = 25.0
yaxis_range_high = 49.0
else:
xaxis_range_low = float("inf")
xaxis_range_high = float("-inf")
yaxis_range_low = float("inf")
yaxis_range_high = float("-inf")
for trace in fig["data"]:
if all(isinstance(n, Number) for n in trace["x"]):
calc_x_min = min(trace["x"] or [float("inf")])
calc_x_max = max(trace["x"] or [float("-inf")])
if calc_x_min < xaxis_range_low:
xaxis_range_low = calc_x_min
if calc_x_max > xaxis_range_high:
xaxis_range_high = calc_x_max
if all(isinstance(n, Number) for n in trace["y"]):
calc_y_min = min(trace["y"] or [float("inf")])
calc_y_max = max(trace["y"] or [float("-inf")])
if calc_y_min < yaxis_range_low:
yaxis_range_low = calc_y_min
if calc_y_max > yaxis_range_high:
yaxis_range_high = calc_y_max
# camera zoom
fig["layout"]["xaxis"]["range"] = [xaxis_range_low, xaxis_range_high]
fig["layout"]["yaxis"]["range"] = [yaxis_range_low, yaxis_range_high]
# aspect ratio
if asp is None:
usa_x_range = USA_XRANGE[1] - USA_XRANGE[0]
usa_y_range = USA_YRANGE[1] - USA_YRANGE[0]
asp = usa_x_range / usa_y_range
# compute width, height, and center from the current axis ranges
width = float(
fig["layout"]["xaxis"]["range"][1] - fig["layout"]["xaxis"]["range"][0]
)
height = float(
fig["layout"]["yaxis"]["range"][1] - fig["layout"]["yaxis"]["range"][0]
)
center = (
sum(fig["layout"]["xaxis"]["range"]) / 2.0,
sum(fig["layout"]["yaxis"]["range"]) / 2.0,
)
if height / width > (1 / asp):
new_width = asp * height
fig["layout"]["xaxis"]["range"][0] = center[0] - new_width * 0.5
fig["layout"]["xaxis"]["range"][1] = center[0] + new_width * 0.5
else:
new_height = (1 / asp) * width
fig["layout"]["yaxis"]["range"][0] = center[1] - new_height * 0.5
fig["layout"]["yaxis"]["range"][1] = center[1] + new_height * 0.5
return go.Figure(fig)
| _county_choropleth.create_choropleth | File-Level |
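The binning step above hinges on `utils.endpts_to_intervals`, which turns n ascending endpoints into n + 1 intervals unbounded at the ends (the docstring's [4, 6, 8] example yields four bins). A minimal re-implementation of that documented behavior, for illustration only:

import math

def endpts_to_intervals(endpts):
    # n ascending endpoints -> n + 1 intervals, open at -inf and +inf,
    # matching the membership test inter[0] < value <= inter[1] above.
    bounds = [-math.inf] + list(endpts) + [math.inf]
    return [[lo, hi] for lo, hi in zip(bounds, bounds[1:])]

print(endpts_to_intervals([4, 6, 8]))
# [[-inf, 4], [4, 6], [6, 8], [8, inf]]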
plotly.py | 6 | packages/python/plotly/plotly/graph_objs/parcoords/_dimension.py | def __init__(
self,
arg=None,
constraintrange=None,
label=None,
multiselect=None,
name=None,
range=None,
templateitemname=None,
tickformat=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
values=None,
valuessrc=None,
visible=None,
**kwargs,
):
"""
Construct a new Dimension object
The dimensions (variables) of the parallel coordinates chart.
2..60 dimensions are supported.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.parcoords.Dimension`
constraintrange
The domain range to which the filter on the dimension
is constrained. Must be an array of `[fromValue,
toValue]` with `fromValue <= toValue`, or if
`multiselect` is not disabled, you may give an array of
arrays, where each inner array is `[fromValue,
toValue]`.
label
The shown name of the dimension.
multiselect
Do we allow multiple selection ranges or just a single
range?
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
range
The domain range that represents the full, shown axis
extent. Defaults to the `values` extent. Must be an
array of `[fromValue, toValue]` with finite numbers as
elements.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
ticktext
Sets the text displayed at the ticks position via
`tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
values
Dimension values. `values[n]` represents the value of
the `n`th point in the dataset, therefore the `values`
vector for all dimensions must be the same (longer
vectors will be truncated). Each value must be a finite
number.
valuessrc
Sets the source reference on Chart Studio Cloud for
`values`.
visible
Shows the dimension when set to `true` (the default).
Hides the dimension for `false`.
Returns
-------
Dimension
"""
| /usr/src/app/target_test_cases/failed_tests__dimension.Dimension.__init__.txt | def __init__(
self,
arg=None,
constraintrange=None,
label=None,
multiselect=None,
name=None,
range=None,
templateitemname=None,
tickformat=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
values=None,
valuessrc=None,
visible=None,
**kwargs,
):
"""
Construct a new Dimension object
The dimensions (variables) of the parallel coordinates chart.
2..60 dimensions are supported.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.parcoords.Dimension`
constraintrange
The domain range to which the filter on the dimension
is constrained. Must be an array of `[fromValue,
toValue]` with `fromValue <= toValue`, or if
`multiselect` is not disabled, you may give an array of
arrays, where each inner array is `[fromValue,
toValue]`.
label
The shown name of the dimension.
multiselect
Do we allow multiple selection ranges or just a single
range?
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
range
The domain range that represents the full, shown axis
extent. Defaults to the `values` extent. Must be an
array of `[fromValue, toValue]` with finite numbers as
elements.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
ticktext
Sets the text displayed at the ticks position via
`tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
values
Dimension values. `values[n]` represents the value of
the `n`th point in the dataset, therefore the `values`
vector for all dimensions must be the same (longer
vectors will be truncated). Each value must be a finite
number.
valuessrc
Sets the source reference on Chart Studio Cloud for
`values`.
visible
Shows the dimension when set to `true` (the default).
Hides the dimension for `false`.
Returns
-------
Dimension
"""
super(Dimension, self).__init__("dimensions")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.parcoords.Dimension
constructor must be a dict or
an instance of :class:`plotly.graph_objs.parcoords.Dimension`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("constraintrange", None)
_v = constraintrange if constraintrange is not None else _v
if _v is not None:
self["constraintrange"] = _v
_v = arg.pop("label", None)
_v = label if label is not None else _v
if _v is not None:
self["label"] = _v
_v = arg.pop("multiselect", None)
_v = multiselect if multiselect is not None else _v
if _v is not None:
self["multiselect"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("range", None)
_v = range if range is not None else _v
if _v is not None:
self["range"] = _v
_v = arg.pop("templateitemname", None)
_v = templateitemname if templateitemname is not None else _v
if _v is not None:
self["templateitemname"] = _v
_v = arg.pop("tickformat", None)
_v = tickformat if tickformat is not None else _v
if _v is not None:
self["tickformat"] = _v
_v = arg.pop("ticktext", None)
_v = ticktext if ticktext is not None else _v
if _v is not None:
self["ticktext"] = _v
_v = arg.pop("ticktextsrc", None)
_v = ticktextsrc if ticktextsrc is not None else _v
if _v is not None:
self["ticktextsrc"] = _v
_v = arg.pop("tickvals", None)
_v = tickvals if tickvals is not None else _v
if _v is not None:
self["tickvals"] = _v
_v = arg.pop("tickvalssrc", None)
_v = tickvalssrc if tickvalssrc is not None else _v
if _v is not None:
self["tickvalssrc"] = _v
_v = arg.pop("values", None)
_v = values if values is not None else _v
if _v is not None:
self["values"] = _v
_v = arg.pop("valuessrc", None)
_v = valuessrc if valuessrc is not None else _v
if _v is not None:
self["valuessrc"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| _dimension.Dimension.__init__ | Self-Contained |
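In practice, `Dimension` objects are usually built implicitly from dicts passed to `go.Parcoords`, with each dict validated by the constructor above. A minimal sketch with invented data:

import plotly.graph_objects as go

fig = go.Figure(
    go.Parcoords(
        dimensions=[
            dict(label="height", values=[150, 160, 170, 180]),
            dict(
                label="weight",
                values=[50, 60, 70, 80],
                # Restrict the initial filter to [fromValue, toValue].
                constraintrange=[55, 75],
            ),
        ]
    )
)
fig.show()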
plotly.py | 7 | packages/python/plotly/plotly/figure_factory/_distplot.py | def create_distplot(
hist_data,
group_labels,
bin_size=1.0,
curve_type="kde",
colors=None,
rug_text=None,
histnorm=DEFAULT_HISTNORM,
show_hist=True,
show_curve=True,
show_rug=True,
):
"""
Function that creates a distplot similar to seaborn.distplot;
**this function is deprecated**, use instead :mod:`plotly.express`
functions, for example
>>> import plotly.express as px
>>> tips = px.data.tips()
>>> fig = px.histogram(tips, x="total_bill", y="tip", color="sex", marginal="rug",
... hover_data=tips.columns)
>>> fig.show()
The distplot can be composed of all or any combination of the following
3 components: (1) histogram, (2) curve: (a) kernel density estimation
or (b) normal curve, and (3) rug plot. Additionally, multiple distplots
(from multiple datasets) can be created in the same plot.
:param (list[list]) hist_data: Use list of lists to plot multiple data
sets on the same plot.
:param (list[str]) group_labels: Names for each data set.
:param (list[float]|float) bin_size: Size of histogram bins.
Default = 1.
:param (str) curve_type: 'kde' or 'normal'. Default = 'kde'
:param (str) histnorm: 'probability density' or 'probability'
Default = 'probability density'
:param (bool) show_hist: Add histogram to distplot? Default = True
:param (bool) show_curve: Add curve to distplot? Default = True
:param (bool) show_rug: Add rug to distplot? Default = True
:param (list[str]) colors: Colors for traces.
:param (list[list]) rug_text: Hovertext values for rug_plot.
:return (dict): Representation of a distplot figure.
Example 1: Simple distplot of 1 data set
>>> from plotly.figure_factory import create_distplot
>>> hist_data = [[1.1, 1.1, 2.5, 3.0, 3.5,
... 3.5, 4.1, 4.4, 4.5, 4.5,
... 5.0, 5.0, 5.2, 5.5, 5.5,
... 5.5, 5.5, 5.5, 6.1, 7.0]]
>>> group_labels = ['distplot example']
>>> fig = create_distplot(hist_data, group_labels)
>>> fig.show()
Example 2: Two data sets and added rug text
>>> from plotly.figure_factory import create_distplot
>>> # Add histogram data
>>> hist1_x = [0.8, 1.2, 0.2, 0.6, 1.6,
... -0.9, -0.07, 1.95, 0.9, -0.2,
... -0.5, 0.3, 0.4, -0.37, 0.6]
>>> hist2_x = [0.8, 1.5, 1.5, 0.6, 0.59,
... 1.0, 0.8, 1.7, 0.5, 0.8,
... -0.3, 1.2, 0.56, 0.3, 2.2]
>>> # Group data together
>>> hist_data = [hist1_x, hist2_x]
>>> group_labels = ['2012', '2013']
>>> # Add text
>>> rug_text_1 = ['a1', 'b1', 'c1', 'd1', 'e1',
... 'f1', 'g1', 'h1', 'i1', 'j1',
... 'k1', 'l1', 'm1', 'n1', 'o1']
>>> rug_text_2 = ['a2', 'b2', 'c2', 'd2', 'e2',
... 'f2', 'g2', 'h2', 'i2', 'j2',
... 'k2', 'l2', 'm2', 'n2', 'o2']
>>> # Group text together
>>> rug_text_all = [rug_text_1, rug_text_2]
>>> # Create distplot
>>> fig = create_distplot(
... hist_data, group_labels, rug_text=rug_text_all, bin_size=.2)
>>> # Add title
>>> fig.update_layout(title='Dist Plot') # doctest: +SKIP
>>> fig.show()
Example 3: Plot with normal curve and hide rug plot
>>> from plotly.figure_factory import create_distplot
>>> import numpy as np
>>> x1 = np.random.randn(190)
>>> x2 = np.random.randn(200)+1
>>> x3 = np.random.randn(200)-1
>>> x4 = np.random.randn(210)+2
>>> hist_data = [x1, x2, x3, x4]
>>> group_labels = ['2012', '2013', '2014', '2015']
>>> fig = create_distplot(
... hist_data, group_labels, curve_type='normal',
... show_rug=False, bin_size=.4)
Example 4: Distplot with Pandas
>>> from plotly.figure_factory import create_distplot
>>> import numpy as np
>>> import pandas as pd
>>> df = pd.DataFrame({'2012': np.random.randn(200),
... '2013': np.random.randn(200)+1})
>>> fig = create_distplot([df[c] for c in df.columns], df.columns)
>>> fig.show()
"""
| /usr/src/app/target_test_cases/failed_tests__distplot.create_distplot.txt | def create_distplot(
hist_data,
group_labels,
bin_size=1.0,
curve_type="kde",
colors=None,
rug_text=None,
histnorm=DEFAULT_HISTNORM,
show_hist=True,
show_curve=True,
show_rug=True,
):
"""
Function that creates a distplot similar to seaborn.distplot;
**this function is deprecated**, use instead :mod:`plotly.express`
functions, for example
>>> import plotly.express as px
>>> tips = px.data.tips()
>>> fig = px.histogram(tips, x="total_bill", y="tip", color="sex", marginal="rug",
... hover_data=tips.columns)
>>> fig.show()
The distplot can be composed of all or any combination of the following
3 components: (1) histogram, (2) curve: (a) kernel density estimation
or (b) normal curve, and (3) rug plot. Additionally, multiple distplots
(from multiple datasets) can be created in the same plot.
:param (list[list]) hist_data: Use list of lists to plot multiple data
sets on the same plot.
:param (list[str]) group_labels: Names for each data set.
:param (list[float]|float) bin_size: Size of histogram bins.
Default = 1.
:param (str) curve_type: 'kde' or 'normal'. Default = 'kde'
:param (str) histnorm: 'probability density' or 'probability'
Default = 'probability density'
:param (bool) show_hist: Add histogram to distplot? Default = True
:param (bool) show_curve: Add curve to distplot? Default = True
:param (bool) show_rug: Add rug to distplot? Default = True
:param (list[str]) colors: Colors for traces.
:param (list[list]) rug_text: Hovertext values for rug_plot.
:return (dict): Representation of a distplot figure.
Example 1: Simple distplot of 1 data set
>>> from plotly.figure_factory import create_distplot
>>> hist_data = [[1.1, 1.1, 2.5, 3.0, 3.5,
... 3.5, 4.1, 4.4, 4.5, 4.5,
... 5.0, 5.0, 5.2, 5.5, 5.5,
... 5.5, 5.5, 5.5, 6.1, 7.0]]
>>> group_labels = ['distplot example']
>>> fig = create_distplot(hist_data, group_labels)
>>> fig.show()
Example 2: Two data sets and added rug text
>>> from plotly.figure_factory import create_distplot
>>> # Add histogram data
>>> hist1_x = [0.8, 1.2, 0.2, 0.6, 1.6,
... -0.9, -0.07, 1.95, 0.9, -0.2,
... -0.5, 0.3, 0.4, -0.37, 0.6]
>>> hist2_x = [0.8, 1.5, 1.5, 0.6, 0.59,
... 1.0, 0.8, 1.7, 0.5, 0.8,
... -0.3, 1.2, 0.56, 0.3, 2.2]
>>> # Group data together
>>> hist_data = [hist1_x, hist2_x]
>>> group_labels = ['2012', '2013']
>>> # Add text
>>> rug_text_1 = ['a1', 'b1', 'c1', 'd1', 'e1',
... 'f1', 'g1', 'h1', 'i1', 'j1',
... 'k1', 'l1', 'm1', 'n1', 'o1']
>>> rug_text_2 = ['a2', 'b2', 'c2', 'd2', 'e2',
... 'f2', 'g2', 'h2', 'i2', 'j2',
... 'k2', 'l2', 'm2', 'n2', 'o2']
>>> # Group text together
>>> rug_text_all = [rug_text_1, rug_text_2]
>>> # Create distplot
>>> fig = create_distplot(
... hist_data, group_labels, rug_text=rug_text_all, bin_size=.2)
>>> # Add title
>>> fig.update_layout(title='Dist Plot') # doctest: +SKIP
>>> fig.show()
Example 3: Plot with normal curve and hide rug plot
>>> from plotly.figure_factory import create_distplot
>>> import numpy as np
>>> x1 = np.random.randn(190)
>>> x2 = np.random.randn(200)+1
>>> x3 = np.random.randn(200)-1
>>> x4 = np.random.randn(210)+2
>>> hist_data = [x1, x2, x3, x4]
>>> group_labels = ['2012', '2013', '2014', '2015']
>>> fig = create_distplot(
... hist_data, group_labels, curve_type='normal',
... show_rug=False, bin_size=.4)
Example 4: Distplot with Pandas
>>> from plotly.figure_factory import create_distplot
>>> import numpy as np
>>> import pandas as pd
>>> df = pd.DataFrame({'2012': np.random.randn(200),
... '2013': np.random.randn(200)+1})
>>> fig = create_distplot([df[c] for c in df.columns], df.columns)
>>> fig.show()
"""
if colors is None:
colors = []
if rug_text is None:
rug_text = []
validate_distplot(hist_data, curve_type)
utils.validate_equal_length(hist_data, group_labels)
if isinstance(bin_size, (float, int)):
bin_size = [bin_size] * len(hist_data)
data = []
if show_hist:
hist = _Distplot(
hist_data,
histnorm,
group_labels,
bin_size,
curve_type,
colors,
rug_text,
show_hist,
show_curve,
).make_hist()
data.append(hist)
if show_curve:
if curve_type == "normal":
curve = _Distplot(
hist_data,
histnorm,
group_labels,
bin_size,
curve_type,
colors,
rug_text,
show_hist,
show_curve,
).make_normal()
else:
curve = _Distplot(
hist_data,
histnorm,
group_labels,
bin_size,
curve_type,
colors,
rug_text,
show_hist,
show_curve,
).make_kde()
data.append(curve)
if show_rug:
rug = _Distplot(
hist_data,
histnorm,
group_labels,
bin_size,
curve_type,
colors,
rug_text,
show_hist,
show_curve,
).make_rug()
data.append(rug)
layout = graph_objs.Layout(
barmode="overlay",
hovermode="closest",
legend=dict(traceorder="reversed"),
xaxis1=dict(domain=[0.0, 1.0], anchor="y2", zeroline=False),
yaxis1=dict(domain=[0.35, 1], anchor="free", position=0.0),
yaxis2=dict(domain=[0, 0.25], anchor="x1", dtick=1, showticklabels=False),
)
else:
layout = graph_objs.Layout(
barmode="overlay",
hovermode="closest",
legend=dict(traceorder="reversed"),
xaxis1=dict(domain=[0.0, 1.0], anchor="y2", zeroline=False),
yaxis1=dict(domain=[0.0, 1], anchor="free", position=0.0),
)
data = sum(data, [])
return graph_objs.Figure(data=data, layout=layout)
| _distplot.create_distplot | File-Level |
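Each `make_*` helper above returns one trace per input group, so `data` ends up as a list of lists; the closing `sum(data, [])` flattens it in draw order (histograms, then curves, then rugs) before the figure is built. The idiom in isolation:

# sum(list_of_lists, []) concatenates the inner lists in order.
hist_traces = [{"name": "2012 hist"}, {"name": "2013 hist"}]
curve_traces = [{"name": "2012 kde"}, {"name": "2013 kde"}]
flat = sum([hist_traces, curve_traces], [])
print([t["name"] for t in flat])
# ['2012 hist', '2013 hist', '2012 kde', '2013 kde']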
plotly.py | 8 | packages/python/plotly/plotly/figure_factory/_facet_grid.py | def create_facet_grid(
df,
x=None,
y=None,
facet_row=None,
facet_col=None,
color_name=None,
colormap=None,
color_is_cat=False,
facet_row_labels=None,
facet_col_labels=None,
height=None,
width=None,
trace_type="scatter",
scales="fixed",
dtick_x=None,
dtick_y=None,
show_boxes=True,
ggplot2=False,
binsize=1,
**kwargs,
):
"""
Returns figure for facet grid; **this function is deprecated**, since
plotly.express functions should be used instead, for example
>>> import plotly.express as px
>>> tips = px.data.tips()
>>> fig = px.scatter(tips,
... x='total_bill',
... y='tip',
... facet_row='sex',
... facet_col='smoker',
... color='size')
:param (pd.DataFrame) df: the dataframe of columns for the facet grid.
:param (str) x: the name of the dataframe column for the x axis data.
:param (str) y: the name of the dataframe column for the y axis data.
:param (str) facet_row: the name of the dataframe column that is used to
facet the grid into row panels.
:param (str) facet_col: the name of the dataframe column that is used to
facet the grid into column panels.
:param (str) color_name: the name of your dataframe column that will
function as the colormap variable.
:param (str|list|dict) colormap: the param that determines how the
color_name column colors the data. If the dataframe contains numeric
data, then a dictionary of colors will group the data categorically
while a Plotly Colorscale name or a custom colorscale will treat it
numerically. To learn more about colors and types of colormap, run
`help(plotly.colors)`.
:param (bool) color_is_cat: determines whether a numerical column for the
colormap will be treated as categorical (True) or sequential (False).
Default = False.
:param (str|dict) facet_row_labels: set to either 'name' or a dictionary
of all the unique values in the faceting row mapped to some text to
show up in the label annotations. If None, labeling works like usual.
:param (str|dict) facet_col_labels: set to either 'name' or a dictionary
of all the values in the faceting row mapped to some text to show up
in the label annotations. If None, labeling works like usual.
:param (int) height: the height of the facet grid figure.
:param (int) width: the width of the facet grid figure.
:param (str) trace_type: decides the type of plot to appear in the
facet grid. The options are 'scatter', 'scattergl', 'histogram',
'bar', and 'box'.
Default = 'scatter'.
:param (str) scales: determines if axes have fixed ranges or not. Valid
settings are 'fixed' (all axes fixed), 'free_x' (x axis free only),
'free_y' (y axis free only) or 'free' (both axes free).
:param (float) dtick_x: determines the distance between each tick on the
x-axis. Default is None which means dtick_x is set automatically.
:param (float) dtick_y: determines the distance between each tick on the
y-axis. Default is None which means dtick_y is set automatically.
:param (bool) show_boxes: draws grey boxes behind the facet titles.
:param (bool) ggplot2: draws the facet grid in the style of `ggplot2`. See
http://ggplot2.tidyverse.org/reference/facet_grid.html for reference.
Default = False
:param (int) binsize: groups all data into bins of a given length.
:param (dict) kwargs: a dictionary of scatterplot arguments.
Example 1: One Way Faceting
>>> import plotly.figure_factory as ff
>>> import pandas as pd
>>> mpg = pd.read_table('https://raw.githubusercontent.com/plotly/datasets/master/mpg_2017.txt')
>>> fig = ff.create_facet_grid(
... mpg,
... x='displ',
... y='cty',
... facet_col='cyl',
... )
>>> fig.show()
Example 2: Two Way Faceting
>>> import plotly.figure_factory as ff
>>> import pandas as pd
>>> mpg = pd.read_table('https://raw.githubusercontent.com/plotly/datasets/master/mpg_2017.txt')
>>> fig = ff.create_facet_grid(
... mpg,
... x='displ',
... y='cty',
... facet_row='drv',
... facet_col='cyl',
... )
>>> fig.show()
Example 3: Categorical Coloring
>>> import plotly.figure_factory as ff
>>> import pandas as pd
>>> mtcars = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/mtcars.csv')
>>> mtcars.cyl = mtcars.cyl.astype(str)
>>> fig = ff.create_facet_grid(
... mtcars,
... x='mpg',
... y='wt',
... facet_col='cyl',
... color_name='cyl',
... color_is_cat=True,
... )
>>> fig.show()
"""
| /usr/src/app/target_test_cases/failed_tests__facet_grid.create_facet_grid.txt | def create_facet_grid(
df,
x=None,
y=None,
facet_row=None,
facet_col=None,
color_name=None,
colormap=None,
color_is_cat=False,
facet_row_labels=None,
facet_col_labels=None,
height=None,
width=None,
trace_type="scatter",
scales="fixed",
dtick_x=None,
dtick_y=None,
show_boxes=True,
ggplot2=False,
binsize=1,
**kwargs,
):
"""
Returns figure for facet grid; **this function is deprecated**, since
plotly.express functions should be used instead, for example
>>> import plotly.express as px
>>> tips = px.data.tips()
>>> fig = px.scatter(tips,
... x='total_bill',
... y='tip',
... facet_row='sex',
... facet_col='smoker',
... color='size')
:param (pd.DataFrame) df: the dataframe of columns for the facet grid.
:param (str) x: the name of the dataframe column for the x axis data.
:param (str) y: the name of the dataframe column for the y axis data.
:param (str) facet_row: the name of the dataframe column that is used to
facet the grid into row panels.
:param (str) facet_col: the name of the dataframe column that is used to
facet the grid into column panels.
:param (str) color_name: the name of your dataframe column that will
function as the colormap variable.
:param (str|list|dict) colormap: the param that determines how the
color_name column colors the data. If the dataframe contains numeric
data, then a dictionary of colors will group the data categorically
while a Plotly Colorscale name or a custom colorscale will treat it
numerically. To learn more about colors and types of colormap, run
`help(plotly.colors)`.
:param (bool) color_is_cat: determines whether a numerical column for the
colormap will be treated as categorical (True) or sequential (False).
Default = False.
:param (str|dict) facet_row_labels: set to either 'name' or a dictionary
of all the unique values in the faceting row mapped to some text to
show up in the label annotations. If None, labeling works like usual.
:param (str|dict) facet_col_labels: set to either 'name' or a dictionary
of all the values in the faceting row mapped to some text to show up
in the label annotations. If None, labeling works like usual.
:param (int) height: the height of the facet grid figure.
:param (int) width: the width of the facet grid figure.
:param (str) trace_type: decides the type of plot to appear in the
facet grid. The options are 'scatter', 'scattergl', 'histogram',
'bar', and 'box'.
Default = 'scatter'.
:param (str) scales: determines if axes have fixed ranges or not. Valid
settings are 'fixed' (all axes fixed), 'free_x' (x axis free only),
'free_y' (y axis free only) or 'free' (both axes free).
:param (float) dtick_x: determines the distance between each tick on the
x-axis. Default is None which means dtick_x is set automatically.
:param (float) dtick_y: determines the distance between each tick on the
y-axis. Default is None which means dtick_y is set automatically.
:param (bool) show_boxes: draws grey boxes behind the facet titles.
:param (bool) ggplot2: draws the facet grid in the style of `ggplot2`. See
http://ggplot2.tidyverse.org/reference/facet_grid.html for reference.
Default = False
:param (int) binsize: groups all data into bins of a given length.
:param (dict) kwargs: a dictionary of scatterplot arguments.
Example 1: One Way Faceting
>>> import plotly.figure_factory as ff
>>> import pandas as pd
>>> mpg = pd.read_table('https://raw.githubusercontent.com/plotly/datasets/master/mpg_2017.txt')
>>> fig = ff.create_facet_grid(
... mpg,
... x='displ',
... y='cty',
... facet_col='cyl',
... )
>>> fig.show()
Example 2: Two Way Faceting
>>> import plotly.figure_factory as ff
>>> import pandas as pd
>>> mpg = pd.read_table('https://raw.githubusercontent.com/plotly/datasets/master/mpg_2017.txt')
>>> fig = ff.create_facet_grid(
... mpg,
... x='displ',
... y='cty',
... facet_row='drv',
... facet_col='cyl',
... )
>>> fig.show()
Example 3: Categorical Coloring
>>> import plotly.figure_factory as ff
>>> import pandas as pd
>>> mtcars = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/mtcars.csv')
>>> mtcars.cyl = mtcars.cyl.astype(str)
>>> fig = ff.create_facet_grid(
... mtcars,
... x='mpg',
... y='wt',
... facet_col='cyl',
... color_name='cyl',
... color_is_cat=True,
... )
>>> fig.show()
"""
if not pd:
raise ImportError("'pandas' must be installed for this figure_factory.")
if not isinstance(df, pd.DataFrame):
raise exceptions.PlotlyError("You must input a pandas DataFrame.")
# make sure all columns are of homogeneous datatype
utils.validate_dataframe(df)
if trace_type in ["scatter", "scattergl"]:
if not x or not y:
raise exceptions.PlotlyError(
"You need to input 'x' and 'y' if you are you are using a "
"trace_type of 'scatter' or 'scattergl'."
)
for key in [x, y, facet_row, facet_col, color_name]:
if key is not None:
try:
df[key]
except KeyError:
raise exceptions.PlotlyError(
"x, y, facet_row, facet_col and color_name must be keys "
"in your dataframe."
)
# autoscale histogram bars
if trace_type not in ["scatter", "scattergl"]:
scales = "free"
# validate scales
if scales not in ["fixed", "free_x", "free_y", "free"]:
raise exceptions.PlotlyError(
"'scales' must be set to 'fixed', 'free_x', 'free_y' and 'free'."
)
if trace_type not in VALID_TRACE_TYPES:
raise exceptions.PlotlyError(
"'trace_type' must be in {}".format(VALID_TRACE_TYPES)
)
if trace_type == "histogram":
SUBPLOT_SPACING = 0.06
else:
SUBPLOT_SPACING = 0.015
# separate kwargs for marker and the rest
if "marker" in kwargs:
kwargs_marker = kwargs["marker"]
else:
kwargs_marker = {}
marker_color = kwargs_marker.pop("color", None)
kwargs.pop("marker", None)
kwargs_trace = kwargs
if "size" not in kwargs_marker:
if ggplot2:
kwargs_marker["size"] = 5
else:
kwargs_marker["size"] = 8
if "opacity" not in kwargs_marker:
if not ggplot2:
kwargs_trace["opacity"] = 0.6
if "line" not in kwargs_marker:
if not ggplot2:
kwargs_marker["line"] = {"color": "darkgrey", "width": 1}
else:
kwargs_marker["line"] = {}
# default marker color
if not ggplot2:
if not marker_color:
marker_color = "rgb(31, 119, 180)"
else:
marker_color = "rgb(0, 0, 0)"
num_of_rows = 1
num_of_cols = 1
flipped_rows = False
flipped_cols = False
if facet_row:
num_of_rows = len(df[facet_row].unique())
flipped_rows = _is_flipped(num_of_rows)
if isinstance(facet_row_labels, dict):
for key in df[facet_row].unique():
if key not in facet_row_labels.keys():
unique_keys = df[facet_row].unique().tolist()
raise exceptions.PlotlyError(CUSTOM_LABEL_ERROR.format(unique_keys))
if facet_col:
num_of_cols = len(df[facet_col].unique())
flipped_cols = _is_flipped(num_of_cols)
if isinstance(facet_col_labels, dict):
for key in df[facet_col].unique():
if key not in facet_col_labels.keys():
unique_keys = df[facet_col].unique().tolist()
raise exceptions.PlotlyError(CUSTOM_LABEL_ERROR.format(unique_keys))
show_legend = False
if color_name:
if isinstance(df[color_name].iloc[0], str) or color_is_cat:
show_legend = True
if isinstance(colormap, dict):
clrs.validate_colors_dict(colormap, "rgb")
for val in df[color_name].unique():
if val not in colormap.keys():
raise exceptions.PlotlyError(
"If using 'colormap' as a dictionary, make sure "
"all the values of the colormap column are in "
"the keys of your dictionary."
)
else:
# use default plotly colors for dictionary
default_colors = clrs.DEFAULT_PLOTLY_COLORS
colormap = {}
j = 0
for val in df[color_name].unique():
if j >= len(default_colors):
j = 0
colormap[val] = default_colors[j]
j += 1
fig, annotations = _facet_grid_color_categorical(
df,
x,
y,
facet_row,
facet_col,
color_name,
colormap,
num_of_rows,
num_of_cols,
facet_row_labels,
facet_col_labels,
trace_type,
flipped_rows,
flipped_cols,
show_boxes,
SUBPLOT_SPACING,
marker_color,
kwargs_trace,
kwargs_marker,
)
elif isinstance(df[color_name].iloc[0], Number):
if isinstance(colormap, dict):
show_legend = True
clrs.validate_colors_dict(colormap, "rgb")
for val in df[color_name].unique():
if val not in colormap.keys():
raise exceptions.PlotlyError(
"If using 'colormap' as a dictionary, make sure "
"all the values of the colormap column are in "
"the keys of your dictionary."
)
fig, annotations = _facet_grid_color_categorical(
df,
x,
y,
facet_row,
facet_col,
color_name,
colormap,
num_of_rows,
num_of_cols,
facet_row_labels,
facet_col_labels,
trace_type,
flipped_rows,
flipped_cols,
show_boxes,
SUBPLOT_SPACING,
marker_color,
kwargs_trace,
kwargs_marker,
)
elif isinstance(colormap, list):
colorscale_list = colormap
clrs.validate_colorscale(colorscale_list)
fig, annotations = _facet_grid_color_numerical(
df,
x,
y,
facet_row,
facet_col,
color_name,
colorscale_list,
num_of_rows,
num_of_cols,
facet_row_labels,
facet_col_labels,
trace_type,
flipped_rows,
flipped_cols,
show_boxes,
SUBPLOT_SPACING,
marker_color,
kwargs_trace,
kwargs_marker,
)
elif isinstance(colormap, str):
if colormap in clrs.PLOTLY_SCALES.keys():
colorscale_list = clrs.PLOTLY_SCALES[colormap]
else:
raise exceptions.PlotlyError(
"If 'colormap' is a string, it must be the name "
"of a Plotly Colorscale. The available colorscale "
"names are {}".format(clrs.PLOTLY_SCALES.keys())
)
fig, annotations = _facet_grid_color_numerical(
df,
x,
y,
facet_row,
facet_col,
color_name,
colorscale_list,
num_of_rows,
num_of_cols,
facet_row_labels,
facet_col_labels,
trace_type,
flipped_rows,
flipped_cols,
show_boxes,
SUBPLOT_SPACING,
marker_color,
kwargs_trace,
kwargs_marker,
)
else:
colorscale_list = clrs.PLOTLY_SCALES["Reds"]
fig, annotations = _facet_grid_color_numerical(
df,
x,
y,
facet_row,
facet_col,
color_name,
colorscale_list,
num_of_rows,
num_of_cols,
facet_row_labels,
facet_col_labels,
trace_type,
flipped_rows,
flipped_cols,
show_boxes,
SUBPLOT_SPACING,
marker_color,
kwargs_trace,
kwargs_marker,
)
else:
fig, annotations = _facet_grid(
df,
x,
y,
facet_row,
facet_col,
num_of_rows,
num_of_cols,
facet_row_labels,
facet_col_labels,
trace_type,
flipped_rows,
flipped_cols,
show_boxes,
SUBPLOT_SPACING,
marker_color,
kwargs_trace,
kwargs_marker,
)
if not height:
height = max(600, 100 * num_of_rows)
if not width:
width = max(600, 100 * num_of_cols)
fig["layout"].update(
height=height, width=width, title="", paper_bgcolor="rgb(251, 251, 251)"
)
if ggplot2:
fig["layout"].update(
plot_bgcolor=PLOT_BGCOLOR,
paper_bgcolor="rgb(255, 255, 255)",
hovermode="closest",
)
# axis titles
x_title_annot = _axis_title_annotation(x, "x")
y_title_annot = _axis_title_annotation(y, "y")
# annotations
annotations.append(x_title_annot)
annotations.append(y_title_annot)
# legend
fig["layout"]["showlegend"] = show_legend
fig["layout"]["legend"]["bgcolor"] = LEGEND_COLOR
fig["layout"]["legend"]["borderwidth"] = LEGEND_BORDER_WIDTH
fig["layout"]["legend"]["x"] = 1.05
fig["layout"]["legend"]["y"] = 1
fig["layout"]["legend"]["yanchor"] = "top"
if show_legend:
fig["layout"]["showlegend"] = show_legend
if ggplot2:
if color_name:
legend_annot = _legend_annotation(color_name)
annotations.append(legend_annot)
fig["layout"]["margin"]["r"] = 150
# assign annotations to figure
fig["layout"]["annotations"] = annotations
# add shaded boxes behind axis titles
if show_boxes and ggplot2:
_add_shapes_to_fig(fig, ANNOT_RECT_COLOR, flipped_rows, flipped_cols)
# all xaxis and yaxis labels
axis_labels = {"x": [], "y": []}
for key in fig["layout"]:
if "xaxis" in key:
axis_labels["x"].append(key)
elif "yaxis" in key:
axis_labels["y"].append(key)
string_number_in_data = False
for var in [v for v in [x, y] if v]:
if isinstance(df[var].tolist()[0], str):
for item in df[var]:
try:
int(item)
string_number_in_data = True
except ValueError:
pass
if string_number_in_data:
for x_y in axis_labels.keys():
for axis_name in axis_labels[x_y]:
fig["layout"][axis_name]["type"] = "category"
if scales == "fixed":
fixed_axes = ["x", "y"]
elif scales == "free_x":
fixed_axes = ["y"]
elif scales == "free_y":
fixed_axes = ["x"]
elif scales == "free":
fixed_axes = []
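# Axes listed in fixed_axes share one range, computed below from the
# global min/max over all traces; axes left out stay free per facet.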
# fixed ranges
for x_y in fixed_axes:
min_ranges = []
max_ranges = []
for trace in fig["data"]:
if trace[x_y] is not None and len(trace[x_y]) > 0:
min_ranges.append(min(trace[x_y]))
max_ranges.append(max(trace[x_y]))
while None in min_ranges:
min_ranges.remove(None)
while None in max_ranges:
max_ranges.remove(None)
min_range = min(min_ranges)
max_range = max(max_ranges)
range_are_numbers = isinstance(min_range, Number) and isinstance(
max_range, Number
)
if range_are_numbers:
min_range = math.floor(min_range)
max_range = math.ceil(max_range)
# widen the frame by 5% on each side
min_range -= 0.05 * (max_range - min_range)
max_range += 0.05 * (max_range - min_range)
if x_y == "x":
if dtick_x:
dtick = dtick_x
else:
dtick = math.floor((max_range - min_range) / MAX_TICKS_PER_AXIS)
elif x_y == "y":
if dtick_y:
dtick = dtick_y
else:
dtick = math.floor((max_range - min_range) / MAX_TICKS_PER_AXIS)
else:
dtick = 1
for axis_title in axis_labels[x_y]:
fig["layout"][axis_title]["dtick"] = dtick
fig["layout"][axis_title]["ticklen"] = 0
fig["layout"][axis_title]["zeroline"] = False
if ggplot2:
fig["layout"][axis_title]["tickwidth"] = 1
fig["layout"][axis_title]["ticklen"] = 4
fig["layout"][axis_title]["gridwidth"] = GRID_WIDTH
fig["layout"][axis_title]["gridcolor"] = GRID_COLOR
fig["layout"][axis_title]["gridwidth"] = 2
fig["layout"][axis_title]["tickfont"] = {
"color": TICK_COLOR,
"size": 10,
}
# insert ranges into fig
if x_y in fixed_axes:
for key in fig["layout"]:
if "{}axis".format(x_y) in key and range_are_numbers:
fig["layout"][key]["range"] = [min_range, max_range]
return fig
| _facet_grid.create_facet_grid | File-Level |
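For orientation, the `create_facet_grid` solution above can be exercised roughly as follows. This is a minimal sketch with made-up data (the DataFrame and its column names are hypothetical), not part of the dataset record:
import pandas as pd
import plotly.figure_factory as ff
# Toy data: one point per (sex, smoker) facet cell.
df = pd.DataFrame({
    "total_bill": [10.3, 21.0, 23.7, 15.5],
    "tip": [1.5, 3.3, 3.5, 2.0],
    "sex": ["Male", "Female", "Male", "Female"],
    "smoker": ["No", "No", "Yes", "Yes"],
})
fig = ff.create_facet_grid(
    df,
    x="total_bill",
    y="tip",
    facet_row="sex",      # one subplot row per unique value
    facet_col="smoker",   # one subplot column per unique value
    trace_type="scatter",
    scales="fixed",       # share x and y ranges across all facets
)
fig.show()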
plotly.py | 9 | packages/python/plotly/plotly/graph_objs/_funnel.py | def __init__(
self,
arg=None,
alignmentgroup=None,
cliponaxis=None,
connector=None,
constraintext=None,
customdata=None,
customdatasrc=None,
dx=None,
dy=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
insidetextanchor=None,
insidetextfont=None,
legend=None,
legendgroup=None,
legendgrouptitle=None,
legendrank=None,
legendwidth=None,
marker=None,
meta=None,
metasrc=None,
name=None,
offset=None,
offsetgroup=None,
opacity=None,
orientation=None,
outsidetextfont=None,
selectedpoints=None,
showlegend=None,
stream=None,
text=None,
textangle=None,
textfont=None,
textinfo=None,
textposition=None,
textpositionsrc=None,
textsrc=None,
texttemplate=None,
texttemplatesrc=None,
uid=None,
uirevision=None,
visible=None,
width=None,
x=None,
x0=None,
xaxis=None,
xhoverformat=None,
xperiod=None,
xperiod0=None,
xperiodalignment=None,
xsrc=None,
y=None,
y0=None,
yaxis=None,
yhoverformat=None,
yperiod=None,
yperiod0=None,
yperiodalignment=None,
ysrc=None,
zorder=None,
**kwargs,
):
"""
Construct a new Funnel object
Visualize stages in a process using length-encoded bars. This
trace can be used to show data in either a part-to-whole
representation wherein each item appears in a single stage, or
in a "drop-off" representation wherein each item appears in
each stage it traversed. See also the "funnelarea" trace type
for a different approach to visualizing funnel data.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Funnel`
alignmentgroup
Set several traces linked to the same position axis or
matching axes to the same alignmentgroup. This controls
whether bars compute their positional range dependently
or independently.
cliponaxis
Determines whether the text nodes are clipped about the
subplot axes. To show the text nodes above axis lines
and tick labels, make sure to set `xaxis.layer` and
`yaxis.layer` to *below traces*.
connector
:class:`plotly.graph_objects.funnel.Connector` instance
or dict with compatible properties
constraintext
Constrain the size of text inside or outside a bar to
be no larger than the bar itself.
customdata
Assigns extra data to each datum. This may be useful when
listening to hover, click and selection events. Note
that "scatter" traces also append customdata items to
the markers' DOM elements.
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
dx
Sets the x coordinate step. See `x0` for more info.
dy
Sets the y coordinate step. See `y0` for more info.
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.funnel.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appears in the hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, every attribute that can be
specified per-point (the ones that are `arrayOk: true`)
is available. Finally, the template string has access
to variables `percentInitial`, `percentPrevious` and
`percentTotal`. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary
box completely, use an empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
hovertext
Sets hover text elements associated with each (x,y)
pair. If a single string, the same string appears over
all the data points. If an array of string, the items
are mapped in order to the this trace's (x,y)
coordinates. To be seen, trace `hoverinfo` must contain
a "text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
`hovertext`.
ids
Assigns id labels to each datum. These ids provide object
constancy for data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
insidetextanchor
Determines if texts are kept at center or start/end
points in `textposition` "inside" mode.
insidetextfont
Sets the font used for `text` lying inside the bar.
legend
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2",
"legend3", etc. Settings for these legends are set in
the layout, under `layout.legend`, `layout.legend2`,
etc.
legendgroup
Sets the legend group for this trace. Traces and shapes
part of the same legend group hide/show at the same
time when toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.funnel.Legendgrouptitle`
instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items. When having
unranked or equal rank items, shapes would be displayed
after traces, i.e. according to their order in data and
layout.
legendwidth
Sets the width (in px or fraction) of the legend for
this trace.
marker
:class:`plotly.graph_objects.funnel.Marker` instance or
dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`,
`rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
name
Sets the trace name. The trace name appears as the
legend item and on hover.
offset
Shifts the position where the bar is drawn (in position
axis units). In "group" barmode, traces that set
"offset" will be excluded and drawn in "overlay" mode
instead.
offsetgroup
Set several traces linked to the same position axis or
matching axes to the same offsetgroup where bars of the
same position coordinate will line up.
opacity
Sets the opacity of the trace.
orientation
Sets the orientation of the funnels. With "v" ("h"),
the value of each bar spans along the vertical
(horizontal) axis. By default, funnels tend to be
oriented horizontally, unless only the "y" array is
provided or orientation is set to "v". Also, for
graphs containing only 'horizontal' funnels, "autorange"
on the "y-axis" is set to "reversed".
outsidetextfont
Sets the font used for `text` lying outside the bar.
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
the `unselected` are turned on for all points, whereas,
any other non-array value means no selection at all, where
the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
:class:`plotly.graph_objects.funnel.Stream` instance or
dict with compatible properties
text
Sets text elements associated with each (x,y) pair. If
a single string, the same string appears over all the
data points. If an array of strings, the items are
mapped in order to this trace's (x,y) coordinates.
If trace `hoverinfo` contains a "text" flag and
"hovertext" is not set, these elements will be seen in
the hover labels.
textangle
Sets the angle of the tick labels with respect to the
bar. For example, a `tickangle` of -90 draws the tick
labels vertically. With "auto" the texts may
automatically be rotated to fit with the maximum size
in bars.
textfont
Sets the font used for `text`.
textinfo
Determines which trace information appear on the graph.
In the case of having multiple funnels, percentages &
totals are computed separately (per trace).
textposition
Specifies the location of the `text`. "inside"
positions `text` inside, next to the bar end (rotated
and scaled if needed). "outside" positions `text`
outside, next to the bar end (scaled if needed), unless
there is another bar stacked on this one, then the text
gets pushed inside. "auto" tries to position `text`
inside the bar, but if the bar is too small and no bar
is stacked on this one the text is moved outside. If
"none", no text appears.
textpositionsrc
Sets the source reference on Chart Studio Cloud for
`textposition`.
textsrc
Sets the source reference on Chart Studio Cloud for
`text`.
texttemplate
Template string used for rendering the information text
that appears on points. Note that this will override
`textinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Every attribute that can be
specified per-point (the ones that are `arrayOk: true`)
is available. Finally, the template string has access
to variables `percentInitial`, `percentPrevious`,
`percentTotal`, `label` and `value`.
texttemplatesrc
Sets the source reference on Chart Studio Cloud for
`texttemplate`.
uid
Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
width
Sets the bar width (in position axis units).
x
Sets the x coordinates.
x0
Alternate to `x`. Builds a linear space of x
coordinates. Use with `dx` where `x0` is the starting
coordinate and `dx` the step.
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
xhoverformat
Sets the hover text formatting rule for `x` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*. By default the values are
formatted using `xaxis.hoverformat`.
xperiod
Only relevant when the axis `type` is "date". Sets the
period positioning in milliseconds or "M<n>" on the x
axis. Special values in the form of "M<n>" could be
used to declare the number of months. In this case `n`
must be a positive integer.
xperiod0
Only relevant when the axis `type` is "date". Sets the
base for period positioning in milliseconds or date
string on the x0 axis. When `x0period` is a round number
of weeks, the `x0period0` by default would be on a
Sunday i.e. 2000-01-02, otherwise it would be at
2000-01-01.
xperiodalignment
Only relevant when the axis `type` is "date". Sets the
alignment of data points on the x axis.
xsrc
Sets the source reference on Chart Studio Cloud for
`x`.
y
Sets the y coordinates.
y0
Alternate to `y`. Builds a linear space of y
coordinates. Use with `dy` where `y0` is the starting
coordinate and `dy` the step.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
yhoverformat
Sets the hover text formatting rule for `y` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*. By default the values are
formatted using `yaxis.hoverformat`.
yperiod
Only relevant when the axis `type` is "date". Sets the
period positioning in milliseconds or "M<n>" on the y
axis. Special values in the form of "M<n>" could be
used to declare the number of months. In this case `n`
must be a positive integer.
yperiod0
Only relevant when the axis `type` is "date". Sets the
base for period positioning in milliseconds or date
string on the y0 axis. When `y0period` is a round number
of weeks, the `y0period0` by default would be on a
Sunday i.e. 2000-01-02, otherwise it would be at
2000-01-01.
yperiodalignment
Only relevant when the axis `type` is "date". Sets the
alignment of data points on the y axis.
ysrc
Sets the source reference on Chart Studio Cloud for
`y`.
zorder
Sets the layer on which this trace is displayed,
relative to other SVG traces on the same subplot. SVG
traces with higher `zorder` appear in front of those
with lower `zorder`.
Returns
-------
Funnel
"""
| /usr/src/app/target_test_cases/failed_tests__funnel.Funnel.__init__.txt | def __init__(
self,
arg=None,
alignmentgroup=None,
cliponaxis=None,
connector=None,
constraintext=None,
customdata=None,
customdatasrc=None,
dx=None,
dy=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
insidetextanchor=None,
insidetextfont=None,
legend=None,
legendgroup=None,
legendgrouptitle=None,
legendrank=None,
legendwidth=None,
marker=None,
meta=None,
metasrc=None,
name=None,
offset=None,
offsetgroup=None,
opacity=None,
orientation=None,
outsidetextfont=None,
selectedpoints=None,
showlegend=None,
stream=None,
text=None,
textangle=None,
textfont=None,
textinfo=None,
textposition=None,
textpositionsrc=None,
textsrc=None,
texttemplate=None,
texttemplatesrc=None,
uid=None,
uirevision=None,
visible=None,
width=None,
x=None,
x0=None,
xaxis=None,
xhoverformat=None,
xperiod=None,
xperiod0=None,
xperiodalignment=None,
xsrc=None,
y=None,
y0=None,
yaxis=None,
yhoverformat=None,
yperiod=None,
yperiod0=None,
yperiodalignment=None,
ysrc=None,
zorder=None,
**kwargs,
):
"""
Construct a new Funnel object
Visualize stages in a process using length-encoded bars. This
trace can be used to show data in either a part-to-whole
representation wherein each item appears in a single stage, or
in a "drop-off" representation wherein each item appears in
each stage it traversed. See also the "funnelarea" trace type
for a different approach to visualizing funnel data.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Funnel`
alignmentgroup
Set several traces linked to the same position axis or
matching axes to the same alignmentgroup. This controls
whether bars compute their positional range dependently
or independently.
cliponaxis
Determines whether the text nodes are clipped about the
subplot axes. To show the text nodes above axis lines
and tick labels, make sure to set `xaxis.layer` and
`yaxis.layer` to *below traces*.
connector
:class:`plotly.graph_objects.funnel.Connector` instance
or dict with compatible properties
constraintext
Constrain the size of text inside or outside a bar to
be no larger than the bar itself.
customdata
Assigns extra data to each datum. This may be useful when
listening to hover, click and selection events. Note
that "scatter" traces also append customdata items to
the markers' DOM elements.
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
dx
Sets the x coordinate step. See `x0` for more info.
dy
Sets the y coordinate step. See `y0` for more info.
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.funnel.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appears in the hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, every attribute that can be
specified per-point (the ones that are `arrayOk: true`)
is available. Finally, the template string has access
to variables `percentInitial`, `percentPrevious` and
`percentTotal`. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary
box completely, use an empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
hovertext
Sets hover text elements associated with each (x,y)
pair. If a single string, the same string appears over
all the data points. If an array of strings, the items
are mapped in order to this trace's (x,y)
coordinates. To be seen, trace `hoverinfo` must contain
a "text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
`hovertext`.
ids
Assigns id labels to each datum. These ids provide object
constancy for data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
insidetextanchor
Determines if texts are kept at center or start/end
points in `textposition` "inside" mode.
insidetextfont
Sets the font used for `text` lying inside the bar.
legend
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2",
"legend3", etc. Settings for these legends are set in
the layout, under `layout.legend`, `layout.legend2`,
etc.
legendgroup
Sets the legend group for this trace. Traces and shapes
part of the same legend group hide/show at the same
time when toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.funnel.Legendgrouptitle`
instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items. When having
unranked or equal rank items, shapes would be displayed
after traces, i.e. according to their order in data and
layout.
legendwidth
Sets the width (in px or fraction) of the legend for
this trace.
marker
:class:`plotly.graph_objects.funnel.Marker` instance or
dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`,
`rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
name
Sets the trace name. The trace name appears as the
legend item and on hover.
offset
Shifts the position where the bar is drawn (in position
axis units). In "group" barmode, traces that set
"offset" will be excluded and drawn in "overlay" mode
instead.
offsetgroup
Set several traces linked to the same position axis or
matching axes to the same offsetgroup where bars of the
same position coordinate will line up.
opacity
Sets the opacity of the trace.
orientation
Sets the orientation of the funnels. With "v" ("h"),
the value of each bar spans along the vertical
(horizontal) axis. By default, funnels tend to be
oriented horizontally, unless only the "y" array is
provided or orientation is set to "v". Also, for
graphs containing only 'horizontal' funnels, "autorange"
on the "y-axis" is set to "reversed".
outsidetextfont
Sets the font used for `text` lying outside the bar.
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
the `unselected` are turned on for all points, whereas,
any other non-array value means no selection at all, where
the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
:class:`plotly.graph_objects.funnel.Stream` instance or
dict with compatible properties
text
Sets text elements associated with each (x,y) pair. If
a single string, the same string appears over all the
data points. If an array of strings, the items are
mapped in order to this trace's (x,y) coordinates.
If trace `hoverinfo` contains a "text" flag and
"hovertext" is not set, these elements will be seen in
the hover labels.
textangle
Sets the angle of the tick labels with respect to the
bar. For example, a `tickangle` of -90 draws the tick
labels vertically. With "auto" the texts may
automatically be rotated to fit with the maximum size
in bars.
textfont
Sets the font used for `text`.
textinfo
Determines which trace information appear on the graph.
In the case of having multiple funnels, percentages &
totals are computed separately (per trace).
textposition
Specifies the location of the `text`. "inside"
positions `text` inside, next to the bar end (rotated
and scaled if needed). "outside" positions `text`
outside, next to the bar end (scaled if needed), unless
there is another bar stacked on this one, then the text
gets pushed inside. "auto" tries to position `text`
inside the bar, but if the bar is too small and no bar
is stacked on this one the text is moved outside. If
"none", no text appears.
textpositionsrc
Sets the source reference on Chart Studio Cloud for
`textposition`.
textsrc
Sets the source reference on Chart Studio Cloud for
`text`.
texttemplate
Template string used for rendering the information text
that appears on points. Note that this will override
`textinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Every attribute that can be
specified per-point (the ones that are `arrayOk: true`)
is available. Finally, the template string has access
to variables `percentInitial`, `percentPrevious`,
`percentTotal`, `label` and `value`.
texttemplatesrc
Sets the source reference on Chart Studio Cloud for
`texttemplate`.
uid
Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
width
Sets the bar width (in position axis units).
x
Sets the x coordinates.
x0
Alternate to `x`. Builds a linear space of x
coordinates. Use with `dx` where `x0` is the starting
coordinate and `dx` the step.
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
xhoverformat
Sets the hover text formatting rule for `x` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*. By default the values are
formatted using `xaxis.hoverformat`.
xperiod
Only relevant when the axis `type` is "date". Sets the
period positioning in milliseconds or "M<n>" on the x
axis. Special values in the form of "M<n>" could be
used to declare the number of months. In this case `n`
must be a positive integer.
xperiod0
Only relevant when the axis `type` is "date". Sets the
base for period positioning in milliseconds or date
string on the x0 axis. When `x0period` is a round number
of weeks, the `x0period0` by default would be on a
Sunday i.e. 2000-01-02, otherwise it would be at
2000-01-01.
xperiodalignment
Only relevant when the axis `type` is "date". Sets the
alignment of data points on the x axis.
xsrc
Sets the source reference on Chart Studio Cloud for
`x`.
y
Sets the y coordinates.
y0
Alternate to `y`. Builds a linear space of y
coordinates. Use with `dy` where `y0` is the starting
coordinate and `dy` the step.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
yhoverformat
Sets the hover text formatting rule for `y` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*. By default the values are
formatted using `yaxis.hoverformat`.
yperiod
Only relevant when the axis `type` is "date". Sets the
period positioning in milliseconds or "M<n>" on the y
axis. Special values in the form of "M<n>" could be
used to declare the number of months. In this case `n`
must be a positive integer.
yperiod0
Only relevant when the axis `type` is "date". Sets the
base for period positioning in milliseconds or date
string on the y0 axis. When `y0period` is a round number
of weeks, the `y0period0` by default would be on a
Sunday i.e. 2000-01-02, otherwise it would be at
2000-01-01.
yperiodalignment
Only relevant when the axis `type` is "date". Sets the
alignment of data points on the y axis.
ysrc
Sets the source reference on Chart Studio Cloud for
`y`.
zorder
Sets the layer on which this trace is displayed,
relative to other SVG traces on the same subplot. SVG
traces with higher `zorder` appear in front of those
with lower `zorder`.
Returns
-------
Funnel
"""
super(Funnel, self).__init__("funnel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.Funnel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Funnel`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
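# Each block below follows the same precedence rule: an explicit
# keyword argument overrides the corresponding entry popped from
# `arg`, and only non-None values are assigned, so anything left
# unset falls back to the plotly.js default.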
_v = arg.pop("alignmentgroup", None)
_v = alignmentgroup if alignmentgroup is not None else _v
if _v is not None:
self["alignmentgroup"] = _v
_v = arg.pop("cliponaxis", None)
_v = cliponaxis if cliponaxis is not None else _v
if _v is not None:
self["cliponaxis"] = _v
_v = arg.pop("connector", None)
_v = connector if connector is not None else _v
if _v is not None:
self["connector"] = _v
_v = arg.pop("constraintext", None)
_v = constraintext if constraintext is not None else _v
if _v is not None:
self["constraintext"] = _v
_v = arg.pop("customdata", None)
_v = customdata if customdata is not None else _v
if _v is not None:
self["customdata"] = _v
_v = arg.pop("customdatasrc", None)
_v = customdatasrc if customdatasrc is not None else _v
if _v is not None:
self["customdatasrc"] = _v
_v = arg.pop("dx", None)
_v = dx if dx is not None else _v
if _v is not None:
self["dx"] = _v
_v = arg.pop("dy", None)
_v = dy if dy is not None else _v
if _v is not None:
self["dy"] = _v
_v = arg.pop("hoverinfo", None)
_v = hoverinfo if hoverinfo is not None else _v
if _v is not None:
self["hoverinfo"] = _v
_v = arg.pop("hoverinfosrc", None)
_v = hoverinfosrc if hoverinfosrc is not None else _v
if _v is not None:
self["hoverinfosrc"] = _v
_v = arg.pop("hoverlabel", None)
_v = hoverlabel if hoverlabel is not None else _v
if _v is not None:
self["hoverlabel"] = _v
_v = arg.pop("hovertemplate", None)
_v = hovertemplate if hovertemplate is not None else _v
if _v is not None:
self["hovertemplate"] = _v
_v = arg.pop("hovertemplatesrc", None)
_v = hovertemplatesrc if hovertemplatesrc is not None else _v
if _v is not None:
self["hovertemplatesrc"] = _v
_v = arg.pop("hovertext", None)
_v = hovertext if hovertext is not None else _v
if _v is not None:
self["hovertext"] = _v
_v = arg.pop("hovertextsrc", None)
_v = hovertextsrc if hovertextsrc is not None else _v
if _v is not None:
self["hovertextsrc"] = _v
_v = arg.pop("ids", None)
_v = ids if ids is not None else _v
if _v is not None:
self["ids"] = _v
_v = arg.pop("idssrc", None)
_v = idssrc if idssrc is not None else _v
if _v is not None:
self["idssrc"] = _v
_v = arg.pop("insidetextanchor", None)
_v = insidetextanchor if insidetextanchor is not None else _v
if _v is not None:
self["insidetextanchor"] = _v
_v = arg.pop("insidetextfont", None)
_v = insidetextfont if insidetextfont is not None else _v
if _v is not None:
self["insidetextfont"] = _v
_v = arg.pop("legend", None)
_v = legend if legend is not None else _v
if _v is not None:
self["legend"] = _v
_v = arg.pop("legendgroup", None)
_v = legendgroup if legendgroup is not None else _v
if _v is not None:
self["legendgroup"] = _v
_v = arg.pop("legendgrouptitle", None)
_v = legendgrouptitle if legendgrouptitle is not None else _v
if _v is not None:
self["legendgrouptitle"] = _v
_v = arg.pop("legendrank", None)
_v = legendrank if legendrank is not None else _v
if _v is not None:
self["legendrank"] = _v
_v = arg.pop("legendwidth", None)
_v = legendwidth if legendwidth is not None else _v
if _v is not None:
self["legendwidth"] = _v
_v = arg.pop("marker", None)
_v = marker if marker is not None else _v
if _v is not None:
self["marker"] = _v
_v = arg.pop("meta", None)
_v = meta if meta is not None else _v
if _v is not None:
self["meta"] = _v
_v = arg.pop("metasrc", None)
_v = metasrc if metasrc is not None else _v
if _v is not None:
self["metasrc"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("offset", None)
_v = offset if offset is not None else _v
if _v is not None:
self["offset"] = _v
_v = arg.pop("offsetgroup", None)
_v = offsetgroup if offsetgroup is not None else _v
if _v is not None:
self["offsetgroup"] = _v
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
_v = arg.pop("orientation", None)
_v = orientation if orientation is not None else _v
if _v is not None:
self["orientation"] = _v
_v = arg.pop("outsidetextfont", None)
_v = outsidetextfont if outsidetextfont is not None else _v
if _v is not None:
self["outsidetextfont"] = _v
_v = arg.pop("selectedpoints", None)
_v = selectedpoints if selectedpoints is not None else _v
if _v is not None:
self["selectedpoints"] = _v
_v = arg.pop("showlegend", None)
_v = showlegend if showlegend is not None else _v
if _v is not None:
self["showlegend"] = _v
_v = arg.pop("stream", None)
_v = stream if stream is not None else _v
if _v is not None:
self["stream"] = _v
_v = arg.pop("text", None)
_v = text if text is not None else _v
if _v is not None:
self["text"] = _v
_v = arg.pop("textangle", None)
_v = textangle if textangle is not None else _v
if _v is not None:
self["textangle"] = _v
_v = arg.pop("textfont", None)
_v = textfont if textfont is not None else _v
if _v is not None:
self["textfont"] = _v
_v = arg.pop("textinfo", None)
_v = textinfo if textinfo is not None else _v
if _v is not None:
self["textinfo"] = _v
_v = arg.pop("textposition", None)
_v = textposition if textposition is not None else _v
if _v is not None:
self["textposition"] = _v
_v = arg.pop("textpositionsrc", None)
_v = textpositionsrc if textpositionsrc is not None else _v
if _v is not None:
self["textpositionsrc"] = _v
_v = arg.pop("textsrc", None)
_v = textsrc if textsrc is not None else _v
if _v is not None:
self["textsrc"] = _v
_v = arg.pop("texttemplate", None)
_v = texttemplate if texttemplate is not None else _v
if _v is not None:
self["texttemplate"] = _v
_v = arg.pop("texttemplatesrc", None)
_v = texttemplatesrc if texttemplatesrc is not None else _v
if _v is not None:
self["texttemplatesrc"] = _v
_v = arg.pop("uid", None)
_v = uid if uid is not None else _v
if _v is not None:
self["uid"] = _v
_v = arg.pop("uirevision", None)
_v = uirevision if uirevision is not None else _v
if _v is not None:
self["uirevision"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
_v = arg.pop("width", None)
_v = width if width is not None else _v
if _v is not None:
self["width"] = _v
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("x0", None)
_v = x0 if x0 is not None else _v
if _v is not None:
self["x0"] = _v
_v = arg.pop("xaxis", None)
_v = xaxis if xaxis is not None else _v
if _v is not None:
self["xaxis"] = _v
_v = arg.pop("xhoverformat", None)
_v = xhoverformat if xhoverformat is not None else _v
if _v is not None:
self["xhoverformat"] = _v
_v = arg.pop("xperiod", None)
_v = xperiod if xperiod is not None else _v
if _v is not None:
self["xperiod"] = _v
_v = arg.pop("xperiod0", None)
_v = xperiod0 if xperiod0 is not None else _v
if _v is not None:
self["xperiod0"] = _v
_v = arg.pop("xperiodalignment", None)
_v = xperiodalignment if xperiodalignment is not None else _v
if _v is not None:
self["xperiodalignment"] = _v
_v = arg.pop("xsrc", None)
_v = xsrc if xsrc is not None else _v
if _v is not None:
self["xsrc"] = _v
_v = arg.pop("y", None)
_v = y if y is not None else _v
if _v is not None:
self["y"] = _v
_v = arg.pop("y0", None)
_v = y0 if y0 is not None else _v
if _v is not None:
self["y0"] = _v
_v = arg.pop("yaxis", None)
_v = yaxis if yaxis is not None else _v
if _v is not None:
self["yaxis"] = _v
_v = arg.pop("yhoverformat", None)
_v = yhoverformat if yhoverformat is not None else _v
if _v is not None:
self["yhoverformat"] = _v
_v = arg.pop("yperiod", None)
_v = yperiod if yperiod is not None else _v
if _v is not None:
self["yperiod"] = _v
_v = arg.pop("yperiod0", None)
_v = yperiod0 if yperiod0 is not None else _v
if _v is not None:
self["yperiod0"] = _v
_v = arg.pop("yperiodalignment", None)
_v = yperiodalignment if yperiodalignment is not None else _v
if _v is not None:
self["yperiodalignment"] = _v
_v = arg.pop("ysrc", None)
_v = ysrc if ysrc is not None else _v
if _v is not None:
self["ysrc"] = _v
_v = arg.pop("zorder", None)
_v = zorder if zorder is not None else _v
if _v is not None:
self["zorder"] = _v
# Read-only literals
# ------------------
self._props["type"] = "funnel"
arg.pop("type", None)
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| _funnel.Funnel.__init__ | Self-Contained |
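As with the previous record, here is a minimal usage sketch for the constructor above (the stage labels and counts are made up, not part of the dataset):
import plotly.graph_objects as go
fig = go.Figure(
    go.Funnel(
        y=["Visits", "Sign-ups", "Purchases"],  # one stage per bar
        x=[1200, 400, 150],                     # value at each stage
        textinfo="value+percent initial",       # annotate the drop-off
    )
)
fig.show()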
plotly.py | 11 | packages/python/plotly/plotly/graph_objs/_funnelarea.py | def __init__(
self,
arg=None,
aspectratio=None,
baseratio=None,
customdata=None,
customdatasrc=None,
dlabel=None,
domain=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
insidetextfont=None,
label0=None,
labels=None,
labelssrc=None,
legend=None,
legendgroup=None,
legendgrouptitle=None,
legendrank=None,
legendwidth=None,
marker=None,
meta=None,
metasrc=None,
name=None,
opacity=None,
scalegroup=None,
showlegend=None,
stream=None,
text=None,
textfont=None,
textinfo=None,
textposition=None,
textpositionsrc=None,
textsrc=None,
texttemplate=None,
texttemplatesrc=None,
title=None,
uid=None,
uirevision=None,
values=None,
valuessrc=None,
visible=None,
**kwargs,
):
"""
Construct a new Funnelarea object
Visualize stages in a process using area-encoded trapezoids.
This trace can be used to show data in a part-to-whole
representation similar to a "pie" trace, wherein each item
appears in a single stage. See also the "funnel" trace type for
a different approach to visualizing funnel data.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Funnelarea`
aspectratio
Sets the ratio between height and width
baseratio
Sets the ratio between bottom length and maximum top
length.
customdata
Assigns extra data to each datum. This may be useful when
listening to hover, click and selection events. Note
that "scatter" traces also append customdata items to
the markers' DOM elements.
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
dlabel
Sets the label step. See `label0` for more info.
domain
:class:`plotly.graph_objects.funnelarea.Domain`
instance or dict with compatible properties
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.funnelarea.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appears in the hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, every attribute that can be
specified per-point (the ones that are `arrayOk: true`)
is available. Finally, the template string has access
to variables `label`, `color`, `value`, `text` and
`percent`. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary
box completely, use an empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
hovertext
Sets hover text elements associated with each sector.
If a single string, the same string appears for all
data points. If an array of strings, the items are
mapped in order of this trace's sectors. To be seen,
trace `hoverinfo` must contain a "text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
`hovertext`.
ids
Assigns id labels to each datum. These ids provide object
constancy for data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
insidetextfont
Sets the font used for `textinfo` lying inside the
sector.
label0
Alternate to `labels`. Builds a numeric set of labels.
Use with `dlabel` where `label0` is the starting label
and `dlabel` the step.
labels
Sets the sector labels. If `labels` entries are
duplicated, we sum associated `values` or simply count
occurrences if `values` is not provided. For other
array attributes (including color) we use the first
non-empty entry among all occurrences of the label.
labelssrc
Sets the source reference on Chart Studio Cloud for
`labels`.
legend
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2",
"legend3", etc. Settings for these legends are set in
the layout, under `layout.legend`, `layout.legend2`,
etc.
legendgroup
Sets the legend group for this trace. Traces and shapes
part of the same legend group hide/show at the same
time when toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.funnelarea.Legendgrouptitle`
instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items. When having
unranked or equal rank items, shapes would be displayed
after traces, i.e. according to their order in data and
layout.
legendwidth
Sets the width (in px or fraction) of the legend for
this trace.
marker
:class:`plotly.graph_objects.funnelarea.Marker`
instance or dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`,
`rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
name
Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
scalegroup
If there are multiple funnelareas that should be sized
according to their totals, link them by providing a
non-empty group id here shared by every trace in the
same group.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
:class:`plotly.graph_objects.funnelarea.Stream`
instance or dict with compatible properties
text
Sets text elements associated with each sector. If
trace `textinfo` contains a "text" flag, these elements
will be seen on the chart. If trace `hoverinfo`
contains a "text" flag and "hovertext" is not set,
these elements will be seen in the hover labels.
textfont
Sets the font used for `textinfo`.
textinfo
Determines which trace information appear on the graph.
textposition
Specifies the location of the `textinfo`.
textpositionsrc
Sets the source reference on Chart Studio Cloud for
`textposition`.
textsrc
Sets the source reference on Chart Studio Cloud for
`text`.
texttemplate
Template string used for rendering the information text
that appears on points. Note that this will override
`textinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Every attribute that can be
specified per-point (the ones that are `arrayOk: true`)
is available. Finally, the template string has access
to variables `label`, `color`, `value`, `text` and
`percent`.
texttemplatesrc
Sets the source reference on Chart Studio Cloud for
`texttemplate`.
title
:class:`plotly.graph_objects.funnelarea.Title` instance
or dict with compatible properties
uid
Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
values
Sets the values of the sectors. If omitted, we count
occurrences of each label.
valuessrc
Sets the source reference on Chart Studio Cloud for
`values`.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
Returns
-------
Funnelarea
"""
| /usr/src/app/target_test_cases/failed_tests__funnelarea.Funnelarea.__init__.txt | def __init__(
self,
arg=None,
aspectratio=None,
baseratio=None,
customdata=None,
customdatasrc=None,
dlabel=None,
domain=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
insidetextfont=None,
label0=None,
labels=None,
labelssrc=None,
legend=None,
legendgroup=None,
legendgrouptitle=None,
legendrank=None,
legendwidth=None,
marker=None,
meta=None,
metasrc=None,
name=None,
opacity=None,
scalegroup=None,
showlegend=None,
stream=None,
text=None,
textfont=None,
textinfo=None,
textposition=None,
textpositionsrc=None,
textsrc=None,
texttemplate=None,
texttemplatesrc=None,
title=None,
uid=None,
uirevision=None,
values=None,
valuessrc=None,
visible=None,
**kwargs,
):
"""
Construct a new Funnelarea object
Visualize stages in a process using area-encoded trapezoids.
This trace can be used to show data in a part-to-whole
representation similar to a "pie" trace, wherein each item
appears in a single stage. See also the "funnel" trace type for
a different approach to visualizing funnel data.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Funnelarea`
aspectratio
Sets the ratio between height and width
baseratio
Sets the ratio between bottom length and maximum top
length.
customdata
Assigns extra data to each datum. This may be useful when
listening to hover, click and selection events. Note
that "scatter" traces also append customdata items to
the markers' DOM elements.
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
dlabel
Sets the label step. See `label0` for more info.
domain
:class:`plotly.graph_objects.funnelarea.Domain`
instance or dict with compatible properties
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.funnelarea.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appears in the hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}", as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}". See
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". See https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link:
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, every attribute that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Finally, the template string has access
to variables `label`, `color`, `value`, `text` and
`percent`. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary
box completely, use an empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
hovertext
Sets hover text elements associated with each sector.
If a single string, the same string appears for all
data points. If an array of strings, the items are
mapped in order of this trace's sectors. To be seen,
trace `hoverinfo` must contain a "text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
`hovertext`.
ids
Assigns id labels to each datum. These ids provide
object constancy for data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
insidetextfont
Sets the font used for `textinfo` lying inside the
sector.
label0
Alternate to `labels`. Builds a numeric set of labels.
Use with `dlabel` where `label0` is the starting label
and `dlabel` the step.
labels
Sets the sector labels. If `labels` entries are
duplicated, we sum associated `values` or simply count
occurrences if `values` is not provided. For other
array attributes (including color) we use the first
non-empty entry among all occurrences of the label.
labelssrc
Sets the source reference on Chart Studio Cloud for
`labels`.
legend
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2",
"legend3", etc. Settings for these legends are set in
the layout, under `layout.legend`, `layout.legend2`,
etc.
legendgroup
Sets the legend group for this trace. Traces and shapes
part of the same legend group hide/show at the same
time when toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.funnelarea.Legendgrouptitl
e` instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items. With unranked or
equal-rank items, shapes are displayed after traces,
i.e. according to their order in data and layout.
legendwidth
Sets the width (in px or fraction) of the legend for
this trace.
marker
:class:`plotly.graph_objects.funnelarea.Marker`
instance or dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`,
`rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
name
Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
scalegroup
If there are multiple funnelareas that should be sized
according to their totals, link them by providing a
non-empty group id here shared by every trace in the
same group.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
:class:`plotly.graph_objects.funnelarea.Stream`
instance or dict with compatible properties
text
Sets text elements associated with each sector. If
trace `textinfo` contains a "text" flag, these elements
will be seen on the chart. If trace `hoverinfo`
contains a "text" flag and "hovertext" is not set,
these elements will be seen in the hover labels.
textfont
Sets the font used for `textinfo`.
textinfo
Determines which trace information appear on the graph.
textposition
Specifies the location of the `textinfo`.
textpositionsrc
Sets the source reference on Chart Studio Cloud for
`textposition`.
textsrc
Sets the source reference on Chart Studio Cloud for
`text`.
texttemplate
Template string used for rendering the information text
that appears on points. Note that this will override
`textinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". See
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". See https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Every attribute that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Finally, the template string has access
to variables `label`, `color`, `value`, `text` and
`percent`.
texttemplatesrc
Sets the source reference on Chart Studio Cloud for
`texttemplate`.
title
:class:`plotly.graph_objects.funnelarea.Title` instance
or dict with compatible properties
uid
Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
values
Sets the values of the sectors. If omitted, we count
occurrences of each label.
valuessrc
Sets the source reference on Chart Studio Cloud for
`values`.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
Returns
-------
Funnelarea
"""
super(Funnelarea, self).__init__("funnelarea")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.Funnelarea
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Funnelarea`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("aspectratio", None)
_v = aspectratio if aspectratio is not None else _v
if _v is not None:
self["aspectratio"] = _v
_v = arg.pop("baseratio", None)
_v = baseratio if baseratio is not None else _v
if _v is not None:
self["baseratio"] = _v
_v = arg.pop("customdata", None)
_v = customdata if customdata is not None else _v
if _v is not None:
self["customdata"] = _v
_v = arg.pop("customdatasrc", None)
_v = customdatasrc if customdatasrc is not None else _v
if _v is not None:
self["customdatasrc"] = _v
_v = arg.pop("dlabel", None)
_v = dlabel if dlabel is not None else _v
if _v is not None:
self["dlabel"] = _v
_v = arg.pop("domain", None)
_v = domain if domain is not None else _v
if _v is not None:
self["domain"] = _v
_v = arg.pop("hoverinfo", None)
_v = hoverinfo if hoverinfo is not None else _v
if _v is not None:
self["hoverinfo"] = _v
_v = arg.pop("hoverinfosrc", None)
_v = hoverinfosrc if hoverinfosrc is not None else _v
if _v is not None:
self["hoverinfosrc"] = _v
_v = arg.pop("hoverlabel", None)
_v = hoverlabel if hoverlabel is not None else _v
if _v is not None:
self["hoverlabel"] = _v
_v = arg.pop("hovertemplate", None)
_v = hovertemplate if hovertemplate is not None else _v
if _v is not None:
self["hovertemplate"] = _v
_v = arg.pop("hovertemplatesrc", None)
_v = hovertemplatesrc if hovertemplatesrc is not None else _v
if _v is not None:
self["hovertemplatesrc"] = _v
_v = arg.pop("hovertext", None)
_v = hovertext if hovertext is not None else _v
if _v is not None:
self["hovertext"] = _v
_v = arg.pop("hovertextsrc", None)
_v = hovertextsrc if hovertextsrc is not None else _v
if _v is not None:
self["hovertextsrc"] = _v
_v = arg.pop("ids", None)
_v = ids if ids is not None else _v
if _v is not None:
self["ids"] = _v
_v = arg.pop("idssrc", None)
_v = idssrc if idssrc is not None else _v
if _v is not None:
self["idssrc"] = _v
_v = arg.pop("insidetextfont", None)
_v = insidetextfont if insidetextfont is not None else _v
if _v is not None:
self["insidetextfont"] = _v
_v = arg.pop("label0", None)
_v = label0 if label0 is not None else _v
if _v is not None:
self["label0"] = _v
_v = arg.pop("labels", None)
_v = labels if labels is not None else _v
if _v is not None:
self["labels"] = _v
_v = arg.pop("labelssrc", None)
_v = labelssrc if labelssrc is not None else _v
if _v is not None:
self["labelssrc"] = _v
_v = arg.pop("legend", None)
_v = legend if legend is not None else _v
if _v is not None:
self["legend"] = _v
_v = arg.pop("legendgroup", None)
_v = legendgroup if legendgroup is not None else _v
if _v is not None:
self["legendgroup"] = _v
_v = arg.pop("legendgrouptitle", None)
_v = legendgrouptitle if legendgrouptitle is not None else _v
if _v is not None:
self["legendgrouptitle"] = _v
_v = arg.pop("legendrank", None)
_v = legendrank if legendrank is not None else _v
if _v is not None:
self["legendrank"] = _v
_v = arg.pop("legendwidth", None)
_v = legendwidth if legendwidth is not None else _v
if _v is not None:
self["legendwidth"] = _v
_v = arg.pop("marker", None)
_v = marker if marker is not None else _v
if _v is not None:
self["marker"] = _v
_v = arg.pop("meta", None)
_v = meta if meta is not None else _v
if _v is not None:
self["meta"] = _v
_v = arg.pop("metasrc", None)
_v = metasrc if metasrc is not None else _v
if _v is not None:
self["metasrc"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
_v = arg.pop("scalegroup", None)
_v = scalegroup if scalegroup is not None else _v
if _v is not None:
self["scalegroup"] = _v
_v = arg.pop("showlegend", None)
_v = showlegend if showlegend is not None else _v
if _v is not None:
self["showlegend"] = _v
_v = arg.pop("stream", None)
_v = stream if stream is not None else _v
if _v is not None:
self["stream"] = _v
_v = arg.pop("text", None)
_v = text if text is not None else _v
if _v is not None:
self["text"] = _v
_v = arg.pop("textfont", None)
_v = textfont if textfont is not None else _v
if _v is not None:
self["textfont"] = _v
_v = arg.pop("textinfo", None)
_v = textinfo if textinfo is not None else _v
if _v is not None:
self["textinfo"] = _v
_v = arg.pop("textposition", None)
_v = textposition if textposition is not None else _v
if _v is not None:
self["textposition"] = _v
_v = arg.pop("textpositionsrc", None)
_v = textpositionsrc if textpositionsrc is not None else _v
if _v is not None:
self["textpositionsrc"] = _v
_v = arg.pop("textsrc", None)
_v = textsrc if textsrc is not None else _v
if _v is not None:
self["textsrc"] = _v
_v = arg.pop("texttemplate", None)
_v = texttemplate if texttemplate is not None else _v
if _v is not None:
self["texttemplate"] = _v
_v = arg.pop("texttemplatesrc", None)
_v = texttemplatesrc if texttemplatesrc is not None else _v
if _v is not None:
self["texttemplatesrc"] = _v
_v = arg.pop("title", None)
_v = title if title is not None else _v
if _v is not None:
self["title"] = _v
_v = arg.pop("uid", None)
_v = uid if uid is not None else _v
if _v is not None:
self["uid"] = _v
_v = arg.pop("uirevision", None)
_v = uirevision if uirevision is not None else _v
if _v is not None:
self["uirevision"] = _v
_v = arg.pop("values", None)
_v = values if values is not None else _v
if _v is not None:
self["values"] = _v
_v = arg.pop("valuessrc", None)
_v = valuessrc if valuessrc is not None else _v
if _v is not None:
self["valuessrc"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
# Read-only literals
# ------------------
self._props["type"] = "funnelarea"
arg.pop("type", None)
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| _funnelarea.Funnelarea.__init__ | Self-Contained |
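For orientation, a minimal usage sketch of the constructor documented above follows; the labels, values, and hover template are illustrative placeholders, not taken from the plotly test suite.
# Minimal sketch: build a Funnelarea trace and render it in a figure.
import plotly.graph_objects as go

fig = go.Figure(
    go.Funnelarea(
        labels=["Visits", "Signups", "Purchases"],  # placeholder sector labels
        values=[1000, 400, 120],                    # placeholder sector sizes
        textinfo="label+percent",                   # what to draw on each sector
        hovertemplate="%{label}: %{value}<extra></extra>",  # hide secondary box
    )
)
fig.show()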
plotly.py | 12 | packages/python/plotly/plotly/figure_factory/_gantt.py | def create_gantt(
df,
colors=None,
index_col=None,
show_colorbar=False,
reverse_colors=False,
title="Gantt Chart",
bar_width=0.2,
showgrid_x=False,
showgrid_y=False,
height=600,
width=None,
tasks=None,
task_names=None,
data=None,
group_tasks=False,
show_hover_fill=True,
):
"""
**deprecated**, use instead
:func:`plotly.express.timeline`.
Returns figure for a gantt chart
:param (array|list) df: input data for gantt chart. Must be either
a dataframe or a list. If a dataframe, the columns must include
'Task', 'Start' and 'Finish'. Other columns can be included and
used for indexing. If a list, its elements must be dictionaries
with the same required column headers: 'Task', 'Start' and
'Finish'.
:param (str|list|dict|tuple) colors: either a plotly scale name, an
rgb or hex color, a color tuple or a list of colors. An rgb color
is of the form 'rgb(x, y, z)' where x, y, z belong to the interval
[0, 255] and a color tuple is a tuple of the form (a, b, c) where
a, b and c belong to [0, 1]. If colors is a list, it must
contain the valid color types aforementioned as its members.
If a dictionary, all values of the indexing column must be keys in
colors.
:param (str|float) index_col: the column header (if df is a data
frame) that will function as the indexing column. If df is a list,
index_col must be one of the keys in all the items of df.
:param (bool) show_colorbar: determines if colorbar will be visible.
Only applies if values in the index column are numeric.
:param (bool) show_hover_fill: enables/disables the hovertext for the
filled area of the chart.
:param (bool) reverse_colors: reverses the order of selected colors
:param (str) title: the title of the chart
:param (float) bar_width: the width of the horizontal bars in the plot
:param (bool) showgrid_x: show/hide the x-axis grid
:param (bool) showgrid_y: show/hide the y-axis grid
:param (float) height: the height of the chart
:param (float) width: the width of the chart
Example 1: Simple Gantt Chart
>>> from plotly.figure_factory import create_gantt
>>> # Make data for chart
>>> df = [dict(Task="Job A", Start='2009-01-01', Finish='2009-02-30'),
... dict(Task="Job B", Start='2009-03-05', Finish='2009-04-15'),
... dict(Task="Job C", Start='2009-02-20', Finish='2009-05-30')]
>>> # Create a figure
>>> fig = create_gantt(df)
>>> fig.show()
Example 2: Index by Column with Numerical Entries
>>> from plotly.figure_factory import create_gantt
>>> # Make data for chart
>>> df = [dict(Task="Job A", Start='2009-01-01',
... Finish='2009-02-30', Complete=10),
... dict(Task="Job B", Start='2009-03-05',
... Finish='2009-04-15', Complete=60),
... dict(Task="Job C", Start='2009-02-20',
... Finish='2009-05-30', Complete=95)]
>>> # Create a figure with Plotly colorscale
>>> fig = create_gantt(df, colors='Blues', index_col='Complete',
... show_colorbar=True, bar_width=0.5,
... showgrid_x=True, showgrid_y=True)
>>> fig.show()
Example 3: Index by Column with String Entries
>>> from plotly.figure_factory import create_gantt
>>> # Make data for chart
>>> df = [dict(Task="Job A", Start='2009-01-01',
... Finish='2009-02-30', Resource='Apple'),
... dict(Task="Job B", Start='2009-03-05',
... Finish='2009-04-15', Resource='Grape'),
... dict(Task="Job C", Start='2009-02-20',
... Finish='2009-05-30', Resource='Banana')]
>>> # Create a figure with Plotly colorscale
>>> fig = create_gantt(df, colors=['rgb(200, 50, 25)', (1, 0, 1), '#6c4774'],
... index_col='Resource', reverse_colors=True,
... show_colorbar=True)
>>> fig.show()
Example 4: Use a dictionary for colors
>>> from plotly.figure_factory import create_gantt
>>> # Make data for chart
>>> df = [dict(Task="Job A", Start='2009-01-01',
... Finish='2009-02-30', Resource='Apple'),
... dict(Task="Job B", Start='2009-03-05',
... Finish='2009-04-15', Resource='Grape'),
... dict(Task="Job C", Start='2009-02-20',
... Finish='2009-05-30', Resource='Banana')]
>>> # Make a dictionary of colors
>>> colors = {'Apple': 'rgb(255, 0, 0)',
... 'Grape': 'rgb(170, 14, 200)',
... 'Banana': (1, 1, 0.2)}
>>> # Create a figure with Plotly colorscale
>>> fig = create_gantt(df, colors=colors, index_col='Resource',
... show_colorbar=True)
>>> fig.show()
Example 5: Use a pandas dataframe
>>> from plotly.figure_factory import create_gantt
>>> import pandas as pd
>>> # Make data as a dataframe
>>> df = pd.DataFrame([['Run', '2010-01-01', '2011-02-02', 10],
... ['Fast', '2011-01-01', '2012-06-05', 55],
... ['Eat', '2012-01-05', '2013-07-05', 94]],
... columns=['Task', 'Start', 'Finish', 'Complete'])
>>> # Create a figure with Plotly colorscale
>>> fig = create_gantt(df, colors='Blues', index_col='Complete',
... show_colorbar=True, bar_width=0.5,
... showgrid_x=True, showgrid_y=True)
>>> fig.show()
"""
| /usr/src/app/target_test_cases/failed_tests__gantt.create_gantt.txt | def create_gantt(
df,
colors=None,
index_col=None,
show_colorbar=False,
reverse_colors=False,
title="Gantt Chart",
bar_width=0.2,
showgrid_x=False,
showgrid_y=False,
height=600,
width=None,
tasks=None,
task_names=None,
data=None,
group_tasks=False,
show_hover_fill=True,
):
"""
**deprecated**, use instead
:func:`plotly.express.timeline`.
Returns figure for a gantt chart
:param (array|list) df: input data for gantt chart. Must be either
a dataframe or a list. If a dataframe, the columns must include
'Task', 'Start' and 'Finish'. Other columns can be included and
used for indexing. If a list, its elements must be dictionaries
with the same required column headers: 'Task', 'Start' and
'Finish'.
:param (str|list|dict|tuple) colors: either a plotly scale name, an
rgb or hex color, a color tuple or a list of colors. An rgb color
is of the form 'rgb(x, y, z)' where x, y, z belong to the interval
[0, 255] and a color tuple is a tuple of the form (a, b, c) where
a, b and c belong to [0, 1]. If colors is a list, it must
contain the valid color types aforementioned as its members.
If a dictionary, all values of the indexing column must be keys in
colors.
:param (str|float) index_col: the column header (if df is a data
frame) that will function as the indexing column. If df is a list,
index_col must be one of the keys in all the items of df.
:param (bool) show_colorbar: determines if colorbar will be visible.
Only applies if values in the index column are numeric.
:param (bool) show_hover_fill: enables/disables the hovertext for the
filled area of the chart.
:param (bool) reverse_colors: reverses the order of selected colors
:param (str) title: the title of the chart
:param (float) bar_width: the width of the horizontal bars in the plot
:param (bool) showgrid_x: show/hide the x-axis grid
:param (bool) showgrid_y: show/hide the y-axis grid
:param (float) height: the height of the chart
:param (float) width: the width of the chart
Example 1: Simple Gantt Chart
>>> from plotly.figure_factory import create_gantt
>>> # Make data for chart
>>> df = [dict(Task="Job A", Start='2009-01-01', Finish='2009-02-30'),
... dict(Task="Job B", Start='2009-03-05', Finish='2009-04-15'),
... dict(Task="Job C", Start='2009-02-20', Finish='2009-05-30')]
>>> # Create a figure
>>> fig = create_gantt(df)
>>> fig.show()
Example 2: Index by Column with Numerical Entries
>>> from plotly.figure_factory import create_gantt
>>> # Make data for chart
>>> df = [dict(Task="Job A", Start='2009-01-01',
... Finish='2009-02-30', Complete=10),
... dict(Task="Job B", Start='2009-03-05',
... Finish='2009-04-15', Complete=60),
... dict(Task="Job C", Start='2009-02-20',
... Finish='2009-05-30', Complete=95)]
>>> # Create a figure with Plotly colorscale
>>> fig = create_gantt(df, colors='Blues', index_col='Complete',
... show_colorbar=True, bar_width=0.5,
... showgrid_x=True, showgrid_y=True)
>>> fig.show()
Example 3: Index by Column with String Entries
>>> from plotly.figure_factory import create_gantt
>>> # Make data for chart
>>> df = [dict(Task="Job A", Start='2009-01-01',
... Finish='2009-02-30', Resource='Apple'),
... dict(Task="Job B", Start='2009-03-05',
... Finish='2009-04-15', Resource='Grape'),
... dict(Task="Job C", Start='2009-02-20',
... Finish='2009-05-30', Resource='Banana')]
>>> # Create a figure with Plotly colorscale
>>> fig = create_gantt(df, colors=['rgb(200, 50, 25)', (1, 0, 1), '#6c4774'],
... index_col='Resource', reverse_colors=True,
... show_colorbar=True)
>>> fig.show()
Example 4: Use a dictionary for colors
>>> from plotly.figure_factory import create_gantt
>>> # Make data for chart
>>> df = [dict(Task="Job A", Start='2009-01-01',
... Finish='2009-02-30', Resource='Apple'),
... dict(Task="Job B", Start='2009-03-05',
... Finish='2009-04-15', Resource='Grape'),
... dict(Task="Job C", Start='2009-02-20',
... Finish='2009-05-30', Resource='Banana')]
>>> # Make a dictionary of colors
>>> colors = {'Apple': 'rgb(255, 0, 0)',
... 'Grape': 'rgb(170, 14, 200)',
... 'Banana': (1, 1, 0.2)}
>>> # Create a figure with Plotly colorscale
>>> fig = create_gantt(df, colors=colors, index_col='Resource',
... show_colorbar=True)
>>> fig.show()
Example 5: Use a pandas dataframe
>>> from plotly.figure_factory import create_gantt
>>> import pandas as pd
>>> # Make data as a dataframe
>>> df = pd.DataFrame([['Run', '2010-01-01', '2011-02-02', 10],
... ['Fast', '2011-01-01', '2012-06-05', 55],
... ['Eat', '2012-01-05', '2013-07-05', 94]],
... columns=['Task', 'Start', 'Finish', 'Complete'])
>>> # Create a figure with Plotly colorscale
>>> fig = create_gantt(df, colors='Blues', index_col='Complete',
... show_colorbar=True, bar_width=0.5,
... showgrid_x=True, showgrid_y=True)
>>> fig.show()
"""
# validate gantt input data
chart = validate_gantt(df)
if index_col:
if index_col not in chart[0]:
raise exceptions.PlotlyError(
"In order to use an indexing column and assign colors to "
"the values of the index, you must choose an actual "
"column name in the dataframe or key if a list of "
"dictionaries is being used."
)
# validate gantt index column
index_list = []
for dictionary in chart:
index_list.append(dictionary[index_col])
utils.validate_index(index_list)
# Validate colors
if isinstance(colors, dict):
colors = clrs.validate_colors_dict(colors, "rgb")
else:
colors = clrs.validate_colors(colors, "rgb")
if reverse_colors is True:
colors.reverse()
if not index_col:
if isinstance(colors, dict):
raise exceptions.PlotlyError(
"Error. You have set colors to a dictionary but have not "
"picked an index. An index is required if you are "
"assigning colors to particular values in a dictionary."
)
fig = gantt(
chart,
colors,
title,
bar_width,
showgrid_x,
showgrid_y,
height,
width,
tasks=None,
task_names=None,
data=None,
group_tasks=group_tasks,
show_hover_fill=show_hover_fill,
show_colorbar=show_colorbar,
)
return fig
else:
if not isinstance(colors, dict):
fig = gantt_colorscale(
chart,
colors,
title,
index_col,
show_colorbar,
bar_width,
showgrid_x,
showgrid_y,
height,
width,
tasks=None,
task_names=None,
data=None,
group_tasks=group_tasks,
show_hover_fill=show_hover_fill,
)
return fig
else:
fig = gantt_dict(
chart,
colors,
title,
index_col,
show_colorbar,
bar_width,
showgrid_x,
showgrid_y,
height,
width,
tasks=None,
task_names=None,
data=None,
group_tasks=group_tasks,
show_hover_fill=show_hover_fill,
)
return fig
| _gantt.create_gantt | File-Level |
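Because the docstring above deprecates create_gantt in favor of plotly.express.timeline, a minimal sketch of the replacement may help; the task data mirrors Example 1, with '2009-02-28' substituted for the non-existent date '2009-02-30' so pandas can parse it.
# Minimal sketch: the plotly.express.timeline equivalent of Example 1 above.
import pandas as pd
import plotly.express as px

df = pd.DataFrame(
    [
        dict(Task="Job A", Start="2009-01-01", Finish="2009-02-28"),
        dict(Task="Job B", Start="2009-03-05", Finish="2009-04-15"),
        dict(Task="Job C", Start="2009-02-20", Finish="2009-05-30"),
    ]
)
fig = px.timeline(df, x_start="Start", x_end="Finish", y="Task")
fig.update_yaxes(autorange="reversed")  # Gantt convention: first task on top
fig.show()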
plotly.py | 13 | packages/python/plotly/plotly/graph_objs/layout/_grid.py | def __init__(
self,
arg=None,
columns=None,
domain=None,
pattern=None,
roworder=None,
rows=None,
subplots=None,
xaxes=None,
xgap=None,
xside=None,
yaxes=None,
ygap=None,
yside=None,
**kwargs,
):
"""
Construct a new Grid object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.layout.Grid`
columns
The number of columns in the grid. If you provide a 2D
`subplots` array, the length of its longest row is used
as the default. If you give an `xaxes` array, its
length is used as the default. But it's also possible
to have a different length, if you want to leave a
column at the end for non-cartesian subplots.
domain
:class:`plotly.graph_objects.layout.grid.Domain`
instance or dict with compatible properties
pattern
If no `subplots`, `xaxes`, or `yaxes` are given but we
do have `rows` and `columns`, we can generate defaults
using consecutive axis IDs, in two ways: "coupled"
gives one x axis per column and one y axis per row.
"independent" uses a new xy pair for each cell, left-
to-right across each row then iterating rows according
to `roworder`.
roworder
Is the first row the top or the bottom? Note that
columns are always enumerated from left to right.
rows
The number of rows in the grid. If you provide a 2D
`subplots` array or a `yaxes` array, its length is used
as the default. But it's also possible to have a
different length, if you want to leave a row at the end
for non-cartesian subplots.
subplots
Used for freeform grids, where some axes may be shared
across subplots but others are not. Each entry should
be a cartesian subplot id, like "xy" or "x3y2", or ""
to leave that cell empty. You may reuse x axes within
the same column, and y axes within the same row. Non-
cartesian subplots and traces that support `domain` can
place themselves in this grid separately using the
`gridcell` attribute.
xaxes
Used with `yaxes` when the x and y axes are shared
across columns and rows. Each entry should be an x axis
id like "x", "x2", etc., or "" to not put an x axis in
that column. Entries other than "" must be unique.
Ignored if `subplots` is present. If missing but
`yaxes` is present, will generate consecutive IDs.
xgap
Horizontal space between grid cells, expressed as a
fraction of the total width available to one cell.
Defaults to 0.1 for coupled-axes grids and 0.2 for
independent grids.
xside
Sets where the x axis labels and titles go. "bottom"
means the very bottom of the grid. "bottom plot" is the
lowest plot that each x axis is used in. "top" and "top
plot" are similar.
yaxes
Used with `xaxes` when the x and y axes are shared
across columns and rows. Each entry should be a y axis
id like "y", "y2", etc., or "" to not put a y axis in
that row. Entries other than "" must be unique. Ignored
if `subplots` is present. If missing but `xaxes` is
present, will generate consecutive IDs.
ygap
Vertical space between grid cells, expressed as a
fraction of the total height available to one cell.
Defaults to 0.1 for coupled-axes grids and 0.3 for
independent grids.
yside
Sets where the y axis labels and titles go. "left"
means the very left edge of the grid. "left plot" is
the leftmost plot that each y axis is used in. "right"
and "right plot" are similar.
Returns
-------
Grid
"""
| /usr/src/app/target_test_cases/failed_tests__grid.Grid.__init__.txt | def __init__(
self,
arg=None,
columns=None,
domain=None,
pattern=None,
roworder=None,
rows=None,
subplots=None,
xaxes=None,
xgap=None,
xside=None,
yaxes=None,
ygap=None,
yside=None,
**kwargs,
):
"""
Construct a new Grid object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.layout.Grid`
columns
The number of columns in the grid. If you provide a 2D
`subplots` array, the length of its longest row is used
as the default. If you give an `xaxes` array, its
length is used as the default. But it's also possible
to have a different length, if you want to leave a
column at the end for non-cartesian subplots.
domain
:class:`plotly.graph_objects.layout.grid.Domain`
instance or dict with compatible properties
pattern
If no `subplots`, `xaxes`, or `yaxes` are given but we
do have `rows` and `columns`, we can generate defaults
using consecutive axis IDs, in two ways: "coupled"
gives one x axis per column and one y axis per row.
"independent" uses a new xy pair for each cell, left-
to-right across each row then iterating rows according
to `roworder`.
roworder
Is the first row the top or the bottom? Note that
columns are always enumerated from left to right.
rows
The number of rows in the grid. If you provide a 2D
`subplots` array or a `yaxes` array, its length is used
as the default. But it's also possible to have a
different length, if you want to leave a row at the end
for non-cartesian subplots.
subplots
Used for freeform grids, where some axes may be shared
across subplots but others are not. Each entry should
be a cartesian subplot id, like "xy" or "x3y2", or ""
to leave that cell empty. You may reuse x axes within
the same column, and y axes within the same row. Non-
cartesian subplots and traces that support `domain` can
place themselves in this grid separately using the
`gridcell` attribute.
xaxes
Used with `yaxes` when the x and y axes are shared
across columns and rows. Each entry should be an x axis
id like "x", "x2", etc., or "" to not put an x axis in
that column. Entries other than "" must be unique.
Ignored if `subplots` is present. If missing but
`yaxes` is present, will generate consecutive IDs.
xgap
Horizontal space between grid cells, expressed as a
fraction of the total width available to one cell.
Defaults to 0.1 for coupled-axes grids and 0.2 for
independent grids.
xside
Sets where the x axis labels and titles go. "bottom"
means the very bottom of the grid. "bottom plot" is the
lowest plot that each x axis is used in. "top" and "top
plot" are similar.
yaxes
Used with `xaxes` when the x and y axes are shared
across columns and rows. Each entry should be a y axis
id like "y", "y2", etc., or "" to not put a y axis in
that row. Entries other than "" must be unique. Ignored
if `subplots` is present. If missing but `xaxes` is
present, will generate consecutive IDs.
ygap
Vertical space between grid cells, expressed as a
fraction of the total height available to one cell.
Defaults to 0.1 for coupled-axes grids and 0.3 for
independent grids.
yside
Sets where the y axis labels and titles go. "left"
means the very left edge of the grid. "left plot" is
the leftmost plot that each y axis is used in. "right"
and "right plot" are similar.
Returns
-------
Grid
"""
super(Grid, self).__init__("grid")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.Grid
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.Grid`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("columns", None)
_v = columns if columns is not None else _v
if _v is not None:
self["columns"] = _v
_v = arg.pop("domain", None)
_v = domain if domain is not None else _v
if _v is not None:
self["domain"] = _v
_v = arg.pop("pattern", None)
_v = pattern if pattern is not None else _v
if _v is not None:
self["pattern"] = _v
_v = arg.pop("roworder", None)
_v = roworder if roworder is not None else _v
if _v is not None:
self["roworder"] = _v
_v = arg.pop("rows", None)
_v = rows if rows is not None else _v
if _v is not None:
self["rows"] = _v
_v = arg.pop("subplots", None)
_v = subplots if subplots is not None else _v
if _v is not None:
self["subplots"] = _v
_v = arg.pop("xaxes", None)
_v = xaxes if xaxes is not None else _v
if _v is not None:
self["xaxes"] = _v
_v = arg.pop("xgap", None)
_v = xgap if xgap is not None else _v
if _v is not None:
self["xgap"] = _v
_v = arg.pop("xside", None)
_v = xside if xside is not None else _v
if _v is not None:
self["xside"] = _v
_v = arg.pop("yaxes", None)
_v = yaxes if yaxes is not None else _v
if _v is not None:
self["yaxes"] = _v
_v = arg.pop("ygap", None)
_v = ygap if ygap is not None else _v
if _v is not None:
self["ygap"] = _v
_v = arg.pop("yside", None)
_v = yside if yside is not None else _v
if _v is not None:
self["yside"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| _grid.Grid.__init__ | Self-Contained |
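As a companion to the parameter reference above, here is a minimal sketch of layout.grid in use; the trace data is placeholder-only. With pattern="independent" and no explicit subplots/xaxes/yaxes, consecutive axis ids are generated cell by cell, so each trace is routed to its cell via its xaxis/yaxis properties.
# Minimal sketch: a 2x2 "independent" grid with one trace per cell.
import plotly.graph_objects as go

fig = go.Figure()
fig.add_trace(go.Scatter(y=[1, 3, 2]))                          # cell 1: x/y
fig.add_trace(go.Scatter(y=[2, 1, 3], xaxis="x2", yaxis="y2"))  # cell 2
fig.add_trace(go.Scatter(y=[3, 2, 1], xaxis="x3", yaxis="y3"))  # cell 3
fig.add_trace(go.Scatter(y=[1, 2, 3], xaxis="x4", yaxis="y4"))  # cell 4
fig.update_layout(grid=dict(rows=2, columns=2, pattern="independent"))
fig.show()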
plotly.py | 15 | packages/python/plotly/plotly/io/_html.py | def to_html(
fig,
config=None,
auto_play=True,
include_plotlyjs=True,
include_mathjax=False,
post_script=None,
full_html=True,
animation_opts=None,
default_width="100%",
default_height="100%",
validate=True,
div_id=None,
):
"""
Convert a figure to an HTML string representation.
Parameters
----------
fig:
Figure object or dict representing a figure
config: dict or None (default None)
Plotly.js figure config options
auto_play: bool (default=True)
Whether to automatically start the animation sequence on page load
if the figure contains frames. Has no effect if the figure does not
contain frames.
include_plotlyjs: bool or string (default True)
Specifies how the plotly.js library is included/loaded in the output
div string.
If True, a script tag containing the plotly.js source code (~3MB)
is included in the output. HTML files generated with this option are
fully self-contained and can be used offline.
If 'cdn', a script tag that references the plotly.js CDN is included
in the output. The url used is versioned to match the bundled plotly.js.
HTML files generated with this option are about 3MB smaller than those
generated with include_plotlyjs=True, but they require an active
internet connection in order to load the plotly.js library.
If 'directory', a script tag is included that references an external
plotly.min.js bundle that is assumed to reside in the same
directory as the HTML file.
If 'require', Plotly.js is loaded using require.js. This option
assumes that require.js is globally available and that it has been
globally configured to know how to find Plotly.js as 'plotly'.
This option is not advised when full_html=True as it will result
in a non-functional html file.
If a string that ends in '.js', a script tag is included that
references the specified path. This approach can be used to point
the resulting HTML file to an alternative CDN or local bundle.
If False, no script tag referencing plotly.js is included. This is
useful when the resulting div string will be placed inside an HTML
document that already loads plotly.js. This option is not advised
when full_html=True as it will result in a non-functional html file.
include_mathjax: bool or string (default False)
Specifies how the MathJax.js library is included in the output html
div string. MathJax is required in order to display labels
with LaTeX typesetting.
If False, no script tag referencing MathJax.js will be included in the
output.
If 'cdn', a script tag that references a MathJax CDN location will be
included in the output. HTML div strings generated with this option
will be able to display LaTeX typesetting as long as internet access
is available.
If a string that ends in '.js', a script tag is included that
references the specified path. This approach can be used to point the
resulting HTML div string to an alternative CDN.
post_script: str or list or None (default None)
JavaScript snippet(s) to be included in the resulting div just after
plot creation. The string(s) may include '{plot_id}' placeholders
that will then be replaced by the `id` of the div element that the
plotly.js figure is associated with. One application for this script
is to install custom plotly.js event handlers.
full_html: bool (default True)
If True, produce a string containing a complete HTML document
starting with an <html> tag. If False, produce a string containing
a single <div> element.
animation_opts: dict or None (default None)
dict of custom animation parameters to be passed to the function
Plotly.animate in Plotly.js. See
https://github.com/plotly/plotly.js/blob/master/src/plots/animation_attributes.js
for available options. Has no effect if the figure does not contain
frames, or auto_play is False.
default_width, default_height: number or str (default '100%')
The default figure width/height to use if the provided figure does not
specify its own layout.width/layout.height property. May be
specified in pixels as an integer (e.g. 500), or as a css width style
string (e.g. '500px', '100%').
validate: bool (default True)
True if the figure should be validated before being converted to
JSON, False otherwise.
div_id: str (default None)
If provided, this is the value of the id attribute of the div tag. If None, the
id attribute is a UUID.
Returns
-------
str
Representation of figure as an HTML div string
"""
| /usr/src/app/target_test_cases/failed_tests__html.to_html.txt | def to_html(
fig,
config=None,
auto_play=True,
include_plotlyjs=True,
include_mathjax=False,
post_script=None,
full_html=True,
animation_opts=None,
default_width="100%",
default_height="100%",
validate=True,
div_id=None,
):
"""
Convert a figure to an HTML string representation.
Parameters
----------
fig:
Figure object or dict representing a figure
config: dict or None (default None)
Plotly.js figure config options
auto_play: bool (default=True)
Whether to automatically start the animation sequence on page load
if the figure contains frames. Has no effect if the figure does not
contain frames.
include_plotlyjs: bool or string (default True)
Specifies how the plotly.js library is included/loaded in the output
div string.
If True, a script tag containing the plotly.js source code (~3MB)
is included in the output. HTML files generated with this option are
fully self-contained and can be used offline.
If 'cdn', a script tag that references the plotly.js CDN is included
in the output. The url used is versioned to match the bundled plotly.js.
HTML files generated with this option are about 3MB smaller than those
generated with include_plotlyjs=True, but they require an active
internet connection in order to load the plotly.js library.
If 'directory', a script tag is included that references an external
plotly.min.js bundle that is assumed to reside in the same
directory as the HTML file.
If 'require', Plotly.js is loaded using require.js. This option
assumes that require.js is globally available and that it has been
globally configured to know how to find Plotly.js as 'plotly'.
This option is not advised when full_html=True as it will result
in a non-functional html file.
If a string that ends in '.js', a script tag is included that
references the specified path. This approach can be used to point
the resulting HTML file to an alternative CDN or local bundle.
If False, no script tag referencing plotly.js is included. This is
useful when the resulting div string will be placed inside an HTML
document that already loads plotly.js. This option is not advised
when full_html=True as it will result in a non-functional html file.
include_mathjax: bool or string (default False)
Specifies how the MathJax.js library is included in the output html
div string. MathJax is required in order to display labels
with LaTeX typesetting.
If False, no script tag referencing MathJax.js will be included in the
output.
If 'cdn', a script tag that references a MathJax CDN location will be
included in the output. HTML div strings generated with this option
will be able to display LaTeX typesetting as long as internet access
is available.
If a string that ends in '.js', a script tag is included that
references the specified path. This approach can be used to point the
resulting HTML div string to an alternative CDN.
post_script: str or list or None (default None)
JavaScript snippet(s) to be included in the resulting div just after
plot creation. The string(s) may include '{plot_id}' placeholders
that will then be replaced by the `id` of the div element that the
plotly.js figure is associated with. One application for this script
is to install custom plotly.js event handlers.
full_html: bool (default True)
If True, produce a string containing a complete HTML document
starting with an <html> tag. If False, produce a string containing
a single <div> element.
animation_opts: dict or None (default None)
dict of custom animation parameters to be passed to the function
Plotly.animate in Plotly.js. See
https://github.com/plotly/plotly.js/blob/master/src/plots/animation_attributes.js
for available options. Has no effect if the figure does not contain
frames, or auto_play is False.
default_width, default_height: number or str (default '100%')
The default figure width/height to use if the provided figure does not
specify its own layout.width/layout.height property. May be
specified in pixels as an integer (e.g. 500), or as a css width style
string (e.g. '500px', '100%').
validate: bool (default True)
True if the figure should be validated before being converted to
JSON, False otherwise.
div_id: str (default None)
If provided, this is the value of the id attribute of the div tag. If None, the
id attribute is a UUID.
Returns
-------
str
Representation of figure as an HTML div string
"""
from plotly.io.json import to_json_plotly
# ## Validate figure ##
fig_dict = validate_coerce_fig_to_dict(fig, validate)
# ## Generate div id ##
plotdivid = div_id or str(uuid.uuid4())
# ## Serialize figure ##
jdata = to_json_plotly(fig_dict.get("data", []))
jlayout = to_json_plotly(fig_dict.get("layout", {}))
if fig_dict.get("frames", None):
jframes = to_json_plotly(fig_dict.get("frames", []))
else:
jframes = None
# ## Serialize figure config ##
config = _get_jconfig(config)
# Set responsive
config.setdefault("responsive", True)
# Get div width/height
layout_dict = fig_dict.get("layout", {})
template_dict = fig_dict.get("layout", {}).get("template", {}).get("layout", {})
div_width = layout_dict.get("width", template_dict.get("width", default_width))
div_height = layout_dict.get("height", template_dict.get("height", default_height))
# Add 'px' suffix to numeric widths
try:
float(div_width)
except (ValueError, TypeError):
pass
else:
div_width = str(div_width) + "px"
try:
float(div_height)
except (ValueError, TypeError):
pass
else:
div_height = str(div_height) + "px"
# ## Get platform URL ##
if config.get("showLink", False) or config.get("showSendToCloud", False):
# Figure is going to include a Chart Studio link or send-to-cloud button,
# So we need to configure the PLOTLYENV.BASE_URL property
base_url_line = """
window.PLOTLYENV.BASE_URL='{plotly_platform_url}';\
""".format(
plotly_platform_url=config.get("plotlyServerURL", "https://plot.ly")
)
else:
# Figure is not going to include a Chart Studio link or send-to-cloud button,
# In this case we don't want https://plot.ly to show up anywhere in the HTML
# output
config.pop("plotlyServerURL", None)
config.pop("linkText", None)
config.pop("showLink", None)
base_url_line = ""
# ## Build script body ##
# This is the part that actually calls Plotly.js
# build post script snippet(s)
then_post_script = ""
if post_script:
if not isinstance(post_script, (list, tuple)):
post_script = [post_script]
for ps in post_script:
then_post_script += """.then(function(){{
{post_script}
}})""".format(
post_script=ps.replace("{plot_id}", plotdivid)
)
then_addframes = ""
then_animate = ""
if jframes:
then_addframes = """.then(function(){{
Plotly.addFrames('{id}', {frames});
}})""".format(
id=plotdivid, frames=jframes
)
if auto_play:
if animation_opts:
animation_opts_arg = ", " + _json.dumps(animation_opts)
else:
animation_opts_arg = ""
then_animate = """.then(function(){{
Plotly.animate('{id}', null{animation_opts});
}})""".format(
id=plotdivid, animation_opts=animation_opts_arg
)
# Serialize config dict to JSON
jconfig = _json.dumps(config)
script = """\
if (document.getElementById("{id}")) {{\
Plotly.newPlot(\
"{id}",\
{data},\
{layout},\
{config}\
){then_addframes}{then_animate}{then_post_script}\
}}""".format(
id=plotdivid,
data=jdata,
layout=jlayout,
config=jconfig,
then_addframes=then_addframes,
then_animate=then_animate,
then_post_script=then_post_script,
)
# ## Handle loading/initializing plotly.js ##
include_plotlyjs_orig = include_plotlyjs
if isinstance(include_plotlyjs, str):
include_plotlyjs = include_plotlyjs.lower()
# Start/end of requirejs block (if any)
require_start = ""
require_end = ""
# Init and load
load_plotlyjs = ""
# Init plotlyjs. This block needs to run before plotly.js is loaded in
# order for MathJax configuration to work properly
if include_plotlyjs == "require":
require_start = 'require(["plotly"], function(Plotly) {'
require_end = "});"
elif include_plotlyjs == "cdn":
load_plotlyjs = """\
{win_config}
<script charset="utf-8" src="{cdn_url}"></script>\
""".format(
win_config=_window_plotly_config, cdn_url=plotly_cdn_url()
)
elif include_plotlyjs == "directory":
load_plotlyjs = """\
{win_config}
<script charset="utf-8" src="plotly.min.js"></script>\
""".format(
win_config=_window_plotly_config
)
elif isinstance(include_plotlyjs, str) and include_plotlyjs.endswith(".js"):
load_plotlyjs = """\
{win_config}
<script charset="utf-8" src="{url}"></script>\
""".format(
win_config=_window_plotly_config, url=include_plotlyjs_orig
)
elif include_plotlyjs:
load_plotlyjs = """\
{win_config}
<script type="text/javascript">{plotlyjs}</script>\
""".format(
win_config=_window_plotly_config, plotlyjs=get_plotlyjs()
)
# ## Handle loading/initializing MathJax ##
include_mathjax_orig = include_mathjax
if isinstance(include_mathjax, str):
include_mathjax = include_mathjax.lower()
mathjax_template = """\
<script src="{url}?config=TeX-AMS-MML_SVG"></script>"""
if include_mathjax == "cdn":
mathjax_script = (
mathjax_template.format(
url=(
"https://cdnjs.cloudflare.com" "/ajax/libs/mathjax/2.7.5/MathJax.js"
)
)
+ _mathjax_config
)
elif isinstance(include_mathjax, str) and include_mathjax.endswith(".js"):
mathjax_script = (
mathjax_template.format(url=include_mathjax_orig) + _mathjax_config
)
elif not include_mathjax:
mathjax_script = ""
else:
raise ValueError(
"""\
Invalid value of type {typ} received as the include_mathjax argument
Received value: {val}
include_mathjax may be specified as False, 'cdn', or a string ending with '.js'
""".format(
typ=type(include_mathjax), val=repr(include_mathjax)
)
)
plotly_html_div = """\
<div>\
{mathjax_script}\
{load_plotlyjs}\
<div id="{id}" class="plotly-graph-div" \
style="height:{height}; width:{width};"></div>\
<script type="text/javascript">\
{require_start}\
window.PLOTLYENV=window.PLOTLYENV || {{}};{base_url_line}\
{script};\
{require_end}\
</script>\
</div>""".format(
mathjax_script=mathjax_script,
load_plotlyjs=load_plotlyjs,
id=plotdivid,
width=div_width,
height=div_height,
base_url_line=base_url_line,
require_start=require_start,
script=script,
require_end=require_end,
).strip()
if full_html:
return """\
<html>
<head><meta charset="utf-8" /></head>
<body>
{div}
</body>
</html>""".format(
div=plotly_html_div
)
else:
return plotly_html_div
| _html.to_html | Self-Contained |
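To ground the options documented above, a minimal embedding sketch follows; the div id and the console.log payload are illustrative choices, not defaults.
# Minimal sketch: render a figure as an embeddable <div> that loads
# plotly.js from the CDN and runs a post_script after plot creation.
import plotly.graph_objects as go
import plotly.io as pio

fig = go.Figure(go.Bar(y=[2, 1, 3]))
html = pio.to_html(
    fig,
    full_html=False,         # bare <div> rather than a complete document
    include_plotlyjs="cdn",  # ~3MB smaller output; needs internet to render
    post_script="console.log('rendered {plot_id}');",  # '{plot_id}' is replaced
    div_id="my-figure",      # fixed id instead of a random UUID
)
print(html[:80])  # the string can now be embedded in a larger page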
plotly.py | 16 | packages/python/plotly/plotly/io/_html.py | def write_html(
fig,
file,
config=None,
auto_play=True,
include_plotlyjs=True,
include_mathjax=False,
post_script=None,
full_html=True,
animation_opts=None,
validate=True,
default_width="100%",
default_height="100%",
auto_open=False,
div_id=None,
):
"""
Write a figure to an HTML file representation
Parameters
----------
fig:
Figure object or dict representing a figure
file: str or writeable
A string representing a local file path or a writeable object
(e.g. a pathlib.Path object or an open file descriptor)
config: dict or None (default None)
Plotly.js figure config options
auto_play: bool (default=True)
Whether to automatically start the animation sequence on page load
if the figure contains frames. Has no effect if the figure does not
contain frames.
include_plotlyjs: bool or string (default True)
Specifies how the plotly.js library is included/loaded in the output
div string.
If True, a script tag containing the plotly.js source code (~3MB)
is included in the output. HTML files generated with this option are
fully self-contained and can be used offline.
If 'cdn', a script tag that references the plotly.js CDN is included
in the output. The url used is versioned to match the bundled plotly.js.
HTML files generated with this option are about 3MB smaller than those
generated with include_plotlyjs=True, but they require an active
internet connection in order to load the plotly.js library.
If 'directory', a script tag is included that references an external
plotly.min.js bundle that is assumed to reside in the same
directory as the HTML file. If `file` is a string to a local file
path and `full_html` is True, then the plotly.min.js bundle is copied
into the directory of the resulting HTML file. If a file named
plotly.min.js already exists in the output directory then this file
is left unmodified and no copy is performed. HTML files generated
with this option can be used offline, but they require a copy of
the plotly.min.js bundle in the same directory. This option is
useful when many figures will be saved as HTML files in the same
directory because the plotly.js source code will be included only
once per output directory, rather than once per output file.
If 'require', Plotly.js is loaded using require.js. This option
assumes that require.js is globally available and that it has been
globally configured to know how to find Plotly.js as 'plotly'.
This option is not advised when full_html=True as it will result
in a non-functional html file.
If a string that ends in '.js', a script tag is included that
references the specified path. This approach can be used to point
the resulting HTML file to an alternative CDN or local bundle.
If False, no script tag referencing plotly.js is included. This is
useful when the resulting div string will be placed inside an HTML
document that already loads plotly.js. This option is not advised
when full_html=True as it will result in a non-functional html file.
include_mathjax: bool or string (default False)
Specifies how the MathJax.js library is included in the output html
div string. MathJax is required in order to display labels
with LaTeX typesetting.
If False, no script tag referencing MathJax.js will be included in the
output.
If 'cdn', a script tag that references a MathJax CDN location will be
included in the output. HTML div strings generated with this option
will be able to display LaTeX typesetting as long as internet access
is available.
If a string that ends in '.js', a script tag is included that
references the specified path. This approach can be used to point the
resulting HTML div string to an alternative CDN.
post_script: str or list or None (default None)
JavaScript snippet(s) to be included in the resulting div just after
plot creation. The string(s) may include '{plot_id}' placeholders
that will then be replaced by the `id` of the div element that the
plotly.js figure is associated with. One application for this script
is to install custom plotly.js event handlers.
full_html: bool (default True)
If True, produce a string containing a complete HTML document
starting with an <html> tag. If False, produce a string containing
a single <div> element.
animation_opts: dict or None (default None)
dict of custom animation parameters to be passed to the function
Plotly.animate in Plotly.js. See
https://github.com/plotly/plotly.js/blob/master/src/plots/animation_attributes.js
for available options. Has no effect if the figure does not contain
frames, or auto_play is False.
default_width, default_height: number or str (default '100%')
The default figure width/height to use if the provided figure does not
specify its own layout.width/layout.height property. May be
specified in pixels as an integer (e.g. 500), or as a css width style
string (e.g. '500px', '100%').
validate: bool (default True)
True if the figure should be validated before being converted to
JSON, False otherwise.
auto_open: bool (default False)
If True, open the saved file in a web browser after saving.
This argument only applies if `full_html` is True.
div_id: str (default None)
If provided, this is the value of the id attribute of the div tag. If None, the
id attribute is a UUID.
Returns
-------
None
"""
| /usr/src/app/target_test_cases/failed_tests__html.write_html.txt | def write_html(
fig,
file,
config=None,
auto_play=True,
include_plotlyjs=True,
include_mathjax=False,
post_script=None,
full_html=True,
animation_opts=None,
validate=True,
default_width="100%",
default_height="100%",
auto_open=False,
div_id=None,
):
"""
Write a figure to an HTML file representation
Parameters
----------
fig:
Figure object or dict representing a figure
file: str or writeable
A string representing a local file path or a writeable object
(e.g. a pathlib.Path object or an open file descriptor)
config: dict or None (default None)
Plotly.js figure config options
auto_play: bool (default=True)
Whether to automatically start the animation sequence on page load
if the figure contains frames. Has no effect if the figure does not
contain frames.
include_plotlyjs: bool or string (default True)
Specifies how the plotly.js library is included/loaded in the output
div string.
If True, a script tag containing the plotly.js source code (~3MB)
is included in the output. HTML files generated with this option are
fully self-contained and can be used offline.
If 'cdn', a script tag that references the plotly.js CDN is included
in the output. The url used is versioned to match the bundled plotly.js.
HTML files generated with this option are about 3MB smaller than those
generated with include_plotlyjs=True, but they require an active
internet connection in order to load the plotly.js library.
If 'directory', a script tag is included that references an external
plotly.min.js bundle that is assumed to reside in the same
directory as the HTML file. If `file` is a string to a local file
path and `full_html` is True, then the plotly.min.js bundle is copied
into the directory of the resulting HTML file. If a file named
plotly.min.js already exists in the output directory then this file
is left unmodified and no copy is performed. HTML files generated
with this option can be used offline, but they require a copy of
the plotly.min.js bundle in the same directory. This option is
useful when many figures will be saved as HTML files in the same
directory because the plotly.js source code will be included only
once per output directory, rather than once per output file.
If 'require', Plotly.js is loaded using require.js. This option
assumes that require.js is globally available and that it has been
globally configured to know how to find Plotly.js as 'plotly'.
This option is not advised when full_html=True as it will result
in a non-functional html file.
If a string that ends in '.js', a script tag is included that
references the specified path. This approach can be used to point
the resulting HTML file to an alternative CDN or local bundle.
If False, no script tag referencing plotly.js is included. This is
useful when the resulting div string will be placed inside an HTML
document that already loads plotly.js. This option is not advised
when full_html=True as it will result in a non-functional html file.
include_mathjax: bool or string (default False)
Specifies how the MathJax.js library is included in the output html
div string. MathJax is required in order to display labels
with LaTeX typesetting.
If False, no script tag referencing MathJax.js will be included in the
output.
If 'cdn', a script tag that references a MathJax CDN location will be
included in the output. HTML div strings generated with this option
will be able to display LaTeX typesetting as long as internet access
is available.
If a string that ends in '.js', a script tag is included that
references the specified path. This approach can be used to point the
resulting HTML div string to an alternative CDN.
post_script: str or list or None (default None)
JavaScript snippet(s) to be included in the resulting div just after
plot creation. The string(s) may include '{plot_id}' placeholders
that will then be replaced by the `id` of the div element that the
plotly.js figure is associated with. One application for this script
is to install custom plotly.js event handlers.
full_html: bool (default True)
If True, produce a string containing a complete HTML document
starting with an <html> tag. If False, produce a string containing
a single <div> element.
animation_opts: dict or None (default None)
dict of custom animation parameters to be passed to the function
Plotly.animate in Plotly.js. See
https://github.com/plotly/plotly.js/blob/master/src/plots/animation_attributes.js
for available options. Has no effect if the figure does not contain
frames, or auto_play is False.
default_width, default_height: number or str (default '100%')
The default figure width/height to use if the provided figure does not
specify its own layout.width/layout.height property. May be
specified in pixels as an integer (e.g. 500), or as a css width style
string (e.g. '500px', '100%').
validate: bool (default True)
True if the figure should be validated before being converted to
JSON, False otherwise.
auto_open: bool (default True)
If True, open the saved file in a web browser after saving.
This argument only applies if `full_html` is True.
div_id: str (default None)
If provided, this is the value of the id attribute of the div tag. If None, the
id attribute is a UUID.
Returns
-------
str
Representation of figure as an HTML div string
"""
    # Build HTML string
    html_str = to_html(
        fig,
        config=config,
        auto_play=auto_play,
        include_plotlyjs=include_plotlyjs,
        include_mathjax=include_mathjax,
        post_script=post_script,
        full_html=full_html,
        animation_opts=animation_opts,
        default_width=default_width,
        default_height=default_height,
        validate=validate,
        div_id=div_id,
    )
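
    # NOTE: everything below is file handling. The function relies on
    # module-level imports not shown in this excerpt: Path (from pathlib),
    # webbrowser, and the to_html / get_plotlyjs helpers.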
    # Check if file is a string
    if isinstance(file, str):
        # Use the standard pathlib constructor to make a pathlib object.
        path = Path(file)
    elif isinstance(file, Path):
        # `file` is already a pathlib.Path object.
        path = file
    else:
        # We could not make a pathlib object out of `file`. Either `file` is an
        # open file descriptor with a `write()` method or it's an invalid object.
        path = None

    # Write HTML string
    if path is not None:
        # To use a different file encoding, pass an open file descriptor instead
        path.write_text(html_str, encoding="utf-8")
    else:
        file.write(html_str)
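
    # In 'directory' mode, ensure a plotly.min.js bundle sits next to the
    # output file; it is written at most once so many figures can share it.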
    # Check if we should copy plotly.min.js to output directory
    if path is not None and full_html and include_plotlyjs == "directory":
        bundle_path = path.parent / "plotly.min.js"
        if not bundle_path.exists():
            bundle_path.write_text(get_plotlyjs(), encoding="utf-8")
    # Handle auto_open
    if path is not None and full_html and auto_open:
        url = path.absolute().as_uri()
        webbrowser.open(url)
| _html.write_html | File-Level |
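The 'directory' mode documented above deserves a sketch of its own: when several figures are written into one folder, a single plotly.min.js bundle is created on the first write and every page references it. Directory and file names here are hypothetical.

from pathlib import Path

import plotly.graph_objects as go
import plotly.io as pio

out_dir = Path("report")  # hypothetical output directory
out_dir.mkdir(exist_ok=True)

figures = {
    "line.html": go.Figure(go.Scatter(y=[1, 3, 2])),
    "bars.html": go.Figure(go.Bar(y=[2, 1, 3])),
}

# Each page gets a script tag pointing at plotly.min.js; the bundle itself is
# copied into the directory once and reused by subsequent writes.
for name, fig in figures.items():
    pio.write_html(fig, file=out_dir / name, include_plotlyjs="directory")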
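The '{plot_id}' placeholder in post_script lets a snippet locate its own graph div; per the docstring above, it is replaced with the id of the generated <div>. The click handler below is an illustrative assumption, not part of the source.

import plotly.graph_objects as go
import plotly.io as pio

fig = go.Figure(go.Scatter(x=[1, 2, 3], y=[2, 1, 3]))

# The placeholder is substituted before the snippet is embedded in the page.
snippet = (
    "var gd = document.getElementById('{plot_id}');"
    " gd.on('plotly_click', (d) => console.log(d));"
)

pio.write_html(fig, file="clickable.html", post_script=snippet)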
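Finally, because `file` may be any object with a write() method, the HTML can be captured in memory rather than written to disk, exercising the `path = None` branch of the implementation above:

import io

import plotly.graph_objects as go
import plotly.io as pio

fig = go.Figure(go.Bar(y=[1, 2]))

# Any writeable object works; here the HTML fragment lands in a StringIO.
buf = io.StringIO()
pio.write_html(fig, file=buf, include_plotlyjs=False, full_html=False)
html_fragment = buf.getvalue()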